author    Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>  2017-01-28 21:18:40 +0530
committer Michael Ellerman <mpe@ellerman.id.au>               2017-01-30 16:34:50 +1100
commit    79270e0a3fd124388a0407f9edbd6ace75eacb69
tree      ab6557f47a9a3f1f68473aca2b13a7f3c189aa4f
parent    10528b9c45cfb9e8f45217ef2f5ef8b876bbd3f5
powerpc/mm/hash: Properly mask the ESID bits when building proto VSID
The proto-VSID is built using both the MMU context id and the effective segment ID (ESID). We should not have overlapping bits between those, as that could result in a VSID collision. The current code misses masking the top bits of the ESID, which means that for kernel addresses we end up using the top 4 bits of the ESID as part of the proto-VSID, which is wrong.

The current code uses the top 4 context values (0x7fffc - 0x7ffff) for the kernel. With those context IDs used for the kernel, we don't run into VSID collisions, because we get the same proto-VSID whether we mask the ESID bits or not. For example:

  ea      = 0xf000000000000000
  context = 0x7ffff

  without masking:
    proto_vsid = (0x7ffff << 6) | (0xf000000000000000 >> 40)
               = 0x1ffffc0 | 0xf00000
               = 0x1ffffc0

  with masking:
    proto_vsid = (0x7ffff << 6) | ((0xf000000000000000 >> 40) & 0x3f)
               = 0x1ffffc0 | (0xf00000 & 0x3f)
               = 0x1ffffc0 | 0
               = 0x1ffffc0

So although there is no bug, the code is still overly subtle, so fix it to save ourselves pain in future.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
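As a quick illustration (not part of the patch), the following standalone C sketch reproduces the arithmetic above for the 1T-segment case. The constant values match the kernel's definitions referenced in the diff below (SID_SHIFT_1T = 40, ESID_BITS_1T = 6); the main() harness, printf and assert are demonstration only.

/*
 * Standalone sketch (not kernel code): reproduces the proto-VSID
 * arithmetic from the commit message for the 1T segment case.
 */
#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

#define SID_SHIFT_1T		40
#define ESID_BITS_1T		6
#define ESID_BITS_1T_MASK	((UINT64_C(1) << ESID_BITS_1T) - 1)

int main(void)
{
	uint64_t ea = UINT64_C(0xf000000000000000);	/* kernel effective address */
	uint64_t context = UINT64_C(0x7ffff);		/* top (kernel) context id */

	/* Old form: the high ESID bits leak into the proto-VSID. */
	uint64_t unmasked = (context << ESID_BITS_1T) | (ea >> SID_SHIFT_1T);

	/* New form: only the low ESID_BITS_1T bits of the ESID are kept. */
	uint64_t masked = (context << ESID_BITS_1T)
			| ((ea >> SID_SHIFT_1T) & ESID_BITS_1T_MASK);

	/*
	 * For the kernel context ids the leaked bits (0xf00000) are already
	 * set by context << ESID_BITS_1T, so both forms give 0x1ffffc0 and
	 * no collision occurs -- which is why this is a cleanup, not a bug fix.
	 */
	printf("unmasked = %#" PRIx64 ", masked = %#" PRIx64 "\n", unmasked, masked);
	assert(unmasked == masked && masked == UINT64_C(0x1ffffc0));
	return 0;
}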
-rw-r--r--  arch/powerpc/include/asm/book3s/64/mmu-hash.h  7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 2e6a823fa502..823015cff149 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -525,6 +525,9 @@ extern void slb_set_size(u16 size);
 #define ESID_BITS		18
 #define ESID_BITS_1T		6
 
+#define ESID_BITS_MASK		((1 << ESID_BITS) - 1)
+#define ESID_BITS_1T_MASK	((1 << ESID_BITS_1T) - 1)
+
 /*
  * 256MB segment
  * The proto-VSID space has 2^(CONTEX_BITS + ESID_BITS) - 1 segments
@@ -660,9 +663,9 @@ static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
 
 	if (ssize == MMU_SEGSIZE_256M)
 		return vsid_scramble((context << ESID_BITS)
-				     | (ea >> SID_SHIFT), 256M);
+				     | ((ea >> SID_SHIFT) & ESID_BITS_MASK), 256M);
 	return vsid_scramble((context << ESID_BITS_1T)
-			     | (ea >> SID_SHIFT_1T), 1T);
+			     | ((ea >> SID_SHIFT_1T) & ESID_BITS_1T_MASK), 1T);
 }
 
 /*