 arch/powerpc/include/asm/book3s/64/hash.h     | 4 ++--
 arch/powerpc/include/asm/book3s/64/mmu-hash.h | 2 +-
 arch/powerpc/mm/copro_fault.c                 | 4 ++--
 arch/powerpc/mm/slb.c                         | 4 ++--
 arch/powerpc/platforms/cell/spu_base.c        | 2 +-
 5 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index 7faa3d7214c0..1d1183048cfd 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -89,7 +89,7 @@
  * Region IDs
  */
 #define USER_REGION_ID		0
-#define KERNEL_REGION_ID	1
+#define LINEAR_MAP_REGION_ID	1
 #define VMALLOC_REGION_ID	NON_LINEAR_REGION_ID(H_VMALLOC_START)
 #define IO_REGION_ID		NON_LINEAR_REGION_ID(H_KERN_IO_START)
 #define VMEMMAP_REGION_ID	NON_LINEAR_REGION_ID(H_VMEMMAP_START)
@@ -120,7 +120,7 @@ static inline int get_region_id(unsigned long ea)
 		return USER_REGION_ID;
 
 	if (ea < H_KERN_VIRT_START)
-		return KERNEL_REGION_ID;
+		return LINEAR_MAP_REGION_ID;
 
 	VM_BUG_ON(id != 0xc);
 	BUILD_BUG_ON(NON_LINEAR_REGION_ID(H_VMALLOC_START) != 2);
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 9a9adbeef070..1e4705516a54 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -817,7 +817,7 @@ static inline unsigned long get_kernel_context(unsigned long ea)
 	 * Depending on Kernel config, kernel region can have one context
 	 * or more.
 	 */
-	if (region_id == KERNEL_REGION_ID) {
+	if (region_id == LINEAR_MAP_REGION_ID) {
 		/*
 		 * We already verified ea to be not beyond the addr limit.
 		 */
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index 9b0321061bc8..f137286740cb 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -129,8 +129,8 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
 		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
 		vsidkey = SLB_VSID_KERNEL;
 		break;
-	case KERNEL_REGION_ID:
-		pr_devel("%s: 0x%llx -- KERNEL_REGION_ID\n", __func__, ea);
+	case LINEAR_MAP_REGION_ID:
+		pr_devel("%s: 0x%llx -- LINEAR_MAP_REGION_ID\n", __func__, ea);
 		psize = mmu_linear_psize;
 		ssize = mmu_kernel_ssize;
 		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 721cb09c9044..89e4531de64b 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -691,7 +691,7 @@ static long slb_allocate_kernel(unsigned long ea, unsigned long id)
 	unsigned long flags;
 	int ssize;
 
-	if (id == KERNEL_REGION_ID) {
+	if (id == LINEAR_MAP_REGION_ID) {
 
 		/* We only support upto MAX_PHYSMEM_BITS */
 		if ((ea & EA_MASK) > (1UL << MAX_PHYSMEM_BITS))
@@ -790,7 +790,7 @@ long do_slb_fault(struct pt_regs *regs, unsigned long ea)
 	 * first class kernel code. But for performance it's probably nicer
 	 * if they go via fast_exception_return too.
 	 */
-	if (id >= KERNEL_REGION_ID) {
+	if (id >= LINEAR_MAP_REGION_ID) {
 		long err;
 #ifdef CONFIG_DEBUG_VM
 		/* Catch recursive kernel SLB faults. */
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 4770cce1bfe2..6646f152d57b 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -224,7 +224,7 @@ static void __spu_kernel_slb(void *addr, struct copro_slb *slb)
 	unsigned long ea = (unsigned long)addr;
 	u64 llp;
 
-	if (get_region_id(ea) == KERNEL_REGION_ID)
+	if (get_region_id(ea) == LINEAR_MAP_REGION_ID)
 		llp = mmu_psize_defs[mmu_linear_psize].sllp;
 	else
 		llp = mmu_psize_defs[mmu_virtual_psize].sllp;
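
For reference, a minimal sketch (not part of the patch) of how the renamed constant reads at a call site. Only get_region_id() and LINEAR_MAP_REGION_ID come from the hunks above; the helper name ea_is_linear_map() is hypothetical, and the surrounding book3s/64 hash definitions are assumed to be in scope:

/*
 * Hypothetical helper, illustrative only. LINEAR_MAP_REGION_ID keeps
 * the old value (1), so this patch is a pure rename: region 1 covers
 * the kernel linear mapping at the bottom of the 0xc range, and the
 * new name says so instead of the ambiguous KERNEL_REGION_ID, which
 * read as if it covered all kernel regions (vmalloc, IO, vmemmap).
 */
static bool ea_is_linear_map(unsigned long ea)
{
	return get_region_id(ea) == LINEAR_MAP_REGION_ID;
}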