From bb7f380849f8c8722ea383ec5867a79d365d4574 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 17 May 2016 09:02:51 +0200 Subject: powerpc/8xx: Don't use page table for linear memory space Instead of using the first level page table to define mappings for the linear memory space, we can use direct mapping from the TLB handling routines. This has several advantages: * No need to read the tables at each TLB miss * No issue in 16k pages mode where the 1st level table maps 64 Mbytes The size of the available linear space is known at system startup. In order to avoid data access at each TLB miss to know the memory size, the TLB routine is patched at startup with the proper size This patch provides a 10%-15% improvement of TLB miss handling for kernel addresses Signed-off-by: Christophe Leroy Signed-off-by: Scott Wood --- arch/powerpc/mm/8xx_mmu.c | 56 +++++++++++++++-------------------------------- 1 file changed, 18 insertions(+), 38 deletions(-) (limited to 'arch/powerpc/mm/8xx_mmu.c') diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c index 220772579113..996dfaa352e0 100644 --- a/arch/powerpc/mm/8xx_mmu.c +++ b/arch/powerpc/mm/8xx_mmu.c @@ -58,9 +58,7 @@ void __init MMU_init_hw(void) /* Nothing to do for the time being but keep it similar to other PPC */ } -#define LARGE_PAGE_SIZE_4M (1<<22) #define LARGE_PAGE_SIZE_8M (1<<23) -#define LARGE_PAGE_SIZE_64M (1<<26) static void mmu_mapin_immr(void) { @@ -77,52 +75,33 @@ static void mmu_mapin_immr(void) #ifndef CONFIG_PIN_TLB extern unsigned int DTLBMiss_jmp; #endif +extern unsigned int DTLBMiss_cmp, FixupDAR_cmp; -unsigned long __init mmu_mapin_ram(unsigned long top) +void mmu_patch_cmp_limit(unsigned int *addr, unsigned long mapped) { - unsigned long v, s, mapped; - phys_addr_t p; + unsigned int instr = *addr; - v = KERNELBASE; - p = 0; - s = top; + instr &= 0xffff0000; + instr |= (unsigned long)__va(mapped) >> 16; + patch_instruction(addr, instr); +} + +unsigned long __init 
mmu_mapin_ram(unsigned long top) +{ + unsigned long mapped; if (__map_without_ltlbs) { + mapped = 0; mmu_mapin_immr(); #ifndef CONFIG_PIN_TLB patch_instruction(&DTLBMiss_jmp, PPC_INST_NOP); #endif - return 0; - } - -#ifdef CONFIG_PPC_4K_PAGES - while (s >= LARGE_PAGE_SIZE_8M) { - pmd_t *pmdp; - unsigned long val = p | MD_PS8MEG; - - pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v); - *pmdp++ = __pmd(val); - *pmdp++ = __pmd(val + LARGE_PAGE_SIZE_4M); - - v += LARGE_PAGE_SIZE_8M; - p += LARGE_PAGE_SIZE_8M; - s -= LARGE_PAGE_SIZE_8M; + } else { + mapped = top & ~(LARGE_PAGE_SIZE_8M - 1); } -#else /* CONFIG_PPC_16K_PAGES */ - while (s >= LARGE_PAGE_SIZE_64M) { - pmd_t *pmdp; - unsigned long val = p | MD_PS8MEG; - - pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v); - *pmdp++ = __pmd(val); - - v += LARGE_PAGE_SIZE_64M; - p += LARGE_PAGE_SIZE_64M; - s -= LARGE_PAGE_SIZE_64M; - } -#endif - mapped = top - s; + mmu_patch_cmp_limit(&DTLBMiss_cmp, mapped); + mmu_patch_cmp_limit(&FixupDAR_cmp, mapped); /* If the size of RAM is not an exact power of two, we may not * have covered RAM in its entirety with 8 MiB @@ -131,7 +110,8 @@ unsigned long __init mmu_mapin_ram(unsigned long top) * coverage with normal-sized pages (or other reasons) do not * attempt to allocate outside the allowed range. */ - memblock_set_current_limit(mapped); + if (mapped) + memblock_set_current_limit(mapped); return mapped; } -- cgit v1.2.3