author     Steve Capper <steve.capper@arm.com>   2019-08-07 16:55:18 +0100
committer  Will Deacon <will@kernel.org>         2019-08-09 11:17:21 +0100
commit     5383cc6efed13784ddb3cff2cc183b6b8c50c8db
tree       e6a050f886096231108a344f6cad53bfa362d169 /arch/arm64
parent     90ec95cda91a021d82351c976896a63aa364ebf1
arm64: mm: Introduce vabits_actual
In order to support 52-bit kernel virtual addresses detected at boot time,
the kernel needs to know the actual number of VA bits in use. This commit
introduces a new variable, vabits_actual, and employs it for the KVM
hypervisor layout, KASAN, fault handling and phys-to/from-virt translation,
in places that would otherwise be compile-time constants.
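For illustration, a minimal standalone sketch (userspace C, not kernel code)
of the arithmetic that becomes runtime-variable: _VA_START() below mirrors
the macro this patch references (defined earlier in the same series), and
vabits_actual stands in for the boot-time detected value.

#include <stdio.h>
#include <stdint.h>

/* First kernel virtual address for a given number of VA bits: the
 * kernel claims the upper half of the VA space. */
#define _VA_START(va)	(0xffffffffffffffffULL - (1ULL << ((va) - 1)) + 1)

int main(void)
{
	/* With a compile-time VA_BITS this is a constant; with 52-bit
	 * support it must be derived from the detected vabits_actual. */
	uint64_t vabits_actual = 48;	/* 52 on hardware that supports it */

	printf("VA_START = 0x%016llx\n",
	       (unsigned long long)_VA_START(vabits_actual));
	return 0;
}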
To maintain performance in phys_to_virt(), another variable,
physvirt_offset, is introduced; it caches PHYS_OFFSET - PAGE_OFFSET at boot
so that the translation reduces to a single subtraction.
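A minimal sketch of why this works, again as standalone userspace C with
made-up example values: the old __phys_to_virt() subtracted PHYS_OFFSET and
then OR-ed in PAGE_OFFSET (valid as long as the low offset bits never
overlap the PAGE_OFFSET bits), which is equivalent to a single subtraction
of physvirt_offset = PHYS_OFFSET - PAGE_OFFSET.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t page_offset = 0xffff000000000000ULL;	/* example PAGE_OFFSET */
	uint64_t phys_offset = 0x0000000080000000ULL;	/* example PHYS_OFFSET */
	int64_t  physvirt_offset = (int64_t)(phys_offset - page_offset);

	uint64_t phys = 0x0000000080123000ULL;		/* some RAM address */

	/* Old translation: subtract the base, relocate into the linear map. */
	uint64_t virt_old = (phys - phys_offset) | page_offset;

	/* New translation: one subtraction against the boot-time offset. */
	uint64_t virt_new = phys - (uint64_t)physvirt_offset;

	assert(virt_old == virt_new);
	return 0;
}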
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Steve Capper <steve.capper@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
Diffstat (limited to 'arch/arm64')
-rw-r--r--	arch/arm64/include/asm/kasan.h       |  2
-rw-r--r--	arch/arm64/include/asm/memory.h      | 11
-rw-r--r--	arch/arm64/include/asm/mmu_context.h |  2
-rw-r--r--	arch/arm64/kernel/head.S             |  5
-rw-r--r--	arch/arm64/kvm/va_layout.c           | 14
-rw-r--r--	arch/arm64/mm/fault.c                |  4
-rw-r--r--	arch/arm64/mm/init.c                 |  7
-rw-r--r--	arch/arm64/mm/mmu.c                  |  3
8 files changed, 31 insertions, 17 deletions
diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h
index 10d2add842da..b0dc4abc3589 100644
--- a/arch/arm64/include/asm/kasan.h
+++ b/arch/arm64/include/asm/kasan.h
@@ -31,7 +31,7 @@
  * (1ULL << (64 - KASAN_SHADOW_SCALE_SHIFT))
  */
 #define _KASAN_SHADOW_START(va)	(KASAN_SHADOW_END - (1UL << ((va) - KASAN_SHADOW_SCALE_SHIFT)))
-#define KASAN_SHADOW_START	_KASAN_SHADOW_START(VA_BITS)
+#define KASAN_SHADOW_START	_KASAN_SHADOW_START(vabits_actual)
 
 void kasan_init(void);
 void kasan_copy_shadow(pgd_t *pgdir);
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 79e75e45d560..364635b8370a 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -37,8 +37,6 @@
  * VA_START - the first kernel virtual address.
  */
 #define VA_BITS			(CONFIG_ARM64_VA_BITS)
-#define VA_START		(UL(0xffffffffffffffff) - \
-	(UL(1) << (VA_BITS - 1)) + 1)
 #define PAGE_OFFSET		(UL(0xffffffffffffffff) - \
 	(UL(1) << VA_BITS) + 1)
 #define KIMAGE_VADDR		(MODULES_END)
@@ -166,10 +164,13 @@
 #endif
 
 #ifndef __ASSEMBLY__
+extern u64 vabits_actual;
+#define VA_START		(_VA_START(vabits_actual))
 
 #include <linux/bitops.h>
 #include <linux/mmdebug.h>
 
+extern s64 physvirt_offset;
 extern s64 memstart_addr;
 /* PHYS_OFFSET - the physical address of the start of memory. */
 #define PHYS_OFFSET		({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
@@ -240,9 +241,9 @@ static inline const void *__tag_set(const void *addr, u8 tag)
  * space. Testing the top bit for the start of the region is a
  * sufficient check.
  */
-#define __is_lm_address(addr)	(!((addr) & BIT(VA_BITS - 1)))
+#define __is_lm_address(addr)	(!((addr) & BIT(vabits_actual - 1)))
 
-#define __lm_to_phys(addr)	(((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
+#define __lm_to_phys(addr)	(((addr) + physvirt_offset))
 #define __kimg_to_phys(addr)	((addr) - kimage_voffset)
 
 #define __virt_to_phys_nodebug(x) ({					\
@@ -261,7 +262,7 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
 #define __phys_addr_symbol(x)	__pa_symbol_nodebug(x)
 #endif
 
-#define __phys_to_virt(x)	((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
+#define __phys_to_virt(x)	((unsigned long)((x) - physvirt_offset))
 #define __phys_to_kimg(x)	((unsigned long)((x) + kimage_voffset))
 
 /*
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 7ed0adb187a8..670003a55d28 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -95,7 +95,7 @@ static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
 	isb();
 }
 
-#define cpu_set_default_tcr_t0sz()	__cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS))
+#define cpu_set_default_tcr_t0sz()	__cpu_set_tcr_t0sz(TCR_T0SZ(vabits_actual))
 #define cpu_set_idmap_tcr_t0sz()	__cpu_set_tcr_t0sz(idmap_t0sz)
 
 /*
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index ac58c69993ec..6dc7349868d9 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -321,6 +321,11 @@ __create_page_tables:
 	dmb	sy
 	dc	ivac, x6		// Invalidate potentially stale cache line
 
+	adr_l	x6, vabits_actual
+	str	x5, [x6]
+	dmb	sy
+	dc	ivac, x6		// Invalidate potentially stale cache line
+
 	/*
 	 * VA_BITS may be too small to allow for an ID mapping to be created
 	 * that covers system RAM if that is located sufficiently high in the
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index acd8084f1f2c..2cf7d4b606c3 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -29,25 +29,25 @@ static void compute_layout(void)
 	int kva_msb;
 
 	/* Where is my RAM region? */
-	hyp_va_msb = idmap_addr & BIT(VA_BITS - 1);
-	hyp_va_msb ^= BIT(VA_BITS - 1);
+	hyp_va_msb = idmap_addr & BIT(vabits_actual - 1);
+	hyp_va_msb ^= BIT(vabits_actual - 1);
 
 	kva_msb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
 			(u64)(high_memory - 1));
 
-	if (kva_msb == (VA_BITS - 1)) {
+	if (kva_msb == (vabits_actual - 1)) {
 		/*
 		 * No space in the address, let's compute the mask so
-		 * that it covers (VA_BITS - 1) bits, and the region
+		 * that it covers (vabits_actual - 1) bits, and the region
 		 * bit. The tag stays set to zero.
 		 */
-		va_mask = BIT(VA_BITS - 1) - 1;
+		va_mask = BIT(vabits_actual - 1) - 1;
 		va_mask |= hyp_va_msb;
 	} else {
 		/*
 		 * We do have some free bits to insert a random tag.
 		 * Hyp VAs are now created from kernel linear map VAs
-		 * using the following formula (with V == VA_BITS):
+		 * using the following formula (with V == vabits_actual):
 		 *
 		 *  63 ... V |     V-1    | V-2 .. tag_lsb | tag_lsb - 1 .. 0
 		 *  ---------------------------------------------------------
@@ -55,7 +55,7 @@ static void compute_layout(void)
 		 */
 		tag_lsb = kva_msb;
 		va_mask = GENMASK_ULL(tag_lsb - 1, 0);
-		tag_val = get_random_long() & GENMASK_ULL(VA_BITS - 2, tag_lsb);
+		tag_val = get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
 		tag_val |= hyp_va_msb;
 		tag_val >>= tag_lsb;
 	}
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index cfd65b63f36f..6b195871769a 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -138,9 +138,9 @@ static void show_pte(unsigned long addr)
 		return;
 	}
 
-	pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgdp=%016lx\n",
+	pr_alert("%s pgtable: %luk pages, %llu-bit VAs, pgdp=%016lx\n",
 		 mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
-		 mm == &init_mm ? VA_BITS : (int)vabits_user,
+		 mm == &init_mm ? vabits_actual : (int)vabits_user,
 		 (unsigned long)virt_to_phys(mm->pgd));
 	pgdp = pgd_offset(mm, addr);
 	pgd = READ_ONCE(*pgdp);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 62927ed02229..e752f46d430e 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -50,6 +50,9 @@
 s64 memstart_addr __ro_after_init = -1;
 EXPORT_SYMBOL(memstart_addr);
 
+s64 physvirt_offset __ro_after_init;
+EXPORT_SYMBOL(physvirt_offset);
+
 phys_addr_t arm64_dma_phys_limit __ro_after_init;
 
 #ifdef CONFIG_KEXEC_CORE
@@ -301,7 +304,7 @@ static void __init fdt_enforce_memory_region(void)
 
 void __init arm64_memblock_init(void)
 {
-	const s64 linear_region_size = BIT(VA_BITS - 1);
+	const s64 linear_region_size = BIT(vabits_actual - 1);
 
 	/* Handle linux,usable-memory-range property */
 	fdt_enforce_memory_region();
@@ -315,6 +318,8 @@ void __init arm64_memblock_init(void)
 	memstart_addr = round_down(memblock_start_of_DRAM(),
 				   ARM64_MEMSTART_ALIGN);
 
+	physvirt_offset = PHYS_OFFSET - PAGE_OFFSET;
+
 	/*
 	 * Remove the memory that we will not be able to cover with the
 	 * linear mapping. Take care not to clip the kernel which may be
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 1d4247f9a496..07b30e6d17f8 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -43,6 +43,9 @@ u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
 u64 vabits_user __ro_after_init;
 EXPORT_SYMBOL(vabits_user);
 
+u64 __section(".mmuoff.data.write") vabits_actual;
+EXPORT_SYMBOL(vabits_actual);
+
 u64 kimage_voffset __ro_after_init;
 EXPORT_SYMBOL(kimage_voffset);