author    | Linus Walleij <linus.walleij@linaro.org>           | 2021-08-09 12:57:19 +0100
committer | Russell King (Oracle) <rmk+kernel@armlinux.org.uk> | 2021-08-10 12:17:25 +0100
commit    | 463dbba4d189750c2f576449d0bbb11c5413712e (patch)
tree      | f207401d96165739964822b4230795da11fecb35 /arch/arm/kernel
parent    | e73f0f0ee7541171d89f2e2491130c7771ba58d3 (diff)
ARM: 9104/2: Fix Keystone 2 kernel mapping regression
This fixes a Keystone 2 regression discovered as a side effect of
defining and passing the physical start/end sections of the kernel
to the MMU remapping code.
As Keystone 2 applies an offset to all physical addresses,
including those identified and patched by phys2virt, we fail to
account for this offset in the kernel_sec_start and kernel_sec_end
variables.
Further, these offsets can extend into the 64bit range on LPAE
systems such as the Keystone 2.
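For a concrete sense of the numbers, here is a minimal userspace sketch of
the Keystone 2 address arithmetic. The KEYSTONE_* values mirror the
mach-keystone constants as I understand them, and the section start is a
hypothetical example; treat both as assumptions rather than values taken
from this patch:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed Keystone 2 layout: RAM is aliased at a 32bit address while
     * its real physical location sits above the 4GB boundary. */
    #define KEYSTONE_LOW_PHYS_START   0x80000000ULL
    #define KEYSTONE_HIGH_PHYS_START  0x800000000ULL

    int main(void)
    {
            uint32_t low_start = 0x80200000;  /* hypothetical section start */
            uint64_t offset = KEYSTONE_HIGH_PHYS_START - KEYSTONE_LOW_PHYS_START;
            uint64_t high_start = low_start + offset;

            /* Neither value fits in 32 bits, hence the move to 64bit. */
            printf("offset     = %#llx\n", (unsigned long long)offset);
            printf("high start = %#llx\n", (unsigned long long)high_start);
            return 0;
    }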
Fix it like this (a sketch of the C side follows the list):
- Extend kernel_sec_start and kernel_sec_end to be 64bit
- Add the offset also to kernel_sec_start and kernel_sec_end
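Since this page is limited to arch/arm/kernel, only the head.S half of the
fix appears in the diff below. For context, the C side of the two steps
above plausibly looks like the following sketch; keystone_pv_fixup() and
the KEYSTONE_* constants are named after the mach-keystone code as I
understand it, so treat the exact form as an assumption, not the literal
patch:

    /* Sketch (asm/memory.h side): widen the section bounds to u64 so
     * that LPAE physical addresses beyond the 32bit limit fit. */
    extern u64 kernel_sec_start;
    extern u64 kernel_sec_end;

    /* Sketch (mach-keystone side): when the pv fixup flips the kernel
     * to the high physical alias, the bounds recorded by head.S must
     * move along with it. */
    static long long __init keystone_pv_fixup(void)
    {
            long long offset = KEYSTONE_HIGH_PHYS_START - KEYSTONE_LOW_PHYS_START;

            /* ... existing fixup logic elided ... */

            kernel_sec_start += offset;
            kernel_sec_end += offset;

            return offset;
    }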
As passing kernel_sec_start and kernel_sec_end as 64bit invariably
incurs BE8 endianness issues, I have attempted to dry-code around
these.
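Concretely: kernel_sec_start and kernel_sec_end are now 64bit slots, but
head.S stores a 32bit value into them, and on big-endian the low word of a
64bit slot lives at byte offset 4 rather than 0. That is what the
CONFIG_CPU_ENDIAN_BE8 branches in the diff below encode. A minimal
userspace sketch of the layout (the value is hypothetical):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            uint64_t slot = 0;           /* stands in for kernel_sec_start */
            uint32_t phys = 0x80200000;  /* hypothetical 32bit section start */

            /* Store the 32bit value into the correct half of the 64bit
             * slot; which half depends on byte order, just like the str
             * offset in head.S. */
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
            memcpy((unsigned char *)&slot + 4, &phys, 4);  /* BE: low word at #4 */
    #else
            memcpy((unsigned char *)&slot + 0, &phys, 4);  /* LE: low word at #0 */
    #endif

            printf("slot = %#llx\n", (unsigned long long)slot);
            return 0;
    }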
Tested on the Vexpress QEMU model both with and without LPAE
enabled.
Fixes: 6e121df14ccd ("ARM: 9090/1: Map the lowmem and kernel separately")
Reported-by: Nishanth Menon <nmenon@kernel.org>
Suggested-by: Russell King <rmk+kernel@armlinux.org.uk>
Tested-by: Grygorii Strashko <grygorii.strashko@ti.com>
Tested-by: Nishanth Menon <nmenon@kernel.org>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r-- | arch/arm/kernel/head.S | 17
1 file changed, 14 insertions, 3 deletions
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 9eb0b4dbcc12..29070eb8df7d 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -49,7 +49,8 @@
 
 /*
  * This needs to be assigned at runtime when the linker symbols are
- * resolved.
+ * resolved. These are unsigned 64bit really, but in this assembly code
+ * we store them as 32bit.
  */
 	.pushsection .data
 	.align	2
@@ -57,8 +58,10 @@
 	.globl	kernel_sec_end
 kernel_sec_start:
 	.long	0
+	.long	0
 kernel_sec_end:
 	.long	0
+	.long	0
 	.popsection
 
 	.macro	pgtbl, rd, phys
@@ -250,7 +253,11 @@ __create_page_tables:
 	add	r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
 	ldr	r6, =(_end - 1)
 	adr_l	r5, kernel_sec_start		@ _pa(kernel_sec_start)
-	str	r8, [r5]			@ Save physical start of kernel
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	str	r8, [r5, #4]			@ Save physical start of kernel (BE)
+#else
+	str	r8, [r5]			@ Save physical start of kernel (LE)
+#endif
 	orr	r3, r8, r7			@ Add the MMU flags
 	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
 1:	str	r3, [r0], #1 << PMD_ORDER
@@ -259,7 +266,11 @@ __create_page_tables:
 	bls	1b
 	eor	r3, r3, r7			@ Remove the MMU flags
 	adr_l	r5, kernel_sec_end		@ _pa(kernel_sec_end)
-	str	r3, [r5]			@ Save physical end of kernel
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	str	r3, [r5, #4]			@ Save physical end of kernel (BE)
+#else
+	str	r3, [r5]			@ Save physical end of kernel (LE)
+#endif
 
 #ifdef CONFIG_XIP_KERNEL
 	/*