From 35e61c77ef386555f3df1bc2057098c6997ca10b Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 6 Jan 2020 19:58:16 +0000 Subject: arm64: asm: Add new-style position independent function annotations As part of an effort to make the annotations in assembly code clearer and more consistent new macros have been introduced, including replacements for ENTRY() and ENDPROC(). On arm64 we have ENDPIPROC(), a custom version of ENDPROC() which is used for code that will need to run in position independent environments like EFI, it creates an alias for the function with the prefix __pi_ and then emits the standard ENDPROC. Add new-style macros to replace this which expand to the standard SYM_FUNC_*() and SYM_FUNC_ALIAS_*(), resulting in the same object code. These are added in linkage.h for consistency with where the generic assembler code has its macros. Signed-off-by: Mark Brown [will: Rename 'WEAK' macro, use ';' instead of ASM_NL, deprecate ENDPIPROC] Signed-off-by: Will Deacon --- arch/arm64/include/asm/assembler.h | 1 + arch/arm64/include/asm/linkage.h | 16 ++++++++++++++++ 2 files changed, 17 insertions(+) (limited to 'arch') diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index b8cf7c85ffa2..4a4258f17c86 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -462,6 +462,7 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU .endm /* + * Deprecated! Use SYM_FUNC_{START,START_WEAK,END}_PI instead. * Annotate a function as position independent, i.e., safe to be called before * the kernel virtual mapping is activated. */ diff --git a/arch/arm64/include/asm/linkage.h b/arch/arm64/include/asm/linkage.h index 1b266292f0be..ebee3113a62f 100644 --- a/arch/arm64/include/asm/linkage.h +++ b/arch/arm64/include/asm/linkage.h @@ -4,4 +4,20 @@ #define __ALIGN .align 2 #define __ALIGN_STR ".align 2" +/* + * Annotate a function as position independent, i.e., safe to be called before + * the kernel virtual mapping is activated. + */ +#define SYM_FUNC_START_PI(x) \ + SYM_FUNC_START_ALIAS(__pi_##x); \ + SYM_FUNC_START(x) + +#define SYM_FUNC_START_WEAK_PI(x) \ + SYM_FUNC_START_ALIAS(__pi_##x); \ + SYM_FUNC_START_WEAK(x) + +#define SYM_FUNC_END_PI(x) \ + SYM_FUNC_END(x); \ + SYM_FUNC_END_ALIAS(__pi_##x) + #endif -- cgit v1.2.3 From 3ac0f4526dfb80625f5c2365bccd85be68db93ef Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 6 Jan 2020 19:58:17 +0000 Subject: arm64: lib: Use modern annotations for assembly functions In an effort to clarify and simplify the annotation of assembly functions in the kernel new macros have been introduced. These replace ENTRY and ENDPROC and also add a new annotation for static functions which previously had no ENTRY equivalent. Update the annotations in the library code to the new macros. 
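For illustration, a sketch distilled from the hunks below (the elided "..." stands in for the unchanged routine body): a weak, position-independent routine that used to be annotated as

	WEAK(memchr)
		...
	ENDPIPROC(memchr)

is now written as

	SYM_FUNC_START_WEAK_PI(memchr)
		...
	SYM_FUNC_END_PI(memchr)

which expands, via the new linkage.h macros added above, to the standard SYM_FUNC_*() annotations plus the __pi_memchr alias.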
Signed-off-by: Mark Brown [will: Use SYM_FUNC_START_WEAK_PI] Signed-off-by: Will Deacon --- arch/arm64/lib/clear_page.S | 4 ++-- arch/arm64/lib/clear_user.S | 4 ++-- arch/arm64/lib/copy_from_user.S | 4 ++-- arch/arm64/lib/copy_in_user.S | 4 ++-- arch/arm64/lib/copy_page.S | 4 ++-- arch/arm64/lib/copy_to_user.S | 4 ++-- arch/arm64/lib/crc32.S | 8 ++++---- arch/arm64/lib/memchr.S | 4 ++-- arch/arm64/lib/memcmp.S | 4 ++-- arch/arm64/lib/memcpy.S | 8 ++++---- arch/arm64/lib/memmove.S | 8 ++++---- arch/arm64/lib/memset.S | 8 ++++---- arch/arm64/lib/strchr.S | 4 ++-- arch/arm64/lib/strcmp.S | 4 ++-- arch/arm64/lib/strlen.S | 4 ++-- arch/arm64/lib/strncmp.S | 4 ++-- arch/arm64/lib/strnlen.S | 4 ++-- arch/arm64/lib/strrchr.S | 4 ++-- arch/arm64/lib/tishift.S | 12 ++++++------ 19 files changed, 50 insertions(+), 50 deletions(-) (limited to 'arch') diff --git a/arch/arm64/lib/clear_page.S b/arch/arm64/lib/clear_page.S index 78a9ef66288a..073acbf02a7c 100644 --- a/arch/arm64/lib/clear_page.S +++ b/arch/arm64/lib/clear_page.S @@ -14,7 +14,7 @@ * Parameters: * x0 - dest */ -ENTRY(clear_page) +SYM_FUNC_START(clear_page) mrs x1, dczid_el0 and w1, w1, #0xf mov x2, #4 @@ -25,5 +25,5 @@ ENTRY(clear_page) tst x0, #(PAGE_SIZE - 1) b.ne 1b ret -ENDPROC(clear_page) +SYM_FUNC_END(clear_page) EXPORT_SYMBOL(clear_page) diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S index aeafc03e961a..48a3a26eff66 100644 --- a/arch/arm64/lib/clear_user.S +++ b/arch/arm64/lib/clear_user.S @@ -19,7 +19,7 @@ * * Alignment fixed up by hardware. */ -ENTRY(__arch_clear_user) +SYM_FUNC_START(__arch_clear_user) mov x2, x1 // save the size for fixup return subs x1, x1, #8 b.mi 2f @@ -40,7 +40,7 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2 uao_user_alternative 9f, strb, sttrb, wzr, x0, 0 5: mov x0, #0 ret -ENDPROC(__arch_clear_user) +SYM_FUNC_END(__arch_clear_user) EXPORT_SYMBOL(__arch_clear_user) .section .fixup,"ax" diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S index ebb3c06cbb5d..8e25e89ad01f 100644 --- a/arch/arm64/lib/copy_from_user.S +++ b/arch/arm64/lib/copy_from_user.S @@ -53,12 +53,12 @@ .endm end .req x5 -ENTRY(__arch_copy_from_user) +SYM_FUNC_START(__arch_copy_from_user) add end, x0, x2 #include "copy_template.S" mov x0, #0 // Nothing to copy ret -ENDPROC(__arch_copy_from_user) +SYM_FUNC_END(__arch_copy_from_user) EXPORT_SYMBOL(__arch_copy_from_user) .section .fixup,"ax" diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S index 3d8153a1ebce..667139013ed1 100644 --- a/arch/arm64/lib/copy_in_user.S +++ b/arch/arm64/lib/copy_in_user.S @@ -55,12 +55,12 @@ end .req x5 -ENTRY(__arch_copy_in_user) +SYM_FUNC_START(__arch_copy_in_user) add end, x0, x2 #include "copy_template.S" mov x0, #0 ret -ENDPROC(__arch_copy_in_user) +SYM_FUNC_END(__arch_copy_in_user) EXPORT_SYMBOL(__arch_copy_in_user) .section .fixup,"ax" diff --git a/arch/arm64/lib/copy_page.S b/arch/arm64/lib/copy_page.S index bbb8562396af..e125a84eb400 100644 --- a/arch/arm64/lib/copy_page.S +++ b/arch/arm64/lib/copy_page.S @@ -17,7 +17,7 @@ * x0 - dest * x1 - src */ -ENTRY(copy_page) +SYM_FUNC_START(copy_page) alternative_if ARM64_HAS_NO_HW_PREFETCH // Prefetch three cache lines ahead. 
prfm pldl1strm, [x1, #128] @@ -75,5 +75,5 @@ alternative_else_nop_endif stnp x16, x17, [x0, #112] ret -ENDPROC(copy_page) +SYM_FUNC_END(copy_page) EXPORT_SYMBOL(copy_page) diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S index 357eae2c18eb..1a104d0089f3 100644 --- a/arch/arm64/lib/copy_to_user.S +++ b/arch/arm64/lib/copy_to_user.S @@ -52,12 +52,12 @@ .endm end .req x5 -ENTRY(__arch_copy_to_user) +SYM_FUNC_START(__arch_copy_to_user) add end, x0, x2 #include "copy_template.S" mov x0, #0 ret -ENDPROC(__arch_copy_to_user) +SYM_FUNC_END(__arch_copy_to_user) EXPORT_SYMBOL(__arch_copy_to_user) .section .fixup,"ax" diff --git a/arch/arm64/lib/crc32.S b/arch/arm64/lib/crc32.S index e6135f16649b..243e107e9896 100644 --- a/arch/arm64/lib/crc32.S +++ b/arch/arm64/lib/crc32.S @@ -85,17 +85,17 @@ CPU_BE( rev16 w3, w3 ) .endm .align 5 -ENTRY(crc32_le) +SYM_FUNC_START(crc32_le) alternative_if_not ARM64_HAS_CRC32 b crc32_le_base alternative_else_nop_endif __crc32 -ENDPROC(crc32_le) +SYM_FUNC_END(crc32_le) .align 5 -ENTRY(__crc32c_le) +SYM_FUNC_START(__crc32c_le) alternative_if_not ARM64_HAS_CRC32 b __crc32c_le_base alternative_else_nop_endif __crc32 c -ENDPROC(__crc32c_le) +SYM_FUNC_END(__crc32c_le) diff --git a/arch/arm64/lib/memchr.S b/arch/arm64/lib/memchr.S index 48a3ab636e4f..edf6b970a277 100644 --- a/arch/arm64/lib/memchr.S +++ b/arch/arm64/lib/memchr.S @@ -19,7 +19,7 @@ * Returns: * x0 - address of first occurrence of 'c' or 0 */ -WEAK(memchr) +SYM_FUNC_START_WEAK_PI(memchr) and w1, w1, #0xff 1: subs x2, x2, #1 b.mi 2f @@ -30,5 +30,5 @@ WEAK(memchr) ret 2: mov x0, #0 ret -ENDPIPROC(memchr) +SYM_FUNC_END_PI(memchr) EXPORT_SYMBOL_NOKASAN(memchr) diff --git a/arch/arm64/lib/memcmp.S b/arch/arm64/lib/memcmp.S index b297bdaaf549..c0671e793ea9 100644 --- a/arch/arm64/lib/memcmp.S +++ b/arch/arm64/lib/memcmp.S @@ -46,7 +46,7 @@ pos .req x11 limit_wd .req x12 mask .req x13 -WEAK(memcmp) +SYM_FUNC_START_WEAK_PI(memcmp) cbz limit, .Lret0 eor tmp1, src1, src2 tst tmp1, #7 @@ -243,5 +243,5 @@ CPU_LE( rev data2, data2 ) .Lret0: mov result, #0 ret -ENDPIPROC(memcmp) +SYM_FUNC_END_PI(memcmp) EXPORT_SYMBOL_NOKASAN(memcmp) diff --git a/arch/arm64/lib/memcpy.S b/arch/arm64/lib/memcpy.S index d79f48994dbb..9f382adfa88a 100644 --- a/arch/arm64/lib/memcpy.S +++ b/arch/arm64/lib/memcpy.S @@ -57,11 +57,11 @@ .endm .weak memcpy -ENTRY(__memcpy) -ENTRY(memcpy) +SYM_FUNC_START_ALIAS(__memcpy) +SYM_FUNC_START_PI(memcpy) #include "copy_template.S" ret -ENDPIPROC(memcpy) +SYM_FUNC_END_PI(memcpy) EXPORT_SYMBOL(memcpy) -ENDPROC(__memcpy) +SYM_FUNC_END_ALIAS(__memcpy) EXPORT_SYMBOL(__memcpy) diff --git a/arch/arm64/lib/memmove.S b/arch/arm64/lib/memmove.S index 784775136480..02cda2e33bde 100644 --- a/arch/arm64/lib/memmove.S +++ b/arch/arm64/lib/memmove.S @@ -46,8 +46,8 @@ D_l .req x13 D_h .req x14 .weak memmove -ENTRY(__memmove) -ENTRY(memmove) +SYM_FUNC_START_ALIAS(__memmove) +SYM_FUNC_START_PI(memmove) cmp dstin, src b.lo __memcpy add tmp1, src, count @@ -184,7 +184,7 @@ ENTRY(memmove) tst count, #0x3f b.ne .Ltail63 ret -ENDPIPROC(memmove) +SYM_FUNC_END_PI(memmove) EXPORT_SYMBOL(memmove) -ENDPROC(__memmove) +SYM_FUNC_END_ALIAS(__memmove) EXPORT_SYMBOL(__memmove) diff --git a/arch/arm64/lib/memset.S b/arch/arm64/lib/memset.S index 9fb97e6bc560..77c3c7ba0084 100644 --- a/arch/arm64/lib/memset.S +++ b/arch/arm64/lib/memset.S @@ -43,8 +43,8 @@ tmp3w .req w9 tmp3 .req x9 .weak memset -ENTRY(__memset) -ENTRY(memset) +SYM_FUNC_START_ALIAS(__memset) +SYM_FUNC_START_PI(memset) mov dst, dstin /* Preserve return 
value. */ and A_lw, val, #255 orr A_lw, A_lw, A_lw, lsl #8 @@ -203,7 +203,7 @@ ENTRY(memset) ands count, count, zva_bits_x b.ne .Ltail_maybe_long ret -ENDPIPROC(memset) +SYM_FUNC_END_PI(memset) EXPORT_SYMBOL(memset) -ENDPROC(__memset) +SYM_FUNC_END_ALIAS(__memset) EXPORT_SYMBOL(__memset) diff --git a/arch/arm64/lib/strchr.S b/arch/arm64/lib/strchr.S index ca3ec18171a4..1f47eae3b0d6 100644 --- a/arch/arm64/lib/strchr.S +++ b/arch/arm64/lib/strchr.S @@ -18,7 +18,7 @@ * Returns: * x0 - address of first occurrence of 'c' or 0 */ -WEAK(strchr) +SYM_FUNC_START_WEAK(strchr) and w1, w1, #0xff 1: ldrb w2, [x0], #1 cmp w2, w1 @@ -28,5 +28,5 @@ WEAK(strchr) cmp w2, w1 csel x0, x0, xzr, eq ret -ENDPROC(strchr) +SYM_FUNC_END(strchr) EXPORT_SYMBOL_NOKASAN(strchr) diff --git a/arch/arm64/lib/strcmp.S b/arch/arm64/lib/strcmp.S index e9aefbe0b740..4767540d1b94 100644 --- a/arch/arm64/lib/strcmp.S +++ b/arch/arm64/lib/strcmp.S @@ -48,7 +48,7 @@ tmp3 .req x9 zeroones .req x10 pos .req x11 -WEAK(strcmp) +SYM_FUNC_START_WEAK_PI(strcmp) eor tmp1, src1, src2 mov zeroones, #REP8_01 tst tmp1, #7 @@ -219,5 +219,5 @@ CPU_BE( orr syndrome, diff, has_nul ) lsr data1, data1, #56 sub result, data1, data2, lsr #56 ret -ENDPIPROC(strcmp) +SYM_FUNC_END_PI(strcmp) EXPORT_SYMBOL_NOKASAN(strcmp) diff --git a/arch/arm64/lib/strlen.S b/arch/arm64/lib/strlen.S index 87b0cb066915..ee3ed882dd79 100644 --- a/arch/arm64/lib/strlen.S +++ b/arch/arm64/lib/strlen.S @@ -44,7 +44,7 @@ pos .req x12 #define REP8_7f 0x7f7f7f7f7f7f7f7f #define REP8_80 0x8080808080808080 -WEAK(strlen) +SYM_FUNC_START_WEAK_PI(strlen) mov zeroones, #REP8_01 bic src, srcin, #15 ands tmp1, srcin, #15 @@ -111,5 +111,5 @@ CPU_LE( lsr tmp2, tmp2, tmp1 ) /* Shift (tmp1 & 63). */ csinv data1, data1, xzr, le csel data2, data2, data2a, le b .Lrealigned -ENDPIPROC(strlen) +SYM_FUNC_END_PI(strlen) EXPORT_SYMBOL_NOKASAN(strlen) diff --git a/arch/arm64/lib/strncmp.S b/arch/arm64/lib/strncmp.S index f571581888fa..2a7ee949ed47 100644 --- a/arch/arm64/lib/strncmp.S +++ b/arch/arm64/lib/strncmp.S @@ -52,7 +52,7 @@ limit_wd .req x13 mask .req x14 endloop .req x15 -WEAK(strncmp) +SYM_FUNC_START_WEAK_PI(strncmp) cbz limit, .Lret0 eor tmp1, src1, src2 mov zeroones, #REP8_01 @@ -295,5 +295,5 @@ CPU_BE( orr syndrome, diff, has_nul ) .Lret0: mov result, #0 ret -ENDPIPROC(strncmp) +SYM_FUNC_END_PI(strncmp) EXPORT_SYMBOL_NOKASAN(strncmp) diff --git a/arch/arm64/lib/strnlen.S b/arch/arm64/lib/strnlen.S index c0bac9493c68..b72913a99038 100644 --- a/arch/arm64/lib/strnlen.S +++ b/arch/arm64/lib/strnlen.S @@ -47,7 +47,7 @@ limit_wd .req x14 #define REP8_7f 0x7f7f7f7f7f7f7f7f #define REP8_80 0x8080808080808080 -WEAK(strnlen) +SYM_FUNC_START_WEAK_PI(strnlen) cbz limit, .Lhit_limit mov zeroones, #REP8_01 bic src, srcin, #15 @@ -156,5 +156,5 @@ CPU_LE( lsr tmp2, tmp2, tmp4 ) /* Shift (tmp1 & 63). 
*/ .Lhit_limit: mov len, limit ret -ENDPIPROC(strnlen) +SYM_FUNC_END_PI(strnlen) EXPORT_SYMBOL_NOKASAN(strnlen) diff --git a/arch/arm64/lib/strrchr.S b/arch/arm64/lib/strrchr.S index 794ac49ea433..13132d1ed6d1 100644 --- a/arch/arm64/lib/strrchr.S +++ b/arch/arm64/lib/strrchr.S @@ -18,7 +18,7 @@ * Returns: * x0 - address of last occurrence of 'c' or 0 */ -WEAK(strrchr) +SYM_FUNC_START_WEAK_PI(strrchr) mov x3, #0 and w1, w1, #0xff 1: ldrb w2, [x0], #1 @@ -29,5 +29,5 @@ WEAK(strrchr) b 1b 2: mov x0, x3 ret -ENDPIPROC(strrchr) +SYM_FUNC_END_PI(strrchr) EXPORT_SYMBOL_NOKASAN(strrchr) diff --git a/arch/arm64/lib/tishift.S b/arch/arm64/lib/tishift.S index 047622536535..a88613834fb0 100644 --- a/arch/arm64/lib/tishift.S +++ b/arch/arm64/lib/tishift.S @@ -7,7 +7,7 @@ #include -ENTRY(__ashlti3) +SYM_FUNC_START(__ashlti3) cbz x2, 1f mov x3, #64 sub x3, x3, x2 @@ -26,10 +26,10 @@ ENTRY(__ashlti3) lsl x1, x0, x1 mov x0, x2 ret -ENDPROC(__ashlti3) +SYM_FUNC_END(__ashlti3) EXPORT_SYMBOL(__ashlti3) -ENTRY(__ashrti3) +SYM_FUNC_START(__ashrti3) cbz x2, 1f mov x3, #64 sub x3, x3, x2 @@ -48,10 +48,10 @@ ENTRY(__ashrti3) asr x0, x1, x0 mov x1, x2 ret -ENDPROC(__ashrti3) +SYM_FUNC_END(__ashrti3) EXPORT_SYMBOL(__ashrti3) -ENTRY(__lshrti3) +SYM_FUNC_START(__lshrti3) cbz x2, 1f mov x3, #64 sub x3, x3, x2 @@ -70,5 +70,5 @@ ENTRY(__lshrti3) lsr x0, x1, x0 mov x1, x2 ret -ENDPROC(__lshrti3) +SYM_FUNC_END(__lshrti3) EXPORT_SYMBOL(__lshrti3) -- cgit v1.2.3 From f4659254a327f5e61c24e21b91c5aea0291bc65b Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 6 Jan 2020 19:58:18 +0000 Subject: arm64: mm: Use modern annotations for assembly functions In an effort to clarify and simplify the annotation of assembly functions in the kernel new macros have been introduced. These replace ENTRY and ENDPROC and also add a new annotation for static functions which previously had no ENTRY equivalent. Update the annotations in the mm code to the new macros. Even the functions called from non-standard environments like idmap have no special requirements on their environments so can be treated like regular functions. 
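One pattern worth calling out, sketched from the cache.S hunks below: where a local label used to fall through into an exported entry point, the local symbol now gets its own explicit annotation that brackets the position-independent one:

	SYM_FUNC_START_LOCAL(__dma_inv_area)
	SYM_FUNC_START_PI(__inval_dcache_area)
		/* FALLTHROUGH */
		...
	SYM_FUNC_END_PI(__inval_dcache_area)
	SYM_FUNC_END(__dma_inv_area)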
Signed-off-by: Mark Brown Signed-off-by: Will Deacon --- arch/arm64/mm/cache.S | 52 +++++++++++++++++++++++++-------------------------- arch/arm64/mm/proc.S | 24 ++++++++++++------------ 2 files changed, 38 insertions(+), 38 deletions(-) (limited to 'arch') diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index db767b072601..2d881f34dd9d 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -24,7 +24,7 @@ * - start - virtual start address of region * - end - virtual end address of region */ -ENTRY(__flush_icache_range) +SYM_FUNC_START(__flush_icache_range) /* FALLTHROUGH */ /* @@ -37,7 +37,7 @@ ENTRY(__flush_icache_range) * - start - virtual start address of region * - end - virtual end address of region */ -ENTRY(__flush_cache_user_range) +SYM_FUNC_START(__flush_cache_user_range) uaccess_ttbr0_enable x2, x3, x4 alternative_if ARM64_HAS_CACHE_IDC dsb ishst @@ -66,8 +66,8 @@ alternative_else_nop_endif 9: mov x0, #-EFAULT b 1b -ENDPROC(__flush_icache_range) -ENDPROC(__flush_cache_user_range) +SYM_FUNC_END(__flush_icache_range) +SYM_FUNC_END(__flush_cache_user_range) /* * invalidate_icache_range(start,end) @@ -77,7 +77,7 @@ ENDPROC(__flush_cache_user_range) * - start - virtual start address of region * - end - virtual end address of region */ -ENTRY(invalidate_icache_range) +SYM_FUNC_START(invalidate_icache_range) alternative_if ARM64_HAS_CACHE_DIC mov x0, xzr isb @@ -94,7 +94,7 @@ alternative_else_nop_endif 2: mov x0, #-EFAULT b 1b -ENDPROC(invalidate_icache_range) +SYM_FUNC_END(invalidate_icache_range) /* * __flush_dcache_area(kaddr, size) @@ -105,10 +105,10 @@ ENDPROC(invalidate_icache_range) * - kaddr - kernel address * - size - size in question */ -ENTRY(__flush_dcache_area) +SYM_FUNC_START_PI(__flush_dcache_area) dcache_by_line_op civac, sy, x0, x1, x2, x3 ret -ENDPIPROC(__flush_dcache_area) +SYM_FUNC_END_PI(__flush_dcache_area) /* * __clean_dcache_area_pou(kaddr, size) @@ -119,14 +119,14 @@ ENDPIPROC(__flush_dcache_area) * - kaddr - kernel address * - size - size in question */ -ENTRY(__clean_dcache_area_pou) +SYM_FUNC_START(__clean_dcache_area_pou) alternative_if ARM64_HAS_CACHE_IDC dsb ishst ret alternative_else_nop_endif dcache_by_line_op cvau, ish, x0, x1, x2, x3 ret -ENDPROC(__clean_dcache_area_pou) +SYM_FUNC_END(__clean_dcache_area_pou) /* * __inval_dcache_area(kaddr, size) @@ -138,7 +138,8 @@ ENDPROC(__clean_dcache_area_pou) * - kaddr - kernel address * - size - size in question */ -ENTRY(__inval_dcache_area) +SYM_FUNC_START_LOCAL(__dma_inv_area) +SYM_FUNC_START_PI(__inval_dcache_area) /* FALLTHROUGH */ /* @@ -146,7 +147,6 @@ ENTRY(__inval_dcache_area) * - start - virtual start address of region * - size - size in question */ -__dma_inv_area: add x1, x1, x0 dcache_line_size x2, x3 sub x3, x2, #1 @@ -165,8 +165,8 @@ __dma_inv_area: b.lo 2b dsb sy ret -ENDPIPROC(__inval_dcache_area) -ENDPROC(__dma_inv_area) +SYM_FUNC_END_PI(__inval_dcache_area) +SYM_FUNC_END(__dma_inv_area) /* * __clean_dcache_area_poc(kaddr, size) @@ -177,7 +177,8 @@ ENDPROC(__dma_inv_area) * - kaddr - kernel address * - size - size in question */ -ENTRY(__clean_dcache_area_poc) +SYM_FUNC_START_LOCAL(__dma_clean_area) +SYM_FUNC_START_PI(__clean_dcache_area_poc) /* FALLTHROUGH */ /* @@ -185,11 +186,10 @@ ENTRY(__clean_dcache_area_poc) * - start - virtual start address of region * - size - size in question */ -__dma_clean_area: dcache_by_line_op cvac, sy, x0, x1, x2, x3 ret -ENDPIPROC(__clean_dcache_area_poc) -ENDPROC(__dma_clean_area) +SYM_FUNC_END_PI(__clean_dcache_area_poc) 
+SYM_FUNC_END(__dma_clean_area) /* * __clean_dcache_area_pop(kaddr, size) @@ -200,13 +200,13 @@ ENDPROC(__dma_clean_area) * - kaddr - kernel address * - size - size in question */ -ENTRY(__clean_dcache_area_pop) +SYM_FUNC_START_PI(__clean_dcache_area_pop) alternative_if_not ARM64_HAS_DCPOP b __clean_dcache_area_poc alternative_else_nop_endif dcache_by_line_op cvap, sy, x0, x1, x2, x3 ret -ENDPIPROC(__clean_dcache_area_pop) +SYM_FUNC_END_PI(__clean_dcache_area_pop) /* * __dma_flush_area(start, size) @@ -216,10 +216,10 @@ ENDPIPROC(__clean_dcache_area_pop) * - start - virtual start address of region * - size - size in question */ -ENTRY(__dma_flush_area) +SYM_FUNC_START_PI(__dma_flush_area) dcache_by_line_op civac, sy, x0, x1, x2, x3 ret -ENDPIPROC(__dma_flush_area) +SYM_FUNC_END_PI(__dma_flush_area) /* * __dma_map_area(start, size, dir) @@ -227,11 +227,11 @@ ENDPIPROC(__dma_flush_area) * - size - size of region * - dir - DMA direction */ -ENTRY(__dma_map_area) +SYM_FUNC_START_PI(__dma_map_area) cmp w2, #DMA_FROM_DEVICE b.eq __dma_inv_area b __dma_clean_area -ENDPIPROC(__dma_map_area) +SYM_FUNC_END_PI(__dma_map_area) /* * __dma_unmap_area(start, size, dir) @@ -239,8 +239,8 @@ ENDPIPROC(__dma_map_area) * - size - size of region * - dir - DMA direction */ -ENTRY(__dma_unmap_area) +SYM_FUNC_START_PI(__dma_unmap_area) cmp w2, #DMA_TO_DEVICE b.ne __dma_inv_area ret -ENDPIPROC(__dma_unmap_area) +SYM_FUNC_END_PI(__dma_unmap_area) diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index a1e0592d1fbc..9a8b1b14ce02 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -50,7 +50,7 @@ * * x0: virtual address of context pointer */ -ENTRY(cpu_do_suspend) +SYM_FUNC_START(cpu_do_suspend) mrs x2, tpidr_el0 mrs x3, tpidrro_el0 mrs x4, contextidr_el1 @@ -74,7 +74,7 @@ alternative_endif stp x10, x11, [x0, #64] stp x12, x13, [x0, #80] ret -ENDPROC(cpu_do_suspend) +SYM_FUNC_END(cpu_do_suspend) /** * cpu_do_resume - restore CPU register context @@ -82,7 +82,7 @@ ENDPROC(cpu_do_suspend) * x0: Address of context pointer */ .pushsection ".idmap.text", "awx" -ENTRY(cpu_do_resume) +SYM_FUNC_START(cpu_do_resume) ldp x2, x3, [x0] ldp x4, x5, [x0, #16] ldp x6, x8, [x0, #32] @@ -131,7 +131,7 @@ alternative_else_nop_endif isb ret -ENDPROC(cpu_do_resume) +SYM_FUNC_END(cpu_do_resume) .popsection #endif @@ -142,7 +142,7 @@ ENDPROC(cpu_do_resume) * * - pgd_phys - physical address of new TTB */ -ENTRY(cpu_do_switch_mm) +SYM_FUNC_START(cpu_do_switch_mm) mrs x2, ttbr1_el1 mmid x1, x1 // get mm->context.id phys_to_ttbr x3, x0 @@ -161,7 +161,7 @@ alternative_else_nop_endif msr ttbr0_el1, x3 // now update TTBR0 isb b post_ttbr_update_workaround // Back to C code... -ENDPROC(cpu_do_switch_mm) +SYM_FUNC_END(cpu_do_switch_mm) .pushsection ".idmap.text", "awx" @@ -182,7 +182,7 @@ ENDPROC(cpu_do_switch_mm) * This is the low-level counterpart to cpu_replace_ttbr1, and should not be * called by anything else. It can only be executed from a TTBR0 mapping. 
*/ -ENTRY(idmap_cpu_replace_ttbr1) +SYM_FUNC_START(idmap_cpu_replace_ttbr1) save_and_disable_daif flags=x2 __idmap_cpu_set_reserved_ttbr1 x1, x3 @@ -194,7 +194,7 @@ ENTRY(idmap_cpu_replace_ttbr1) restore_daif x2 ret -ENDPROC(idmap_cpu_replace_ttbr1) +SYM_FUNC_END(idmap_cpu_replace_ttbr1) .popsection #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 @@ -222,7 +222,7 @@ ENDPROC(idmap_cpu_replace_ttbr1) */ __idmap_kpti_flag: .long 1 -ENTRY(idmap_kpti_install_ng_mappings) +SYM_FUNC_START(idmap_kpti_install_ng_mappings) cpu .req w0 num_cpus .req w1 swapper_pa .req x2 @@ -393,7 +393,7 @@ __idmap_kpti_secondary: .unreq cur_ptep .unreq end_ptep .unreq pte -ENDPROC(idmap_kpti_install_ng_mappings) +SYM_FUNC_END(idmap_kpti_install_ng_mappings) .popsection #endif @@ -404,7 +404,7 @@ ENDPROC(idmap_kpti_install_ng_mappings) */ .pushsection ".idmap.text", "awx" -ENTRY(__cpu_setup) +SYM_FUNC_START(__cpu_setup) tlbi vmalle1 // Invalidate local TLB dsb nsh @@ -475,4 +475,4 @@ ENTRY(__cpu_setup) #endif /* CONFIG_ARM64_HW_AFDBM */ msr tcr_el1, x10 ret // return to head.S -ENDPROC(__cpu_setup) +SYM_FUNC_END(__cpu_setup) -- cgit v1.2.3 From 3b54b743397eb51f879f2bf24b9938b80e5a2092 Mon Sep 17 00:00:00 2001 From: Pavel Tatashin Date: Wed, 4 Dec 2019 10:59:16 -0500 Subject: arm64: kexec: remove unnecessary debug prints kexec_image_info() already outputs all the necessary information about the upcoming kexec, so the extra debug printfs in machine_kexec() are not needed. Signed-off-by: Pavel Tatashin Signed-off-by: Will Deacon --- arch/arm64/kernel/machine_kexec.c | 12 ------------ 1 file changed, 12 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index 0df8493624e0..8e9c924423b4 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -160,18 +160,6 @@ void machine_kexec(struct kimage *kimage) kexec_image_info(kimage); - pr_debug("%s:%d: control_code_page: %p\n", __func__, __LINE__, - kimage->control_code_page); - pr_debug("%s:%d: reboot_code_buffer_phys: %pa\n", __func__, __LINE__, - &reboot_code_buffer_phys); - pr_debug("%s:%d: reboot_code_buffer: %p\n", __func__, __LINE__, - reboot_code_buffer); - pr_debug("%s:%d: relocate_new_kernel: %p\n", __func__, __LINE__, - arm64_relocate_new_kernel); - pr_debug("%s:%d: relocate_new_kernel_size: 0x%lx(%lu) bytes\n", - __func__, __LINE__, arm64_relocate_new_kernel_size, - arm64_relocate_new_kernel_size); - /* * Copy arm64_relocate_new_kernel to the reboot_code_buffer for use * after the kernel is shut down. -- cgit v1.2.3 From 621516789ee6e285cb2088fe4706eedd030d38bf Mon Sep 17 00:00:00 2001 From: Pavel Tatashin Date: Wed, 4 Dec 2019 10:59:17 -0500 Subject: arm64: kexec: make dtb_mem always enabled Currently, dtb_mem is enabled only when CONFIG_KEXEC_FILE is enabled. This adds ugly ifdefs to C files. Always enable dtb_mem; when it is not used, it is NULL. Change dtb_mem to phys_addr_t, as it is a physical address. 
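The payoff is visible at the call site (taken from the machine_kexec.c hunk that follows): with the field always defined, the #ifdef CONFIG_KEXEC_FILE / #else / #endif around the last argument collapses to a single call:

	cpu_soft_restart(reboot_code_buffer_phys, kimage->head, kimage->start,
			 kimage->arch.dtb_mem);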
Signed-off-by: Pavel Tatashin Signed-off-by: Will Deacon --- arch/arm64/include/asm/kexec.h | 4 ++-- arch/arm64/kernel/machine_kexec.c | 6 +----- 2 files changed, 3 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h index 12a561a54128..ad6afed69078 100644 --- a/arch/arm64/include/asm/kexec.h +++ b/arch/arm64/include/asm/kexec.h @@ -90,14 +90,14 @@ static inline void crash_prepare_suspend(void) {} static inline void crash_post_resume(void) {} #endif -#ifdef CONFIG_KEXEC_FILE #define ARCH_HAS_KIMAGE_ARCH struct kimage_arch { void *dtb; - unsigned long dtb_mem; + phys_addr_t dtb_mem; }; +#ifdef CONFIG_KEXEC_FILE extern const struct kexec_file_ops kexec_image_ops; struct kimage; diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index 8e9c924423b4..ae1bad0156cd 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -203,11 +203,7 @@ void machine_kexec(struct kimage *kimage) * In kexec_file case, the kernel starts directly without purgatory. */ cpu_soft_restart(reboot_code_buffer_phys, kimage->head, kimage->start, -#ifdef CONFIG_KEXEC_FILE - kimage->arch.dtb_mem); -#else - 0); -#endif + kimage->arch.dtb_mem); BUG(); /* Should never get here. */ } -- cgit v1.2.3 From d234332c281533a46e77dceff3901ef63e546cce Mon Sep 17 00:00:00 2001 From: Pavel Tatashin Date: Wed, 4 Dec 2019 10:59:18 -0500 Subject: arm64: hibernate: pass the allocated pgdp to ttbr0 ttbr0 should be set to the beginning of pgdp; however, currently in create_safe_exec_page() it is set to pgdp after pgd_offset_raw(), which works by accident. Signed-off-by: Pavel Tatashin Reviewed-by: James Morse Signed-off-by: Will Deacon --- arch/arm64/kernel/hibernate.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index a96b2921d22c..ef46ce66d7e8 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -269,7 +269,7 @@ static int create_safe_exec_page(void *src_start, size_t length, */ cpu_set_reserved_ttbr0(); local_flush_tlb_all(); - write_sysreg(phys_to_ttbr(virt_to_phys(pgdp)), ttbr0_el1); + write_sysreg(phys_to_ttbr(virt_to_phys(trans_pgd)), ttbr0_el1); isb(); *phys_dst_addr = virt_to_phys((void *)dst); -- cgit v1.2.3 From 051a7a94aaa9ca8e057aa9136073fb5e6e2cef5e Mon Sep 17 00:00:00 2001 From: Pavel Tatashin Date: Wed, 4 Dec 2019 10:59:19 -0500 Subject: arm64: hibernate: use get_safe_page directly create_safe_exec_page() uses hibernate's allocator to create a set of page tables to map a single page that will contain the relocation code. Remove the allocator-related arguments and use get_safe_page() directly, as is done in other local functions in this file, to simplify the function prototype. Removing this function pointer makes it easier to refactor the code later. 
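Concretely (from the hibernate.c hunks below), each call through the pointer, e.g.

	pudp = allocator(mask);

becomes a direct call:

	pudp = (void *)get_safe_page(GFP_ATOMIC);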
Signed-off-by: Pavel Tatashin Reviewed-by: Matthias Brugger Signed-off-by: Will Deacon --- arch/arm64/kernel/hibernate.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index ef46ce66d7e8..34297716643f 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -196,9 +196,7 @@ EXPORT_SYMBOL(arch_hibernation_header_restore); */ static int create_safe_exec_page(void *src_start, size_t length, unsigned long dst_addr, - phys_addr_t *phys_dst_addr, - void *(*allocator)(gfp_t mask), - gfp_t mask) + phys_addr_t *phys_dst_addr) { int rc = 0; pgd_t *trans_pgd; @@ -206,7 +204,7 @@ static int create_safe_exec_page(void *src_start, size_t length, pud_t *pudp; pmd_t *pmdp; pte_t *ptep; - unsigned long dst = (unsigned long)allocator(mask); + unsigned long dst = get_safe_page(GFP_ATOMIC); if (!dst) { rc = -ENOMEM; @@ -216,7 +214,7 @@ static int create_safe_exec_page(void *src_start, size_t length, memcpy((void *)dst, src_start, length); __flush_icache_range(dst, dst + length); - trans_pgd = allocator(mask); + trans_pgd = (void *)get_safe_page(GFP_ATOMIC); if (!trans_pgd) { rc = -ENOMEM; goto out; @@ -224,7 +222,7 @@ static int create_safe_exec_page(void *src_start, size_t length, pgdp = pgd_offset_raw(trans_pgd, dst_addr); if (pgd_none(READ_ONCE(*pgdp))) { - pudp = allocator(mask); + pudp = (void *)get_safe_page(GFP_ATOMIC); if (!pudp) { rc = -ENOMEM; goto out; @@ -234,7 +232,7 @@ static int create_safe_exec_page(void *src_start, size_t length, pudp = pud_offset(pgdp, dst_addr); if (pud_none(READ_ONCE(*pudp))) { - pmdp = allocator(mask); + pmdp = (void *)get_safe_page(GFP_ATOMIC); if (!pmdp) { rc = -ENOMEM; goto out; @@ -244,7 +242,7 @@ static int create_safe_exec_page(void *src_start, size_t length, pmdp = pmd_offset(pudp, dst_addr); if (pmd_none(READ_ONCE(*pmdp))) { - ptep = allocator(mask); + ptep = (void *)get_safe_page(GFP_ATOMIC); if (!ptep) { rc = -ENOMEM; goto out; @@ -530,8 +528,7 @@ int swsusp_arch_resume(void) */ rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size, (unsigned long)hibernate_exit, - &phys_hibernate_exit, - (void *)get_safe_page, GFP_ATOMIC); + &phys_hibernate_exit); if (rc) { pr_err("Failed to create safe executable page for hibernate_exit code.\n"); goto out; -- cgit v1.2.3 From a89d7ff933b0157a18e4010f10301d927b894634 Mon Sep 17 00:00:00 2001 From: Pavel Tatashin Date: Wed, 4 Dec 2019 10:59:20 -0500 Subject: arm64: hibernate: remove gotos as they are not needed Usually, gotos are used to handle cleanup after exception, but in case of create_safe_exec_page and swsusp_arch_resume there are no clean-ups. So, simply return the errors directly. 
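That is, an error path such as

	if (!dst) {
		rc = -ENOMEM;
		goto out;
	}

becomes simply (as in the hunks below):

	if (!dst)
		return -ENOMEM;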
Signed-off-by: Pavel Tatashin Reviewed-by: James Morse Signed-off-by: Will Deacon --- arch/arm64/kernel/hibernate.c | 49 +++++++++++++++---------------------------- 1 file changed, 17 insertions(+), 32 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index 34297716643f..83c41a2f8400 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -198,7 +198,6 @@ static int create_safe_exec_page(void *src_start, size_t length, unsigned long dst_addr, phys_addr_t *phys_dst_addr) { - int rc = 0; pgd_t *trans_pgd; pgd_t *pgdp; pud_t *pudp; @@ -206,47 +205,37 @@ static int create_safe_exec_page(void *src_start, size_t length, pte_t *ptep; unsigned long dst = get_safe_page(GFP_ATOMIC); - if (!dst) { - rc = -ENOMEM; - goto out; - } + if (!dst) + return -ENOMEM; memcpy((void *)dst, src_start, length); __flush_icache_range(dst, dst + length); trans_pgd = (void *)get_safe_page(GFP_ATOMIC); - if (!trans_pgd) { - rc = -ENOMEM; - goto out; - } + if (!trans_pgd) + return -ENOMEM; pgdp = pgd_offset_raw(trans_pgd, dst_addr); if (pgd_none(READ_ONCE(*pgdp))) { pudp = (void *)get_safe_page(GFP_ATOMIC); - if (!pudp) { - rc = -ENOMEM; - goto out; - } + if (!pudp) + return -ENOMEM; pgd_populate(&init_mm, pgdp, pudp); } pudp = pud_offset(pgdp, dst_addr); if (pud_none(READ_ONCE(*pudp))) { pmdp = (void *)get_safe_page(GFP_ATOMIC); - if (!pmdp) { - rc = -ENOMEM; - goto out; - } + if (!pmdp) + return -ENOMEM; pud_populate(&init_mm, pudp, pmdp); } pmdp = pmd_offset(pudp, dst_addr); if (pmd_none(READ_ONCE(*pmdp))) { ptep = (void *)get_safe_page(GFP_ATOMIC); - if (!ptep) { - rc = -ENOMEM; - goto out; - } + if (!ptep) + return -ENOMEM; pmd_populate_kernel(&init_mm, pmdp, ptep); } @@ -272,8 +261,7 @@ static int create_safe_exec_page(void *src_start, size_t length, *phys_dst_addr = virt_to_phys((void *)dst); -out: - return rc; + return 0; } #define dcache_clean_range(start, end) __flush_dcache_area(start, (end - start)) @@ -482,7 +470,7 @@ static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start, */ int swsusp_arch_resume(void) { - int rc = 0; + int rc; void *zero_page; size_t exit_size; pgd_t *tmp_pg_dir; @@ -498,12 +486,11 @@ int swsusp_arch_resume(void) tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC); if (!tmp_pg_dir) { pr_err("Failed to allocate memory for temporary page tables.\n"); - rc = -ENOMEM; - goto out; + return -ENOMEM; } rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, PAGE_END); if (rc) - goto out; + return rc; /* * We need a zero page that is zero before & after resume in order to @@ -512,8 +499,7 @@ int swsusp_arch_resume(void) zero_page = (void *)get_safe_page(GFP_ATOMIC); if (!zero_page) { pr_err("Failed to allocate zero page.\n"); - rc = -ENOMEM; - goto out; + return -ENOMEM; } /* @@ -531,7 +517,7 @@ int swsusp_arch_resume(void) &phys_hibernate_exit); if (rc) { pr_err("Failed to create safe executable page for hibernate_exit code.\n"); - goto out; + return rc; } /* @@ -558,8 +544,7 @@ int swsusp_arch_resume(void) resume_hdr.reenter_kernel, restore_pblist, resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page)); -out: - return rc; + return 0; } int hibernate_resume_nonboot_cpu_disable(void) -- cgit v1.2.3 From 13373f0e658013b0412ee58ce8131de390ff1c44 Mon Sep 17 00:00:00 2001 From: Pavel Tatashin Date: Wed, 4 Dec 2019 10:59:21 -0500 Subject: arm64: hibernate: rename dst to page in create_safe_exec_page create_safe_exec_page() allocates a safe page and maps it at a specific location, also this function returns the physical 
address of the newly allocated page. The destination VA and PA are specified in the arguments dst_addr and phys_dst_addr. However, within the function it uses "dst", which has unsigned long type but is actually a pointer in the current virtual space. This is confusing to read. Rename dst to the more appropriate "page" (the page that is created), and also change its type to "void *". Signed-off-by: Pavel Tatashin Reviewed-by: James Morse Signed-off-by: Will Deacon --- arch/arm64/kernel/hibernate.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index 83c41a2f8400..1ca8af685e96 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -198,18 +198,18 @@ static int create_safe_exec_page(void *src_start, size_t length, unsigned long dst_addr, phys_addr_t *phys_dst_addr) { + void *page = (void *)get_safe_page(GFP_ATOMIC); pgd_t *trans_pgd; pgd_t *pgdp; pud_t *pudp; pmd_t *pmdp; pte_t *ptep; - unsigned long dst = get_safe_page(GFP_ATOMIC); - if (!dst) + if (!page) return -ENOMEM; - memcpy((void *)dst, src_start, length); - __flush_icache_range(dst, dst + length); + memcpy(page, src_start, length); + __flush_icache_range((unsigned long)page, (unsigned long)page + length); trans_pgd = (void *)get_safe_page(GFP_ATOMIC); if (!trans_pgd) @@ -240,7 +240,7 @@ static int create_safe_exec_page(void *src_start, size_t length, } ptep = pte_offset_kernel(pmdp, dst_addr); - set_pte(ptep, pfn_pte(virt_to_pfn(dst), PAGE_KERNEL_EXEC)); + set_pte(ptep, pfn_pte(virt_to_pfn(page), PAGE_KERNEL_EXEC)); /* * Load our new page tables. A strict BBM approach requires that we @@ -259,7 +259,7 @@ static int create_safe_exec_page(void *src_start, size_t length, write_sysreg(phys_to_ttbr(virt_to_phys(trans_pgd)), ttbr0_el1); isb(); - *phys_dst_addr = virt_to_phys((void *)dst); + *phys_dst_addr = virt_to_phys(page); return 0; } -- cgit v1.2.3 From 7ea4088938b793d9a8d4e205b2fe54fba4cf9739 Mon Sep 17 00:00:00 2001 From: Pavel Tatashin Date: Wed, 4 Dec 2019 10:59:22 -0500 Subject: arm64: hibernate: add PUD_SECT_RDONLY PMD_SECT_RDONLY is currently used in the pud_* functions, which is confusing. Signed-off-by: Pavel Tatashin Acked-by: James Morse Signed-off-by: Will Deacon --- arch/arm64/include/asm/pgtable-hwdef.h | 1 + arch/arm64/kernel/hibernate.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index d9fbd433cc17..9961c7cee9c5 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -110,6 +110,7 @@ #define PUD_TABLE_BIT (_AT(pudval_t, 1) << 1) #define PUD_TYPE_MASK (_AT(pudval_t, 3) << 0) #define PUD_TYPE_SECT (_AT(pudval_t, 1) << 0) +#define PUD_SECT_RDONLY (_AT(pudval_t, 1) << 7) /* AP[2] */ /* * Level 2 descriptor (PMD). 
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index 1ca8af685e96..ce60bceed357 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -436,7 +436,7 @@ static int copy_pud(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start, return -ENOMEM; } else { set_pud(dst_pudp, - __pud(pud_val(pud) & ~PMD_SECT_RDONLY)); + __pud(pud_val(pud) & ~PUD_SECT_RDONLY)); } } while (dst_pudp++, src_pudp++, addr = next, addr != end); -- cgit v1.2.3 From a2c2e67923ecb73c92028e9b128a9bd48d2e25e9 Mon Sep 17 00:00:00 2001 From: Pavel Tatashin Date: Wed, 4 Dec 2019 10:59:23 -0500 Subject: arm64: hibernate: add trans_pgd public functions trans_pgd_create_copy() and trans_pgd_map_page() are going to be the basis for new shared code that handles page tables for cases which are between kernels: kexec, and hibernate. Note: Eventually, get_safe_page() will be moved into a function pointer passed via argument, but for now keep it as is. Signed-off-by: Pavel Tatashin Reviewed-by: James Morse [will: Keep these functions static until kexec needs them] Signed-off-by: Will Deacon --- arch/arm64/kernel/hibernate.c | 93 ++++++++++++++++++++++++++++--------------- 1 file changed, 60 insertions(+), 33 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index ce60bceed357..590963c9c609 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -182,39 +182,15 @@ int arch_hibernation_header_restore(void *addr) } EXPORT_SYMBOL(arch_hibernation_header_restore); -/* - * Copies length bytes, starting at src_start into an new page, - * perform cache maintentance, then maps it at the specified address low - * address as executable. - * - * This is used by hibernate to copy the code it needs to execute when - * overwriting the kernel text. This function generates a new set of page - * tables, which it loads into ttbr0. - * - * Length is provided as we probably only want 4K of data, even on a 64K - * page system. - */ -static int create_safe_exec_page(void *src_start, size_t length, - unsigned long dst_addr, - phys_addr_t *phys_dst_addr) +static int trans_pgd_map_page(pgd_t *trans_pgd, void *page, + unsigned long dst_addr, + pgprot_t pgprot) { - void *page = (void *)get_safe_page(GFP_ATOMIC); - pgd_t *trans_pgd; pgd_t *pgdp; pud_t *pudp; pmd_t *pmdp; pte_t *ptep; - if (!page) - return -ENOMEM; - - memcpy(page, src_start, length); - __flush_icache_range((unsigned long)page, (unsigned long)page + length); - - trans_pgd = (void *)get_safe_page(GFP_ATOMIC); - if (!trans_pgd) - return -ENOMEM; - pgdp = pgd_offset_raw(trans_pgd, dst_addr); if (pgd_none(READ_ONCE(*pgdp))) { pudp = (void *)get_safe_page(GFP_ATOMIC); @@ -242,6 +218,44 @@ static int create_safe_exec_page(void *src_start, size_t length, ptep = pte_offset_kernel(pmdp, dst_addr); set_pte(ptep, pfn_pte(virt_to_pfn(page), PAGE_KERNEL_EXEC)); + return 0; +} + +/* + * Copies length bytes, starting at src_start into an new page, + * perform cache maintenance, then maps it at the specified address low + * address as executable. + * + * This is used by hibernate to copy the code it needs to execute when + * overwriting the kernel text. This function generates a new set of page + * tables, which it loads into ttbr0. + * + * Length is provided as we probably only want 4K of data, even on a 64K + * page system. 
+ */ +static int create_safe_exec_page(void *src_start, size_t length, + unsigned long dst_addr, + phys_addr_t *phys_dst_addr) +{ + void *page = (void *)get_safe_page(GFP_ATOMIC); + pgd_t *trans_pgd; + int rc; + + if (!page) + return -ENOMEM; + + memcpy(page, src_start, length); + __flush_icache_range((unsigned long)page, (unsigned long)page + length); + + trans_pgd = (void *)get_safe_page(GFP_ATOMIC); + if (!trans_pgd) + return -ENOMEM; + + rc = trans_pgd_map_page(trans_pgd, page, dst_addr, + PAGE_KERNEL_EXEC); + if (rc) + return rc; + /* * Load our new page tables. A strict BBM approach requires that we * ensure that TLBs are free of any entries that may overlap with the @@ -462,6 +476,24 @@ static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start, return 0; } +static int trans_pgd_create_copy(pgd_t **dst_pgdp, unsigned long start, + unsigned long end) +{ + int rc; + pgd_t *trans_pgd = (pgd_t *)get_safe_page(GFP_ATOMIC); + + if (!trans_pgd) { + pr_err("Failed to allocate memory for temporary page tables.\n"); + return -ENOMEM; + } + + rc = copy_page_tables(trans_pgd, start, end); + if (!rc) + *dst_pgdp = trans_pgd; + + return rc; +} + /* * Setup then Resume from the hibernate image using swsusp_arch_suspend_exit(). * @@ -483,12 +515,7 @@ int swsusp_arch_resume(void) * Create a second copy of just the linear map, and use this when * restoring. */ - tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC); - if (!tmp_pg_dir) { - pr_err("Failed to allocate memory for temporary page tables.\n"); - return -ENOMEM; - } - rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, PAGE_END); + rc = trans_pgd_create_copy(&tmp_pg_dir, PAGE_OFFSET, PAGE_END); if (rc) return rc; -- cgit v1.2.3 From 3751e728cef2908c15974a5ae44627fd41ef3362 Mon Sep 17 00:00:00 2001 From: AKASHI Takahiro Date: Mon, 16 Dec 2019 11:12:47 +0900 Subject: arm64: kexec_file: add crash dump support Enabling crash dump (kdump) includes * prepare contents of ELF header of a core dump file, /proc/vmcore, using crash_prepare_elf64_headers(), and * add two device tree properties, "linux,usable-memory-range" and "linux,elfcorehdr", which represent respectively a memory range to be used by crash dump kernel and the header's location Signed-off-by: AKASHI Takahiro Cc: Catalin Marinas Cc: Will Deacon Reviewed-by: James Morse Tested-and-reviewed-by: Bhupesh Sharma Signed-off-by: Will Deacon --- arch/arm64/include/asm/kexec.h | 4 ++ arch/arm64/kernel/kexec_image.c | 4 -- arch/arm64/kernel/machine_kexec_file.c | 106 +++++++++++++++++++++++++++++++-- 3 files changed, 106 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h index 12a561a54128..d24b527e8c00 100644 --- a/arch/arm64/include/asm/kexec.h +++ b/arch/arm64/include/asm/kexec.h @@ -96,6 +96,10 @@ static inline void crash_post_resume(void) {} struct kimage_arch { void *dtb; unsigned long dtb_mem; + /* Core ELF header buffer */ + void *elf_headers; + unsigned long elf_headers_mem; + unsigned long elf_headers_sz; }; extern const struct kexec_file_ops kexec_image_ops; diff --git a/arch/arm64/kernel/kexec_image.c b/arch/arm64/kernel/kexec_image.c index 29a9428486a5..af9987c154ca 100644 --- a/arch/arm64/kernel/kexec_image.c +++ b/arch/arm64/kernel/kexec_image.c @@ -47,10 +47,6 @@ static void *image_load(struct kimage *image, struct kexec_segment *kernel_segment; int ret; - /* We don't support crash kernels yet. 
*/ - if (image->type == KEXEC_TYPE_CRASH) - return ERR_PTR(-EOPNOTSUPP); - /* * We require a kernel with an unambiguous Image header. Per * Documentation/arm64/booting.rst, this is the case when image_size diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c index 7b08bf9499b6..dd3ae8081b38 100644 --- a/arch/arm64/kernel/machine_kexec_file.c +++ b/arch/arm64/kernel/machine_kexec_file.c @@ -17,12 +17,15 @@ #include #include #include +#include #include #include #include #include /* relevant device tree properties */ +#define FDT_PROP_KEXEC_ELFHDR "linux,elfcorehdr" +#define FDT_PROP_MEM_RANGE "linux,usable-memory-range" #define FDT_PROP_INITRD_START "linux,initrd-start" #define FDT_PROP_INITRD_END "linux,initrd-end" #define FDT_PROP_BOOTARGS "bootargs" @@ -40,6 +43,10 @@ int arch_kimage_file_post_load_cleanup(struct kimage *image) vfree(image->arch.dtb); image->arch.dtb = NULL; + vfree(image->arch.elf_headers); + image->arch.elf_headers = NULL; + image->arch.elf_headers_sz = 0; + return kexec_image_post_load_cleanup_default(image); } @@ -55,6 +62,31 @@ static int setup_dtb(struct kimage *image, off = ret; + ret = fdt_delprop(dtb, off, FDT_PROP_KEXEC_ELFHDR); + if (ret && ret != -FDT_ERR_NOTFOUND) + goto out; + ret = fdt_delprop(dtb, off, FDT_PROP_MEM_RANGE); + if (ret && ret != -FDT_ERR_NOTFOUND) + goto out; + + if (image->type == KEXEC_TYPE_CRASH) { + /* add linux,elfcorehdr */ + ret = fdt_appendprop_addrrange(dtb, 0, off, + FDT_PROP_KEXEC_ELFHDR, + image->arch.elf_headers_mem, + image->arch.elf_headers_sz); + if (ret) + return (ret == -FDT_ERR_NOSPACE ? -ENOMEM : -EINVAL); + + /* add linux,usable-memory-range */ + ret = fdt_appendprop_addrrange(dtb, 0, off, + FDT_PROP_MEM_RANGE, + crashk_res.start, + crashk_res.end - crashk_res.start + 1); + if (ret) + return (ret == -FDT_ERR_NOSPACE ? -ENOMEM : -EINVAL); + } + /* add bootargs */ if (cmdline) { ret = fdt_setprop_string(dtb, off, FDT_PROP_BOOTARGS, cmdline); @@ -125,8 +157,8 @@ out: } /* - * More space needed so that we can add initrd, bootargs, kaslr-seed, and - * rng-seed. + * More space needed so that we can add initrd, bootargs, kaslr-seed, + * rng-seed, userable-memory-range and elfcorehdr. 
*/ #define DTB_EXTRA_SPACE 0x1000 @@ -174,6 +206,43 @@ static int create_dtb(struct kimage *image, } } +static int prepare_elf_headers(void **addr, unsigned long *sz) +{ + struct crash_mem *cmem; + unsigned int nr_ranges; + int ret; + u64 i; + phys_addr_t start, end; + + nr_ranges = 1; /* for exclusion of crashkernel region */ + for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, + MEMBLOCK_NONE, &start, &end, NULL) + nr_ranges++; + + cmem = kmalloc(sizeof(struct crash_mem) + + sizeof(struct crash_mem_range) * nr_ranges, GFP_KERNEL); + if (!cmem) + return -ENOMEM; + + cmem->max_nr_ranges = nr_ranges; + cmem->nr_ranges = 0; + for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, + MEMBLOCK_NONE, &start, &end, NULL) { + cmem->ranges[cmem->nr_ranges].start = start; + cmem->ranges[cmem->nr_ranges].end = end - 1; + cmem->nr_ranges++; + } + + /* Exclude crashkernel region */ + ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end); + + if (!ret) + ret = crash_prepare_elf64_headers(cmem, true, addr, sz); + + kfree(cmem); + return ret; +} + int load_other_segments(struct kimage *image, unsigned long kernel_load_addr, unsigned long kernel_size, @@ -181,14 +250,43 @@ int load_other_segments(struct kimage *image, char *cmdline) { struct kexec_buf kbuf; - void *dtb = NULL; - unsigned long initrd_load_addr = 0, dtb_len; + void *headers, *dtb = NULL; + unsigned long headers_sz, initrd_load_addr = 0, dtb_len; int ret = 0; kbuf.image = image; /* not allocate anything below the kernel */ kbuf.buf_min = kernel_load_addr + kernel_size; + /* load elf core header */ + if (image->type == KEXEC_TYPE_CRASH) { + ret = prepare_elf_headers(&headers, &headers_sz); + if (ret) { + pr_err("Preparing elf core header failed\n"); + goto out_err; + } + + kbuf.buffer = headers; + kbuf.bufsz = headers_sz; + kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; + kbuf.memsz = headers_sz; + kbuf.buf_align = SZ_64K; /* largest supported page size */ + kbuf.buf_max = ULONG_MAX; + kbuf.top_down = true; + + ret = kexec_add_buffer(&kbuf); + if (ret) { + vfree(headers); + goto out_err; + } + image->arch.elf_headers = headers; + image->arch.elf_headers_mem = kbuf.mem; + image->arch.elf_headers_sz = headers_sz; + + pr_debug("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n", + image->arch.elf_headers_mem, headers_sz, headers_sz); + } + /* load initrd */ if (initrd) { kbuf.buffer = initrd; -- cgit v1.2.3 From f7ef82c22fd76c300ae71c6dd19d9b03f4ab8253 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 8 Jan 2020 16:57:52 +0000 Subject: arm64: asm: Remove ENDPIPROC() Now all the users have been removed delete the definition of ENDPIPROC() to ensure we don't acquire any new users. Signed-off-by: Mark Brown Signed-off-by: Will Deacon --- arch/arm64/include/asm/assembler.h | 12 ------------ 1 file changed, 12 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 4a4258f17c86..e318d31bb979 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -461,18 +461,6 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU b.ne 9998b .endm -/* - * Deprecated! Use SYM_FUNC_{START,START_WEAK,END}_PI instead. - * Annotate a function as position independent, i.e., safe to be called before - * the kernel virtual mapping is activated. - */ -#define ENDPIPROC(x) \ - .globl __pi_##x; \ - .type __pi_##x, %function; \ - .set __pi_##x, x; \ - .size __pi_##x, . - x; \ - ENDPROC(x) - /* * Annotate a function as being unsuitable for kprobes. 
*/ -- cgit v1.2.3 From b3c75c9d762b67de278c8847529b1e3a4c143a8e Mon Sep 17 00:00:00 2001 From: Anthony Steinhauser Date: Mon, 16 Dec 2019 14:53:47 -0800 Subject: Return ENODEV when the selected speculation misfeature is unsupported When the control of the selected speculation misbehavior is unsupported, the kernel should return ENODEV according to the documentation: https://www.kernel.org/doc/html/v4.17/userspace-api/spec_ctrl.html Current aarch64 implementation of SSB control sometimes returns EINVAL which is reserved for unimplemented prctl and for violations of reserved arguments. This change makes the aarch64 implementation consistent with the x86 implementation and with the documentation. Signed-off-by: Anthony Steinhauser Signed-off-by: Will Deacon --- arch/arm64/kernel/ssbd.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/ssbd.c b/arch/arm64/kernel/ssbd.c index 52cfc6148355..b26955f56750 100644 --- a/arch/arm64/kernel/ssbd.c +++ b/arch/arm64/kernel/ssbd.c @@ -37,7 +37,7 @@ static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl) /* Unsupported */ if (state == ARM64_SSBD_UNKNOWN) - return -EINVAL; + return -ENODEV; /* Treat the unaffected/mitigated state separately */ if (state == ARM64_SSBD_MITIGATED) { @@ -102,7 +102,7 @@ static int ssbd_prctl_get(struct task_struct *task) { switch (arm64_get_ssbd_state()) { case ARM64_SSBD_UNKNOWN: - return -EINVAL; + return -ENODEV; case ARM64_SSBD_FORCE_ENABLE: return PR_SPEC_DISABLE; case ARM64_SSBD_KERNEL: -- cgit v1.2.3 From 31d868c49017140970d3a4ae45ab11d67618011c Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 6 Jan 2020 14:54:12 -0800 Subject: arm64: kpti: Add Broadcom Brahma-B53 core to the KPTI whitelist Broadcom Brahma-B53 CPUs do not implement ID_AA64PFR0_EL1.CSV3 but are not susceptible to Meltdown, so add all Brahma-B53 part numbers to kpti_safe_list[]. Signed-off-by: Florian Fainelli Signed-off-by: Will Deacon --- arch/arm64/kernel/cpufeature.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 04cf64e9f0c9..0427b72c960b 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -975,6 +975,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, static const struct midr_range kpti_safe_list[] = { MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), + MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53), MIDR_ALL_VERSIONS(MIDR_CORTEX_A35), MIDR_ALL_VERSIONS(MIDR_CORTEX_A53), MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), -- cgit v1.2.3 From 26415330a754b464b14a8e1afabeaba3381ba1eb Mon Sep 17 00:00:00 2001 From: Hanjun Guo Date: Wed, 11 Dec 2019 15:27:33 +0800 Subject: arm64: armv8_deprecated: update the comments of armv8_deprecated_init() In commit c0d8832e78cb ("arm64: Ensure the instruction emulation is ready for userspace"), armv8_deprecated_init() was promoted to core_initcall() but the comments were left unchanged, update it now. Spotted by some random reading of the code. 
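For context, a sketch assuming the registration still looks as it did after commit c0d8832e78cb:

	core_initcall(armv8_deprecated_init);

so the emulation hooks are registered before init (and hence userspace) is spawned.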
Signed-off-by: Hanjun Guo [will: "can guarantee" => "guarantees"] Signed-off-by: Will Deacon --- arch/arm64/kernel/armv8_deprecated.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c index ca158be21f83..7832b3216370 100644 --- a/arch/arm64/kernel/armv8_deprecated.c +++ b/arch/arm64/kernel/armv8_deprecated.c @@ -618,7 +618,8 @@ static struct insn_emulation_ops setend_ops = { }; /* - * Invoked as late_initcall, since not needed before init spawned. + * Invoked as core_initcall, which guarantees that the instruction + * emulation is ready for userspace. */ static int __init armv8_deprecated_init(void) { -- cgit v1.2.3 From 3c9c1dcde7c3a6c6203686ccd620620ae5b2a905 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 31 Dec 2019 01:54:57 -0800 Subject: arm64: Kconfig: Remove CONFIG_ prefix from ARM64_PSEUDO_NMI section Remove the CONFIG_ prefix from the select statement for ARM_GIC_V3. Acked-by: Catalin Marinas Signed-off-by: Joe Perches Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index b1b4476ddb83..e9b1fc22f72e 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1544,7 +1544,7 @@ config ARM64_MODULE_PLTS config ARM64_PSEUDO_NMI bool "Support for NMI-like interrupts" - select CONFIG_ARM_GIC_V3 + select ARM_GIC_V3 help Adds support for mimicking Non-Maskable Interrupts through the use of GIC interrupt priority. This support requires version 3 or later of -- cgit v1.2.3 From 4e410ef96ce6c8d2e2b69d09ab5d44dc8d5352e4 Mon Sep 17 00:00:00 2001 From: Prabhakar Kushwaha Date: Sat, 21 Dec 2019 08:32:38 +0000 Subject: arm64: Remove __exception_text_start and __exception_text_end from asm/section.h Linux commit b6e43c0e3129 ("arm64: remove __exception annotations") has removed the __exception_text_start and __exception_text_end sections. So remove the references to __exception_text_start and __exception_text_end from asm/section.h. Cc: James Morse Reviewed-by: Anshuman Khandual Signed-off-by: Prabhakar Kushwaha Signed-off-by: Will Deacon --- arch/arm64/include/asm/sections.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h index 25a73aab438f..3994169985ef 100644 --- a/arch/arm64/include/asm/sections.h +++ b/arch/arm64/include/asm/sections.h @@ -8,7 +8,6 @@ #include extern char __alt_instructions[], __alt_instructions_end[]; -extern char __exception_text_start[], __exception_text_end[]; extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[]; extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[]; extern char __hyp_text_start[], __hyp_text_end[]; -- cgit v1.2.3 From edf90818271b86dcfcc56e0380a8f7fac6eed0cc Mon Sep 17 00:00:00 2001 From: Pan Zhang Date: Tue, 17 Dec 2019 21:34:04 +0800 Subject: mm: change_memory_common: add spaces for `*` operator Leave one space before and after the binary operator; it is more elegant. 
Signed-off-by: Pan Zhang Signed-off-by: Will Deacon --- arch/arm64/mm/pageattr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index 9ce7bd9d4d9c..250c49008d73 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c @@ -54,7 +54,7 @@ static int change_memory_common(unsigned long addr, int numpages, pgprot_t set_mask, pgprot_t clear_mask) { unsigned long start = addr; - unsigned long size = PAGE_SIZE*numpages; + unsigned long size = PAGE_SIZE * numpages; unsigned long end = start + size; struct vm_struct *area; int i; -- cgit v1.2.3 From b6a5c58240be8a9521358a4efcaadaa5e3586062 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 19 Dec 2019 12:25:32 +0000 Subject: arm64: xen: Use modern annotations for assembly functions In an effort to clarify and simplify the annotation of assembly functions in the kernel new macros have been introduced. These replace ENTRY and ENDPROC. Update the annotations in the xen code to the new macros. Signed-off-by: Mark Brown Reviewed-by: Julien Grall Reviewed-by: Stefano Stabellini Signed-off-by: Will Deacon --- arch/arm64/xen/hypercall.S | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S index c5f05c4a4d00..5b09aca55108 100644 --- a/arch/arm64/xen/hypercall.S +++ b/arch/arm64/xen/hypercall.S @@ -56,11 +56,11 @@ #define XEN_IMM 0xEA1 #define HYPERCALL_SIMPLE(hypercall) \ -ENTRY(HYPERVISOR_##hypercall) \ +SYM_FUNC_START(HYPERVISOR_##hypercall) \ mov x16, #__HYPERVISOR_##hypercall; \ hvc XEN_IMM; \ ret; \ -ENDPROC(HYPERVISOR_##hypercall) +SYM_FUNC_END(HYPERVISOR_##hypercall) #define HYPERCALL0 HYPERCALL_SIMPLE #define HYPERCALL1 HYPERCALL_SIMPLE @@ -86,7 +86,7 @@ HYPERCALL2(multicall); HYPERCALL2(vm_assist); HYPERCALL3(dm_op); -ENTRY(privcmd_call) +SYM_FUNC_START(privcmd_call) mov x16, x0 mov x0, x1 mov x1, x2 @@ -109,4 +109,4 @@ ENTRY(privcmd_call) */ uaccess_ttbr0_disable x6, x7 ret -ENDPROC(privcmd_call); +SYM_FUNC_END(privcmd_call); -- cgit v1.2.3 From 1595fe299eb5a664c754eaf48bc178c0d664e1cf Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 10 Jan 2020 16:00:50 +0000 Subject: Revert "arm64: kexec: make dtb_mem always enabled" Adding crash dump support to 'kexec_file' is going to extend 'struct kimage_arch' with more 'kexec_file'-specific members. The cleanup here then starts to get in the way, so revert it. This reverts commit 621516789ee6e285cb2088fe4706eedd030d38bf. 
Reported-by: AKASHI Takahiro Signed-off-by: Will Deacon --- arch/arm64/include/asm/kexec.h | 4 ++-- arch/arm64/kernel/machine_kexec.c | 6 +++++- 2 files changed, 7 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h index ad6afed69078..12a561a54128 100644 --- a/arch/arm64/include/asm/kexec.h +++ b/arch/arm64/include/asm/kexec.h @@ -90,14 +90,14 @@ static inline void crash_prepare_suspend(void) {} static inline void crash_post_resume(void) {} #endif +#ifdef CONFIG_KEXEC_FILE #define ARCH_HAS_KIMAGE_ARCH struct kimage_arch { void *dtb; - phys_addr_t dtb_mem; + unsigned long dtb_mem; }; -#ifdef CONFIG_KEXEC_FILE extern const struct kexec_file_ops kexec_image_ops; struct kimage; diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index ae1bad0156cd..8e9c924423b4 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -203,7 +203,11 @@ void machine_kexec(struct kimage *kimage) * In kexec_file case, the kernel starts directly without purgatory. */ cpu_soft_restart(reboot_code_buffer_phys, kimage->head, kimage->start, - kimage->arch.dtb_mem); +#ifdef CONFIG_KEXEC_FILE + kimage->arch.dtb_mem); +#else + 0); +#endif BUG(); /* Should never get here. */ } -- cgit v1.2.3
From 73d6890fe8ff40e357039b626537ac82d8782aeb Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Fri, 10 Jan 2020 15:50:04 +0000 Subject: arm64: kernel: Correct annotation of end of el0_sync Commit 582f95835a8fc812c ("arm64: entry: convert el0_sync to C") caused the ENDPROC() annotating the end of el0_sync to be placed after the code for el0_sync_compat. This replaced the previous annotation, which was located after all the cases that are now converted to C, including after the currently unannotated el0_irq_compat and el0_error_compat. Move the annotation to the end of the function and add separate annotations for the _compat ones. Fixes: 582f95835a8fc812c ("arm64: entry: convert el0_sync to C") Signed-off-by: Mark Brown Signed-off-by: Will Deacon --- arch/arm64/kernel/entry.S | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 7c6a0a41676f..d54d165b286a 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -653,6 +653,7 @@ el0_sync: mov x0, sp bl el0_sync_handler b ret_to_user +ENDPROC(el0_sync) #ifdef CONFIG_COMPAT .align 6 @@ -661,16 +662,18 @@ el0_sync_compat: mov x0, sp bl el0_sync_compat_handler b ret_to_user -ENDPROC(el0_sync) +ENDPROC(el0_sync_compat) .align 6 el0_irq_compat: kernel_entry 0, 32 b el0_irq_naked +ENDPROC(el0_irq_compat) el0_error_compat: kernel_entry 0, 32 b el0_error_naked +ENDPROC(el0_error_compat) #endif .align 6 -- cgit v1.2.3
From b51c6ac220f77eb246e940442d970b4065c197b0 Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Mon, 13 Jan 2020 23:30:17 +0000 Subject: arm64: Introduce system_capabilities_finalized() marker We finalize the system-wide capabilities after the SMP CPUs are booted by the kernel. This is used as a marker for deciding various checks in the kernel, e.g. sanity-checking the hotplugged CPUs for missing mandatory features. However, there is no explicit helper available for this in the kernel. There is sys_caps_initialised, which is not exposed. The closest one we have is the jump label arm64_const_caps_ready, which denotes that the capabilities are set and the capability checks can use the individual jump labels for the fast path.
This finalization is performed before setting the ELF Hwcaps, which must be checked against the new CPUs. We also perform some of the other initialization at this point, e.g. SVE setup, which is important for the use of FP/SIMD where SVE is supported. Normally userspace doesn't get to run before we finish this. However, in-kernel users may potentially start using NEON earlier, so we need to reject NEON use until the setup is complete. Instead of defining a new marker for the completion of SVE setup, we can simply reuse arm64_const_caps_ready and enable it once we have finished all the setup. We can also expose this to the various users as "system_capabilities_finalized()" to make it more meaningful than "const_caps_ready". Cc: Ard Biesheuvel Cc: Will Deacon Cc: Mark Rutland Reviewed-by: Ard Biesheuvel Reviewed-by: Catalin Marinas Signed-off-by: Suzuki K Poulose Signed-off-by: Will Deacon --- arch/arm64/include/asm/cpufeature.h | 5 +++++ arch/arm64/include/asm/kvm_host.h | 2 +- arch/arm64/kernel/cpufeature.c | 26 +++++++++----------------- arch/arm64/kernel/process.c | 2 +- 4 files changed, 16 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 4261d55e8506..92ef9539874a 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -613,6 +613,11 @@ static inline bool system_has_prio_mask_debugging(void) system_uses_irq_prio_masking(); } +static inline bool system_capabilities_finalized(void) +{ + return static_branch_likely(&arm64_const_caps_ready); +} + #define ARM64_BP_HARDEN_UNKNOWN -1 #define ARM64_BP_HARDEN_WA_NEEDED 0 #define ARM64_BP_HARDEN_NOT_REQUIRED 1 diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index c61260cf63c5..48ce54639eb5 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -547,7 +547,7 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr, * wrong, and hyp will crash and burn when it uses any * cpus_have_const_cap() wrapper. */ - BUG_ON(!static_branch_likely(&arm64_const_caps_ready)); + BUG_ON(!system_capabilities_finalized()); __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2); /* diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 04cf64e9f0c9..d25ad65bfac2 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -53,13 +53,14 @@ DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE); * will be used to determine if a new booting CPU should * go through the verification process to make sure that it * supports the system capabilities, without using a hotplug - * notifier. + * notifier. This is also used to decide if we could use + * the fast path for checking constant CPU caps.
*/ -static bool sys_caps_initialised; - -static inline void set_sys_caps_initialised(void) +DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready); +EXPORT_SYMBOL(arm64_const_caps_ready); +static inline void finalize_system_capabilities(void) { - sys_caps_initialised = true; + static_branch_enable(&arm64_const_caps_ready); } static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p) @@ -785,7 +786,7 @@ void update_cpu_features(int cpu, /* Probe vector lengths, unless we already gave up on SVE */ if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) && - !sys_caps_initialised) + !system_capabilities_finalized()) sve_update_vq_map(); } @@ -1974,7 +1975,7 @@ void check_local_cpu_capabilities(void) * Otherwise, this CPU should verify that it has all the system * advertised capabilities. */ - if (!sys_caps_initialised) + if (!system_capabilities_finalized()) update_cpu_capabilities(SCOPE_LOCAL_CPU); else verify_local_cpu_capabilities(); @@ -1988,14 +1989,6 @@ static void __init setup_boot_cpu_capabilities(void) enable_cpu_capabilities(SCOPE_BOOT_CPU); } -DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready); -EXPORT_SYMBOL(arm64_const_caps_ready); - -static void __init mark_const_caps_ready(void) -{ - static_branch_enable(&arm64_const_caps_ready); -} - bool this_cpu_has_cap(unsigned int n) { if (!WARN_ON(preemptible()) && n < ARM64_NCAPS) { @@ -2054,7 +2047,6 @@ void __init setup_cpu_features(void) u32 cwg; setup_system_capabilities(); - mark_const_caps_ready(); setup_elf_hwcaps(arm64_elf_hwcaps); if (system_supports_32bit_el0()) @@ -2067,7 +2059,7 @@ void __init setup_cpu_features(void) minsigstksz_setup(); /* Advertise that we have computed the system capabilities */ - set_sys_caps_initialised(); + finalize_system_capabilities(); /* * Check for sane CTR_EL0.CWG value. diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 71f788cd2b18..48a38144ea7b 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -646,6 +646,6 @@ asmlinkage void __sched arm64_preempt_schedule_irq(void) * Only allow a task to be preempted once cpufeatures have been * enabled. */ - if (static_branch_likely(&arm64_const_caps_ready)) + if (system_capabilities_finalized()) preempt_schedule_irq(); } -- cgit v1.2.3 From 0cd82feb017e282b29e725801b7c90da69316734 Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Mon, 13 Jan 2020 23:30:18 +0000 Subject: arm64: fpsimd: Make sure SVE setup is complete before SIMD is used In-kernel users of NEON rely on may_use_simd() to check if the SIMD can be used. However, we must initialize the SVE before SIMD can be used. Add a sanity check to make sure that we have completed the SVE setup before anyone uses the SIMD. Cc: Ard Biesheuvel Cc: Mark Rutland Cc: Will Deacon Reviewed-by: Ard Biesheuvel Reviewed-by: Catalin Marinas Signed-off-by: Suzuki K Poulose Signed-off-by: Will Deacon --- arch/arm64/include/asm/simd.h | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/simd.h b/arch/arm64/include/asm/simd.h index 7434844036d3..89cba2622b79 100644 --- a/arch/arm64/include/asm/simd.h +++ b/arch/arm64/include/asm/simd.h @@ -26,6 +26,8 @@ DECLARE_PER_CPU(bool, fpsimd_context_busy); static __must_check inline bool may_use_simd(void) { /* + * We must make sure that the SVE has been initialized properly + * before using the SIMD in kernel. * fpsimd_context_busy is only set while preemption is disabled, * and is clear whenever preemption is enabled. 
Since * this_cpu_read() is atomic w.r.t. preemption, fpsimd_context_busy @@ -33,8 +35,10 @@ static __must_check inline bool may_use_simd(void) * migrated, and if it's clear we cannot be migrated to a CPU * where it is set. */ - return !in_irq() && !irqs_disabled() && !in_nmi() && - !this_cpu_read(fpsimd_context_busy); + return !WARN_ON(!system_capabilities_finalized()) && + system_supports_fpsimd() && + !in_irq() && !irqs_disabled() && !in_nmi() && + !this_cpu_read(fpsimd_context_busy); } #else /* ! CONFIG_KERNEL_MODE_NEON */ -- cgit v1.2.3
From 449443c03d8cfdacf7313e17779a2594ebf87e6d Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Mon, 13 Jan 2020 23:30:19 +0000 Subject: arm64: cpufeature: Fix the type of no FP/SIMD capability The NO_FPSIMD capability is defined with scope SYSTEM, which implies that the "absence" of FP/SIMD on at least one CPU is detected only after all the SMP CPUs are brought up. However, we use the status of this capability for every context switch. So, let us change the scope to LOCAL_CPU to allow the detection of this capability as and when the first CPU without FP is brought up. Also, the current type allows a hotplugged CPU to be brought up without FP/SIMD when all the current CPUs have FP/SIMD and userspace is already up. Fix both of these issues by changing the capability type to ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE. Fixes: 82e0191a1aa11abf ("arm64: Support systems without FP/ASIMD") Cc: Will Deacon Cc: Mark Rutland Reviewed-by: Ard Biesheuvel Reviewed-by: Catalin Marinas Signed-off-by: Suzuki K Poulose Signed-off-by: Will Deacon --- arch/arm64/kernel/cpufeature.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index d25ad65bfac2..2fc9f18e2d2d 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1369,7 +1369,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { { /* FP/SIMD is not implemented */ .capability = ARM64_HAS_NO_FPSIMD, - .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE, .min_field_value = 0, .matches = has_no_fpsimd, }, -- cgit v1.2.3
From 7559950aef1ab8792c50797c6c5c7c5150a02460 Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Mon, 13 Jan 2020 23:30:20 +0000 Subject: arm64: cpufeature: Set the FP/SIMD compat HWCAP bits properly We set the compat_elf_hwcap bits unconditionally on arm64 to include VFP and NEON support. However, the FP/SIMD unit is optional on Arm v8 and thus could be missing. We already handle this properly in the kernel, but still advertise to compat applications that VFP is available. Fix this to make sure we only advertise these capabilities when we really have them.
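For reference, the effect of this change is observable from a compat (AArch32) application through the auxiliary vector. A minimal sketch of such a runtime check, assuming a 32-bit arm build with glibc; the bit values are copied from the arm32 uapi hwcap.h rather than taken from this patch:

  #include <stdio.h>
  #include <sys/auxv.h>

  #define HWCAP_VFP   (1 << 6)    /* arm32 uapi values, assumed */
  #define HWCAP_NEON  (1 << 12)

  int main(void)
  {
      unsigned long hwcap = getauxval(AT_HWCAP);

      printf("vfp:  %s\n", (hwcap & HWCAP_VFP) ? "yes" : "no");
      printf("neon: %s\n", (hwcap & HWCAP_NEON) ? "yes" : "no");
      return 0;
  }

On FP/SIMD-less systems these bits are now reported clear instead of being advertised unconditionally.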
Fixes: 82e0191a1aa11abf ("arm64: Support systems without FP/ASIMD") Cc: Will Deacon Cc: Mark Rutland Reviewed-by: Ard Biesheuvel Reviewed-by: Catalin Marinas Signed-off-by: Suzuki K Poulose Signed-off-by: Will Deacon --- arch/arm64/kernel/cpufeature.c | 37 ++++++++++++++++++++++++++++++++++--- 1 file changed, 34 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 2fc9f18e2d2d..c008164b0848 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -32,9 +32,7 @@ static unsigned long elf_hwcap __read_mostly; #define COMPAT_ELF_HWCAP_DEFAULT \ (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\ COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\ - COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\ - COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\ - COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\ + COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\ COMPAT_HWCAP_LPAE) unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT; unsigned int compat_elf_hwcap2 __read_mostly; @@ -1597,6 +1595,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .match_list = list, \ } +#define HWCAP_CAP_MATCH(match, cap_type, cap) \ + { \ + __HWCAP_CAP(#cap, cap_type, cap) \ + .matches = match, \ + } + #ifdef CONFIG_ARM64_PTR_AUTH static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = { { @@ -1670,8 +1674,35 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { {}, }; +#ifdef CONFIG_COMPAT +static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope) +{ + /* + * Check that all of MVFR1_EL1.{SIMDSP, SIMDInt, SIMDLS} are available, + * in line with that of arm32 as in vfp_init(). We make sure that the + * check is future proof, by making sure value is non-zero. + */ + u32 mvfr1; + + WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible()); + if (scope == SCOPE_SYSTEM) + mvfr1 = read_sanitised_ftr_reg(SYS_MVFR1_EL1); + else + mvfr1 = read_sysreg_s(SYS_MVFR1_EL1); + + return cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDSP_SHIFT) && + cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDINT_SHIFT) && + cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDLS_SHIFT); +} +#endif + static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = { #ifdef CONFIG_COMPAT + HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON), + HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_SIMDFMAC_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4), + /* Arm v8 mandates MVFR0.FPDP == {0, 2}. So, piggy back on this for the presence of VFP support */ + HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP), + HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3), HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL), HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES), HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1), -- cgit v1.2.3 From c9d66999f064947e6b577ceacc1eb2fbca6a8d3c Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Mon, 13 Jan 2020 23:30:21 +0000 Subject: arm64: ptrace: nofpsimd: Fail FP/SIMD regset operations When fp/simd is not supported on the system, fail the operations of FP/SIMD regsets. 
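To illustrate the tracer-side effect: after this change a PTRACE_GETREGSET request for the FP register set fails on FP/SIMD-less systems instead of returning stale data. A hedged sketch of a debugger probing the regset — hypothetical tracer code, not part of the patch; 'pid' is assumed to be an already-attached, stopped tracee:

  #include <elf.h>
  #include <sys/ptrace.h>
  #include <sys/types.h>
  #include <sys/uio.h>
  #include <asm/ptrace.h>   /* struct user_fpsimd_state on arm64 */

  static long read_fp_regs(pid_t pid, struct user_fpsimd_state *fp)
  {
      struct iovec iov = { .iov_base = fp, .iov_len = sizeof(*fp) };

      /* Gets/sets now fail with EINVAL when FP/SIMD is absent, and the
       * regset is reported inactive through the new .active callback. */
      return ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov);
  }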
Fixes: 82e0191a1aa11abf ("arm64: Support systems without FP/ASIMD") Cc: Will Deacon Cc: Mark Rutland Reviewed-by: Ard Biesheuvel Reviewed-by: Catalin Marinas Signed-off-by: Suzuki K Poulose Signed-off-by: Will Deacon --- arch/arm64/kernel/ptrace.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) (limited to 'arch') diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 6771c399d40c..cd6e5fa48b9c 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -615,6 +615,13 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset, return 0; } +static int fpr_active(struct task_struct *target, const struct user_regset *regset) +{ + if (!system_supports_fpsimd()) + return -ENODEV; + return regset->n; +} + /* * TODO: update fp accessors for lazy context switching (sync/flush hwstate) */ @@ -637,6 +644,9 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { + if (!system_supports_fpsimd()) + return -EINVAL; + if (target == current) fpsimd_preserve_current_state(); @@ -676,6 +686,9 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset, { int ret; + if (!system_supports_fpsimd()) + return -EINVAL; + ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0); if (ret) return ret; @@ -1134,6 +1147,7 @@ static const struct user_regset aarch64_regsets[] = { */ .size = sizeof(u32), .align = sizeof(u32), + .active = fpr_active, .get = fpr_get, .set = fpr_set }, @@ -1348,6 +1362,9 @@ static int compat_vfp_get(struct task_struct *target, compat_ulong_t fpscr; int ret, vregs_end_pos; + if (!system_supports_fpsimd()) + return -EINVAL; + uregs = &target->thread.uw.fpsimd_state; if (target == current) @@ -1381,6 +1398,9 @@ static int compat_vfp_set(struct task_struct *target, compat_ulong_t fpscr; int ret, vregs_end_pos; + if (!system_supports_fpsimd()) + return -EINVAL; + uregs = &target->thread.uw.fpsimd_state; vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t); @@ -1438,6 +1458,7 @@ static const struct user_regset aarch32_regsets[] = { .n = VFP_STATE_SIZE / sizeof(compat_ulong_t), .size = sizeof(compat_ulong_t), .align = sizeof(compat_ulong_t), + .active = fpr_active, .get = compat_vfp_get, .set = compat_vfp_set }, -- cgit v1.2.3 From 6d502b6ba1b267b35a70708aa0f61bbd05cf065b Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Mon, 13 Jan 2020 23:30:22 +0000 Subject: arm64: signal: nofpsimd: Handle fp/simd context for signal frames Make sure we try to save/restore the vfp/fpsimd context for signal handling only when the fp/simd support is available. Otherwise, skip the frames. 
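For context, the frames in question are the records in the __reserved area of the signal frame, found by walking struct _aarch64_ctx headers. A minimal sketch of such a walk from userspace — illustrative only; FPSIMD_MAGIC and the record layout come from the arm64 uapi sigcontext.h:

  #include <asm/sigcontext.h>
  #include <stddef.h>
  #include <ucontext.h>

  static struct fpsimd_context *find_fpsimd_record(ucontext_t *uc)
  {
      struct _aarch64_ctx *head =
          (struct _aarch64_ctx *)uc->uc_mcontext.__reserved;

      /* The record list is terminated by a zero magic/size header. */
      while (head->magic) {
          if (head->magic == FPSIMD_MAGIC)
              return (struct fpsimd_context *)head;
          head = (struct _aarch64_ctx *)((char *)head + head->size);
      }

      return NULL;  /* absent when the kernel skipped the frame */
  }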
Cc: Will Deacon Cc: Mark Rutland Reviewed-by: Ard Biesheuvel Reviewed-by: Catalin Marinas Signed-off-by: Suzuki K Poulose Signed-off-by: Will Deacon --- arch/arm64/kernel/signal.c | 6 ++++-- arch/arm64/kernel/signal32.c | 4 ++-- 2 files changed, 6 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index dd2cdc0d5be2..339882db5a91 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -371,6 +371,8 @@ static int parse_user_sigframe(struct user_ctxs *user, goto done; case FPSIMD_MAGIC: + if (!system_supports_fpsimd()) + goto invalid; if (user->fpsimd) goto invalid; @@ -506,7 +508,7 @@ static int restore_sigframe(struct pt_regs *regs, if (err == 0) err = parse_user_sigframe(&user, sf); - if (err == 0) { + if (err == 0 && system_supports_fpsimd()) { if (!user.fpsimd) return -EINVAL; @@ -623,7 +625,7 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user, err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set)); - if (err == 0) { + if (err == 0 && system_supports_fpsimd()) { struct fpsimd_context __user *fpsimd_ctx = apply_user_offset(user, user->fpsimd_offset); err |= preserve_fpsimd_context(fpsimd_ctx); diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index 12a585386c2f..82feca6f7052 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c @@ -223,7 +223,7 @@ static int compat_restore_sigframe(struct pt_regs *regs, err |= !valid_user_regs(&regs->user_regs, current); aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace; - if (err == 0) + if (err == 0 && system_supports_fpsimd()) err |= compat_restore_vfp_context(&aux->vfp); return err; @@ -419,7 +419,7 @@ static int compat_setup_sigframe(struct compat_sigframe __user *sf, aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace; - if (err == 0) + if (err == 0 && system_supports_fpsimd()) err |= compat_preserve_vfp_context(&aux->vfp); __put_user_error(0, &aux->end_magic, err); -- cgit v1.2.3
From 52f73c383b2418f2d31b798e765ae7d596c35021 Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Mon, 13 Jan 2020 23:30:23 +0000 Subject: arm64: nofpsimd: Handle TIF_FOREIGN_FPSTATE flag cleanly We detect the absence of FP/SIMD after an incapable CPU is brought up, and by then we have kernel threads running already with TIF_FOREIGN_FPSTATE set, which could also be set for early userspace applications (e.g. modprobe triggered from initramfs) and init. This could cause the applications to loop forever in do_notify_resume(), as we never clear the TIF flag once we know that we don't support FP. Fix this by making sure that we clear the TIF_FOREIGN_FPSTATE flag for tasks which may have it set, as we would have done in the normal case, but avoid touching the hardware state (since we don't support any). Also, to make sure we handle the cases seamlessly, we categorise the helper functions into two groups: 1) Helpers for common core code, which call in to take appropriate actions without knowing the current FPSIMD state of the CPU/task, e.g. fpsimd_restore_current_state(), fpsimd_flush_task_state(), fpsimd_save_and_flush_cpu_state(). We bail out early for these functions, taking any appropriate actions (e.g. clearing the TIF flag) where necessary to hide the handling from core code. 2) Helpers used when the presence of FP/SIMD is apparent, i.e. those that save/restore the FP/SIMD register state or modify the CPU/task FP/SIMD state.
e.g. fpsimd_save(), task_fpsimd_load() - save/restore task FP/SIMD registers fpsimd_bind_task_to_cpu() \ - Update the "state" metadata for CPU/task. fpsimd_bind_state_to_cpu() / fpsimd_update_current_state() - Update the fp/simd state for the current task from memory. These must not be called in the absence of FP/SIMD. Add a WARN_ON() to make sure they are not invoked in the absence of FP/SIMD. KVM also uses the TIF_FOREIGN_FPSTATE flag to manage the FP/SIMD state on the CPU. However, without FP/SIMD support we trap all accesses and inject an undefined instruction exception. Thus we should never "load" guest state. Add a sanity check to make sure this is valid. Fixes: 82e0191a1aa11abf ("arm64: Support systems without FP/ASIMD") Cc: Will Deacon Cc: Mark Rutland Reviewed-by: Ard Biesheuvel Reviewed-by: Catalin Marinas Acked-by: Marc Zyngier Signed-off-by: Suzuki K Poulose Signed-off-by: Will Deacon --- arch/arm64/kernel/fpsimd.c | 30 +++++++++++++++++++++++++++--- arch/arm64/kvm/hyp/switch.c | 10 +++++++++- 2 files changed, 36 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 3eb338f14386..94289d126993 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -269,6 +269,7 @@ static void sve_free(struct task_struct *task) */ static void task_fpsimd_load(void) { + WARN_ON(!system_supports_fpsimd()); WARN_ON(!have_cpu_fpsimd_context()); if (system_supports_sve() && test_thread_flag(TIF_SVE)) @@ -289,6 +290,7 @@ static void fpsimd_save(void) this_cpu_ptr(&fpsimd_last_state); /* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */ + WARN_ON(!system_supports_fpsimd()); WARN_ON(!have_cpu_fpsimd_context()); if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) { @@ -1092,6 +1094,7 @@ void fpsimd_bind_task_to_cpu(void) struct fpsimd_last_state_struct *last = this_cpu_ptr(&fpsimd_last_state); + WARN_ON(!system_supports_fpsimd()); last->st = &current->thread.uw.fpsimd_state; last->sve_state = current->thread.sve_state; last->sve_vl = current->thread.sve_vl; @@ -1114,6 +1117,7 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state, struct fpsimd_last_state_struct *last = this_cpu_ptr(&fpsimd_last_state); + WARN_ON(!system_supports_fpsimd()); WARN_ON(!in_softirq() && !irqs_disabled()); last->st = st; @@ -1128,8 +1132,19 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state, */ void fpsimd_restore_current_state(void) { - if (!system_supports_fpsimd()) + /* + * For the tasks that were created before we detected the absence of + * FP/SIMD, the TIF_FOREIGN_FPSTATE could be set via fpsimd_thread_switch(), + * e.g. init. This could then be inherited by the child processes. + * If we later detect that the system doesn't support FP/SIMD, + * we must clear the flag for all the tasks to indicate that the + * FPSTATE is clean (as we can't have one) to avoid looping forever in + * do_notify_resume().
+ */ + if (!system_supports_fpsimd()) { + clear_thread_flag(TIF_FOREIGN_FPSTATE); return; + } get_cpu_fpsimd_context(); @@ -1148,7 +1163,7 @@ void fpsimd_restore_current_state(void) */ void fpsimd_update_current_state(struct user_fpsimd_state const *state) { - if (!system_supports_fpsimd()) + if (WARN_ON(!system_supports_fpsimd())) return; get_cpu_fpsimd_context(); @@ -1179,7 +1194,13 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state) void fpsimd_flush_task_state(struct task_struct *t) { t->thread.fpsimd_cpu = NR_CPUS; - + /* + * If we don't support fpsimd, bail out after we have + * reset the fpsimd_cpu for this task and clear the + * FPSTATE. + */ + if (!system_supports_fpsimd()) + return; barrier(); set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE); @@ -1193,6 +1214,7 @@ void fpsimd_flush_task_state(struct task_struct *t) */ static void fpsimd_flush_cpu_state(void) { + WARN_ON(!system_supports_fpsimd()); __this_cpu_write(fpsimd_last_state.st, NULL); set_thread_flag(TIF_FOREIGN_FPSTATE); } @@ -1203,6 +1225,8 @@ static void fpsimd_flush_cpu_state(void) */ void fpsimd_save_and_flush_cpu_state(void) { + if (!system_supports_fpsimd()) + return; WARN_ON(preemptible()); __get_cpu_fpsimd_context(); fpsimd_save(); diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 72fbbd86eb5e..e5816d885761 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -28,7 +28,15 @@ /* Check whether the FP regs were dirtied while in the host-side run loop: */ static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu) { - if (vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE) + /* + * When the system doesn't support FP/SIMD, we cannot rely on + * the _TIF_FOREIGN_FPSTATE flag. However, we always inject an + * abort on the very first access to FP and thus we should never + * see KVM_ARM64_FP_ENABLED. For added safety, make sure we always + * trap the accesses. + */ + if (!system_supports_fpsimd() || + vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE) vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED | KVM_ARM64_FP_HOST); -- cgit v1.2.3 From d4209d8b717311d114b5d47ba7f8249fd44e97c2 Mon Sep 17 00:00:00 2001 From: Steven Price Date: Mon, 16 Dec 2019 11:33:37 +0000 Subject: arm64: cpufeature: Export matrix and other features to userspace Export the features introduced as part of ARMv8.6 exposed in the ID_AA64ISAR1_EL1 and ID_AA64ZFR0_EL1 registers. This introduces the Matrix features (ARMv8.2-I8MM, ARMv8.2-F64MM and ARMv8.2-F32MM) along with BFloat16 (Armv8.2-BF16), speculation invalidation (SPECRES) and Data Gathering Hint (ARMv8.0-DGH). 
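Userspace discovers these flags through the HWCAP2 auxiliary vector entry. A minimal sketch of a runtime check — a hypothetical example, not part of the patch; the constants mirror the uapi additions below:

  #include <stdio.h>
  #include <sys/auxv.h>

  #define HWCAP2_I8MM  (1 << 13)
  #define HWCAP2_BF16  (1 << 14)

  int main(void)
  {
      unsigned long hwcap2 = getauxval(AT_HWCAP2);

      if (hwcap2 & HWCAP2_I8MM)
          printf("int8 matrix multiply instructions available\n");
      if (hwcap2 & HWCAP2_BF16)
          printf("bfloat16 instructions available\n");
      return 0;
  }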
Signed-off-by: Julien Grall [Added other features in those registers] Signed-off-by: Steven Price [will: Don't advertise SPECRES to userspace] Signed-off-by: Will Deacon --- arch/arm64/include/asm/hwcap.h | 7 +++++++ arch/arm64/include/asm/sysreg.h | 12 ++++++++++++ arch/arm64/include/uapi/asm/hwcap.h | 7 +++++++ arch/arm64/kernel/cpufeature.c | 19 +++++++++++++++++++ arch/arm64/kernel/cpuinfo.c | 7 +++++++ 5 files changed, 52 insertions(+) (limited to 'arch') diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h index 3d2f2472a36c..fcb390ea29ea 100644 --- a/arch/arm64/include/asm/hwcap.h +++ b/arch/arm64/include/asm/hwcap.h @@ -86,6 +86,13 @@ #define KERNEL_HWCAP_SVESM4 __khwcap2_feature(SVESM4) #define KERNEL_HWCAP_FLAGM2 __khwcap2_feature(FLAGM2) #define KERNEL_HWCAP_FRINT __khwcap2_feature(FRINT) +#define KERNEL_HWCAP_SVEI8MM __khwcap2_feature(SVEI8MM) +#define KERNEL_HWCAP_SVEF32MM __khwcap2_feature(SVEF32MM) +#define KERNEL_HWCAP_SVEF64MM __khwcap2_feature(SVEF64MM) +#define KERNEL_HWCAP_SVEBF16 __khwcap2_feature(SVEBF16) +#define KERNEL_HWCAP_I8MM __khwcap2_feature(I8MM) +#define KERNEL_HWCAP_DGH __khwcap2_feature(DGH) +#define KERNEL_HWCAP_BF16 __khwcap2_feature(BF16) /* * This yields a mask that user programs can use to figure out what diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 6e919fafb43d..f56c4a02a127 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -553,6 +553,10 @@ #define ID_AA64ISAR0_AES_SHIFT 4 /* id_aa64isar1 */ +#define ID_AA64ISAR1_I8MM_SHIFT 52 +#define ID_AA64ISAR1_DGH_SHIFT 48 +#define ID_AA64ISAR1_BF16_SHIFT 44 +#define ID_AA64ISAR1_SPECRES_SHIFT 40 #define ID_AA64ISAR1_SB_SHIFT 36 #define ID_AA64ISAR1_FRINTTS_SHIFT 32 #define ID_AA64ISAR1_GPI_SHIFT 28 @@ -605,12 +609,20 @@ #define ID_AA64PFR1_SSBS_PSTATE_INSNS 2 /* id_aa64zfr0 */ +#define ID_AA64ZFR0_F64MM_SHIFT 56 +#define ID_AA64ZFR0_F32MM_SHIFT 52 +#define ID_AA64ZFR0_I8MM_SHIFT 44 #define ID_AA64ZFR0_SM4_SHIFT 40 #define ID_AA64ZFR0_SHA3_SHIFT 32 +#define ID_AA64ZFR0_BF16_SHIFT 20 #define ID_AA64ZFR0_BITPERM_SHIFT 16 #define ID_AA64ZFR0_AES_SHIFT 4 #define ID_AA64ZFR0_SVEVER_SHIFT 0 +#define ID_AA64ZFR0_F64MM 0x1 +#define ID_AA64ZFR0_F32MM 0x1 +#define ID_AA64ZFR0_I8MM 0x1 +#define ID_AA64ZFR0_BF16 0x1 #define ID_AA64ZFR0_SM4 0x1 #define ID_AA64ZFR0_SHA3 0x1 #define ID_AA64ZFR0_BITPERM 0x1 diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h index a1e72886b30c..e6dad5924703 100644 --- a/arch/arm64/include/uapi/asm/hwcap.h +++ b/arch/arm64/include/uapi/asm/hwcap.h @@ -65,5 +65,12 @@ #define HWCAP2_SVESM4 (1 << 6) #define HWCAP2_FLAGM2 (1 << 7) #define HWCAP2_FRINT (1 << 8) +#define HWCAP2_SVEI8MM (1 << 9) +#define HWCAP2_SVEF32MM (1 << 10) +#define HWCAP2_SVEF64MM (1 << 11) +#define HWCAP2_SVEBF16 (1 << 12) +#define HWCAP2_I8MM (1 << 13) +#define HWCAP2_BF16 (1 << 14) +#define HWCAP2_DGH (1 << 15) #endif /* _UAPI__ASM_HWCAP_H */ diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 04cf64e9f0c9..6e63cad1eda4 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -135,6 +135,10 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = { }; static const struct arm64_ftr_bits ftr_id_aa64isar1[] = { + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_I8MM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DGH_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, 
FTR_LOWER_SAFE, ID_AA64ISAR1_BF16_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SPECRES_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FRINTTS_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH), @@ -176,10 +180,18 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = { }; static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = { + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_F64MM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_F32MM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_I8MM_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BF16_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), @@ -1651,6 +1663,9 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ILRCPC), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FRINTTS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FRINT), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SB), + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_BF16_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_BF16), + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DGH_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DGH), + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_I8MM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_I8MM), HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT), #ifdef CONFIG_ARM64_SVE HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE), @@ -1658,8 +1673,12 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES, CAP_HWCAP, KERNEL_HWCAP_SVEAES), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES_PMULL, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_BITPERM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_BITPERM, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM), + HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_BF16_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_BF16, CAP_HWCAP, KERNEL_HWCAP_SVEBF16), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SHA3_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SHA3, CAP_HWCAP, KERNEL_HWCAP_SVESHA3), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SM4_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SM4, CAP_HWCAP, KERNEL_HWCAP_SVESM4), + HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_I8MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_I8MM, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM), + HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_F32MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_F32MM, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM), + HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_F64MM_SHIFT, FTR_UNSIGNED, 
ID_AA64ZFR0_F64MM, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM), #endif HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, KERNEL_HWCAP_SSBS), #ifdef CONFIG_ARM64_PTR_AUTH diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 56bba746da1c..63f5dec1c730 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -84,6 +84,13 @@ static const char *const hwcap_str[] = { "svesm4", "flagm2", "frint", + "svei8mm", + "svef32mm", + "svef64mm", + "svebf16", + "i8mm", + "bf16", + "dgh", NULL }; -- cgit v1.2.3 From 8e3747beff8c4533bf4f1a61b53e061266ef57db Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Tue, 17 Dec 2019 20:17:32 +0530 Subject: arm64: Introduce ID_ISAR6 CPU register This adds basic building blocks required for ID_ISAR6 CPU register which identifies support for various instruction implementation on AArch32 state. Cc: Catalin Marinas Cc: Will Deacon Cc: Marc Zyngier Cc: James Morse Cc: Suzuki K Poulose Cc: Mark Rutland Cc: linux-kernel@vger.kernel.org Cc: kvmarm@lists.cs.columbia.edu Acked-by: Marc Zyngier Signed-off-by: Anshuman Khandual [will: Ensure SPECRES is treated the same as on A64] Signed-off-by: Will Deacon --- arch/arm64/include/asm/cpu.h | 1 + arch/arm64/include/asm/sysreg.h | 9 +++++++++ arch/arm64/kernel/cpufeature.c | 16 ++++++++++++++++ arch/arm64/kernel/cpuinfo.c | 1 + arch/arm64/kvm/sys_regs.c | 2 +- 5 files changed, 28 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h index d72d995b7e25..b4a40535a3d8 100644 --- a/arch/arm64/include/asm/cpu.h +++ b/arch/arm64/include/asm/cpu.h @@ -39,6 +39,7 @@ struct cpuinfo_arm64 { u32 reg_id_isar3; u32 reg_id_isar4; u32 reg_id_isar5; + u32 reg_id_isar6; u32 reg_id_mmfr0; u32 reg_id_mmfr1; u32 reg_id_mmfr2; diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index f56c4a02a127..6ad2db18d110 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -146,6 +146,7 @@ #define SYS_ID_ISAR4_EL1 sys_reg(3, 0, 0, 2, 4) #define SYS_ID_ISAR5_EL1 sys_reg(3, 0, 0, 2, 5) #define SYS_ID_MMFR4_EL1 sys_reg(3, 0, 0, 2, 6) +#define SYS_ID_ISAR6_EL1 sys_reg(3, 0, 0, 2, 7) #define SYS_MVFR0_EL1 sys_reg(3, 0, 0, 3, 0) #define SYS_MVFR1_EL1 sys_reg(3, 0, 0, 3, 1) @@ -691,6 +692,14 @@ #define ID_ISAR5_AES_SHIFT 4 #define ID_ISAR5_SEVL_SHIFT 0 +#define ID_ISAR6_I8MM_SHIFT 24 +#define ID_ISAR6_BF16_SHIFT 20 +#define ID_ISAR6_SPECRES_SHIFT 16 +#define ID_ISAR6_SB_SHIFT 12 +#define ID_ISAR6_FHM_SHIFT 8 +#define ID_ISAR6_DP_SHIFT 4 +#define ID_ISAR6_JSCVT_SHIFT 0 + #define MVFR0_FPROUND_SHIFT 28 #define MVFR0_FPSHVEC_SHIFT 24 #define MVFR0_FPSQRT_SHIFT 20 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 6e63cad1eda4..4e7295813ab4 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -325,6 +325,17 @@ static const struct arm64_ftr_bits ftr_id_mmfr4[] = { ARM64_FTR_END, }; +static const struct arm64_ftr_bits ftr_id_isar6[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_I8MM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_BF16_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_SPECRES_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_SB_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_FHM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, 
FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_DP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_JSCVT_SHIFT, 4, 0), + ARM64_FTR_END, +}; + static const struct arm64_ftr_bits ftr_id_pfr0[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0), /* State3 */ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0), /* State2 */ @@ -408,6 +419,7 @@ static const struct __ftr_reg_entry { ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits), ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5), ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4), + ARM64_FTR_REG(SYS_ID_ISAR6_EL1, ftr_id_isar6), /* Op1 = 0, CRn = 0, CRm = 3 */ ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits), @@ -612,6 +624,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info) init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3); init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4); init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5); + init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6); init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0); init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1); init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2); @@ -765,6 +778,8 @@ void update_cpu_features(int cpu, info->reg_id_isar4, boot->reg_id_isar4); taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu, info->reg_id_isar5, boot->reg_id_isar5); + taint |= check_update_ftr_reg(SYS_ID_ISAR6_EL1, cpu, + info->reg_id_isar6, boot->reg_id_isar6); /* * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and @@ -843,6 +858,7 @@ static u64 __read_sysreg_by_encoding(u32 sys_id) read_sysreg_case(SYS_ID_ISAR3_EL1); read_sysreg_case(SYS_ID_ISAR4_EL1); read_sysreg_case(SYS_ID_ISAR5_EL1); + read_sysreg_case(SYS_ID_ISAR6_EL1); read_sysreg_case(SYS_MVFR0_EL1); read_sysreg_case(SYS_MVFR1_EL1); read_sysreg_case(SYS_MVFR2_EL1); diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 63f5dec1c730..9013b224591a 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -367,6 +367,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1); info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1); info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1); + info->reg_id_isar6 = read_cpuid(ID_ISAR6_EL1); info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1); info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1); info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1); diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 9f2165937f7d..3e909b117f0c 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1424,7 +1424,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { ID_SANITISED(ID_ISAR4_EL1), ID_SANITISED(ID_ISAR5_EL1), ID_SANITISED(ID_MMFR4_EL1), - ID_UNALLOCATED(2,7), + ID_SANITISED(ID_ISAR6_EL1), /* CRm=3 */ ID_SANITISED(MVFR0_EL1), -- cgit v1.2.3 From 395af861377d14616c221831430f58e5786b92f1 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Wed, 15 Jan 2020 11:30:08 +0000 Subject: arm64: Move the LSE gas support detection to Kconfig As the Kconfig syntax gained support for $(as-instr) tests, move the LSE gas support detection from Makefile to the main arm64 Kconfig and remove the additional CONFIG_AS_LSE definition and check. 
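For readers unfamiliar with the mechanism: $(as-instr ...) is a Kconfig preprocessor macro from scripts/Kconfig.include that evaluates to true when the assembler accepts the given instruction, so toolchain support can be tracked as an ordinary Kconfig symbol. A minimal sketch of the pattern, with a hypothetical symbol name for illustration:

  # The symbol is set only when the assembler understands the LSE
  # extension; C and asm code can then test the CONFIG_ define directly
  # instead of relying on a Makefile-injected -D flag.
  config AS_HAS_LSE
          def_bool $(as-instr,.arch_extension lse)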
Cc: Will Deacon Reviewed-by: Vladimir Murzin Tested-by: Vladimir Murzin Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 5 +++++ arch/arm64/Makefile | 11 ++++------- arch/arm64/include/asm/atomic_ll_sc.h | 2 +- arch/arm64/include/asm/lse.h | 6 +++--- arch/arm64/kernel/cpufeature.c | 4 ++-- 5 files changed, 15 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index b1b4476ddb83..cf3b6d2a67cf 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1363,6 +1363,11 @@ config ARM64_PAN instruction if the cpu does not implement the feature. config ARM64_LSE_ATOMICS + bool + default ARM64_USE_LSE_ATOMICS + depends on $(as-instr,.arch_extension lse) + +config ARM64_USE_LSE_ATOMICS bool "Atomic instructions" depends on JUMP_LABEL default y diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 1fbe24d4fdb6..6dd8ecacc428 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -30,11 +30,8 @@ LDFLAGS_vmlinux += --fix-cortex-a53-843419 endif endif -# Check for binutils support for specific extensions -lseinstr := $(call as-instr,.arch_extension lse,-DCONFIG_AS_LSE=1) - -ifeq ($(CONFIG_ARM64_LSE_ATOMICS), y) - ifeq ($(lseinstr),) +ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS), y) + ifneq ($(CONFIG_ARM64_LSE_ATOMICS), y) $(warning LSE atomics not supported by binutils) endif endif @@ -53,11 +50,11 @@ $(warning Detected assembler with broken .inst; disassembly will be unreliable) endif endif -KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr) $(brokengasinst) \ +KBUILD_CFLAGS += -mgeneral-regs-only $(brokengasinst) \ $(compat_vdso) $(cc_has_k_constraint) KBUILD_CFLAGS += -fno-asynchronous-unwind-tables KBUILD_CFLAGS += $(call cc-disable-warning, psabi) -KBUILD_AFLAGS += $(lseinstr) $(brokengasinst) $(compat_vdso) +KBUILD_AFLAGS += $(brokengasinst) $(compat_vdso) KBUILD_CFLAGS += $(call cc-option,-mabi=lp64) KBUILD_AFLAGS += $(call cc-option,-mabi=lp64) diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h index 7b012148bfd6..13869b76b58c 100644 --- a/arch/arm64/include/asm/atomic_ll_sc.h +++ b/arch/arm64/include/asm/atomic_ll_sc.h @@ -12,7 +12,7 @@ #include -#if IS_ENABLED(CONFIG_ARM64_LSE_ATOMICS) && IS_ENABLED(CONFIG_AS_LSE) +#ifdef CONFIG_ARM64_LSE_ATOMICS #define __LL_SC_FALLBACK(asm_ops) \ " b 3f\n" \ " .subsection 1\n" \ diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h index 80b388278149..4e1009fff686 100644 --- a/arch/arm64/include/asm/lse.h +++ b/arch/arm64/include/asm/lse.h @@ -4,7 +4,7 @@ #include -#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS) +#ifdef CONFIG_ARM64_LSE_ATOMICS #include #include @@ -36,7 +36,7 @@ static inline bool system_uses_lse_atomics(void) #define ARM64_LSE_ATOMIC_INSN(llsc, lse) \ ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS) -#else /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */ +#else /* CONFIG_ARM64_LSE_ATOMICS */ static inline bool system_uses_lse_atomics(void) { return false; } @@ -44,5 +44,5 @@ static inline bool system_uses_lse_atomics(void) { return false; } #define ARM64_LSE_ATOMIC_INSN(llsc, lse) llsc -#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */ +#endif /* CONFIG_ARM64_LSE_ATOMICS */ #endif /* __ASM_LSE_H */ diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 04cf64e9f0c9..2595c2886d3f 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1291,7 +1291,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { 
.cpu_enable = cpu_enable_pan, }, #endif /* CONFIG_ARM64_PAN */ -#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS) +#ifdef CONFIG_ARM64_LSE_ATOMICS { .desc = "LSE atomic instructions", .capability = ARM64_HAS_LSE_ATOMICS, @@ -1302,7 +1302,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .sign = FTR_UNSIGNED, .min_field_value = 2, }, -#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */ +#endif /* CONFIG_ARM64_LSE_ATOMICS */ { .desc = "Software prefetching using PRFM", .capability = ARM64_HAS_NO_HW_PREFETCH, -- cgit v1.2.3
From 3e6c69a058deaa50d33c3dac36cde80b4ce590e8 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 9 Dec 2019 18:12:14 +0000 Subject: arm64: Add initial support for E0PD Kernel Page Table Isolation (KPTI) is used to mitigate some speculation-based security issues by ensuring that the kernel is not mapped when userspace is running, but this approach is expensive and is incompatible with SPE. E0PD, introduced in the ARMv8.5 extensions, provides an alternative which ensures that accesses from userspace to the kernel's half of the memory map always fault in constant time, preventing timing attacks without requiring constant unmapping and remapping or preventing legitimate accesses. Currently this feature will only be enabled if all CPUs in the system support E0PD. If some CPUs do not support the feature at boot time then the feature will not be enabled, and in the unlikely event that a late CPU is the first CPU to lack the feature then that CPU will be rejected. This initial patch does not yet integrate with KPTI; this will be dealt with in follow-up patches. Ideally we could ensure that by default we don't use KPTI on CPUs where E0PD is present. Signed-off-by: Mark Brown Reviewed-by: Suzuki K Poulose [will: Fixed typo in Kconfig text] Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 16 ++++++++++++++++ arch/arm64/include/asm/cpucaps.h | 3 ++- arch/arm64/include/asm/pgtable-hwdef.h | 2 ++ arch/arm64/include/asm/sysreg.h | 1 + arch/arm64/kernel/cpufeature.c | 22 ++++++++++++++++++++++ 5 files changed, 43 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index b1b4476ddb83..9cee2008ea9e 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1484,6 +1484,22 @@ config ARM64_PTR_AUTH endmenu +menu "ARMv8.5 architectural features" + +config ARM64_E0PD + bool "Enable support for E0PD" + default y + help + E0PD (part of the ARMv8.5 extensions) allows us to ensure + that EL0 accesses made via TTBR1 always fault in constant time, + providing similar benefits to KASLR as those provided by KPTI, but + with lower overhead and without disrupting legitimate access to + kernel memory such as SPE. + + This option enables E0PD for TTBR1 where available.
+ +endmenu + config ARM64_SVE bool "ARM Scalable Vector Extension support" default y diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index b92683871119..33ff25c1ab1b 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -56,7 +56,8 @@ #define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM 46 #define ARM64_WORKAROUND_1542419 47 #define ARM64_WORKAROUND_1319367 48 +#define ARM64_HAS_E0PD 49 -#define ARM64_NCAPS 49 +#define ARM64_NCAPS 50 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index d9fbd433cc17..378566f4882e 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -292,6 +292,8 @@ #define TCR_HD (UL(1) << 40) #define TCR_NFD0 (UL(1) << 53) #define TCR_NFD1 (UL(1) << 54) +#define TCR_E0PD0 (UL(1) << 55) +#define TCR_E0PD1 (UL(1) << 56) /* * TTBR. diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 6e919fafb43d..b085258cfe4e 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -655,6 +655,7 @@ #define ID_AA64MMFR1_VMIDBITS_16 2 /* id_aa64mmfr2 */ +#define ID_AA64MMFR2_E0PD_SHIFT 60 #define ID_AA64MMFR2_FWB_SHIFT 40 #define ID_AA64MMFR2_AT_SHIFT 32 #define ID_AA64MMFR2_LVA_SHIFT 16 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 04cf64e9f0c9..9d578e720168 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -225,6 +225,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = { }; static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_E0PD_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0), @@ -1251,6 +1252,14 @@ static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap) } #endif /* CONFIG_ARM64_PTR_AUTH */ +#ifdef CONFIG_ARM64_E0PD +static void cpu_enable_e0pd(struct arm64_cpu_capabilities const *cap) +{ + if (this_cpu_has_cap(ARM64_HAS_E0PD)) + sysreg_clear_set(tcr_el1, 0, TCR_E0PD1); +} +#endif /* CONFIG_ARM64_E0PD */ + #ifdef CONFIG_ARM64_PSEUDO_NMI static bool enable_pseudo_nmi; @@ -1566,6 +1575,19 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .sign = FTR_UNSIGNED, .min_field_value = 1, }, +#endif +#ifdef CONFIG_ARM64_E0PD + { + .desc = "E0PD", + .capability = ARM64_HAS_E0PD, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .sys_reg = SYS_ID_AA64MMFR2_EL1, + .sign = FTR_UNSIGNED, + .field_pos = ID_AA64MMFR2_E0PD_SHIFT, + .matches = has_cpuid_feature, + .min_field_value = 1, + .cpu_enable = cpu_enable_e0pd, + }, #endif {}, }; -- cgit v1.2.3 From c2d92353b28f8542c6b3150900b38c94b1d61480 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 9 Dec 2019 18:12:15 +0000 Subject: arm64: Factor out checks for KASLR in KPTI code into separate function In preparation for integrating E0PD support with KASLR factor out the checks for interaction between KASLR and KPTI done in boot context into a new function kaslr_requires_kpti(), in the process clarifying the distinction between what we do in boot context and what we do at runtime. 
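The boot-context check has to read the ID register directly because the sanitised cpufeature state is not available that early. A hedged sketch of the field-extraction idiom the new function relies on — illustrative only, using names from the arm64 sysreg headers; the real check appears in the diffs that follow:

  /* Each ID_AA64MMFR2_EL1 field is 4 bits wide, so a non-zero E0PD
   * field on the local CPU means E0PD is implemented there. */
  static bool this_cpu_has_e0pd(void)
  {
      u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);

      return (mmfr2 >> ID_AA64MMFR2_E0PD_SHIFT) & 0xf;
  }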
Signed-off-by: Mark Brown Reviewed-by: Suzuki K Poulose Signed-off-by: Will Deacon --- arch/arm64/include/asm/mmu.h | 62 ++++++++++++++++++++++++++++++------------ arch/arm64/kernel/cpufeature.c | 2 +- 2 files changed, 46 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index f217e3292919..2a93d34cc0ca 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -35,10 +35,46 @@ static inline bool arm64_kernel_unmapped_at_el0(void) cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0); } -static inline bool arm64_kernel_use_ng_mappings(void) +/* + * This check is triggered during early boot, before the cpufeature + * framework is initialised. Checking the status on the local CPU allows + * the boot CPU to detect the need for non-global mappings and thus avoid + * a pagetable re-write after all the CPUs are booted. This check will + * anyway be run on individual CPUs, allowing us to get a consistent + * state once the SMP CPUs are up and thus make the switch to non-global + * mappings if required. + */ +static inline bool kaslr_requires_kpti(void) { bool tx1_bug; + if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE)) + return false; + + /* + * Systems affected by Cavium erratum 27456 are incompatible + * with KPTI. + */ + if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) { + tx1_bug = false; +#ifndef MODULE + } else if (!static_branch_likely(&arm64_const_caps_ready)) { + extern const struct midr_range cavium_erratum_27456_cpus[]; + + tx1_bug = is_midr_in_range_list(read_cpuid_id(), + cavium_erratum_27456_cpus); +#endif + } else { + tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456); + } + if (tx1_bug) + return false; + + return kaslr_offset() > 0; +} + +static inline bool arm64_kernel_use_ng_mappings(void) +{ /* What's a kpti? Use global mappings if we don't know. */ if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) return false; @@ -52,29 +88,21 @@ static inline bool arm64_kernel_use_ng_mappings(void) if (arm64_kernel_unmapped_at_el0()) return true; - if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE)) + /* + * Once we are far enough into boot for capabilities to be + * ready we will have confirmed if we are using non-global + * mappings so don't need to consider anything else here. + */ + if (static_branch_likely(&arm64_const_caps_ready)) return false; /* * KASLR is enabled so we're going to be enabling kpti on non-broken * CPUs regardless of their susceptibility to Meltdown. Rather * than force everybody to go through the G -> nG dance later on, - * just put down non-global mappings from the beginning.
+ * just put down non-global mappings from the beginning */ - if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) { - tx1_bug = false; -#ifndef MODULE - } else if (!static_branch_likely(&arm64_const_caps_ready)) { - extern const struct midr_range cavium_erratum_27456_cpus[]; - - tx1_bug = is_midr_in_range_list(read_cpuid_id(), - cavium_erratum_27456_cpus); -#endif - } else { - tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456); - } - - return !tx1_bug && kaslr_offset() > 0; + return kaslr_requires_kpti(); } typedef void (*bp_hardening_cb_t)(void); diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 9d578e720168..88aa5ab02926 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1009,7 +1009,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, } /* Useful for KASLR robustness */ - if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) { + if (kaslr_requires_kpti()) { if (!__kpti_forced) { str = "KASLR"; __kpti_forced = 1; -- cgit v1.2.3
From 92ac6fd162b42628ebe50cc2f08d6a77759e7911 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 9 Dec 2019 18:12:16 +0000 Subject: arm64: Don't use KPTI where we have E0PD Since E0PD is intended to fulfil the same role as KPTI, we don't need to use KPTI on CPUs where E0PD is available; we can rely on E0PD instead. Change the check that forces KPTI on when KASLR is enabled to check for E0PD before doing so; CPUs with E0PD are not expected to be affected by Meltdown, so they should not need to enable KPTI for other reasons. Since E0PD is a system capability we will still enable KPTI if any of the CPUs in the system lacks E0PD; this will rewrite any global mappings that were established in systems where some but not all CPUs support E0PD. We may transiently have a mix of global and non-global mappings while booting, since we use the local CPU when deciding if KPTI will be required prior to completing CPU enumeration, but any global mappings will be converted to non-global ones when KPTI is applied. KPTI can still be forced on from the command line if required. Signed-off-by: Mark Brown Reviewed-by: Suzuki K Poulose Signed-off-by: Will Deacon --- arch/arm64/include/asm/mmu.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'arch') diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index 2a93d34cc0ca..1eec3971f0a9 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -47,10 +47,21 @@ static inline bool arm64_kernel_unmapped_at_el0(void) static inline bool kaslr_requires_kpti(void) { bool tx1_bug; + u64 ftr; if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE)) return false; + /* + * E0PD does a similar job to KPTI so can be used instead + * where available. + */ + if (IS_ENABLED(CONFIG_ARM64_E0PD)) { + ftr = read_sysreg_s(SYS_ID_AA64MMFR2_EL1); + if ((ftr >> ID_AA64MMFR2_E0PD_SHIFT) & 0xf) + return false; + } + /* * Systems affected by Cavium erratum 27456 are incompatible * with KPTI. -- cgit v1.2.3
From 09e3c22a86f6889db0e93fb29d9255081a126f64 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 9 Dec 2019 18:12:17 +0000 Subject: arm64: Use a variable to store non-global mappings decision Refactor the code which checks to see if we need to use non-global mappings to use a variable instead of checking with the CPU capabilities each time, doing the initial check for KPTI early in boot, before we start allocating memory, so we still avoid transitioning to non-global mappings in common cases.
Since this variable always matches our decision about non-global mappings, we can also combine arm64_kernel_use_ng_mappings() and arm64_kernel_unmapped_at_el0() into a single function: the variable simply stores the result, and the decision code lives elsewhere. We could just have the users check the variable directly, but having a function makes it clear that these uses are read-only. The result is that we simplify the code a bit and reduce the amount of code executed at runtime. Signed-off-by: Mark Brown Reviewed-by: Suzuki K Poulose Signed-off-by: Will Deacon --- arch/arm64/include/asm/mmu.h | 87 ++--------------------------------- arch/arm64/include/asm/pgtable-prot.h | 4 +- arch/arm64/kernel/cpufeature.c | 55 ++++++++++++++++++++-- arch/arm64/kernel/setup.c | 7 +++ 4 files changed, 65 insertions(+), 88 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index 1eec3971f0a9..e4d862420bb4 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -29,91 +29,11 @@ typedef struct { */ #define ASID(mm) ((mm)->context.id.counter & 0xffff) -static inline bool arm64_kernel_unmapped_at_el0(void) -{ - return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) && - cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0); -} +extern bool arm64_use_ng_mappings; -/* - * This check is triggered during the early boot before the cpufeature - * is initialised. Checking the status on the local CPU allows the boot - * CPU to detect the need for non-global mappings and thus avoiding a - * pagetable re-write after all the CPUs are booted. This check will be - * anyway run on individual CPUs, allowing us to get the consistent - * state once the SMP CPUs are up and thus make the switch to non-global - * mappings if required. - */ -static inline bool kaslr_requires_kpti(void) -{ - bool tx1_bug; - u64 ftr; - - if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE)) - return false; - - /* - * E0PD does a similar job to KPTI so can be used instead - * where available. - */ - if (IS_ENABLED(CONFIG_ARM64_E0PD)) { - ftr = read_sysreg_s(SYS_ID_AA64MMFR2_EL1); - if ((ftr >> ID_AA64MMFR2_E0PD_SHIFT) & 0xf) - return false; - } - - /* - * Systems affected by Cavium erratum 24756 are incompatible - * with KPTI. - */ - if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) { - tx1_bug = false; -#ifndef MODULE - } else if (!static_branch_likely(&arm64_const_caps_ready)) { - extern const struct midr_range cavium_erratum_27456_cpus[]; - - tx1_bug = is_midr_in_range_list(read_cpuid_id(), - cavium_erratum_27456_cpus); -#endif - } else { - tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456); - } - if (tx1_bug) - return false; - - return kaslr_offset() > 0; -} - -static inline bool arm64_kernel_use_ng_mappings(void) +static inline bool arm64_kernel_unmapped_at_el0(void) { - /* What's a kpti? Use global mappings if we don't know. */ - if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) - return false; - - /* - * Note: this function is called before the CPU capabilities have - * been configured, so our early mappings will be global. If we - * later determine that kpti is required, then - * kpti_install_ng_mappings() will make them non-global. - */ - if (arm64_kernel_unmapped_at_el0()) - return true; - - /* - * Once we are far enough into boot for capabilities to be - * ready we will have confirmed if we are using non-global - * mappings so don't need to consider anything else here. 
- */ - if (static_branch_likely(&arm64_const_caps_ready)) - return false; - - /* - * KASLR is enabled so we're going to be enabling kpti on non-broken - * CPUs regardless of their susceptibility to Meltdown. Rather - * than force everybody to go through the G -> nG dance later on, - * just put down non-global mappings from the beginning - */ - return kaslr_requires_kpti(); + return arm64_use_ng_mappings; } typedef void (*bp_hardening_cb_t)(void); @@ -167,6 +87,7 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, pgprot_t prot, bool page_mappings_only); extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot); extern void mark_linear_text_alias_ro(void); +extern bool kaslr_requires_kpti(void); #define INIT_MM_CONTEXT(name) \ .pgd = init_pg_dir, diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 8dc6c5cdabe6..0a1fd95a8972 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -26,8 +26,8 @@ #define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) #define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) -#define PTE_MAYBE_NG (arm64_kernel_use_ng_mappings() ? PTE_NG : 0) -#define PMD_MAYBE_NG (arm64_kernel_use_ng_mappings() ? PMD_SECT_NG : 0) +#define PTE_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0) +#define PMD_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0) #define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG) #define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 88aa5ab02926..4a031111ceb5 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -47,6 +47,9 @@ static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM6 /* Need also bit for ARM64_CB_PATCH */ DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE); +bool arm64_use_ng_mappings = false; +EXPORT_SYMBOL(arm64_use_ng_mappings); + /* * Flag to indicate if we have computed the system wide * capabilities based on the boot time active CPUs. This @@ -966,6 +969,53 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope) return has_cpuid_feature(entry, scope); } +/* + * This check is triggered during the early boot before the cpufeature + * is initialised. Checking the status on the local CPU allows the boot + * CPU to detect the need for non-global mappings and thus avoiding a + * pagetable re-write after all the CPUs are booted. This check will be + * anyway run on individual CPUs, allowing us to get the consistent + * state once the SMP CPUs are up and thus make the switch to non-global + * mappings if required. + */ +bool kaslr_requires_kpti(void) +{ + bool tx1_bug; + u64 ftr; + + if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE)) + return false; + + /* + * E0PD does a similar job to KPTI so can be used instead + * where available. + */ + if (IS_ENABLED(CONFIG_ARM64_E0PD)) { + ftr = read_sysreg_s(SYS_ID_AA64MMFR2_EL1); + if ((ftr >> ID_AA64MMFR2_E0PD_SHIFT) & 0xf) + return false; + } + + /* + * Systems affected by Cavium erratum 24756 are incompatible + * with KPTI. 
+ */ + if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) { + tx1_bug = false; + } else if (!static_branch_likely(&arm64_const_caps_ready)) { + extern const struct midr_range cavium_erratum_27456_cpus[]; + + tx1_bug = is_midr_in_range_list(read_cpuid_id(), + cavium_erratum_27456_cpus); + } else { + tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456); + } + if (tx1_bug) + return false; + + return kaslr_offset() > 0; +} + static bool __meltdown_safe = true; static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */ @@ -1044,7 +1094,6 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused) extern kpti_remap_fn idmap_kpti_install_ng_mappings; kpti_remap_fn *remap_fn; - static bool kpti_applied = false; int cpu = smp_processor_id(); /* @@ -1052,7 +1101,7 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused) * it already or we have KASLR enabled and therefore have not * created any global mappings at all. */ - if (kpti_applied || kaslr_offset() > 0) + if (arm64_use_ng_mappings) return; remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings); @@ -1062,7 +1111,7 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused) cpu_uninstall_idmap(); if (!cpu) - kpti_applied = true; + arm64_use_ng_mappings = true; return; } diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 56f664561754..b6f9455d7ca3 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -285,6 +285,13 @@ void __init setup_arch(char **cmdline_p) *cmdline_p = boot_command_line; + /* + * If know now we are going to need KPTI then use non-global + * mappings from the start, avoiding the cost of rewriting + * everything later. + */ + arm64_use_ng_mappings = kaslr_requires_kpti(); + early_fixmap_init(); early_ioremap_init(); -- cgit v1.2.3 From 8bf9284d99dcb1c8fbdfabde1979350cf41fa5f5 Mon Sep 17 00:00:00 2001 From: Vladimir Murzin Date: Wed, 15 Jan 2020 14:18:25 +0000 Subject: arm64: Turn "broken gas inst" into real config option Use the new 'as-instr' Kconfig macro to define CONFIG_BROKEN_GAS_INST directly, making it available everywhere. Signed-off-by: Vladimir Murzin [will: Drop redundant 'y if' logic] Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 3 +++ arch/arm64/Makefile | 10 +++------- 2 files changed, 6 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index cf3b6d2a67cf..8a35f776c7f1 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -301,6 +301,9 @@ config ARCH_SUPPORTS_UPROBES config ARCH_PROC_KCORE_TEXT def_bool y +config BROKEN_GAS_INST + def_bool !$(as-instr,1:\n.inst 0\n.rept . - 1b\n\nnop\n.endr\n) + config KASAN_SHADOW_OFFSET hex depends on KASAN diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 6dd8ecacc428..dca1a97751ab 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -42,19 +42,15 @@ cc_has_k_constraint := $(call try-run,echo \ return 0; \ }' | $(CC) -S -x c -o "$$TMP" -,,-DCONFIG_CC_HAS_K_CONSTRAINT=1) -ifeq ($(CONFIG_ARM64), y) -brokengasinst := $(call as-instr,1:\n.inst 0\n.rept . 
- 1b\n\nnop\n.endr\n,,-DCONFIG_BROKEN_GAS_INST=1) - - ifneq ($(brokengasinst),) +ifeq ($(CONFIG_BROKEN_GAS_INST),y) $(warning Detected assembler with broken .inst; disassembly will be unreliable) - endif endif -KBUILD_CFLAGS += -mgeneral-regs-only $(brokengasinst) \ +KBUILD_CFLAGS += -mgeneral-regs-only \ $(compat_vdso) $(cc_has_k_constraint) KBUILD_CFLAGS += -fno-asynchronous-unwind-tables KBUILD_CFLAGS += $(call cc-disable-warning, psabi) -KBUILD_AFLAGS += $(brokengasinst) $(compat_vdso) +KBUILD_AFLAGS += $(compat_vdso) KBUILD_CFLAGS += $(call cc-option,-mabi=lp64) KBUILD_AFLAGS += $(call cc-option,-mabi=lp64) -- cgit v1.2.3 From ebac96ede61a1a46bdef12df3c1f9c20b00f2d88 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 15 Jan 2020 13:59:58 +0000 Subject: arm64: Simplify early check for broken TX1 when KASLR is enabled Now that the decision to use non-global mappings is stored in a variable, the check to avoid enabling them for the terminally broken ThunderX1 platform can be simplified so that it is only keyed off the MIDR value. Acked-by: Suzuki K Poulose Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/kernel/cpufeature.c | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 4a031111ceb5..d5242b44dc5a 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -980,9 +980,7 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope) */ bool kaslr_requires_kpti(void) { - bool tx1_bug; u64 ftr; - if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE)) return false; @@ -1000,18 +998,13 @@ bool kaslr_requires_kpti(void) * Systems affected by Cavium erratum 24756 are incompatible * with KPTI. */ - if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) { - tx1_bug = false; - } else if (!static_branch_likely(&arm64_const_caps_ready)) { + if (IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) { extern const struct midr_range cavium_erratum_27456_cpus[]; - tx1_bug = is_midr_in_range_list(read_cpuid_id(), - cavium_erratum_27456_cpus); - } else { - tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456); + if (is_midr_in_range_list(read_cpuid_id(), + cavium_erratum_27456_cpus)) + return false; } - if (tx1_bug) - return false; return kaslr_offset() > 0; } -- cgit v1.2.3 From a569f5f37203451d5378cb7de45f643bcb93d5e9 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 15 Jan 2020 14:06:37 +0000 Subject: arm64: Use register field helper in kaslr_requires_kpti() Rather than open-code the extraction of the E0PD field from the MMFR2 register, we can use the cpuid_feature_extract_unsigned_field() helper instead. Acked-by: Suzuki K Poulose Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/kernel/cpufeature.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index d5242b44dc5a..1ebeb5bc17d2 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -980,7 +980,6 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope) */ bool kaslr_requires_kpti(void) { - u64 ftr; if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE)) return false; @@ -989,8 +988,9 @@ bool kaslr_requires_kpti(void) * where available. 
*/ if (IS_ENABLED(CONFIG_ARM64_E0PD)) { - ftr = read_sysreg_s(SYS_ID_AA64MMFR2_EL1); - if ((ftr >> ID_AA64MMFR2_E0PD_SHIFT) & 0xf) + u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1); + if (cpuid_feature_extract_unsigned_field(mmfr2, + ID_AA64MMFR2_E0PD_SHIFT)) return false; } -- cgit v1.2.3 From e85d68faed4e79fd0b481c72de8245d4290369db Mon Sep 17 00:00:00 2001 From: Steven Price Date: Mon, 16 Dec 2019 11:56:29 +0000 Subject: arm64: Rename WORKAROUND_1165522 to SPECULATIVE_AT_VHE Cortex-A55 is affected by a similar erratum, so rename the existing workaround for erratum 1165522 so it can be used for both errata. Acked-by: Marc Zyngier Reviewed-by: Suzuki K Poulose Signed-off-by: Steven Price Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 4 ++++ arch/arm64/include/asm/cpucaps.h | 2 +- arch/arm64/include/asm/kvm_host.h | 2 +- arch/arm64/include/asm/kvm_hyp.h | 2 +- arch/arm64/kernel/cpu_errata.c | 17 +++++++++++++---- arch/arm64/kvm/hyp/switch.c | 2 +- arch/arm64/kvm/hyp/tlb.c | 4 ++-- 7 files changed, 23 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index b1b4476ddb83..b2f0df19e7f4 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -514,9 +514,13 @@ config ARM64_ERRATUM_1418040 If unsure, say Y. +config ARM64_WORKAROUND_SPECULATIVE_AT_VHE + bool + config ARM64_ERRATUM_1165522 bool "Cortex-A76: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation" default y + select ARM64_WORKAROUND_SPECULATIVE_AT_VHE help This option adds a workaround for ARM Cortex-A76 erratum 1165522. diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index b92683871119..327a38a5162f 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -44,7 +44,7 @@ #define ARM64_SSBS 34 #define ARM64_WORKAROUND_1418040 35 #define ARM64_HAS_SB 36 -#define ARM64_WORKAROUND_1165522 37 +#define ARM64_WORKAROUND_SPECULATIVE_AT_VHE 37 #define ARM64_HAS_ADDRESS_AUTH_ARCH 38 #define ARM64_HAS_ADDRESS_AUTH_IMP_DEF 39 #define ARM64_HAS_GENERIC_AUTH_ARCH 40 diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index c61260cf63c5..2a5f58857d12 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -571,7 +571,7 @@ static inline bool kvm_arch_requires_vhe(void) return true; /* Some implementations have defects that confine them to VHE */ - if (cpus_have_cap(ARM64_WORKAROUND_1165522)) + if (cpus_have_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) return true; return false; diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 97f21cc66657..167a161dd596 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -95,7 +95,7 @@ static __always_inline void __hyp_text __load_guest_stage2(struct kvm *kvm) * before we can switch to the EL1/EL0 translation regime used by * the guest. 
*/ - asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_1165522)); + asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT_VHE)); } #endif /* __ARM64_KVM_HYP_H__ */ diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 85f4bec22f6d..7886ddbafded 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -757,6 +757,16 @@ static const struct arm64_cpu_capabilities erratum_843419_list[] = { }; #endif +#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT_VHE +static const struct midr_range erratum_speculative_at_vhe_list[] = { +#ifdef CONFIG_ARM64_ERRATUM_1165522 + /* Cortex A76 r0p0 to r2p0 */ + MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0), +#endif + {}, +}; +#endif + const struct arm64_cpu_capabilities arm64_errata[] = { #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE { @@ -883,12 +893,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = { ERRATA_MIDR_RANGE_LIST(erratum_1418040_list), }, #endif -#ifdef CONFIG_ARM64_ERRATUM_1165522 +#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT_VHE { - /* Cortex-A76 r0p0 to r2p0 */ .desc = "ARM erratum 1165522", - .capability = ARM64_WORKAROUND_1165522, - ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0), + .capability = ARM64_WORKAROUND_SPECULATIVE_AT_VHE, + ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_vhe_list), }, #endif #ifdef CONFIG_ARM64_ERRATUM_1463225 diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 72fbbd86eb5e..eefcaa6d839f 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -162,7 +162,7 @@ static void deactivate_traps_vhe(void) * before we can switch to the EL2/EL0 translation regime used by * the host. */ - asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_1165522)); + asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT_VHE)); write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1); write_sysreg(vectors, vbar_el1); diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c index c2bc17ca6430..c827f3e0ba8f 100644 --- a/arch/arm64/kvm/hyp/tlb.c +++ b/arch/arm64/kvm/hyp/tlb.c @@ -23,7 +23,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm, local_irq_save(cxt->flags); - if (cpus_have_const_cap(ARM64_WORKAROUND_1165522)) { + if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) { /* * For CPUs that are affected by ARM erratum 1165522, we * cannot trust stage-1 to be in a correct state at that @@ -103,7 +103,7 @@ static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm, write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2); isb(); - if (cpus_have_const_cap(ARM64_WORKAROUND_1165522)) { + if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) { /* Restore the registers to what they were */ write_sysreg_el1(cxt->tcr, SYS_TCR); write_sysreg_el1(cxt->sctlr, SYS_SCTLR); -- cgit v1.2.3 From db0d46a58d34c7cd9d5ece98daf4b8afe3d770f8 Mon Sep 17 00:00:00 2001 From: Steven Price Date: Mon, 16 Dec 2019 11:56:30 +0000 Subject: arm64: Rename WORKAROUND_1319367 to SPECULATIVE_AT_NVHE To match SPECULATIVE_AT_VHE let's also have a generic name for the NVHE variant. 
Acked-by: Marc Zyngier Reviewed-by: Suzuki K Poulose Signed-off-by: Steven Price Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 4 ++++ arch/arm64/include/asm/cpucaps.h | 2 +- arch/arm64/kernel/cpu_errata.c | 2 +- arch/arm64/kvm/hyp/switch.c | 4 ++-- arch/arm64/kvm/hyp/sysreg-sr.c | 4 ++-- arch/arm64/kvm/hyp/tlb.c | 4 ++-- 6 files changed, 12 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index b2f0df19e7f4..d102ebd56c79 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -546,9 +546,13 @@ config ARM64_ERRATUM_1286807 invalidated has been observed by other observers. The workaround repeats the TLBI+DSB operation. +config ARM64_WORKAROUND_SPECULATIVE_AT_NVHE + bool + config ARM64_ERRATUM_1319367 bool "Cortex-A57/A72: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation" default y + select ARM64_WORKAROUND_SPECULATIVE_AT_NVHE help This option adds work arounds for ARM Cortex-A57 erratum 1319537 and A72 erratum 1319367 diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index 327a38a5162f..3d1aa1b02093 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -55,7 +55,7 @@ #define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM 45 #define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM 46 #define ARM64_WORKAROUND_1542419 47 -#define ARM64_WORKAROUND_1319367 48 +#define ARM64_WORKAROUND_SPECULATIVE_AT_NVHE 48 #define ARM64_NCAPS 49 diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 7886ddbafded..0332fca5564a 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -934,7 +934,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = { #ifdef CONFIG_ARM64_ERRATUM_1319367 { .desc = "ARM erratum 1319367", - .capability = ARM64_WORKAROUND_1319367, + .capability = ARM64_WORKAROUND_SPECULATIVE_AT_NVHE, ERRATA_MIDR_RANGE_LIST(ca57_a72), }, #endif diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index eefcaa6d839f..0fc824bdf258 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -119,7 +119,7 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu) write_sysreg(val, cptr_el2); - if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) { + if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) { struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt; isb(); @@ -173,7 +173,7 @@ static void __hyp_text __deactivate_traps_nvhe(void) { u64 mdcr_el2 = read_sysreg(mdcr_el2); - if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) { + if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) { u64 val; /* diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index 22b8128d19f6..7672a978926c 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -118,7 +118,7 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt) write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2); write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1); - if (!cpus_have_const_cap(ARM64_WORKAROUND_1319367)) { + if (!cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) { write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR); write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR); } else if (!ctxt->__hyp_running_vcpu) { @@ -149,7 +149,7 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt) write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1); 
write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1); - if (cpus_have_const_cap(ARM64_WORKAROUND_1319367) && + if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE) && ctxt->__hyp_running_vcpu) { /* * Must only be done for host registers, hence the context diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c index c827f3e0ba8f..ff4e73c9bafc 100644 --- a/arch/arm64/kvm/hyp/tlb.c +++ b/arch/arm64/kvm/hyp/tlb.c @@ -63,7 +63,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm, static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm, struct tlb_inv_context *cxt) { - if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) { + if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) { u64 val; /* @@ -117,7 +117,7 @@ static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm, { write_sysreg(0, vttbr_el2); - if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) { + if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) { /* Ensure write of the host VMID */ isb(); /* Restore the host's TCR_EL1 */ -- cgit v1.2.3 From 275fa0ea2cf7a84450f9c0ec0d9e7ec168ed2e2d Mon Sep 17 00:00:00 2001 From: Steven Price Date: Mon, 16 Dec 2019 11:56:31 +0000 Subject: arm64: Workaround for Cortex-A55 erratum 1530923 Cortex-A55 erratum 1530923 allows TLB entries to be allocated as a result of a speculative AT instruction. This may happen in the middle of a guest world switch while the relevant VMSA configuration is in an inconsistent state, leading to erroneous content being allocated into TLBs. The same workaround as is used for Cortex-A76 erratum 1165522 (WORKAROUND_SPECULATIVE_AT_VHE) can be used here. Note that this mandates the use of VHE on affected parts. Acked-by: Marc Zyngier Signed-off-by: Steven Price Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 13 +++++++++++++ arch/arm64/include/asm/kvm_hyp.h | 4 ++-- arch/arm64/kernel/cpu_errata.c | 6 +++++- arch/arm64/kvm/hyp/switch.c | 4 ++-- arch/arm64/kvm/hyp/tlb.c | 4 ++-- 5 files changed, 24 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index d102ebd56c79..6c92c6dac45b 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -530,6 +530,19 @@ config ARM64_ERRATUM_1165522 If unsure, say Y. +config ARM64_ERRATUM_1530923 + bool "Cortex-A55: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation" + default y + select ARM64_WORKAROUND_SPECULATIVE_AT_VHE + help + This option adds a workaround for ARM Cortex-A55 erratum 1530923. + + Affected Cortex-A55 cores (r0p0, r0p1, r1p0, r2p0) could end-up with + corrupted TLBs by speculating an AT instruction during a guest + context switch. + + If unsure, say Y. 
+ config ARM64_ERRATUM_1286807 bool "Cortex-A76: Modification of the translation table for a virtual address might lead to read-after-read ordering violation" default y diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 167a161dd596..a3a6a2ba9a63 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -91,8 +91,8 @@ static __always_inline void __hyp_text __load_guest_stage2(struct kvm *kvm) write_sysreg(kvm_get_vttbr(kvm), vttbr_el2); /* - * ARM erratum 1165522 requires the actual execution of the above - * before we can switch to the EL1/EL0 translation regime used by + * ARM errata 1165522 and 1530923 require the actual execution of the + * above before we can switch to the EL1/EL0 translation regime used by * the guest. */ asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT_VHE)); diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 0332fca5564a..0bd2867f3248 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -762,6 +762,10 @@ static const struct midr_range erratum_speculative_at_vhe_list[] = { #ifdef CONFIG_ARM64_ERRATUM_1165522 /* Cortex A76 r0p0 to r2p0 */ MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0), +#endif +#ifdef CONFIG_ARM64_ERRATUM_1530923 + /* Cortex A55 r0p0 to r2p0 */ + MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0), #endif {}, }; @@ -895,7 +899,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = { #endif #ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT_VHE { - .desc = "ARM erratum 1165522", + .desc = "ARM errata 1165522, 1530923", .capability = ARM64_WORKAROUND_SPECULATIVE_AT_VHE, ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_vhe_list), }, diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 0fc824bdf258..eae08ba82e95 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -158,8 +158,8 @@ static void deactivate_traps_vhe(void) write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2); /* - * ARM erratum 1165522 requires the actual execution of the above - * before we can switch to the EL2/EL0 translation regime used by + * ARM errata 1165522 and 1530923 require the actual execution of the + * above before we can switch to the EL2/EL0 translation regime used by * the host. */ asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT_VHE)); diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c index ff4e73c9bafc..92f560e3e1aa 100644 --- a/arch/arm64/kvm/hyp/tlb.c +++ b/arch/arm64/kvm/hyp/tlb.c @@ -25,8 +25,8 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm, if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) { /* - * For CPUs that are affected by ARM erratum 1165522, we - * cannot trust stage-1 to be in a correct state at that + * For CPUs that are affected by ARM errata 1165522 or 1530923, + * we cannot trust stage-1 to be in a correct state at that * point. Since we do not want to force a full load of the * vcpu state, we prevent the EL1 page-table walker to * allocate new TLBs. This is done by setting the EPD bits -- cgit v1.2.3 From f88f42f853a80d9b087f0c2035d6fbab504ea54c Mon Sep 17 00:00:00 2001 From: Vladimir Murzin Date: Tue, 7 Jan 2020 10:28:03 +0000 Subject: arm64: context: Free up kernel ASIDs if KPTI is not in use We can extend user ASID space if it turns out that the system does not require KPTI. We start with kernel ASIDs reserved because CPU caps are not finalized yet and free them up lazily on the next rollover if we confirm that KPTI is not in use.
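The "kernel ASIDs reserved" state is simply a bitmap primed with the 0xaa byte pattern, so that one ASID out of every even/odd pair can never be handed out and ASIDs are effectively allocated in pairs. A stand-alone sketch of that priming (demonstration only, with a shrunken ASID space; the kernel does this with a memset() of asid_map in set_kpti_asid_bits(), shown in the diff below):

#include <stdio.h>
#include <string.h>

#define ASID_BITS 6	/* tiny space for the demo; arm64 uses 8 or 16 */
#define NUM_ASIDS (1UL << ASID_BITS)
#define BITS_PER_LONG (8 * sizeof(unsigned long))

static unsigned long asid_map[NUM_ASIDS / BITS_PER_LONG];

static int test_bit(unsigned int n)
{
	return (asid_map[n / BITS_PER_LONG] >> (n % BITS_PER_LONG)) & 1;
}

int main(void)
{
	/* 0xaa = 0b10101010: every second bit starts out set */
	memset(asid_map, 0xaa, sizeof(asid_map));

	for (unsigned int n = 0; n < 8; n++)
		printf("asid %u: %s\n", n, test_bit(n) ? "reserved" : "allocatable");
	return 0;
}

Running this prints alternating "allocatable"/"reserved" entries; clearing the whole bitmap instead, as the lazy path does once KPTI is ruled out, doubles the usable ASID space.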
Reviewed-by: Catalin Marinas Signed-off-by: Vladimir Murzin Signed-off-by: Will Deacon --- arch/arm64/mm/context.c | 38 ++++++++++++++++++++++++++++++-------- 1 file changed, 30 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index b5e329fde2dd..8ef73e89d514 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -29,15 +29,9 @@ static cpumask_t tlb_flush_pending; #define ASID_MASK (~GENMASK(asid_bits - 1, 0)) #define ASID_FIRST_VERSION (1UL << asid_bits) -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 -#define NUM_USER_ASIDS (ASID_FIRST_VERSION >> 1) -#define asid2idx(asid) (((asid) & ~ASID_MASK) >> 1) -#define idx2asid(idx) (((idx) << 1) & ~ASID_MASK) -#else -#define NUM_USER_ASIDS (ASID_FIRST_VERSION) +#define NUM_USER_ASIDS ASID_FIRST_VERSION #define asid2idx(asid) ((asid) & ~ASID_MASK) #define idx2asid(idx) asid2idx(idx) -#endif /* Get the ASIDBits supported by the current CPU */ static u32 get_cpu_asid_bits(void) @@ -77,13 +71,33 @@ void verify_cpu_asid_bits(void) } } +static void set_kpti_asid_bits(void) +{ + unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long); + /* + * In case of KPTI kernel/user ASIDs are allocated in + * pairs, the bottom bit distinguishes the two: if it + * is set, then the ASID will map only userspace. Thus + * mark even as reserved for kernel. + */ + memset(asid_map, 0xaa, len); +} + +static void set_reserved_asid_bits(void) +{ + if (arm64_kernel_unmapped_at_el0()) + set_kpti_asid_bits(); + else + bitmap_clear(asid_map, 0, NUM_USER_ASIDS); +} + static void flush_context(void) { int i; u64 asid; /* Update the list of reserved ASIDs and the ASID bitmap. */ - bitmap_clear(asid_map, 0, NUM_USER_ASIDS); + set_reserved_asid_bits(); for_each_possible_cpu(i) { asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0); @@ -261,6 +275,14 @@ static int asids_init(void) panic("Failed to allocate bitmap for %lu ASIDs\n", NUM_USER_ASIDS); + /* + * We cannot call set_reserved_asid_bits() here because CPU + * caps are not finalized yet, so it is safer to assume KPTI + * and reserve kernel ASID's from beginning. + */ + if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) + set_kpti_asid_bits(); + pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS); return 0; } -- cgit v1.2.3 From 5777eaed566a1d63e344d3dd8f2b5e33be20643e Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Wed, 15 Jan 2020 16:42:39 +0000 Subject: arm64: Implement optimised checksum routine Apparently there exist certain workloads which rely heavily on software checksumming, for which the generic do_csum() implementation becomes a significant bottleneck. Therefore let's give arm64 its own optimised version - for ease of maintenance this foregoes assembly or intrinsics, and is thus not actually arm64-specific, but does rely heavily on C idioms that translate well to the A64 ISA and the typical load/store capabilities of most ARMv8 CPU cores. The resulting increase in checksum throughput scales nicely with buffer size, tending towards 4x for a small in-order core (Cortex-A53), and up to 6x or more for an aggressive big core (Ampere eMAG).
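The core idiom is end-around carry: because the Internet checksum is a ones'-complement sum, 64-bit (or 128-bit) chunks can be added in any grouping as long as each carry-out is folded back into the low bits. A stand-alone sketch showing that a folded 64-bit accumulation matches a naive 16-bit sum (little-endian only; assumes a compiler with the __uint128_t extension; the accumulate() helper mirrors the one in the diff below):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* add with end-around carry */
static uint64_t accumulate(uint64_t sum, uint64_t data)
{
	__uint128_t tmp = (__uint128_t)sum + data;
	return (uint64_t)(tmp + (tmp >> 64));
}

/* reference: plain 16-bit ones'-complement sum */
static unsigned int csum16(const uint8_t *p, size_t len)
{
	uint32_t s = 0;
	for (size_t i = 0; i < len; i += 2)
		s += (uint32_t)p[i] | ((uint32_t)p[i + 1] << 8);
	while (s >> 16)
		s = (s & 0xffff) + (s >> 16);
	return s;
}

int main(void)
{
	uint8_t buf[32];
	uint64_t sum = 0;

	for (int i = 0; i < 32; i++)
		buf[i] = (uint8_t)(i * 7 + 1);

	for (int i = 0; i < 32; i += 8) {
		uint64_t d;
		memcpy(&d, buf + i, 8);	/* little-endian 64-bit load */
		sum = accumulate(sum, d);
	}
	/* fold 64 -> 32 -> 16 bits with end-around carries */
	while (sum >> 32)
		sum = (sum & 0xffffffff) + (sum >> 32);
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);

	printf("wide %#llx vs naive %#x\n",
	       (unsigned long long)sum, csum16(buf, sizeof(buf)));
	return 0;
}

Both paths print the same value, which is why the kernel routine can consume the buffer in large chunks and only fold down to 16 bits at the very end.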
Reported-by: Lingyan Huang Tested-by: Lingyan Huang Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- arch/arm64/include/asm/checksum.h | 3 + arch/arm64/lib/Makefile | 6 +- arch/arm64/lib/csum.c | 123 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 129 insertions(+), 3 deletions(-) create mode 100644 arch/arm64/lib/csum.c (limited to 'arch') diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h index d064a50deb5f..8d2a7de39744 100644 --- a/arch/arm64/include/asm/checksum.h +++ b/arch/arm64/include/asm/checksum.h @@ -35,6 +35,9 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) } #define ip_fast_csum ip_fast_csum +extern unsigned int do_csum(const unsigned char *buff, int len); +#define do_csum do_csum + #include #endif /* __ASM_CHECKSUM_H */ diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index c21b936dc01d..2fc253466dbf 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile @@ -1,9 +1,9 @@ # SPDX-License-Identifier: GPL-2.0 lib-y := clear_user.o delay.o copy_from_user.o \ copy_to_user.o copy_in_user.o copy_page.o \ - clear_page.o memchr.o memcpy.o memmove.o memset.o \ - memcmp.o strcmp.o strncmp.o strlen.o strnlen.o \ - strchr.o strrchr.o tishift.o + clear_page.o csum.o memchr.o memcpy.o memmove.o \ + memset.o memcmp.o strcmp.o strncmp.o strlen.o \ + strnlen.o strchr.o strrchr.o tishift.o ifeq ($(CONFIG_KERNEL_MODE_NEON), y) obj-$(CONFIG_XOR_BLOCKS) += xor-neon.o diff --git a/arch/arm64/lib/csum.c b/arch/arm64/lib/csum.c new file mode 100644 index 000000000000..847eb725ce09 --- /dev/null +++ b/arch/arm64/lib/csum.c @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright (C) 2019-2020 Arm Ltd. + +#include +#include +#include + +#include + +/* Looks dumb, but generates nice-ish code */ +static u64 accumulate(u64 sum, u64 data) +{ + __uint128_t tmp = (__uint128_t)sum + data; + return tmp + (tmp >> 64); +} + +unsigned int do_csum(const unsigned char *buff, int len) +{ + unsigned int offset, shift, sum; + const u64 *ptr; + u64 data, sum64 = 0; + + offset = (unsigned long)buff & 7; + /* + * This is to all intents and purposes safe, since rounding down cannot + * result in a different page or cache line being accessed, and @buff + * should absolutely not be pointing to anything read-sensitive. We do, + * however, have to be careful not to piss off KASAN, which means using + * unchecked reads to accommodate the head and tail, for which we'll + * compensate with an explicit check up-front. + */ + kasan_check_read(buff, len); + ptr = (u64 *)(buff - offset); + len = len + offset - 8; + + /* + * Head: zero out any excess leading bytes. Shifting back by the same + * amount should be at least as fast as any other way of handling the + * odd/even alignment, and means we can ignore it until the very end. + */ + shift = offset * 8; + data = READ_ONCE_NOCHECK(*ptr++); +#ifdef __LITTLE_ENDIAN + data = (data >> shift) << shift; +#else + data = (data << shift) >> shift; +#endif + + /* + * Body: straightforward aligned loads from here on (the paired loads + * underlying the quadword type still only need dword alignment). The + * main loop strictly excludes the tail, so the second loop will always + * run at least once. 
+ */ + while (unlikely(len > 64)) { + __uint128_t tmp1, tmp2, tmp3, tmp4; + + tmp1 = READ_ONCE_NOCHECK(*(__uint128_t *)ptr); + tmp2 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 2)); + tmp3 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 4)); + tmp4 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 6)); + + len -= 64; + ptr += 8; + + /* This is the "don't dump the carry flag into a GPR" idiom */ + tmp1 += (tmp1 >> 64) | (tmp1 << 64); + tmp2 += (tmp2 >> 64) | (tmp2 << 64); + tmp3 += (tmp3 >> 64) | (tmp3 << 64); + tmp4 += (tmp4 >> 64) | (tmp4 << 64); + tmp1 = ((tmp1 >> 64) << 64) | (tmp2 >> 64); + tmp1 += (tmp1 >> 64) | (tmp1 << 64); + tmp3 = ((tmp3 >> 64) << 64) | (tmp4 >> 64); + tmp3 += (tmp3 >> 64) | (tmp3 << 64); + tmp1 = ((tmp1 >> 64) << 64) | (tmp3 >> 64); + tmp1 += (tmp1 >> 64) | (tmp1 << 64); + tmp1 = ((tmp1 >> 64) << 64) | sum64; + tmp1 += (tmp1 >> 64) | (tmp1 << 64); + sum64 = tmp1 >> 64; + } + while (len > 8) { + __uint128_t tmp; + + sum64 = accumulate(sum64, data); + tmp = READ_ONCE_NOCHECK(*(__uint128_t *)ptr); + + len -= 16; + ptr += 2; + +#ifdef __LITTLE_ENDIAN + data = tmp >> 64; + sum64 = accumulate(sum64, tmp); +#else + data = tmp; + sum64 = accumulate(sum64, tmp >> 64); +#endif + } + if (len > 0) { + sum64 = accumulate(sum64, data); + data = READ_ONCE_NOCHECK(*ptr); + len -= 8; + } + /* + * Tail: zero any over-read bytes similarly to the head, again + * preserving odd/even alignment. + */ + shift = len * -8; +#ifdef __LITTLE_ENDIAN + data = (data << shift) >> shift; +#else + data = (data >> shift) << shift; +#endif + sum64 = accumulate(sum64, data); + + /* Finally, folding */ + sum64 += (sum64 >> 32) | (sum64 << 32); + sum = sum64 >> 32; + sum += (sum >> 16) | (sum << 16); + if (offset & 1) + return (u16)swab32(sum); + + return sum >> 16; +} -- cgit v1.2.3 From e0d5896bd356cd577f9710a02d7a474cdf58426b Mon Sep 17 00:00:00 2001 From: Sami Tolvanen Date: Thu, 31 Oct 2019 12:57:05 -0700 Subject: arm64: lse: fix LSE atomics with LLVM's integrated assembler Unlike gcc, clang considers each inline assembly block to be independent and therefore, when using the integrated assembler for inline assembly, any preambles that enable features must be repeated in each block. This change defines __LSE_PREAMBLE and adds it to each inline assembly block that has LSE instructions, which allows them to be compiled also with clang's assembler. 
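The same rule applies to any out-of-tree inline assembly that uses LSE mnemonics: with clang's integrated assembler, the .arch directive has to appear inside every asm block rather than once per file. A minimal stand-alone illustration (arm64 only, e.g. built with clang --target=aarch64-linux-gnu; the macro name mirrors the kernel's __LSE_PREAMBLE but this is not kernel code):

#include <stdio.h>

#define LSE_PREAMBLE ".arch armv8-a+lse\n"

static inline int atomic_fetch_add_lse(int *v, int i)
{
	/* the asm block carries its own preamble, so the integrated
	 * assembler accepts the LSE mnemonic below */
	asm volatile(LSE_PREAMBLE
		     "ldaddal %w[i], %w[i], %[v]\n"
		     : [i] "+r" (i), [v] "+Q" (*v)
		     :
		     : "memory");
	return i;	/* old value */
}

int main(void)
{
	int x = 40;
	int old = atomic_fetch_add_lse(&x, 2);
	printf("old=%d new=%d\n", old, x);	/* old=40 new=42 */
	return 0;
}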
Link: https://github.com/ClangBuiltLinux/linux/issues/671 Signed-off-by: Sami Tolvanen Tested-by: Andrew Murray Tested-by: Kees Cook Reviewed-by: Andrew Murray Reviewed-by: Kees Cook Reviewed-by: Nick Desaulniers Signed-off-by: Will Deacon --- arch/arm64/include/asm/atomic_lse.h | 19 +++++++++++++++++++ arch/arm64/include/asm/lse.h | 6 +++--- 2 files changed, 22 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h index 574808b9df4c..da3280f639cd 100644 --- a/arch/arm64/include/asm/atomic_lse.h +++ b/arch/arm64/include/asm/atomic_lse.h @@ -14,6 +14,7 @@ static inline void __lse_atomic_##op(int i, atomic_t *v) \ { \ asm volatile( \ + __LSE_PREAMBLE \ " " #asm_op " %w[i], %[v]\n" \ : [i] "+r" (i), [v] "+Q" (v->counter) \ : "r" (v)); \ @@ -30,6 +31,7 @@ ATOMIC_OP(add, stadd) static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v) \ { \ asm volatile( \ + __LSE_PREAMBLE \ " " #asm_op #mb " %w[i], %w[i], %[v]" \ : [i] "+r" (i), [v] "+Q" (v->counter) \ : "r" (v) \ @@ -58,6 +60,7 @@ static inline int __lse_atomic_add_return##name(int i, atomic_t *v) \ u32 tmp; \ \ asm volatile( \ + __LSE_PREAMBLE \ " ldadd" #mb " %w[i], %w[tmp], %[v]\n" \ " add %w[i], %w[i], %w[tmp]" \ : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \ @@ -77,6 +80,7 @@ ATOMIC_OP_ADD_RETURN( , al, "memory") static inline void __lse_atomic_and(int i, atomic_t *v) { asm volatile( + __LSE_PREAMBLE " mvn %w[i], %w[i]\n" " stclr %w[i], %[v]" : [i] "+&r" (i), [v] "+Q" (v->counter) @@ -87,6 +91,7 @@ static inline void __lse_atomic_and(int i, atomic_t *v) static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v) \ { \ asm volatile( \ + __LSE_PREAMBLE \ " mvn %w[i], %w[i]\n" \ " ldclr" #mb " %w[i], %w[i], %[v]" \ : [i] "+&r" (i), [v] "+Q" (v->counter) \ @@ -106,6 +111,7 @@ ATOMIC_FETCH_OP_AND( , al, "memory") static inline void __lse_atomic_sub(int i, atomic_t *v) { asm volatile( + __LSE_PREAMBLE " neg %w[i], %w[i]\n" " stadd %w[i], %[v]" : [i] "+&r" (i), [v] "+Q" (v->counter) @@ -118,6 +124,7 @@ static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \ u32 tmp; \ \ asm volatile( \ + __LSE_PREAMBLE \ " neg %w[i], %w[i]\n" \ " ldadd" #mb " %w[i], %w[tmp], %[v]\n" \ " add %w[i], %w[i], %w[tmp]" \ @@ -139,6 +146,7 @@ ATOMIC_OP_SUB_RETURN( , al, "memory") static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v) \ { \ asm volatile( \ + __LSE_PREAMBLE \ " neg %w[i], %w[i]\n" \ " ldadd" #mb " %w[i], %w[i], %[v]" \ : [i] "+&r" (i), [v] "+Q" (v->counter) \ @@ -159,6 +167,7 @@ ATOMIC_FETCH_OP_SUB( , al, "memory") static inline void __lse_atomic64_##op(s64 i, atomic64_t *v) \ { \ asm volatile( \ + __LSE_PREAMBLE \ " " #asm_op " %[i], %[v]\n" \ : [i] "+r" (i), [v] "+Q" (v->counter) \ : "r" (v)); \ @@ -175,6 +184,7 @@ ATOMIC64_OP(add, stadd) static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\ { \ asm volatile( \ + __LSE_PREAMBLE \ " " #asm_op #mb " %[i], %[i], %[v]" \ : [i] "+r" (i), [v] "+Q" (v->counter) \ : "r" (v) \ @@ -203,6 +213,7 @@ static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\ unsigned long tmp; \ \ asm volatile( \ + __LSE_PREAMBLE \ " ldadd" #mb " %[i], %x[tmp], %[v]\n" \ " add %[i], %[i], %x[tmp]" \ : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \ @@ -222,6 +233,7 @@ ATOMIC64_OP_ADD_RETURN( , al, "memory") static inline void __lse_atomic64_and(s64 i, atomic64_t *v) { asm volatile( + __LSE_PREAMBLE " mvn %[i], %[i]\n" " stclr %[i], %[v]" : [i] "+&r" 
(i), [v] "+Q" (v->counter) @@ -232,6 +244,7 @@ static inline void __lse_atomic64_and(s64 i, atomic64_t *v) static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \ { \ asm volatile( \ + __LSE_PREAMBLE \ " mvn %[i], %[i]\n" \ " ldclr" #mb " %[i], %[i], %[v]" \ : [i] "+&r" (i), [v] "+Q" (v->counter) \ @@ -251,6 +264,7 @@ ATOMIC64_FETCH_OP_AND( , al, "memory") static inline void __lse_atomic64_sub(s64 i, atomic64_t *v) { asm volatile( + __LSE_PREAMBLE " neg %[i], %[i]\n" " stadd %[i], %[v]" : [i] "+&r" (i), [v] "+Q" (v->counter) @@ -263,6 +277,7 @@ static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v) \ unsigned long tmp; \ \ asm volatile( \ + __LSE_PREAMBLE \ " neg %[i], %[i]\n" \ " ldadd" #mb " %[i], %x[tmp], %[v]\n" \ " add %[i], %[i], %x[tmp]" \ @@ -284,6 +299,7 @@ ATOMIC64_OP_SUB_RETURN( , al, "memory") static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \ { \ asm volatile( \ + __LSE_PREAMBLE \ " neg %[i], %[i]\n" \ " ldadd" #mb " %[i], %[i], %[v]" \ : [i] "+&r" (i), [v] "+Q" (v->counter) \ @@ -305,6 +321,7 @@ static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v) unsigned long tmp; asm volatile( + __LSE_PREAMBLE "1: ldr %x[tmp], %[v]\n" " subs %[ret], %x[tmp], #1\n" " b.lt 2f\n" @@ -332,6 +349,7 @@ __lse__cmpxchg_case_##name##sz(volatile void *ptr, \ unsigned long tmp; \ \ asm volatile( \ + __LSE_PREAMBLE \ " mov %" #w "[tmp], %" #w "[old]\n" \ " cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n" \ " mov %" #w "[ret], %" #w "[tmp]" \ @@ -379,6 +397,7 @@ __lse__cmpxchg_double##name(unsigned long old1, \ register unsigned long x4 asm ("x4") = (unsigned long)ptr; \ \ asm volatile( \ + __LSE_PREAMBLE \ " casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\ " eor %[old1], %[old1], %[oldval1]\n" \ " eor %[old2], %[old2], %[oldval2]\n" \ diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h index 80b388278149..73834996c4b6 100644 --- a/arch/arm64/include/asm/lse.h +++ b/arch/arm64/include/asm/lse.h @@ -6,6 +6,8 @@ #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS) +#define __LSE_PREAMBLE ".arch armv8-a+lse\n" + #include #include #include @@ -14,8 +16,6 @@ #include #include -__asm__(".arch_extension lse"); - extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS]; extern struct static_key_false arm64_const_caps_ready; @@ -34,7 +34,7 @@ static inline bool system_uses_lse_atomics(void) /* In-line patching at runtime */ #define ARM64_LSE_ATOMIC_INSN(llsc, lse) \ - ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS) + ALTERNATIVE(llsc, __LSE_PREAMBLE lse, ARM64_HAS_LSE_ATOMICS) #else /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */ -- cgit v1.2.3 From c54f90c2627cc316d365e3073614731e17dbc631 Mon Sep 17 00:00:00 2001 From: Sami Tolvanen Date: Thu, 31 Oct 2019 12:46:52 -0700 Subject: arm64: fix alternatives with LLVM's integrated assembler LLVM's integrated assembler fails with the following error when building KVM: :12:6: error: expected absolute expression .if kvm_update_va_mask == 0 ^ :21:6: error: expected absolute expression .if kvm_update_va_mask == 0 ^ :24:2: error: unrecognized instruction mnemonic NOT_AN_INSTRUCTION ^ LLVM ERROR: Error parsing inline asm These errors come from ALTERNATIVE_CB and __ALTERNATIVE_CFG, which test for the existence of the callback parameter in inline assembly using the following expression: " .if " __stringify(cb) " == 0\n" This works with GNU as, but isn't supported by LLVM. 
This change splits __ALTERNATIVE_CFG and ALTINSTR_ENTRY into separate macros to fix the LLVM build. Link: https://github.com/ClangBuiltLinux/linux/issues/472 Signed-off-by: Sami Tolvanen Tested-by: Nick Desaulniers Reviewed-by: Kees Cook Signed-off-by: Will Deacon --- arch/arm64/include/asm/alternative.h | 32 +++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index b9f8d787eea9..324e7d5ab37e 100644 --- a/arch/arm64/include/asm/alternative.h +++ b/arch/arm64/include/asm/alternative.h @@ -35,13 +35,16 @@ void apply_alternatives_module(void *start, size_t length); static inline void apply_alternatives_module(void *start, size_t length) { } #endif -#define ALTINSTR_ENTRY(feature,cb) \ +#define ALTINSTR_ENTRY(feature) \ " .word 661b - .\n" /* label */ \ - " .if " __stringify(cb) " == 0\n" \ " .word 663f - .\n" /* new instruction */ \ - " .else\n" \ + " .hword " __stringify(feature) "\n" /* feature bit */ \ + " .byte 662b-661b\n" /* source len */ \ + " .byte 664f-663f\n" /* replacement len */ + +#define ALTINSTR_ENTRY_CB(feature, cb) \ + " .word 661b - .\n" /* label */ \ " .word " __stringify(cb) "- .\n" /* callback */ \ - " .endif\n" \ " .hword " __stringify(feature) "\n" /* feature bit */ \ " .byte 662b-661b\n" /* source len */ \ " .byte 664f-663f\n" /* replacement len */ @@ -62,15 +65,14 @@ static inline void apply_alternatives_module(void *start, size_t length) { } * * Alternatives with callbacks do not generate replacement instructions. */ -#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled, cb) \ +#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled) \ ".if "__stringify(cfg_enabled)" == 1\n" \ "661:\n\t" \ oldinstr "\n" \ "662:\n" \ ".pushsection .altinstructions,\"a\"\n" \ - ALTINSTR_ENTRY(feature,cb) \ + ALTINSTR_ENTRY(feature) \ ".popsection\n" \ - " .if " __stringify(cb) " == 0\n" \ ".pushsection .altinstr_replacement, \"a\"\n" \ "663:\n\t" \ newinstr "\n" \ @@ -78,17 +80,25 @@ static inline void apply_alternatives_module(void *start, size_t length) { } ".popsection\n\t" \ ".org . - (664b-663b) + (662b-661b)\n\t" \ ".org . - (662b-661b) + (664b-663b)\n" \ - ".else\n\t" \ + ".endif\n" + +#define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \ + ".if "__stringify(cfg_enabled)" == 1\n" \ + "661:\n\t" \ + oldinstr "\n" \ + "662:\n" \ + ".pushsection .altinstructions,\"a\"\n" \ + ALTINSTR_ENTRY_CB(feature, cb) \ + ".popsection\n" \ "663:\n\t" \ "664:\n\t" \ - ".endif\n" \ ".endif\n" #define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \ - __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg), 0) + __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg)) #define ALTERNATIVE_CB(oldinstr, cb) \ - __ALTERNATIVE_CFG(oldinstr, "NOT_AN_INSTRUCTION", ARM64_CB_PATCH, 1, cb) + __ALTERNATIVE_CFG_CB(oldinstr, ARM64_CB_PATCH, 1, cb) #else #include -- cgit v1.2.3 From e3ec6582d1cf1c4922200f7cddd96d5ee55235d2 Mon Sep 17 00:00:00 2001 From: Sami Tolvanen Date: Fri, 6 Dec 2019 14:13:37 -0800 Subject: arm64: mm: avoid x18 in idmap_kpti_install_ng_mappings idmap_kpti_install_ng_mappings uses x18 as a temporary register, which will result in a conflict when x18 is reserved. Use x16 and x17 instead where needed. 
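For context, the reason x18 can be reserved at all is its AAPCS64 platform-register role: once a kernel (or any program) is built with -ffixed-x18, as the shadow call stack work requires, the compiler never touches it, and hand-written assembly like the above has to follow suit. A stand-alone userspace illustration of pinning x18 (arm64 only, build with -ffixed-x18; the variable name is hypothetical):

/* build: cc -ffixed-x18 -O2 demo.c   (aarch64) */
#include <stdio.h>

/* a global register variable pins x18 for this translation unit;
 * note that libraries built without -ffixed-x18 may still clobber
 * it internally, so the value is only read before the call here */
register unsigned long x18_scratch asm("x18");

int main(void)
{
	x18_scratch = 0xdeadbeef;
	printf("x18 = %#lx\n", x18_scratch);
	return 0;
}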
Signed-off-by: Sami Tolvanen Reviewed-by: Nick Desaulniers Reviewed-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/mm/proc.S | 63 ++++++++++++++++++++++++++-------------------------- 1 file changed, 32 insertions(+), 31 deletions(-) (limited to 'arch') diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index a1e0592d1fbc..fdabf40a83c8 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -250,15 +250,15 @@ ENTRY(idmap_kpti_install_ng_mappings) /* We're the boot CPU. Wait for the others to catch up */ sevl 1: wfe - ldaxr w18, [flag_ptr] - eor w18, w18, num_cpus - cbnz w18, 1b + ldaxr w17, [flag_ptr] + eor w17, w17, num_cpus + cbnz w17, 1b /* We need to walk swapper, so turn off the MMU. */ pre_disable_mmu_workaround - mrs x18, sctlr_el1 - bic x18, x18, #SCTLR_ELx_M - msr sctlr_el1, x18 + mrs x17, sctlr_el1 + bic x17, x17, #SCTLR_ELx_M + msr sctlr_el1, x17 isb /* Everybody is enjoying the idmap, so we can rewrite swapper. */ @@ -281,9 +281,9 @@ skip_pgd: isb /* We're done: fire up the MMU again */ - mrs x18, sctlr_el1 - orr x18, x18, #SCTLR_ELx_M - msr sctlr_el1, x18 + mrs x17, sctlr_el1 + orr x17, x17, #SCTLR_ELx_M + msr sctlr_el1, x17 isb /* @@ -353,46 +353,47 @@ skip_pte: b.ne do_pte b next_pmd + .unreq cpu + .unreq num_cpus + .unreq swapper_pa + .unreq cur_pgdp + .unreq end_pgdp + .unreq pgd + .unreq cur_pudp + .unreq end_pudp + .unreq pud + .unreq cur_pmdp + .unreq end_pmdp + .unreq pmd + .unreq cur_ptep + .unreq end_ptep + .unreq pte + /* Secondary CPUs end up here */ __idmap_kpti_secondary: /* Uninstall swapper before surgery begins */ - __idmap_cpu_set_reserved_ttbr1 x18, x17 + __idmap_cpu_set_reserved_ttbr1 x16, x17 /* Increment the flag to let the boot CPU we're ready */ -1: ldxr w18, [flag_ptr] - add w18, w18, #1 - stxr w17, w18, [flag_ptr] +1: ldxr w16, [flag_ptr] + add w16, w16, #1 + stxr w17, w16, [flag_ptr] cbnz w17, 1b /* Wait for the boot CPU to finish messing around with swapper */ sevl 1: wfe - ldxr w18, [flag_ptr] - cbnz w18, 1b + ldxr w16, [flag_ptr] + cbnz w16, 1b /* All done, act like nothing happened */ - offset_ttbr1 swapper_ttb, x18 + offset_ttbr1 swapper_ttb, x16 msr ttbr1_el1, swapper_ttb isb ret - .unreq cpu - .unreq num_cpus - .unreq swapper_pa .unreq swapper_ttb .unreq flag_ptr - .unreq cur_pgdp - .unreq end_pgdp - .unreq pgd - .unreq cur_pudp - .unreq end_pudp - .unreq pud - .unreq cur_pmdp - .unreq end_pmdp - .unreq pmd - .unreq cur_ptep - .unreq end_ptep - .unreq pte ENDPROC(idmap_kpti_install_ng_mappings) .popsection #endif -- cgit v1.2.3 From 7f153ccb9bb4c83a62cce8d034012698f68a3b4c Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 6 Dec 2019 14:13:38 -0800 Subject: arm64/lib: copy_page: avoid x18 register in assembler code Register x18 will no longer be used as a caller save register in the future, so stop using it in the copy_page() code. 
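The rewrite below drops the x18 iteration counter by biasing the destination pointer 256 bytes ahead and letting the page-offset test double as the loop condition. A stand-alone C model of just that control flow (illustrative only; the real loop body copies 128 bytes per pass through sixteen registers):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long page = 0x40000000UL;	/* hypothetical page-aligned dest */
	unsigned long x0 = page + 256;		/* bias: 128 stored behind, 128 loaded ahead */
	int body_runs = 0;

	for (;;) {
		/* "tst x0, #(PAGE_SIZE - 1)": flags are set before the adds */
		int last = (x0 & (PAGE_SIZE - 1)) == 0;
		/* body: store 128 bytes at x0 - 256, load the next 128 from src */
		body_runs++;
		x0 += 128;
		if (last)	/* "b.ne 1b" falls through on the final pass */
			break;
	}
	/* tail: the 32nd and final 128-byte chunk is stored at x0 - 256 */
	printf("loop body ran %d times, tail stores at page offset %lu\n",
	       body_runs, (x0 - 256) & (PAGE_SIZE - 1));
	return 0;
}

This prints 31 loop iterations with the tail landing at offset 3968, i.e. exactly one page copied with no count register needed.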
Link: https://patchwork.kernel.org/patch/9836869/ Signed-off-by: Ard Biesheuvel [Sami: changed the offset and bias to be explicit] Signed-off-by: Sami Tolvanen Reviewed-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/lib/copy_page.S | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/arm64/lib/copy_page.S b/arch/arm64/lib/copy_page.S index bbb8562396af..290dd3c5266c 100644 --- a/arch/arm64/lib/copy_page.S +++ b/arch/arm64/lib/copy_page.S @@ -34,45 +34,45 @@ alternative_else_nop_endif ldp x14, x15, [x1, #96] ldp x16, x17, [x1, #112] - mov x18, #(PAGE_SIZE - 128) + add x0, x0, #256 add x1, x1, #128 1: - subs x18, x18, #128 + tst x0, #(PAGE_SIZE - 1) alternative_if ARM64_HAS_NO_HW_PREFETCH prfm pldl1strm, [x1, #384] alternative_else_nop_endif - stnp x2, x3, [x0] + stnp x2, x3, [x0, #-256] ldp x2, x3, [x1] - stnp x4, x5, [x0, #16] + stnp x4, x5, [x0, #16 - 256] ldp x4, x5, [x1, #16] - stnp x6, x7, [x0, #32] + stnp x6, x7, [x0, #32 - 256] ldp x6, x7, [x1, #32] - stnp x8, x9, [x0, #48] + stnp x8, x9, [x0, #48 - 256] ldp x8, x9, [x1, #48] - stnp x10, x11, [x0, #64] + stnp x10, x11, [x0, #64 - 256] ldp x10, x11, [x1, #64] - stnp x12, x13, [x0, #80] + stnp x12, x13, [x0, #80 - 256] ldp x12, x13, [x1, #80] - stnp x14, x15, [x0, #96] + stnp x14, x15, [x0, #96 - 256] ldp x14, x15, [x1, #96] - stnp x16, x17, [x0, #112] + stnp x16, x17, [x0, #112 - 256] ldp x16, x17, [x1, #112] add x0, x0, #128 add x1, x1, #128 - b.gt 1b + b.ne 1b - stnp x2, x3, [x0] - stnp x4, x5, [x0, #16] - stnp x6, x7, [x0, #32] - stnp x8, x9, [x0, #48] - stnp x10, x11, [x0, #64] - stnp x12, x13, [x0, #80] - stnp x14, x15, [x0, #96] - stnp x16, x17, [x0, #112] + stnp x2, x3, [x0, #-256] + stnp x4, x5, [x0, #16 - 256] + stnp x6, x7, [x0, #32 - 256] + stnp x8, x9, [x0, #48 - 256] + stnp x10, x11, [x0, #64 - 256] + stnp x12, x13, [x0, #80 - 256] + stnp x14, x15, [x0, #96 - 256] + stnp x16, x17, [x0, #112 - 256] ret ENDPROC(copy_page) -- cgit v1.2.3 From af12376814a5d8d2fa1c86eaa3adacf9dee2102e Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 6 Dec 2019 14:13:39 -0800 Subject: arm64: kvm: stop treating register x18 as caller save In preparation of reserving x18, stop treating it as caller save in the KVM guest entry/exit code. Currently, the code assumes there is no need to preserve it for the host, given that it would have been assumed clobbered anyway by the function call to __guest_enter(). Instead, preserve its value and restore it upon return. Link: https://patchwork.kernel.org/patch/9836891/ Signed-off-by: Ard Biesheuvel [Sami: updated commit message, switched from x18 to x29 for the guest context] Signed-off-by: Sami Tolvanen Reviewed-by: Kees Cook Reviewed-by: Marc Zyngier Reviewed-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/kvm/hyp/entry.S | 45 ++++++++++++++++++++++++--------------------- 1 file changed, 24 insertions(+), 21 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index e5cc8d66bf53..0c6832ec52b1 100644 --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S @@ -22,7 +22,12 @@ .text .pushsection .hyp.text, "ax" +/* + * We treat x18 as callee-saved as the host may use it as a platform + * register (e.g. for shadow call stack). 
+ */ .macro save_callee_saved_regs ctxt + str x18, [\ctxt, #CPU_XREG_OFFSET(18)] stp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)] stp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)] stp x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)] @@ -32,6 +37,8 @@ .endm .macro restore_callee_saved_regs ctxt + // We require \ctxt is not x18-x28 + ldr x18, [\ctxt, #CPU_XREG_OFFSET(18)] ldp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)] ldp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)] ldp x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)] @@ -48,7 +55,7 @@ ENTRY(__guest_enter) // x0: vcpu // x1: host context // x2-x17: clobbered by macros - // x18: guest context + // x29: guest context // Store the host regs save_callee_saved_regs x1 @@ -67,31 +74,28 @@ alternative_else_nop_endif ret 1: - add x18, x0, #VCPU_CONTEXT + add x29, x0, #VCPU_CONTEXT // Macro ptrauth_switch_to_guest format: // ptrauth_switch_to_guest(guest cxt, tmp1, tmp2, tmp3) // The below macro to restore guest keys is not implemented in C code // as it may cause Pointer Authentication key signing mismatch errors // when this feature is enabled for kernel code. - ptrauth_switch_to_guest x18, x0, x1, x2 + ptrauth_switch_to_guest x29, x0, x1, x2 // Restore guest regs x0-x17 - ldp x0, x1, [x18, #CPU_XREG_OFFSET(0)] - ldp x2, x3, [x18, #CPU_XREG_OFFSET(2)] - ldp x4, x5, [x18, #CPU_XREG_OFFSET(4)] - ldp x6, x7, [x18, #CPU_XREG_OFFSET(6)] - ldp x8, x9, [x18, #CPU_XREG_OFFSET(8)] - ldp x10, x11, [x18, #CPU_XREG_OFFSET(10)] - ldp x12, x13, [x18, #CPU_XREG_OFFSET(12)] - ldp x14, x15, [x18, #CPU_XREG_OFFSET(14)] - ldp x16, x17, [x18, #CPU_XREG_OFFSET(16)] - - // Restore guest regs x19-x29, lr - restore_callee_saved_regs x18 - - // Restore guest reg x18 - ldr x18, [x18, #CPU_XREG_OFFSET(18)] + ldp x0, x1, [x29, #CPU_XREG_OFFSET(0)] + ldp x2, x3, [x29, #CPU_XREG_OFFSET(2)] + ldp x4, x5, [x29, #CPU_XREG_OFFSET(4)] + ldp x6, x7, [x29, #CPU_XREG_OFFSET(6)] + ldp x8, x9, [x29, #CPU_XREG_OFFSET(8)] + ldp x10, x11, [x29, #CPU_XREG_OFFSET(10)] + ldp x12, x13, [x29, #CPU_XREG_OFFSET(12)] + ldp x14, x15, [x29, #CPU_XREG_OFFSET(14)] + ldp x16, x17, [x29, #CPU_XREG_OFFSET(16)] + + // Restore guest regs x18-x29, lr + restore_callee_saved_regs x29 // Do not touch any register after this! eret @@ -114,7 +118,7 @@ ENTRY(__guest_exit) // Retrieve the guest regs x0-x1 from the stack ldp x2, x3, [sp], #16 // x0, x1 - // Store the guest regs x0-x1 and x4-x18 + // Store the guest regs x0-x1 and x4-x17 stp x2, x3, [x1, #CPU_XREG_OFFSET(0)] stp x4, x5, [x1, #CPU_XREG_OFFSET(4)] stp x6, x7, [x1, #CPU_XREG_OFFSET(6)] @@ -123,9 +127,8 @@ ENTRY(__guest_exit) stp x12, x13, [x1, #CPU_XREG_OFFSET(12)] stp x14, x15, [x1, #CPU_XREG_OFFSET(14)] stp x16, x17, [x1, #CPU_XREG_OFFSET(16)] - str x18, [x1, #CPU_XREG_OFFSET(18)] - // Store the guest regs x19-x29, lr + // Store the guest regs x18-x29, lr save_callee_saved_regs x1 get_host_ctxt x2, x3 -- cgit v1.2.3 From 500d14affdf73677071c075bb9becc637b60fe39 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 6 Dec 2019 14:13:40 -0800 Subject: arm64: kernel: avoid x18 in __cpu_soft_restart The code in __cpu_soft_restart() uses x18 as an arbitrary temp register, which will shortly be disallowed. So use x8 instead. 
Link: https://patchwork.kernel.org/patch/9836877/ Signed-off-by: Ard Biesheuvel [Sami: updated commit message] Signed-off-by: Sami Tolvanen Reviewed-by: Mark Rutland Reviewed-by: Kees Cook Signed-off-by: Will Deacon --- arch/arm64/kernel/cpu-reset.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S index 6ea337d464c4..32c7bf858dd9 100644 --- a/arch/arm64/kernel/cpu-reset.S +++ b/arch/arm64/kernel/cpu-reset.S @@ -42,11 +42,11 @@ ENTRY(__cpu_soft_restart) mov x0, #HVC_SOFT_RESTART hvc #0 // no return -1: mov x18, x1 // entry +1: mov x8, x1 // entry mov x0, x2 // arg0 mov x1, x3 // arg1 mov x2, x4 // arg2 - br x18 + br x8 ENDPROC(__cpu_soft_restart) .popsection -- cgit v1.2.3 From 83b0c36b8a136042aa9209fc9ca2fab31d5fbe7c Mon Sep 17 00:00:00 2001 From: Sai Prakash Ranjan Date: Fri, 17 Jan 2020 01:33:53 +0530 Subject: arm64: Add KRYO{3,4}XX CPU cores to spectre-v2 safe list The "silver" KRYO3XX and KRYO4XX CPU cores are not affected by Spectre variant 2. Add them to spectre_v2 safe list to correct the spurious ARM_SMCCC_ARCH_WORKAROUND_1 warning and vulnerability status reported under sysfs. Reviewed-by: Stephen Boyd Tested-by: Stephen Boyd Signed-off-by: Sai Prakash Ranjan [will: tweaked commit message to remove stale mention of "gold" cores] Signed-off-by: Will Deacon --- arch/arm64/include/asm/cputype.h | 4 ++++ arch/arm64/kernel/cpu_errata.c | 2 ++ 2 files changed, 6 insertions(+) (limited to 'arch') diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index aca07c2f6e6e..a87a93f67671 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -85,6 +85,8 @@ #define QCOM_CPU_PART_FALKOR_V1 0x800 #define QCOM_CPU_PART_FALKOR 0xC00 #define QCOM_CPU_PART_KRYO 0x200 +#define QCOM_CPU_PART_KRYO_3XX_SILVER 0x803 +#define QCOM_CPU_PART_KRYO_4XX_SILVER 0x805 #define NVIDIA_CPU_PART_DENVER 0x003 #define NVIDIA_CPU_PART_CARMEL 0x004 @@ -111,6 +113,8 @@ #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1) #define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR) #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO) +#define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER) +#define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER) #define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER) #define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL) #define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX) diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 85f4bec22f6d..d661c7d0e6e2 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -548,6 +548,8 @@ static const struct midr_range spectre_v2_safe_list[] = { MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53), MIDR_ALL_VERSIONS(MIDR_HISI_TSV110), + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER), + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER), { /* sentinel */ } }; -- cgit v1.2.3 From 95b3f74bec203804658e17f86fe20755bb8abcb9 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Wed, 11 Dec 2019 18:40:09 +0000 Subject: arm64: Use macros instead of hard-coded constants for MAIR_EL1 Currently, the arm64 __cpu_setup has hard-coded constants for the memory attributes that go into the MAIR_EL1 
register. Define proper macros in asm/sysreg.h and make use of them in proc.S. Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/include/asm/sysreg.h | 12 ++++++++++++ arch/arm64/mm/proc.S | 27 ++++++++++----------------- 2 files changed, 22 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 6e919fafb43d..e21470337c5e 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -538,6 +538,18 @@ SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN |\ ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1) +/* MAIR_ELx memory attributes (used by Linux) */ +#define MAIR_ATTR_DEVICE_nGnRnE UL(0x00) +#define MAIR_ATTR_DEVICE_nGnRE UL(0x04) +#define MAIR_ATTR_DEVICE_GRE UL(0x0c) +#define MAIR_ATTR_NORMAL_NC UL(0x44) +#define MAIR_ATTR_NORMAL_WT UL(0xbb) +#define MAIR_ATTR_NORMAL UL(0xff) +#define MAIR_ATTR_MASK UL(0xff) + +/* Position the attr at the correct index */ +#define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8)) + /* id_aa64isar0 */ #define ID_AA64ISAR0_TS_SHIFT 52 #define ID_AA64ISAR0_FHM_SHIFT 48 diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index a1e0592d1fbc..55f715957b36 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -42,7 +42,14 @@ #define TCR_KASAN_FLAGS 0 #endif -#define MAIR(attr, mt) ((attr) << ((mt) * 8)) +/* Default MAIR_EL1 */ +#define MAIR_EL1_SET \ + (MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) | \ + MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) | \ + MAIR_ATTRIDX(MAIR_ATTR_DEVICE_GRE, MT_DEVICE_GRE) | \ + MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) | \ + MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) | \ + MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT)) #ifdef CONFIG_CPU_PM /** @@ -416,23 +423,9 @@ ENTRY(__cpu_setup) enable_dbg // since this is per-cpu reset_pmuserenr_el0 x0 // Disable PMU access from EL0 /* - * Memory region attributes for LPAE: - * - * n = AttrIndx[2:0] - * n MAIR - * DEVICE_nGnRnE 000 00000000 - * DEVICE_nGnRE 001 00000100 - * DEVICE_GRE 010 00001100 - * NORMAL_NC 011 01000100 - * NORMAL 100 11111111 - * NORMAL_WT 101 10111011 + * Memory region attributes */ - ldr x5, =MAIR(0x00, MT_DEVICE_nGnRnE) | \ - MAIR(0x04, MT_DEVICE_nGnRE) | \ - MAIR(0x0c, MT_DEVICE_GRE) | \ - MAIR(0x44, MT_NORMAL_NC) | \ - MAIR(0xff, MT_NORMAL) | \ - MAIR(0xbb, MT_NORMAL_WT) + mov_q x5, MAIR_EL1_SET msr mair_el1, x5 /* * Prepare SCTLR -- cgit v1.2.3 From 170b25fa6aab84dc9bdba88d4c64af5fbb789b6b Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 16 Jan 2020 18:35:44 +0000 Subject: arm64: assembler: remove inherit_daif macro We haven't needed the inherit_daif macro since commit: ed3768db588291dd ("arm64: entry: convert el1_sync to C") ... which converted all callers to C and the local_daif_inherit function. Remove the unused macro. 
Signed-off-by: Mark Rutland Cc: Catalin Marinas Cc: James Morse Cc: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/include/asm/assembler.h | 6 ------ 1 file changed, 6 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index b8cf7c85ffa2..5f8a2772baeb 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -40,12 +40,6 @@ msr daif, \flags .endm - /* Only on aarch64 pstate, PSR_D_BIT is different for aarch32 */ - .macro inherit_daif, pstate:req, tmp:req - and \tmp, \pstate, #(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT) - msr daif, \tmp - .endm - /* IRQ is the lowest priority flag, unconditionally unmask the rest. */ .macro enable_da_f msr daifclr, #(8 | 4 | 1) -- cgit v1.2.3 From ddb953f86cfb1ec450eff16a94230fc6d9901552 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 16 Jan 2020 18:35:45 +0000 Subject: arm64: assembler: remove smp_dmb macro These days arm64 kernels are always SMP, and thus smp_dmb is an overly-long way of writing dmb. Naturally, no-one uses it. Remove the unused macro. Signed-off-by: Mark Rutland Cc: Catalin Marinas Cc: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/include/asm/assembler.h | 7 ------- 1 file changed, 7 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 5f8a2772baeb..995362adaac0 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -79,13 +79,6 @@ 9990: .endm -/* - * SMP data memory barrier - */ - .macro smp_dmb, opt - dmb \opt - .endm - /* * RAS Error Synchronization barrier */ -- cgit v1.2.3 From 2d226c1e1c194a4737bf17ad8432a479e4e7e4a6 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 16 Jan 2020 18:35:46 +0000 Subject: arm64: entry: mark all entry code as notrace Almost all functions in entry-common.c are marked notrace, with el1_undef and el1_inv being the only exceptions. We appear to have done this on the assumption that there were no exception registers that we needed to snapshot, and thus it was safe to run trace code that might result in further exceptions and clobber those registers. However, until we inherit the DAIF flags, our irq flag tracing is stale, and this discrepancy could set off warnings in some configurations. For example, if CONFIG_DEBUG_LOCKDEP is selected and a trace function calls into any flag-checking locking routines. Given we don't expect to trigger el1_undef or el1_inv unless something is already wrong, any irqflag warnings are liable to mask the information we'd actually care about. Let's keep things simple and mark el1_undef and el1_inv as notrace. Developers can trace do_undefinstr and bad_mode if they really want to monitor these cases.
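For anyone unfamiliar with the annotation: notrace is a plain function attribute that keeps the compiler's tracing hooks out of a function. A minimal stand-alone illustration follows; the macro body mirrors what <linux/compiler_types.h> carried at the time, but this is a sketch, not kernel code.

#define notrace __attribute__((__no_instrument_function__))

/* In a -pg / -finstrument-functions build, an annotated function gets no
 * entry/exit hooks, so it cannot recurse into tracer code whose irqflag
 * bookkeeping is still stale this early in exception entry. */
static void notrace el1_undef_sketch(void)
{
	/* ...inherit DAIF, then call do_undefinstr()... */
}

int main(void)
{
	el1_undef_sketch();
	return 0;
}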
Signed-off-by: Mark Rutland Cc: Catalin Marinas Cc: James Morse Cc: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/kernel/entry-common.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index 5dce5e56995a..67198142a0fc 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -36,14 +36,14 @@ static void notrace el1_pc(struct pt_regs *regs, unsigned long esr) } NOKPROBE_SYMBOL(el1_pc); -static void el1_undef(struct pt_regs *regs) +static void notrace el1_undef(struct pt_regs *regs) { local_daif_inherit(regs); do_undefinstr(regs); } NOKPROBE_SYMBOL(el1_undef); -static void el1_inv(struct pt_regs *regs, unsigned long esr) +static void notrace el1_inv(struct pt_regs *regs, unsigned long esr) { local_daif_inherit(regs); bad_mode(regs, 0, esr); -- cgit v1.2.3 From 7a2c094464e39a54f5b9228cd78208cd43872bbd Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 16 Jan 2020 18:35:47 +0000 Subject: arm64: entry: cleanup el0 svc handler naming For most of the exception entry code, <foo>_handler() is the first C function called from the entry assembly in entry-common.c, and external functions handling the bulk of the logic are called do_<foo>(). For consistency, apply this scheme to el0_svc_handler and el0_svc_compat_handler, renaming them to do_el0_svc and do_el0_svc_compat respectively. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Reviewed-by: Anshuman Khandual Cc: Catalin Marinas Cc: James Morse Cc: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/include/asm/exception.h | 4 ++-- arch/arm64/kernel/entry-common.c | 4 ++-- arch/arm64/kernel/syscall.c | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h index 4d5f3b5f50cd..b87c6e276ab1 100644 --- a/arch/arm64/include/asm/exception.h +++ b/arch/arm64/include/asm/exception.h @@ -45,8 +45,8 @@ void do_sysinstr(unsigned int esr, struct pt_regs *regs); void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs); void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr); void do_cp15instr(unsigned int esr, struct pt_regs *regs); -void el0_svc_handler(struct pt_regs *regs); -void el0_svc_compat_handler(struct pt_regs *regs); +void do_el0_svc(struct pt_regs *regs); +void do_el0_svc_compat(struct pt_regs *regs); void do_el0_ia_bp_hardening(unsigned long addr, unsigned int esr, struct pt_regs *regs); diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index 67198142a0fc..fde59981445c 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -215,7 +215,7 @@ static void notrace el0_svc(struct pt_regs *regs) if (system_uses_irq_prio_masking()) gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); - el0_svc_handler(regs); + do_el0_svc(regs); } NOKPROBE_SYMBOL(el0_svc); @@ -281,7 +281,7 @@ static void notrace el0_svc_compat(struct pt_regs *regs) if (system_uses_irq_prio_masking()) gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); - el0_svc_compat_handler(regs); + do_el0_svc_compat(regs); } NOKPROBE_SYMBOL(el0_svc_compat); diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c index 9a9d98a443fc..a12c0c88d345 100644 --- a/arch/arm64/kernel/syscall.c +++ b/arch/arm64/kernel/syscall.c @@ -154,14 +154,14 @@ static inline void sve_user_discard(void) sve_user_disable(); }
-void el0_svc_handler(struct pt_regs *regs) +void do_el0_svc(struct pt_regs *regs) { sve_user_discard(); el0_svc_common(regs, regs->regs[8], __NR_syscalls, sys_call_table); } #ifdef CONFIG_COMPAT -void el0_svc_compat_handler(struct pt_regs *regs) +void do_el0_svc_compat(struct pt_regs *regs) { el0_svc_common(regs, regs->regs[7], __NR_compat_syscalls, compat_sys_call_table); -- cgit v1.2.3 From 3e3934176a4f9d479dea5cdd2fe6dd560f0ca0cf Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 16 Jan 2020 18:35:48 +0000 Subject: arm64: entry: cleanup sp_el0 manipulation The kernel stashes the current task struct in sp_el0 so that this can be acquired consistently/cheaply when required. When we take an exception from EL0 we have to: 1) stash the original sp_el0 value 2) find the current task 3) update sp_el0 with the current task pointer Currently steps #1 and #2 occur in one place, and step #3 a while later. As the value of sp_el0 is immaterial between these points, let's move them together to make the code clearer and minimize ifdeffery. This necessitates moving the comment for MDSCR_EL1.SS. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Cc: Catalin Marinas Cc: James Morse Cc: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/kernel/entry.S | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 7c6a0a41676f..3d7a9c0ab844 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -167,9 +167,13 @@ alternative_cb_end .if \el == 0 clear_gp_regs mrs x21, sp_el0 - ldr_this_cpu tsk, __entry_task, x20 // Ensure MDSCR_EL1.SS is clear, - ldr x19, [tsk, #TSK_TI_FLAGS] // since we can unmask debug - disable_step_tsk x19, x20 // exceptions when scheduling. + ldr_this_cpu tsk, __entry_task, x20 + msr sp_el0, tsk + + // Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions + // when scheduling. + ldr x19, [tsk, #TSK_TI_FLAGS] + disable_step_tsk x19, x20 apply_ssbd 1, x22, x23 @@ -232,13 +236,6 @@ alternative_else_nop_endif str w21, [sp, #S_SYSCALLNO] .endif - /* * Set sp_el0 to current thread_info. */ - .if \el == 0 - msr sp_el0, tsk - .endif - /* Save pmr */ alternative_if ARM64_HAS_IRQ_PRIO_MASKING mrs_s x20, SYS_ICC_PMR_EL1 -- cgit v1.2.3 From c2c24edb1d9c308011f5a1328563d8da8c92c849 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Fri, 17 Jan 2020 15:48:39 +0000 Subject: arm64: csum: Fix pathological zero-length calls In validating the checksumming results of the new routine, I sadly neglected to test its not-checksumming results. Thus it slipped through that the one case where @buff is already dword-aligned and @len = 0 manages to defeat the tail-masking logic and behave as if @len = 8. For a zero length it doesn't make much sense to dereference @buff anyway, so just add an early return (which has essentially zero impact on performance).
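To make the failure mode concrete, here is a hedged user-space sketch of the boundary condition the patch closes. do_csum_sketch() stands in for the real arch/arm64/lib/csum.c routine, whose summation body is elided:

#include <assert.h>
#include <stddef.h>

/* Stand-in for arm64's do_csum(); only the new guard is real logic. */
static unsigned int do_csum_sketch(const unsigned char *buff, int len)
{
	if (len == 0)	/* the early return added by this patch */
		return 0;

	/*
	 * ...word-at-a-time summation would follow. Without the guard,
	 * an 8-byte-aligned buff with len == 0 slipped past the tail
	 * masking and was summed as if len == 8, reading 8 bytes.
	 */
	return 0xffff;	/* placeholder result */
}

int main(void)
{
	/* The degenerate call is now a no-op and never touches buff. */
	assert(do_csum_sketch(NULL, 0) == 0);
	return 0;
}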
Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- arch/arm64/lib/csum.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/arm64/lib/csum.c b/arch/arm64/lib/csum.c index 847eb725ce09..1f82c66b32ea 100644 --- a/arch/arm64/lib/csum.c +++ b/arch/arm64/lib/csum.c @@ -20,6 +20,9 @@ unsigned int do_csum(const unsigned char *buff, int len) const u64 *ptr; u64 data, sum64 = 0; + if (unlikely(len == 0)) + return 0; + offset = (unsigned long)buff & 7; /* * This is to all intents and purposes safe, since rounding down cannot -- cgit v1.2.3 From 98346023365931948b78436ae761544b09035b3b Mon Sep 17 00:00:00 2001 From: Vladimir Murzin Date: Mon, 20 Jan 2020 10:36:02 +0000 Subject: arm64: Kconfig: select HAVE_FUTEX_CMPXCHG arm64 provides an always-working implementation of futex_atomic_cmpxchg_inatomic(), so there is no need to check it at runtime. Reported-by: Piyush swami Signed-off-by: Vladimir Murzin Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index e9b1fc22f72e..6c27b8126817 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -161,6 +161,7 @@ config ARM64 select HAVE_PERF_USER_STACK_DUMP select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_FUNCTION_ARG_ACCESS_API + select HAVE_FUTEX_CMPXCHG if FUTEX select HAVE_RCU_TABLE_FREE select HAVE_RSEQ select HAVE_STACKPROTECTOR -- cgit v1.2.3 From 108eae2d4d10cd29f7af75d45e38bc3bcde24a7c Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 9 Jan 2020 16:02:59 +0000 Subject: arm64: entry: Avoid empty alternatives entries kernel_ventry will create alternative entries to potentially replace 0 instructions with 0 instructions for EL1 vectors. While this does not cause an issue, it pointlessly takes up some bytes in the alternatives section. Do not generate such entries. Acked-by: Mark Rutland Signed-off-by: Julien Thierry Signed-off-by: Will Deacon --- arch/arm64/kernel/entry.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 7c6a0a41676f..605bb7cbe499 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -60,16 +60,16 @@ .macro kernel_ventry, el, label, regsize = 64 .align 7 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 -alternative_if ARM64_UNMAP_KERNEL_AT_EL0 .if \el == 0 +alternative_if ARM64_UNMAP_KERNEL_AT_EL0 .if \regsize == 64 mrs x30, tpidrro_el0 msr tpidrro_el0, xzr .else mov x30, xzr .endif - .endif alternative_else_nop_endif + .endif #endif sub sp, sp, #S_FRAME_SIZE -- cgit v1.2.3 From d7bbd6c1b01cb5dd13c245d4586a83145c1d5f52 Mon Sep 17 00:00:00 2001 From: Dirk Behme Date: Tue, 21 Jan 2020 16:54:39 +0100 Subject: arm64: kbuild: remove compressed images on 'make ARCH=arm64 (dist)clean' Since v4.3-rc1 commit 0723c05fb75e44 ("arm64: enable more compressed Image formats"), it is possible to build Image.{bz2,lz4,lzma,lzo} AArch64 images. However, the commit missed adding support for removing those images on 'make ARCH=arm64 (dist)clean'. Fix this by adding them to the target list. Make sure to match the order of the recipes in the makefile.
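Looking back at the HAVE_FUTEX_CMPXCHG patch above (its own message offers no clean break point for an example): without that select, the generic futex code decides at boot whether futex_atomic_cmpxchg_inatomic() works by poking a NULL user address and checking for a clean fault. A hedged sketch of the probe being compiled out -- the helper below fakes the arch routine, and -14 stands in for -EFAULT:

#include <stddef.h>

/* Fake arch helper: "fault" on a NULL address, otherwise succeed. */
static int cmpxchg_futex_value_locked(unsigned int *uval,
				      unsigned int *uaddr,
				      unsigned int oldval,
				      unsigned int newval)
{
	if (uaddr == NULL)
		return -14;	/* -EFAULT */
	*uval = *uaddr;
	if (*uaddr == oldval)
		*uaddr = newval;
	return 0;
}

static int futex_cmpxchg_enabled;

/* Modeled on kernel/futex.c:futex_detect_cmpxchg(), which selecting
 * HAVE_FUTEX_CMPXCHG compiles out entirely. */
static void futex_detect_cmpxchg(void)
{
	unsigned int curval;

	if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -14)
		futex_cmpxchg_enabled = 1;
}

int main(void)
{
	futex_detect_cmpxchg();
	return futex_cmpxchg_enabled ? 0 : 1;
}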
Cc: stable@vger.kernel.org # v4.3+ Fixes: 0723c05fb75e44 ("arm64: enable more compressed Image formats") Signed-off-by: Dirk Behme Signed-off-by: Eugeniu Rosca Reviewed-by: Masahiro Yamada Signed-off-by: Will Deacon --- arch/arm64/boot/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile index 1f012c506434..cd3414898d10 100644 --- a/arch/arm64/boot/Makefile +++ b/arch/arm64/boot/Makefile @@ -16,7 +16,7 @@ OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S -targets := Image Image.gz +targets := Image Image.bz2 Image.gz Image.lz4 Image.lzma Image.lzo $(obj)/Image: vmlinux FORCE $(call if_changed,objcopy) -- cgit v1.2.3 From 1a50ec0b3b2e9a83f1b1245ea37a853aac2f741c Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Tue, 21 Jan 2020 12:58:52 +0000 Subject: arm64: Implement archrandom.h for ARMv8.5-RNG Expose the ID_AA64ISAR0.RNDR field to userspace, as the RNG system registers are always available at EL0. Implement arch_get_random_seed_long using RNDR. Given that the TRNG is likely to be a shared resource between cores and VMs, do not explicitly force re-seeding with RNDRRS. In order to avoid code complexity and potential issues with heterogeneous systems, only provide values after cpufeature has finalized the system capabilities. Signed-off-by: Richard Henderson [Modified to only function after cpufeature has finalized the system capabilities and move all the code into the header -- broonie] Signed-off-by: Mark Brown Reviewed-by: Mark Rutland Reviewed-by: Ard Biesheuvel [will: Advertise HWCAP via /proc/cpuinfo] Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 12 +++++++ arch/arm64/include/asm/archrandom.h | 67 +++++++++++++++++++++++++++++++++++++ arch/arm64/include/asm/cpucaps.h | 3 +- arch/arm64/include/asm/hwcap.h | 1 + arch/arm64/include/asm/sysreg.h | 4 +++ arch/arm64/include/uapi/asm/hwcap.h | 1 + arch/arm64/kernel/cpufeature.c | 14 ++++++++ arch/arm64/kernel/cpuinfo.c | 1 + 8 files changed, 102 insertions(+), 1 deletion(-) create mode 100644 arch/arm64/include/asm/archrandom.h (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index b1b4476ddb83..835f8158220e 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1484,6 +1484,18 @@ config ARM64_PTR_AUTH endmenu +menu "ARMv8.5 architectural features" + +config ARCH_RANDOM + bool "Enable support for random number generation" + default y + help + Random number generation (part of the ARMv8.5 Extensions) + provides a high bandwidth, cryptographically secure + hardware random number generator. + +endmenu + config ARM64_SVE bool "ARM Scalable Vector Extension support" default y diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h new file mode 100644 index 000000000000..5ea5a1ce5a5f --- /dev/null +++ b/arch/arm64/include/asm/archrandom.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_ARCHRANDOM_H +#define _ASM_ARCHRANDOM_H + +#ifdef CONFIG_ARCH_RANDOM + +#include +#include + +static inline bool __arm64_rndr(unsigned long *v) +{ + bool ok; + + /* + * Reads of RNDR set PSTATE.NZCV to 0b0000 on success, + * and set PSTATE.NZCV to 0b0100 otherwise.
+ */ + asm volatile( + __mrs_s("%0", SYS_RNDR_EL0) "\n" + " cset %w1, ne\n" + : "=r" (*v), "=r" (ok) + : + : "cc"); + + return ok; +} + +static inline bool __must_check arch_get_random_long(unsigned long *v) +{ + return false; +} + +static inline bool __must_check arch_get_random_int(unsigned int *v) +{ + return false; +} + +static inline bool __must_check arch_get_random_seed_long(unsigned long *v) +{ + /* + * Only support the generic interface after we have detected + * the system wide capability, avoiding complexity with the + * cpufeature code and with potential scheduling between CPUs + * with and without the feature. + */ + if (!cpus_have_const_cap(ARM64_HAS_RNG)) + return false; + + return __arm64_rndr(v); +} + + +static inline bool __must_check arch_get_random_seed_int(unsigned int *v) +{ + unsigned long val; + bool ok = arch_get_random_seed_long(&val); + + *v = val; + return ok; +} + +#else + +static inline bool __arm64_rndr(unsigned long *v) { return false; } + +#endif /* CONFIG_ARCH_RANDOM */ +#endif /* _ASM_ARCHRANDOM_H */ diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index b92683871119..515f4fbcbf91 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -56,7 +56,8 @@ #define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM 46 #define ARM64_WORKAROUND_1542419 47 #define ARM64_WORKAROUND_1319367 48 +#define ARM64_HAS_RNG 49 -#define ARM64_NCAPS 49 +#define ARM64_NCAPS 50 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h index 3d2f2472a36c..fa186480e805 100644 --- a/arch/arm64/include/asm/hwcap.h +++ b/arch/arm64/include/asm/hwcap.h @@ -86,6 +86,7 @@ #define KERNEL_HWCAP_SVESM4 __khwcap2_feature(SVESM4) #define KERNEL_HWCAP_FLAGM2 __khwcap2_feature(FLAGM2) #define KERNEL_HWCAP_FRINT __khwcap2_feature(FRINT) +#define KERNEL_HWCAP_RNG __khwcap2_feature(RNG) /* * This yields a mask that user programs can use to figure out what diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 6e919fafb43d..5e718f279469 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -365,6 +365,9 @@ #define SYS_CTR_EL0 sys_reg(3, 3, 0, 0, 1) #define SYS_DCZID_EL0 sys_reg(3, 3, 0, 0, 7) +#define SYS_RNDR_EL0 sys_reg(3, 3, 2, 4, 0) +#define SYS_RNDRRS_EL0 sys_reg(3, 3, 2, 4, 1) + #define SYS_PMCR_EL0 sys_reg(3, 3, 9, 12, 0) #define SYS_PMCNTENSET_EL0 sys_reg(3, 3, 9, 12, 1) #define SYS_PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2) @@ -539,6 +542,7 @@ ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1) /* id_aa64isar0 */ +#define ID_AA64ISAR0_RNDR_SHIFT 60 #define ID_AA64ISAR0_TS_SHIFT 52 #define ID_AA64ISAR0_FHM_SHIFT 48 #define ID_AA64ISAR0_DP_SHIFT 44 diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h index a1e72886b30c..f192ac33fc76 100644 --- a/arch/arm64/include/uapi/asm/hwcap.h +++ b/arch/arm64/include/uapi/asm/hwcap.h @@ -65,5 +65,6 @@ #define HWCAP2_SVESM4 (1 << 6) #define HWCAP2_FLAGM2 (1 << 7) #define HWCAP2_FRINT (1 << 8) +#define HWCAP2_RNG (1 << 9) #endif /* _UAPI__ASM_HWCAP_H */ diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 04cf64e9f0c9..3b94e8047c9e 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -119,6 +119,7 @@ static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap); * sync with the documentation of the CPU feature register ABI. 
*/ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = { + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RNDR_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0), @@ -1566,6 +1567,18 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .sign = FTR_UNSIGNED, .min_field_value = 1, }, +#endif +#ifdef CONFIG_ARCH_RANDOM + { + .desc = "Random Number Generator", + .capability = ARM64_HAS_RNG, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cpuid_feature, + .sys_reg = SYS_ID_AA64ISAR0_EL1, + .field_pos = ID_AA64ISAR0_RNDR_SHIFT, + .sign = FTR_UNSIGNED, + .min_field_value = 1, + }, #endif {}, }; @@ -1638,6 +1651,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM), HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM), HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RNDR_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG), HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP), HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP), HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD), diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 56bba746da1c..3000dd27816f 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -84,6 +84,7 @@ static const char *const hwcap_str[] = { "svesm4", "flagm2", "frint", + "rng", NULL }; -- cgit v1.2.3 From 2e8e1ea88cbcb19a77b7acb67f6ffe39cc15740c Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 21 Jan 2020 12:58:53 +0000 Subject: arm64: Use v8.5-RNG entropy for KASLR seed When seeding KASLR on a system where we have architecture-level random number generation, make use of that entropy, mixing it in with the seed passed by the bootloader. Since this is run very early in init, before feature detection is complete, we open code rather than use archrandom.h. Signed-off-by: Mark Brown Reviewed-by: Mark Rutland Reviewed-by: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm64/include/asm/archrandom.h | 8 ++++++++ arch/arm64/kernel/kaslr.c | 11 +++++++++++ 2 files changed, 19 insertions(+) (limited to 'arch') diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h index 5ea5a1ce5a5f..3fe02da70004 100644 --- a/arch/arm64/include/asm/archrandom.h +++ b/arch/arm64/include/asm/archrandom.h @@ -59,9 +59,17 @@ static inline bool __must_check arch_get_random_seed_int(unsigned int *v) return ok; } +static inline bool __init __early_cpu_has_rndr(void) +{ + /* Open code as we run prior to the first call to cpufeature.
*/ unsigned long ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1); + return (ftr >> ID_AA64ISAR0_RNDR_SHIFT) & 0xf; +} + #else static inline bool __arm64_rndr(unsigned long *v) { return false; } +static inline bool __init __early_cpu_has_rndr(void) { return false; } #endif /* CONFIG_ARCH_RANDOM */ #endif /* _ASM_ARCHRANDOM_H */ diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c index 2a11a962e571..53b8a4ee64ff 100644 --- a/arch/arm64/kernel/kaslr.c +++ b/arch/arm64/kernel/kaslr.c @@ -120,6 +120,17 @@ u64 __init kaslr_early_init(u64 dt_phys) return 0; } + /* + * Mix in any entropy obtainable architecturally, open coded + * since this runs extremely early. + */ + if (__early_cpu_has_rndr()) { + unsigned long raw; + + if (__arm64_rndr(&raw)) + seed ^= raw; + } + if (!seed) { kaslr_status = KASLR_DISABLED_NO_SEED; return 0; -- cgit v1.2.3 From e717d93b1c3f5e263c6c43b6ff0835a1279cb6fc Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 22 Jan 2020 11:23:54 +0000 Subject: arm64: kconfig: Fix alignment of E0PD help text Remove the additional space. Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 9cee2008ea9e..aca103ef52b4 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1490,13 +1490,13 @@ config ARM64_E0PD bool "Enable support for E0PD" default y help - E0PD (part of the ARMv8.5 extensions) allows us to ensure - that EL0 accesses made via TTBR1 always fault in constant time, - providing similar benefits to KASLR as those provided by KPTI, but - with lower overhead and without disrupting legitimate access to - kernel memory such as SPE. - - This option enables E0PD for TTBR1 where available. + E0PD (part of the ARMv8.5 extensions) allows us to ensure + that EL0 accesses made via TTBR1 always fault in constant time, + providing similar benefits to KASLR as those provided by KPTI, but + with lower overhead and without disrupting legitimate access to + kernel memory such as SPE. + This option enables E0PD for TTBR1 where available. endmenu -- cgit v1.2.3 From e533dbe9dcb199bb637a2c465f3a6e70564994fe Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 22 Jan 2020 12:45:46 +0000 Subject: arm64: acpi: fix DAIF manipulation with pNMI Since commit: d44f1b8dd7e66d80 ("arm64: KVM/mm: Move SEA handling behind a single 'claim' interface") ... the top-level APEI SEA handler has the shape: 1. current_flags = arch_local_save_flags() 2. local_daif_restore(DAIF_ERRCTX) 3. <SEA handling code> 4. local_daif_restore(current_flags) However, since commit: 4a503217ce37e1f4 ("arm64: irqflags: Use ICC_PMR_EL1 for interrupt masking") ... when pseudo-NMIs (pNMIs) are in use, arch_local_save_flags() will save the PMR value rather than the DAIF flags. The combination of these two commits means that the APEI SEA handler will erroneously attempt to restore the PMR value into DAIF. Fix this by factoring local_daif_save_flags() out of local_daif_save(), so that we can consistently save DAIF in step #1, regardless of whether pNMIs are in use. Both commits were introduced concurrently in v5.0.
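Before the diff, a compact C model of the corrected flow may help. DAIF_ERRCTX mirrors asm/daifflags.h, but the helpers below are stubs over a fake register: this sketches the control flow only, not the real inline asm.

#define PSR_F_BIT	0x40UL
#define PSR_I_BIT	0x80UL
#define PSR_A_BIT	0x100UL
#define DAIF_ERRCTX	(PSR_I_BIT | PSR_A_BIT)

static unsigned long fake_daif;	/* stand-in for the DAIF register */

/* The point of the fix: this always yields DAIF-format flags, even when
 * pseudo-NMIs route normal interrupt masking through ICC_PMR_EL1. */
static unsigned long local_daif_save_flags(void) { return fake_daif; }
static void local_daif_restore(unsigned long f) { fake_daif = f; }

static int apei_claim_sea_sketch(void)
{
	unsigned long current_flags = local_daif_save_flags();

	local_daif_restore(DAIF_ERRCTX);	/* mask IRQ + SError */
	/* ...GHES SEA handling would run here... */
	local_daif_restore(current_flags);	/* restore DAIF, not PMR */
	return 0;
}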
Cc: <stable@vger.kernel.org> Fixes: 4a503217ce37e1f4 ("arm64: irqflags: Use ICC_PMR_EL1 for interrupt masking") Fixes: d44f1b8dd7e66d80 ("arm64: KVM/mm: Move SEA handling behind a single 'claim' interface") Signed-off-by: Mark Rutland Cc: Catalin Marinas Cc: James Morse Cc: Julien Thierry Cc: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/include/asm/daifflags.h | 11 ++++++++++- arch/arm64/kernel/acpi.c | 2 +- 2 files changed, 11 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h index 72acd2db167f..ec213b4a1650 100644 --- a/arch/arm64/include/asm/daifflags.h +++ b/arch/arm64/include/asm/daifflags.h @@ -38,7 +38,7 @@ static inline void local_daif_mask(void) trace_hardirqs_off(); } -static inline unsigned long local_daif_save(void) +static inline unsigned long local_daif_save_flags(void) { unsigned long flags; @@ -50,6 +50,15 @@ static inline unsigned long local_daif_save(void) flags |= PSR_I_BIT; } + return flags; +} + +static inline unsigned long local_daif_save(void) +{ + unsigned long flags; + + flags = local_daif_save_flags(); + local_daif_mask(); return flags; diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c index 3a58e9db5cfe..a100483b47c4 100644 --- a/arch/arm64/kernel/acpi.c +++ b/arch/arm64/kernel/acpi.c @@ -274,7 +274,7 @@ int apei_claim_sea(struct pt_regs *regs) if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES)) return err; - current_flags = arch_local_save_flags(); + current_flags = local_daif_save_flags(); /* * SEA can interrupt SError, mask it and describe this as an NMI so -- cgit v1.2.3
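Finally, circling back to the ARMv8.5-RNG patches earlier in this series (whose interleaved diffs offered no clean break point for an example): a hedged sketch of how a caller sees the new interface. The sysreg encoding s3_3_c2_c4_0 is RNDR's, but cpu_has_rng and the USE_REAL_RNDR opt-in are inventions of this sketch; executing the mrs on a CPU without FEAT_RNG would trap, hence the guard.

#include <stdbool.h>

static bool cpu_has_rng;  /* models cpus_have_const_cap(ARM64_HAS_RNG) */

static bool rndr_sketch(unsigned long *v)
{
#if defined(__aarch64__) && defined(USE_REAL_RNDR)
	bool ok;

	/* RNDR leaves NZCV at 0b0000 on success and 0b0100 on failure. */
	asm volatile("mrs %0, s3_3_c2_c4_0\n\t"
		     "cset %w1, ne"
		     : "=r" (*v), "=r" (ok) : : "cc");
	return ok;
#else
	*v = 0;
	return false;	/* host build: no hardware to ask */
#endif
}

/* Mirrors the shape of arch_get_random_seed_long() above: hand out
 * entropy only once the system-wide capability has been established. */
static bool get_seed_sketch(unsigned long *v)
{
	return cpu_has_rng && rndr_sketch(v);
}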