| author | Peter Collingbourne <pcc@google.com> | 2021-03-18 20:10:54 -0700 |
|---|---|---|
| committer | Catalin Marinas <catalin.marinas@arm.com> | 2021-04-13 17:31:44 +0100 |
| commit | b90e483938ce387c256e03fb144f82f64551847b (patch) | |
| tree | 142877923b78d506be4812ecacc65fb8718adf29 /arch/arm64/kernel | |
| parent | 201698626fbca1cf1a3b686ba14cf2a056500716 (diff) | |
arm64: pac: Optimize kernel entry/exit key installation code paths
The kernel does not use any keys besides IA, so we don't need to
install IB/DA/DB/GA on kernel exit if we arrange to install them
on task switch instead, which we can expect to happen an order of
magnitude less often.
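
As a rough illustration of that scheme, the C sketch below shows all five
user keys being written once per context switch instead of on every kernel
exit. It is a sketch only: the types and helpers are stand-ins, while the
actual patch does this via the `ptrauth_thread_switch_user(next)` call added
to `__switch_to()` in the diff below.

```c
/*
 * Illustrative sketch, not kernel code: user pointer-auth keys are
 * installed once per context switch instead of on every kernel exit.
 * write_key_regs() stands in for the msr writes to the APxxKeyLo/Hi
 * system registers.
 */
#include <stdint.h>

struct ptrauth_key { uint64_t lo, hi; };

struct ptrauth_keys_user {
	struct ptrauth_key apia, apib, apda, apdb, apga;
};

struct task {
	struct ptrauth_keys_user keys_user;
};

/* Stand-in for writing one 128-bit key to its pair of system registers. */
void write_key_regs(const char *name, const struct ptrauth_key *key)
{
	(void)name;
	(void)key;
}

/* Called once per context switch: install all of the next task's keys. */
void switch_user_keys(const struct task *next)
{
	const struct ptrauth_keys_user *k = &next->keys_user;

	write_key_regs("APIA", &k->apia);
	write_key_regs("APIB", &k->apib);
	write_key_regs("APDA", &k->apda);
	write_key_regs("APDB", &k->apdb);
	write_key_regs("APGA", &k->apga);
}
```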
Furthermore, we can avoid installing the user IA in the case where the
user task has IA disabled, and just leave the kernel IA installed. This
also lets us avoid needing to install IA on kernel entry.
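
The resulting entry/exit decisions are easiest to follow in C. The pseudocode
below is a sketch only, assuming stand-in names (`task_sctlr_user`,
`install_kernel_ia_key()`, `install_user_ia_key()`, `set_enia()`) for the
per-task SCTLR copy, the key-install macros and the SCTLR_EL1.EnIA updates
that the entry.S hunks below perform in assembly:

```c
/*
 * Illustrative pseudocode, not the kernel's implementation: it restates
 * the conditional logic that the patch adds to entry.S. All names below
 * are stand-ins for THREAD_SCTLR_USER, the __ptrauth_keys_install_* macros
 * and the mrs/orr|bic/msr sequences on SCTLR_EL1.
 */
#include <stdbool.h>

#define SCTLR_ENIA (1UL << 31)		/* SCTLR_ELx.EnIA */

extern unsigned long task_sctlr_user;	/* per-task copy of the user SCTLR bits */

extern void install_kernel_ia_key(void);
extern void install_user_ia_key(void);
extern void set_enia(bool enable);	/* toggle SCTLR_EL1.EnIA */

/* Kernel entry: only tasks that use IA have a user IA key installed. */
void ptrauth_kernel_entry(void)
{
	if (task_sctlr_user & SCTLR_ENIA)
		install_kernel_ia_key();	/* swap user IA -> kernel IA */
	else
		set_enia(true);			/* kernel IA key is still installed */
}

/* Kernel exit: either install the user's IA or turn IA off again. */
void ptrauth_kernel_exit(void)
{
	if (task_sctlr_user & SCTLR_ENIA)
		install_user_ia_key();		/* swap kernel IA -> user IA */
	else
		set_enia(false);		/* leave the kernel IA key in place */
}
```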
On an Apple M1 under a hypervisor, the overhead of kernel entry/exit
has been measured to be reduced by 15.6ns in the case where IA is
enabled, and 31.9ns in the case where IA is disabled.
Signed-off-by: Peter Collingbourne <pcc@google.com>
Link: https://linux-review.googlesource.com/id/Ieddf6b580d23c9e0bed45a822dabe72d2ffc9a8e
Link: https://lore.kernel.org/r/2d653d055f38f779937f2b92f8ddd5cf9e4af4f4.1616123271.git.pcc@google.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Diffstat (limited to 'arch/arm64/kernel')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | arch/arm64/kernel/asm-offsets.c | 4 |
| -rw-r--r-- | arch/arm64/kernel/entry.S | 33 |
| -rw-r--r-- | arch/arm64/kernel/pointer_auth.c | 1 |
| -rw-r--r-- | arch/arm64/kernel/process.c | 1 |
| -rw-r--r-- | arch/arm64/kernel/suspend.c | 3 |
5 files changed, 23 insertions, 19 deletions
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 8b4fbad427b3..a02573c32f79 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -148,10 +148,6 @@ int main(void)
 #endif
 #ifdef CONFIG_ARM64_PTR_AUTH
   DEFINE(PTRAUTH_USER_KEY_APIA, offsetof(struct ptrauth_keys_user, apia));
-  DEFINE(PTRAUTH_USER_KEY_APIB, offsetof(struct ptrauth_keys_user, apib));
-  DEFINE(PTRAUTH_USER_KEY_APDA, offsetof(struct ptrauth_keys_user, apda));
-  DEFINE(PTRAUTH_USER_KEY_APDB, offsetof(struct ptrauth_keys_user, apdb));
-  DEFINE(PTRAUTH_USER_KEY_APGA, offsetof(struct ptrauth_keys_user, apga));
   DEFINE(PTRAUTH_KERNEL_KEY_APIA, offsetof(struct ptrauth_keys_kernel, apia));
   BLANK();
 #endif
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 36ae1cdfaeb6..2e46c75a5b48 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -247,21 +247,26 @@ alternative_else_nop_endif
 	check_mte_async_tcf x19, x22
 	apply_ssbd 1, x22, x23
 
-	ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23
-
 #ifdef CONFIG_ARM64_PTR_AUTH
 alternative_if ARM64_HAS_ADDRESS_AUTH
 	/*
 	 * Enable IA for in-kernel PAC if the task had it disabled. Although
 	 * this could be implemented with an unconditional MRS which would avoid
 	 * a load, this was measured to be slower on Cortex-A75 and Cortex-A76.
+	 *
+	 * Install the kernel IA key only if IA was enabled in the task. If IA
+	 * was disabled on kernel exit then we would have left the kernel IA
+	 * installed so there is no need to install it again.
 	 */
 	ldr x0, [tsk, THREAD_SCTLR_USER]
-	tbnz x0, SCTLR_ELx_ENIA_SHIFT, 1f
+	tbz x0, SCTLR_ELx_ENIA_SHIFT, 1f
+	__ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23
+	b 2f
+1:
 	mrs x0, sctlr_el1
 	orr x0, x0, SCTLR_ELx_ENIA
 	msr sctlr_el1, x0
-1:
+2:
 	isb
 alternative_else_nop_endif
 #endif
@@ -368,24 +373,24 @@ alternative_else_nop_endif
 3:
 	scs_save tsk, x0
 
-	/*
-	 * No kernel C function calls after this as user keys are set and IA may
-	 * be disabled.
-	 */
-	ptrauth_keys_install_user tsk, x0, x1, x2
-
 #ifdef CONFIG_ARM64_PTR_AUTH
 alternative_if ARM64_HAS_ADDRESS_AUTH
 	/*
-	 * IA was enabled for in-kernel PAC. Disable it now if needed.
-	 * All other per-task SCTLR bits were updated on task switch.
+	 * IA was enabled for in-kernel PAC. Disable it now if needed, or
+	 * alternatively install the user's IA. All other per-task keys and
+	 * SCTLR bits were updated on task switch.
+	 *
+	 * No kernel C function calls after this.
 	 */
 	ldr x0, [tsk, THREAD_SCTLR_USER]
-	tbnz x0, SCTLR_ELx_ENIA_SHIFT, 1f
+	tbz x0, SCTLR_ELx_ENIA_SHIFT, 1f
+	__ptrauth_keys_install_user tsk, x0, x1, x2
+	b 2f
+1:
 	mrs x0, sctlr_el1
 	bic x0, x0, SCTLR_ELx_ENIA
 	msr sctlr_el1, x0
-1:
+2:
 alternative_else_nop_endif
 #endif
 
diff --git a/arch/arm64/kernel/pointer_auth.c b/arch/arm64/kernel/pointer_auth.c
index f03e5bfe4490..60901ab0a7fe 100644
--- a/arch/arm64/kernel/pointer_auth.c
+++ b/arch/arm64/kernel/pointer_auth.c
@@ -43,6 +43,7 @@ int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg)
 		get_random_bytes(&keys->apdb, sizeof(keys->apdb));
 	if (arg & PR_PAC_APGAKEY)
 		get_random_bytes(&keys->apga, sizeof(keys->apga));
+	ptrauth_keys_install_user(keys);
 
 	return 0;
 }
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index d583cbae32f6..449b94584f9a 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -568,6 +568,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
 	entry_task_switch(next);
 	ssbs_thread_switch(next);
 	erratum_1418040_thread_switch(prev, next);
+	ptrauth_thread_switch_user(next);
 
 	/*
 	 * Complete any pending TLB or cache maintenance on this CPU in case
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 6fdc8292b4f5..e3f72df9509d 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -74,8 +74,9 @@ void notrace __cpu_suspend_exit(void)
 	 */
 	spectre_v4_enable_mitigation(NULL);
 
-	/* Restore additional MTE-specific configuration */
+	/* Restore additional feature-specific configuration */
 	mte_suspend_exit();
+	ptrauth_suspend_exit();
 }
 
 /*