author     Marc Zyngier <marc.zyngier@arm.com>          2018-02-01 11:07:35 +0000
committer  Russell King <rmk+kernel@armlinux.org.uk>    2018-05-31 11:09:03 +0100
commit     3f7e8e2e1ebda787f156ce46e3f0a9ce2833fa4f
tree       cb2e36e2ad4908ce0b3daf776c8857a729a0fc31 /arch/arm/kvm/hyp
parent     c44f366ea7c85e1be27d08f2f0880f4120698125
ARM: KVM: invalidate BTB on guest exit for Cortex-A12/A17
In order to avoid aliasing attacks against the branch predictor,
let's invalidate the BTB on guest exit. This is made complicated
by the fact that we cannot take a branch before invalidating the
BTB.
We only apply this to A12 and A17, which are the only two ARM
cores on which this is useful.
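
The trick in this patch is worth spelling out: each of the eight vector
slots branches into __kvm_hyp_vector_bp_inv at a different offset, so each
exception type falls straight through a different number of "add sp, sp, #1"
instructions before reaching the BPIALL. The low three bits of SP then
identify the entry without a single branch having been taken. Below is a
minimal C model of that decode; it is illustrative only (the handler names
come from the patch, everything else is made up for the example):

	#include <stdio.h>

	/*
	 * Illustrative model, not kernel code: vector slot i enters
	 * __kvm_hyp_vector_bp_inv at offset 4*i, so it falls through
	 * (7 - i) of the "add sp, sp, #1" instructions before BPIALL.
	 * The low 3 bits of SP then encode the exception, branch-free.
	 */
	static const char *const handler[8] = {
		"hyp_fiq",	/* sp & 7 == 0: entered at the W(nop)    */
		"hyp_irq",	/* sp & 7 == 1 */
		"hyp_hvc",	/* sp & 7 == 2 */
		"hyp_dabt",	/* sp & 7 == 3 */
		"hyp_pabt",	/* sp & 7 == 4 */
		"hyp_svc",	/* sp & 7 == 5 */
		"hyp_undef",	/* sp & 7 == 6 */
		"hyp_reset",	/* sp & 7 == 7: entered at the first add */
	};

	int main(void)
	{
		unsigned long sp = 0x1000;	/* 8-byte aligned on entry */

		for (int slot = 0; slot < 8; slot++) {
			/* slot 0 = reset ... slot 7 = FIQ, vector table order */
			unsigned long decoded = (sp + (7 - slot)) & 7;
			printf("slot %d -> %s\n", slot, handler[decoded]);
		}
		return 0;
	}

Running this prints the slot-to-handler mapping that the vect_br macro
implements in assembly below.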
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
Boot-tested-by: Tony Lindgren <tony@atomide.com>
Reviewed-by: Tony Lindgren <tony@atomide.com>
Diffstat (limited to 'arch/arm/kvm/hyp')
-rw-r--r--  arch/arm/kvm/hyp/hyp-entry.S | 71
1 file changed, 69 insertions(+), 2 deletions(-)
diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S
index 95a2faefc070..e789f52a5129 100644
--- a/arch/arm/kvm/hyp/hyp-entry.S
+++ b/arch/arm/kvm/hyp/hyp-entry.S
@@ -71,6 +71,66 @@ __kvm_hyp_vector:
 	W(b)	hyp_irq
 	W(b)	hyp_fiq
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+	.align 5
+__kvm_hyp_vector_bp_inv:
+	.global __kvm_hyp_vector_bp_inv
+
+	/*
+	 * We encode the exception entry in the bottom 3 bits of
+	 * SP, and we have to guarantee to be 8 bytes aligned.
+	 */
+	W(add)	sp, sp, #1	/* Reset          7 */
+	W(add)	sp, sp, #1	/* Undef          6 */
+	W(add)	sp, sp, #1	/* Syscall        5 */
+	W(add)	sp, sp, #1	/* Prefetch abort 4 */
+	W(add)	sp, sp, #1	/* Data abort     3 */
+	W(add)	sp, sp, #1	/* HVC            2 */
+	W(add)	sp, sp, #1	/* IRQ            1 */
+	W(nop)			/* FIQ            0 */
+
+	mcr	p15, 0, r0, c7, c5, 6	/* BPIALL */
+	isb
+
+#ifdef CONFIG_THUMB2_KERNEL
+	/*
+	 * Yet another silly hack: Use VPIDR as a temp register.
+	 * Thumb2 is really a pain, as SP cannot be used with most
+	 * of the bitwise instructions. The vect_br macro ensures
+	 * things gets cleaned-up.
+	 */
+	mcr	p15, 4, r0, c0, c0, 0	/* VPIDR */
+	mov	r0, sp
+	and	r0, r0, #7
+	sub	sp, sp, r0
+	push	{r1, r2}
+	mov	r1, r0
+	mrc	p15, 4, r0, c0, c0, 0	/* VPIDR */
+	mrc	p15, 0, r2, c0, c0, 0	/* MIDR  */
+	mcr	p15, 4, r2, c0, c0, 0	/* VPIDR */
+#endif
+
+.macro vect_br val, targ
+ARM(	eor	sp, sp, #\val	)
+ARM(	tst	sp, #7		)
+ARM(	eorne	sp, sp, #\val	)
+
+THUMB(	cmp	r1, #\val	)
+THUMB(	popeq	{r1, r2}	)
+
+	beq	\targ
+.endm
+
+	vect_br	0, hyp_fiq
+	vect_br	1, hyp_irq
+	vect_br	2, hyp_hvc
+	vect_br	3, hyp_dabt
+	vect_br	4, hyp_pabt
+	vect_br	5, hyp_svc
+	vect_br	6, hyp_undef
+	vect_br	7, hyp_reset
+#endif
+
 .macro invalid_vector	label, cause
 	.align
 \label:	mov	r0, #\cause
@@ -149,7 +209,14 @@ hyp_hvc:
 	bx	ip
 
 1:
-	push	{lr}
+	/*
+	 * Pushing r2 here is just a way of keeping the stack aligned to
+	 * 8 bytes on any path that can trigger a HYP exception. Here,
+	 * we may well be about to jump into the guest, and the guest
+	 * exit would otherwise be badly decoded by our fancy
+	 * "decode-exception-without-a-branch" code...
+	 */
+	push	{r2, lr}
 
 	mov	lr, r0
 	mov	r0, r1
@@ -159,7 +226,7 @@ hyp_hvc:
 THUMB(	orr	lr, #1)
 	blx	lr			@ Call the HYP function
 
-	pop	{lr}
+	pop	{r2, lr}
 	eret
 
 guest_trap:
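
The decode only works if SP is 8-byte aligned when the exception fires,
which is why the second hunk pushes {r2, lr} rather than just {lr}. A
small, equally hypothetical model of what a lone push {lr} (leaving SP
4-byte aligned) would do to the decode:

	#include <stdio.h>

	/* Same branch-free decode as in the earlier model:
	 * (SP at entry + fall-through adds) & 7. Illustrative only. */
	static unsigned long decode(unsigned long sp_at_entry, int slot)
	{
		return (sp_at_entry + (7 - slot)) & 7;
	}

	int main(void)
	{
		/* HVC is slot 5 in the vector table; it should decode to 2. */
		printf("8-byte aligned SP: %lu (hyp_hvc)\n", decode(0x1000, 5));

		/* After a lone "push {lr}", SP sits at 0x...c: the low bits
		 * already carry garbage and the wrong handler is selected. */
		printf("4-byte aligned SP: %lu (wrong handler)\n",
		       decode(0x0ffc, 5));
		return 0;
	}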