| author | Marc Zyngier <marc.zyngier@arm.com> | 2018-12-06 17:31:19 +0000 |
|---|---|---|
| committer | Will Deacon <will.deacon@arm.com> | 2018-12-10 11:54:37 +0000 |
| commit | c987876a80e7bcb98a839f10dca9ce7fda4feced (patch) | |
| tree | 90736c01cfc0860f68da3c714d50b80a7c70eb03 | |
| parent | 9ff01193a20d391e8dbce4403dd5ef87c7eaaca6 (diff) | |
arm64: KVM: Make VHE Stage-2 TLB invalidation operations non-interruptible
Contrary to the non-VHE version of the TLB invalidation helpers, the VHE
code has interrupts enabled, meaning that we can take an interrupt in
the middle of such a sequence, and start running something else with
HCR_EL2.TGE cleared.
That's really not a good idea.
Take the heavy-handed option and disable interrupts in
__tlb_switch_to_guest_vhe, restoring them in __tlb_switch_to_host_vhe.
The latter also gains an ISB in order to make sure that TGE really has
taken effect. (A condensed sketch of the resulting VHE sequence follows the diff below.)
Cc: stable@vger.kernel.org
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
Reviewed-by: James Morse <james.morse@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r-- | arch/arm64/kvm/hyp/tlb.c | 35 |
1 file changed, 25 insertions, 10 deletions
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 4dbd9c69a96d..7fcc9c1a5f45 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -15,14 +15,19 @@
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/irqflags.h>
+
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 #include <asm/tlbflush.h>
 
-static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
+						 unsigned long *flags)
 {
 	u64 val;
 
+	local_irq_save(*flags);
+
 	/*
 	 * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
 	 * most TLB operations target EL2/EL0. In order to affect the
@@ -37,7 +42,8 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
 	isb();
 }
 
-static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
+						  unsigned long *flags)
 {
 	__load_guest_stage2(kvm);
 	isb();
@@ -48,7 +54,8 @@ static hyp_alternate_select(__tlb_switch_to_guest,
 			    __tlb_switch_to_guest_vhe,
 			    ARM64_HAS_VIRT_HOST_EXTN);
 
-static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
+						unsigned long flags)
 {
 	/*
 	 * We're done with the TLB operation, let's restore the host's
@@ -56,9 +63,12 @@ static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
 	 */
 	write_sysreg(0, vttbr_el2);
 	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+	isb();
+	local_irq_restore(flags);
 }
 
-static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
+						 unsigned long flags)
 {
 	write_sysreg(0, vttbr_el2);
 }
@@ -70,11 +80,13 @@ static hyp_alternate_select(__tlb_switch_to_host,
 
 void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
+	unsigned long flags;
+
 	dsb(ishst);
 
 	/* Switch to requested VMID */
 	kvm = kern_hyp_va(kvm);
-	__tlb_switch_to_guest()(kvm);
+	__tlb_switch_to_guest()(kvm, &flags);
 
 	/*
 	 * We could do so much better if we had the VA as well.
@@ -117,36 +129,39 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 	if (!has_vhe() && icache_is_vpipt())
 		__flush_icache_all();
 
-	__tlb_switch_to_host()(kvm);
+	__tlb_switch_to_host()(kvm, flags);
 }
 
 void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
 {
+	unsigned long flags;
+
 	dsb(ishst);
 
 	/* Switch to requested VMID */
 	kvm = kern_hyp_va(kvm);
-	__tlb_switch_to_guest()(kvm);
+	__tlb_switch_to_guest()(kvm, &flags);
 
 	__tlbi(vmalls12e1is);
 	dsb(ish);
 	isb();
 
-	__tlb_switch_to_host()(kvm);
+	__tlb_switch_to_host()(kvm, flags);
 }
 
 void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
 {
	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+	unsigned long flags;
 
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest()(kvm);
+	__tlb_switch_to_guest()(kvm, &flags);
 
 	__tlbi(vmalle1);
 	dsb(nsh);
 	isb();
 
-	__tlb_switch_to_host()(kvm);
+	__tlb_switch_to_host()(kvm, flags);
 }
 
 void __hyp_text __kvm_flush_vm_context(void)
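For readers who want the fix at a glance, here is a condensed, illustrative sketch of the sequence the patch enforces on the VHE path. The function name `example_vhe_stage2_flush` and the flattened structure are hypothetical: in the real code the switch is split across `__tlb_switch_to_guest_vhe()` and `__tlb_switch_to_host_vhe()` and dispatched through `hyp_alternate_select`, as shown in the diff above.

```c
/*
 * Illustrative sketch only -- hypothetical helper, not part of the patch.
 * The real logic lives in __tlb_switch_to_guest_vhe()/__tlb_switch_to_host_vhe()
 * in arch/arm64/kvm/hyp/tlb.c, shown in the diff above.
 */
#include <linux/irqflags.h>

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>

static void __hyp_text example_vhe_stage2_flush(struct kvm *kvm)
{
	unsigned long flags;
	u64 val;

	/* Nothing may run with HCR_EL2.TGE cleared, so block interrupts first. */
	local_irq_save(flags);

	/*
	 * Point Stage-2 at the guest and clear TGE so the TLBI below targets
	 * the guest's EL1/EL0 translations instead of the host's.
	 */
	__load_guest_stage2(kvm);
	val = read_sysreg(hcr_el2);
	val &= ~HCR_TGE;
	write_sysreg(val, hcr_el2);
	isb();

	/* The Stage-2 TLB invalidation itself. */
	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

	/* Restore the host configuration... */
	write_sysreg(0, vttbr_el2);
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	/* ...and make sure TGE is back in force before interrupts can fire. */
	isb();
	local_irq_restore(flags);
}
```

The ordering is the point of the patch: the ISB after restoring HCR_EL2 ensures TGE has really taken effect before local_irq_restore() lets an interrupt in, closing the window the commit message describes.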