author     Sean Christopherson <sean.j.christopherson@intel.com>  2020-03-20 14:28:05 -0700
committer  Paolo Bonzini <pbonzini@redhat.com>                    2020-04-20 17:26:06 -0400
commit     446ace4bca423297d62342801799ba4952d6b910 (patch)
tree       f74fd128ef5d306f7ca67200be661e3805426c4c /arch
parent     c746b3a4b84cc6eb2f8a45e89210f57d252d98fd (diff)
KVM: VMX: Use vpid_sync_context() directly when possible
Use vpid_sync_context() directly for flows that run if and only if
enable_vpid=1, or more specifically, nested VMX flows that are gated by
vmx->nested.msrs.secondary_ctls_high.SECONDARY_EXEC_ENABLE_VPID being
set, which is allowed if and only if enable_vpid=1. Because these flows
call __vmx_flush_tlb() with @invalidate_gpa=false, the if-statement that
decides between INVEPT and INVVPID will always go down the INVVPID path,
i.e. call vpid_sync_context() because
"enable_ept && (invalidate_gpa || !enable_vpid)" always evaluates false.
This helps pave the way toward removing @invalidate_gpa and @vpid from
__vmx_flush_tlb() and its callers.
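For reference, the check in question looks roughly like this (a simplified
sketch of __vmx_flush_tlb() as it stands at this point in the series; the
EPT-path details are recalled from context, not quoted verbatim):

static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
				   bool invalidate_gpa)
{
	if (enable_ept && (invalidate_gpa || !enable_vpid)) {
		/*
		 * INVEPT path: unreachable for the converted flows, which
		 * pass invalidate_gpa=false and require enable_vpid=1.
		 */
		ept_sync_context(construct_eptp(vcpu,
						vcpu->arch.mmu->root_hpa));
	} else {
		/* INVVPID path: the branch every converted flow takes. */
		vpid_sync_context(vpid);
	}
}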
Opportunistically drop unnecessary brackets in handle_invvpid() around an
affected __vmx_flush_tlb()->vpid_sync_context() conversion.
No functional change intended.
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200320212833.3507-10-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kvm/vmx/nested.c	12
1 file changed, 6 insertions, 6 deletions
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index d0857bfcb568..919fb7409535 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2459,7 +2459,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 	if (nested_cpu_has_vpid(vmcs12) && nested_has_guest_tlb_tag(vcpu)) {
 		if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
 			vmx->nested.last_vpid = vmcs12->virtual_processor_id;
-			__vmx_flush_tlb(vcpu, nested_get_vpid02(vcpu), false);
+			vpid_sync_context(nested_get_vpid02(vcpu));
 		}
 	} else {
 		/*
@@ -5234,21 +5234,21 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 		    is_noncanonical_address(operand.gla, vcpu))
 			return nested_vmx_failValid(vcpu,
 				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
-		if (cpu_has_vmx_invvpid_individual_addr()) {
+		if (cpu_has_vmx_invvpid_individual_addr())
 			__invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR,
 				vpid02, operand.gla);
-		} else
-			__vmx_flush_tlb(vcpu, vpid02, false);
+		else
+			vpid_sync_context(vpid02);
 		break;
 	case VMX_VPID_EXTENT_SINGLE_CONTEXT:
 	case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
 		if (!operand.vpid)
 			return nested_vmx_failValid(vcpu,
 				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
-		__vmx_flush_tlb(vcpu, vpid02, false);
+		vpid_sync_context(vpid02);
 		break;
 	case VMX_VPID_EXTENT_ALL_CONTEXT:
-		__vmx_flush_tlb(vcpu, vpid02, false);
+		vpid_sync_context(vpid02);
 		break;
 	default:
 		WARN_ON_ONCE(1);
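For context, vpid_sync_context() is the INVVPID wrapper the diff now calls
directly. A sketch of its shape at this point in the series (the helper
lives in arch/x86/kvm/vmx/ops.h; reproduced from memory, not verbatim):

static inline void vpid_sync_context(int vpid)
{
	/* Prefer single-context INVVPID; fall back to flushing all VPIDs. */
	if (cpu_has_vmx_invvpid_single())
		vpid_sync_vcpu_single(vpid);
	else
		vpid_sync_vcpu_global();
}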