author:    Ladi Prosek <lprosek@redhat.com>	2016-11-30 16:03:09 +0100
committer: Paolo Bonzini <pbonzini@redhat.com>	2016-12-08 15:31:09 +0100
commit:    ee146c1c100dbe9ca92252be2e901b957476b253
tree:      e43f2b2d96b96a45d8c612c7a778c5c96c453b15
parent:    7ca29de21362de242025fbc1c22436e19e39dddc
KVM: nVMX: propagate errors from prepare_vmcs02
It is possible for prepare_vmcs02 to fail to load the guest state. This
patch adds proper error handling for that case: if it happens, L1 receives
an INVALID_STATE vmexit with the appropriate exit qualification.
A failure to set guest CR3 is the only error propagated from prepare_vmcs02
at the moment.
Signed-off-by: Ladi Prosek <lprosek@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
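
The shape of the change is easy to see in isolation: prepare_vmcs02 now returns a status code and reports *why* it failed through an out-parameter, and the caller unwinds its partial state before surfacing the failure to L1. Below is a minimal, standalone userspace sketch of that pattern; the names mirror the patch (prepare, set_cr3, ENTRY_FAIL_DEFAULT), but none of this is kernel code, and the CR3 validity check is a made-up stand-in.

#include <stdio.h>

/* Placeholder failure code; in the kernel, ENTRY_FAIL_DEFAULT is a small
 * constant used as the invalid-state exit qualification. */
#define ENTRY_FAIL_DEFAULT 0UL

/* Stand-in for kvm_set_cr3(): reject a value with the low bits set, just
 * so there is *some* way to fail.  The real check is nothing like this. */
static int set_cr3(unsigned long cr3)
{
	return (cr3 & 0xfffUL) ? 1 : 0;
}

/* Mirrors the new prepare_vmcs02 contract: 0 on success, 1 on failure,
 * with the exit qualification code stored through the out-parameter. */
static int prepare(unsigned long guest_cr3, unsigned long *entry_failure_code)
{
	if (set_cr3(guest_cr3)) {
		*entry_failure_code = ENTRY_FAIL_DEFAULT;
		return 1;
	}
	return 0;
}

int main(void)
{
	unsigned long qual;

	/* Caller side, as in nested_vmx_run(): on failure, undo partial
	 * state (leave_guest_mode/vmx_load_vmcs01 in the kernel) and
	 * report an invalid-state entry failure with the qualification. */
	if (prepare(0x1001UL, &qual)) {	/* misaligned CR3 -> failure */
		printf("VM-entry failed, exit qualification %lu\n", qual);
		return 1;
	}
	printf("VM-entry prepared\n");
	return 0;
}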
-rw-r--r--	arch/x86/kvm/vmx.c	23
1 file changed, 19 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index bcad2eb11404..39a389f17f4a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9976,8 +9976,11 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
  * needs. In addition to modifying the active vmcs (which is vmcs02), this
  * function also has additional necessary side-effects, like setting various
  * vcpu->arch fields.
+ * Returns 0 on success, 1 on failure. Invalid state exit qualification code
+ * is assigned to entry_failure_code on failure.
  */
-static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
+			  unsigned long *entry_failure_code)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 exec_control;
@@ -10306,8 +10309,12 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 			nested_ept_enabled) {
 		vcpu->arch.cr3 = vmcs12->guest_cr3;
 		__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
-	} else
-		kvm_set_cr3(vcpu, vmcs12->guest_cr3);
+	} else {
+		if (kvm_set_cr3(vcpu, vmcs12->guest_cr3)) {
+			*entry_failure_code = ENTRY_FAIL_DEFAULT;
+			return 1;
+		}
+	}
 
 	kvm_mmu_reset_context(vcpu);
 
@@ -10326,6 +10333,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
 	kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
 	kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
+	return 0;
 }
 
 /*
@@ -10340,6 +10348,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	struct loaded_vmcs *vmcs02;
 	bool ia32e;
 	u32 msr_entry_idx;
+	unsigned long exit_qualification;
 
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
@@ -10502,7 +10511,13 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 
 	vmx_segment_cache_clear(vmx);
 
-	prepare_vmcs02(vcpu, vmcs12);
+	if (prepare_vmcs02(vcpu, vmcs12, &exit_qualification)) {
+		leave_guest_mode(vcpu);
+		vmx_load_vmcs01(vcpu);
+		nested_vmx_entry_failure(vcpu, vmcs12,
+				EXIT_REASON_INVALID_STATE, exit_qualification);
+		return 1;
+	}
 
 	msr_entry_idx = nested_vmx_load_msr(vcpu,
 					    vmcs12->vm_entry_msr_load_addr,
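
For context on the guest-visible effect: nested_vmx_entry_failure reflects the failure into vmcs12, so L1's VMLAUNCH/VMRESUME completes with an exit reason whose "VM-entry failure" bit (bit 31) is set and whose basic reason is 33 (VM-entry failure due to invalid guest state), with the exit qualification carrying the code stored through entry_failure_code. A rough sketch of how an L1 hypervisor might decode that, with vmread stubbed to return canned values; the VMCS field encodings are from the Intel SDM, everything else here is hypothetical.

#include <stdint.h>
#include <stdio.h>

/* VMCS field encodings and exit-reason values from the Intel SDM. */
#define VM_EXIT_REASON			0x4402u
#define EXIT_QUALIFICATION		0x6400u
#define EXIT_REASON_INVALID_STATE	33u
#define VMX_EXIT_REASONS_FAILED_VMENTRY	(1u << 31)

/* Stub standing in for a real VMREAD wrapper; returns canned values
 * matching what nested_vmx_entry_failure() stores into vmcs12. */
static uint64_t vmread(uint32_t field)
{
	switch (field) {
	case VM_EXIT_REASON:
		return EXIT_REASON_INVALID_STATE |
		       VMX_EXIT_REASONS_FAILED_VMENTRY;
	case EXIT_QUALIFICATION:
		return 0;	/* the ENTRY_FAIL_DEFAULT code */
	default:
		return 0;
	}
}

int main(void)
{
	uint32_t reason = (uint32_t)vmread(VM_EXIT_REASON);

	/* A failed VM entry sets bit 31 of the exit reason; the low bits
	 * hold the basic reason (33 = invalid guest state). */
	if ((reason & VMX_EXIT_REASONS_FAILED_VMENTRY) &&
	    (reason & 0xffffu) == EXIT_REASON_INVALID_STATE)
		printf("VM-entry failure, invalid guest state, qual=%llu\n",
		       (unsigned long long)vmread(EXIT_QUALIFICATION));
	return 0;
}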