author:    Jim Mattson <jmattson@google.com>    2016-11-30 12:03:43 -0800
committer: Paolo Bonzini <pbonzini@redhat.com>  2017-02-15 14:54:37 +0100
commit:    e29acc55bfc3afcfdf9ea3468f1217d08f0b2d2b
tree:      c42c95baf81361429edc47a0d4909190672db073 /arch/x86/kvm
parent:    cf8b84f48a5936f558a7a415f1d2f42161bf73eb
kvm: nVMX: Refactor handle_vmon()
handle_vmon() is split into two parts: the part that handles the VMXON instruction, and the part that modifies the vcpu state to transition from legacy mode to VMX operation. The latter will be used when restoring the checkpointed state of a vCPU that was in VMX operation when a snapshot was taken.

Signed-off-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
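For context, here is a minimal sketch of how the extracted helper might later be reused from a snapshot-restore path. This is not part of this commit: the function name vmx_restore_vmx_operation() and its call shape are assumptions for illustration; only enter_vmx_operation(), to_vmx(), and the nested.vmxon_ptr field come from the surrounding code.

	/*
	 * Hypothetical sketch (not in this commit): a restore path that
	 * re-enters VMX operation from checkpointed state. It reuses the
	 * newly extracted enter_vmx_operation() to redo the allocations
	 * and vcpu state changes that VMXON emulation performs, without
	 * re-emulating the instruction itself (no guest-mode checks, no
	 * instruction skip). The function name is an assumption.
	 */
	static int vmx_restore_vmx_operation(struct kvm_vcpu *vcpu, gpa_t vmxon_ptr)
	{
		struct vcpu_vmx *vmx = to_vmx(vcpu);
		int ret;

		/* Redo the state transition that handle_vmon() performs. */
		ret = enter_vmx_operation(vcpu);
		if (ret)
			return ret;

		/* Restore the checkpointed VMXON region pointer. */
		vmx->nested.vmxon_ptr = vmxon_ptr;
		return 0;
	}

This mirrors the stated motivation: once instruction emulation and the legacy-mode-to-VMX-operation transition are separable, a checkpoint-restore path can invoke the latter directly.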
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--	arch/x86/kvm/vmx.c | 93
1 file changed, 52 insertions(+), 41 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4b4b59b34d44..33cb8d2b9653 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7124,6 +7124,53 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
return 0;
}
+static int enter_vmx_operation(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct vmcs *shadow_vmcs;
+
+ if (cpu_has_vmx_msr_bitmap()) {
+ vmx->nested.msr_bitmap =
+ (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (!vmx->nested.msr_bitmap)
+ goto out_msr_bitmap;
+ }
+
+ vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
+ if (!vmx->nested.cached_vmcs12)
+ goto out_cached_vmcs12;
+
+ if (enable_shadow_vmcs) {
+ shadow_vmcs = alloc_vmcs();
+ if (!shadow_vmcs)
+ goto out_shadow_vmcs;
+ /* mark vmcs as shadow */
+ shadow_vmcs->revision_id |= (1u << 31);
+ /* init shadow vmcs */
+ vmcs_clear(shadow_vmcs);
+ vmx->vmcs01.shadow_vmcs = shadow_vmcs;
+ }
+
+ INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
+ vmx->nested.vmcs02_num = 0;
+
+ hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+ vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
+
+ vmx->nested.vmxon = true;
+ return 0;
+
+out_shadow_vmcs:
+ kfree(vmx->nested.cached_vmcs12);
+
+out_cached_vmcs12:
+ free_page((unsigned long)vmx->nested.msr_bitmap);
+
+out_msr_bitmap:
+ return -ENOMEM;
+}
+
/*
* Emulate the VMXON instruction.
* Currently, we just remember that VMX is active, and do not save or even
@@ -7134,9 +7181,9 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
*/
static int handle_vmon(struct kvm_vcpu *vcpu)
{
+ int ret;
struct kvm_segment cs;
struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct vmcs *shadow_vmcs;
const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
@@ -7176,49 +7223,13 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL))
return 1;
-
- if (cpu_has_vmx_msr_bitmap()) {
- vmx->nested.msr_bitmap =
- (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx->nested.msr_bitmap)
- goto out_msr_bitmap;
- }
-
- vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
- if (!vmx->nested.cached_vmcs12)
- goto out_cached_vmcs12;
-
- if (enable_shadow_vmcs) {
- shadow_vmcs = alloc_vmcs();
- if (!shadow_vmcs)
- goto out_shadow_vmcs;
- /* mark vmcs as shadow */
- shadow_vmcs->revision_id |= (1u << 31);
- /* init shadow vmcs */
- vmcs_clear(shadow_vmcs);
- vmx->vmcs01.shadow_vmcs = shadow_vmcs;
- }
-
- INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
- vmx->nested.vmcs02_num = 0;
-
- hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
-
- vmx->nested.vmxon = true;
+
+ ret = enter_vmx_operation(vcpu);
+ if (ret)
+ return ret;
nested_vmx_succeed(vcpu);
return kvm_skip_emulated_instruction(vcpu);
-
-out_shadow_vmcs:
- kfree(vmx->nested.cached_vmcs12);
-
-out_cached_vmcs12:
- free_page((unsigned long)vmx->nested.msr_bitmap);
-
-out_msr_bitmap:
- return -ENOMEM;
}
/*