author     Christoffer Dall <christoffer.dall@linaro.org>    2017-12-04 21:35:23 +0100
committer  Paolo Bonzini <pbonzini@redhat.com>               2017-12-14 09:26:49 +0100
commit     ec7660ccdd2b71d8c7f0243f8590253713e9b75d
tree       c745134c653e1f9a1d08decdd179f4fd742dd024 /virt/kvm
parent     8eb73e2d410f00d383023fe41c0c25c6195b7389
KVM: Take vcpu->mutex outside vcpu_load
As we're about to call vcpu_load() from architecture-specific
implementations of the KVM vcpu ioctls, while the generic code still
accesses data structures protected by vcpu->mutex, factor the mutex
locking out of vcpu_load().
x86 is the only architecture which calls vcpu_load() outside of the main
vcpu ioctl function, and these calls will no longer take the vcpu mutex
following this patch. However, with the exception of
kvm_arch_vcpu_postcreate (see below), the callers are either in the
creation or destruction path of the VCPU, which means there cannot be
any concurrent access to the data structure, because the file descriptor
is not yet accessible, or is already gone.
kvm_arch_vcpu_postcreate makes the newly created vcpu potentially
accessible by other in-kernel threads through the kvm->vcpus array, and
we therefore take the vcpu mutex in this case directly.
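To illustrate the new calling convention, here is a minimal sketch of how a
caller such as kvm_arch_vcpu_postcreate() is expected to pair the mutex with
vcpu_load()/vcpu_put() after this change; the body is a simplified stand-in,
not the actual x86 code touched by this patch:

/*
 * Sketch only: a caller outside kvm_vcpu_ioctl() now has to take
 * vcpu->mutex itself, because vcpu_load() no longer does so.
 */
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        /* The vcpu may already be reachable through kvm->vcpus. */
        mutex_lock(&vcpu->mutex);
        vcpu_load(vcpu);        /* register preempt notifier, load arch state */

        /* ... architecture-specific post-creation setup ... */

        vcpu_put(vcpu);         /* unload arch state, unregister notifier */
        mutex_unlock(&vcpu->mutex);
}

The main vcpu ioctl path, shown in the diff below, keeps the same pairing but
takes the mutex with mutex_lock_killable() so that a task waiting for the
mutex can still be killed.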
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/kvm_main.c  17
1 file changed, 6 insertions, 11 deletions
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index bc092e3d1d73..c4d116b336f4 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -151,17 +151,12 @@ bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
 /*
  * Switches to specified vcpu, until a matching vcpu_put()
  */
-int vcpu_load(struct kvm_vcpu *vcpu)
+void vcpu_load(struct kvm_vcpu *vcpu)
 {
-        int cpu;
-
-        if (mutex_lock_killable(&vcpu->mutex))
-                return -EINTR;
-        cpu = get_cpu();
+        int cpu = get_cpu();
         preempt_notifier_register(&vcpu->preempt_notifier);
         kvm_arch_vcpu_load(vcpu, cpu);
         put_cpu();
-        return 0;
 }
 EXPORT_SYMBOL_GPL(vcpu_load);
 
@@ -171,7 +166,6 @@ void vcpu_put(struct kvm_vcpu *vcpu)
         kvm_arch_vcpu_put(vcpu);
         preempt_notifier_unregister(&vcpu->preempt_notifier);
         preempt_enable();
-        mutex_unlock(&vcpu->mutex);
 }
 EXPORT_SYMBOL_GPL(vcpu_put);
 
@@ -2560,9 +2554,9 @@ static long kvm_vcpu_ioctl(struct file *filp,
 #endif
 
 
-        r = vcpu_load(vcpu);
-        if (r)
-                return r;
+        if (mutex_lock_killable(&vcpu->mutex))
+                return -EINTR;
+        vcpu_load(vcpu);
         switch (ioctl) {
         case KVM_RUN: {
                 struct pid *oldpid;
@@ -2735,6 +2729,7 @@ out_free1:
         }
 out:
         vcpu_put(vcpu);
+        mutex_unlock(&vcpu->mutex);
         kfree(fpu);
         kfree(kvm_sregs);
         return r;