author     Paolo Bonzini <pbonzini@redhat.com>   2020-04-17 10:32:53 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>   2020-04-23 09:04:56 -0400
commit     56083bdf67c78030f11cdaed5b2b54959a329b02
tree       27ee00f99ea3d57a3aaaba9368af228e9353470f
parent     1d2c6c9bd4b768bb665eeeb793dd50c2cebcbf0c
KVM: x86: check_nested_events is never NULL
Both Intel and AMD now implement it, so there is no need to check if the
callback is implemented.
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
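
As a hedged illustration of what "the callback is never NULL" buys the callers, here is a minimal, self-contained C sketch of the same ops-table pattern. The names (struct vcpu, struct x86_ops, vmx_/svm_check_nested_events, inject_pending_event) are simplified stand-ins for this example, not KVM's real definitions.

/* Hypothetical, simplified stand-ins for illustration -- not KVM's real types. */
#include <stdio.h>

struct vcpu {
	int guest_mode;
};

struct x86_ops {
	/*
	 * Before the commit, callers had to tolerate this pointer being
	 * NULL; now every backend is required to fill it in.
	 */
	int (*check_nested_events)(struct vcpu *vcpu);
};

/* Both backends implement the callback, so it is never NULL. */
static int vmx_check_nested_events(struct vcpu *vcpu) { (void)vcpu; return 0; }
static int svm_check_nested_events(struct vcpu *vcpu) { (void)vcpu; return 0; }

static struct x86_ops kvm_ops;

static int inject_pending_event(struct vcpu *vcpu)
{
	/*
	 * Old form:  if (vcpu->guest_mode && kvm_ops.check_nested_events)
	 * New form:  the NULL test is dropped, since the pointer is set
	 * unconditionally by whichever backend was selected.
	 */
	if (vcpu->guest_mode)
		return kvm_ops.check_nested_events(vcpu);
	return 0;
}

int main(int argc, char **argv)
{
	struct vcpu v = { .guest_mode = 1 };
	(void)argv;

	/* Stand-in for hardware probing: pick the Intel or AMD backend. */
	kvm_ops.check_nested_events =
		(argc > 1) ? svm_check_nested_events : vmx_check_nested_events;

	printf("check_nested_events returned %d\n", inject_pending_event(&v));
	return 0;
}

The design point mirrored here: making a callback mandatory moves the "is it implemented?" burden from every call site into backend registration, which is why each of the three hunks below simply drops the && clause.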
 arch/x86/kvm/x86.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 59958ce2b681..0492baeb78ab 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7699,7 +7699,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu)
 	 * from L2 to L1 due to pending L1 events which require exit
 	 * from L2 to L1.
 	 */
-	if (is_guest_mode(vcpu) && kvm_x86_ops.check_nested_events) {
+	if (is_guest_mode(vcpu)) {
 		r = kvm_x86_ops.check_nested_events(vcpu);
 		if (r != 0)
 			return r;
@@ -7761,7 +7761,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu)
 	 * proposal and current concerns. Perhaps we should be setting
 	 * KVM_REQ_EVENT only on certain events and not unconditionally?
 	 */
-	if (is_guest_mode(vcpu) && kvm_x86_ops.check_nested_events) {
+	if (is_guest_mode(vcpu)) {
 		r = kvm_x86_ops.check_nested_events(vcpu);
 		if (r != 0)
 			return r;
@@ -8527,7 +8527,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
 static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
 {
-	if (is_guest_mode(vcpu) && kvm_x86_ops.check_nested_events)
+	if (is_guest_mode(vcpu))
 		kvm_x86_ops.check_nested_events(vcpu);
 
 	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&