| author | Paolo Bonzini <pbonzini@redhat.com> | 2020-03-25 12:50:03 -0400 |
|---|---|---|
| committer | Paolo Bonzini <pbonzini@redhat.com> | 2020-04-20 17:26:05 -0400 |
| commit | 0cd665bd20f9088d363158b4ac75592af18ecf4f (patch) | |
| tree | c6c950d8a402dc074e9edd19ecb3d512d28cc1cf /arch | |
| parent | 5efac0741ce238e0844d3f7af00198f81e84926a (diff) | |
KVM: x86: cleanup kvm_inject_emulated_page_fault
To reconstruct the kvm_mmu to be used for page fault injection, we
can simply use fault->nested_page_fault. This matches how
fault->nested_page_fault is assigned in the first place by
FNAME(walk_addr_generic).
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
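The equivalence with the old mmu_is_nested() check rests on the fact that, when the vCPU is not running a nested guest, walk_mmu and mmu refer to the same MMU, so selecting on fault->nested_page_fault alone suffices. Below is a minimal, self-contained C sketch of the selection logic this patch introduces; the structs and the select_fault_mmu() helper are simplified stand-ins invented for illustration, not the kernel's real kvm_vcpu/kvm_mmu definitions:

```c
#include <stdbool.h>
#include <stdio.h>

struct kvm_mmu {
	const char *name;           /* stand-in for the real MMU callbacks */
};

struct vcpu {
	struct kvm_mmu *mmu;        /* MMU for the current paging context */
	struct kvm_mmu *walk_mmu;   /* MMU used to walk guest page tables */
};

struct x86_exception {
	bool nested_page_fault;     /* set by FNAME(walk_addr_generic) */
};

/*
 * Mirror of the patched selection: a nested page fault is injected via
 * vcpu->mmu, an ordinary emulated guest fault via vcpu->walk_mmu.
 */
static struct kvm_mmu *select_fault_mmu(struct vcpu *vcpu,
					struct x86_exception *fault)
{
	return fault->nested_page_fault ? vcpu->mmu : vcpu->walk_mmu;
}

int main(void)
{
	/* Nested case: the two pointers differ. */
	struct kvm_mmu l02 = { "mmu (L0 -> L2 tables)" };
	struct kvm_mmu l12 = { "walk_mmu (L1 -> L2 tables)" };
	struct vcpu vcpu = { .mmu = &l02, .walk_mmu = &l12 };

	struct x86_exception guest_pf  = { .nested_page_fault = false };
	struct x86_exception nested_pf = { .nested_page_fault = true  };

	printf("guest PF  -> %s\n", select_fault_mmu(&vcpu, &guest_pf)->name);
	printf("nested PF -> %s\n", select_fault_mmu(&vcpu, &nested_pf)->name);
	return 0;
}
```

In the non-nested case both pointers would alias the same MMU, so either branch of the ternary injects through the same callbacks; this is why the patch can drop the explicit mmu_is_nested() test.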
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/kvm/mmu/mmu.c         | 6
-rw-r--r-- | arch/x86/kvm/mmu/paging_tmpl.h | 2
-rw-r--r-- | arch/x86/kvm/x86.c             | 8

3 files changed, 5 insertions, 11 deletions
```diff
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 84eeaa4ea149..32c9f4b2a281 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4353,12 +4353,6 @@ static unsigned long get_cr3(struct kvm_vcpu *vcpu)
 	return kvm_read_cr3(vcpu);
 }
 
-static void inject_page_fault(struct kvm_vcpu *vcpu,
-			      struct x86_exception *fault)
-{
-	vcpu->arch.mmu->inject_page_fault(vcpu, fault);
-}
-
 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
 			   unsigned int access, int *nr_present)
 {
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 9bdf9b7d9a96..efec7d27b8c5 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -812,7 +812,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 	if (!r) {
 		pgprintk("%s: guest page fault\n", __func__);
 		if (!prefault)
-			inject_page_fault(vcpu, &walker.fault);
+			kvm_inject_emulated_page_fault(vcpu, &walker.fault);
 
 		return RET_PF_RETRY;
 	}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 003e625367b7..2ab821f6281f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -614,12 +614,12 @@ EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
 bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
 				    struct x86_exception *fault)
 {
+	struct kvm_mmu *fault_mmu;
 	WARN_ON_ONCE(fault->vector != PF_VECTOR);
 
-	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
-		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
-	else
-		vcpu->arch.mmu->inject_page_fault(vcpu, fault);
+	fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu :
+					       vcpu->arch.walk_mmu;
+	fault_mmu->inject_page_fault(vcpu, fault);
 
 	return fault->nested_page_fault;
 }
```