author	Sean Christopherson <seanjc@google.com>	2021-02-12 16:50:04 -0800
committer	Paolo Bonzini <pbonzini@redhat.com>	2021-02-19 03:08:11 -0500
commit	1b6d9d9ed5717157933db77d96bb12884c17ce52
tree	823839c2cad23e0ed51db9ad41fdd3eb4e335f4c
parent	c060c72ffeb448fbb5864faa1f672ebfe14dd25f
KVM: x86/mmu: Split out max mapping level calculation to helper
Factor out the logic for determining the maximum mapping level given a
memslot and a gpa. The helper will be used when zapping collapsible
SPTEs when disabling dirty logging, e.g. to avoid zapping SPTEs that
can't possibly be rebuilt as hugepages.
No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210213005015.1651772-4-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
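The changelog anticipates a second consumer of the new helper: the path that zaps collapsible SPTEs when dirty logging is disabled. As a rough sketch of how such a caller might use kvm_mmu_max_mapping_level(); the wrapper name spte_can_be_hugepage() and the assumption that the caller already has the shadow page and pfn in hand are hypothetical, not part of this patch:

/*
 * Illustrative only, not part of this commit: the kind of check the
 * changelog describes, i.e. zap a SPTE only if the mapping could
 * actually be rebuilt at a larger page level.
 */
static bool spte_can_be_hugepage(struct kvm *kvm,
				 struct kvm_memory_slot *slot,
				 struct kvm_mmu_page *sp, kvm_pfn_t pfn)
{
	/*
	 * kvm_mmu_max_mapping_level() clamps max_level to
	 * max_huge_page_level before walking the lpage_info array, so
	 * passing PG_LEVEL_NUM asks for the largest level the memslot
	 * and host mapping permit at this gfn.
	 */
	return sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
							  pfn, PG_LEVEL_NUM);
}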
-rw-r--r--	arch/x86/kvm/mmu/mmu.c	37
-rw-r--r--	arch/x86/kvm/mmu/mmu_internal.h	2
2 files changed, 24 insertions, 15 deletions
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index e507568cd55d..e97ed942178e 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2756,8 +2756,8 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
 	__direct_pte_prefetch(vcpu, sp, sptep);
 }
 
-static int host_pfn_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn,
-				  kvm_pfn_t pfn, struct kvm_memory_slot *slot)
+static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
+				  struct kvm_memory_slot *slot)
 {
 	unsigned long hva;
 	pte_t *pte;
@@ -2776,19 +2776,36 @@ static int host_pfn_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn,
 	 */
 	hva = __gfn_to_hva_memslot(slot, gfn);
 
-	pte = lookup_address_in_mm(vcpu->kvm->mm, hva, &level);
+	pte = lookup_address_in_mm(kvm->mm, hva, &level);
 	if (unlikely(!pte))
 		return PG_LEVEL_4K;
 
 	return level;
 }
 
+int kvm_mmu_max_mapping_level(struct kvm *kvm, struct kvm_memory_slot *slot,
+			      gfn_t gfn, kvm_pfn_t pfn, int max_level)
+{
+	struct kvm_lpage_info *linfo;
+
+	max_level = min(max_level, max_huge_page_level);
+	for ( ; max_level > PG_LEVEL_4K; max_level--) {
+		linfo = lpage_info_slot(gfn, slot, max_level);
+		if (!linfo->disallow_lpage)
+			break;
+	}
+
+	if (max_level == PG_LEVEL_4K)
+		return PG_LEVEL_4K;
+
+	return host_pfn_mapping_level(kvm, gfn, pfn, slot);
+}
+
 int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
 			    int max_level, kvm_pfn_t *pfnp,
 			    bool huge_page_disallowed, int *req_level)
 {
 	struct kvm_memory_slot *slot;
-	struct kvm_lpage_info *linfo;
 	kvm_pfn_t pfn = *pfnp;
 	kvm_pfn_t mask;
 	int level;
@@ -2805,17 +2822,7 @@ int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
 	if (!slot)
 		return PG_LEVEL_4K;
 
-	max_level = min(max_level, max_huge_page_level);
-	for ( ; max_level > PG_LEVEL_4K; max_level--) {
-		linfo = lpage_info_slot(gfn, slot, max_level);
-		if (!linfo->disallow_lpage)
-			break;
-	}
-
-	if (max_level == PG_LEVEL_4K)
-		return PG_LEVEL_4K;
-
-	level = host_pfn_mapping_level(vcpu, gfn, pfn, slot);
+	level = kvm_mmu_max_mapping_level(vcpu->kvm, slot, gfn, pfn, max_level);
 
 	if (level == PG_LEVEL_4K)
 		return level;
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 9e38d3c5daad..0b55aa561ec8 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -138,6 +138,8 @@ enum {
 #define SET_SPTE_NEED_REMOTE_TLB_FLUSH	BIT(1)
 #define SET_SPTE_SPURIOUS		BIT(2)
 
+int kvm_mmu_max_mapping_level(struct kvm *kvm, struct kvm_memory_slot *slot,
+			      gfn_t gfn, kvm_pfn_t pfn, int max_level);
 int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
 			    int max_level, kvm_pfn_t *pfnp,
 			    bool huge_page_disallowed, int *req_level);