author      Sean Christopherson <seanjc@google.com>      2021-06-22 10:57:32 -0700
committer   Paolo Bonzini <pbonzini@redhat.com>          2021-06-24 18:00:46 -0400
commit      961f84457cd4e2fc479e59d015f1d292ec30373b (patch)
tree        9b7e5e395eb6f3e55f8dec78800137959a3f9025 /arch/x86/kvm
parent      36f267871edceafbfbbc5d570c34c089a2afa1c1 (diff)
KVM: x86/mmu: Add helpers to do full reserved SPTE checks w/ generic MMU
Extract the reserved SPTE check and print helpers in get_mmio_spte() to
new helpers so that KVM can also WARN on reserved badness when making a
SPTE.
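
For context, a minimal sketch of the kind of caller this extraction enables; the call site, variable names, and message below are illustrative assumptions, not code added by this patch:

        /* Hypothetical check when constructing a new SPTE (illustrative only). */
        WARN_ONCE(is_rsvd_spte(&vcpu->arch.mmu->shadow_zero_check, spte, level),
                  "reserved bits set in new SPTE 0x%llx, level = %d", spte, level);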
Tag the checking helper with __always_inline to improve the probability
of the compiler generating optimal code for the checking loop, e.g. gcc
appears to avoid using %rbp when the helper is tagged with a vanilla
"inline".
No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210622175739.3610207-48-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--   arch/x86/kvm/mmu/mmu.c    23
-rw-r--r--   arch/x86/kvm/mmu/spte.h   32
2 files changed, 34 insertions, 21 deletions
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 4676d696b909..ad025059a041 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3594,19 +3594,6 @@ static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr,
         return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
 }
 
-static bool
-__is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level)
-{
-        int bit7 = (pte >> 7) & 1;
-
-        return pte & rsvd_check->rsvd_bits_mask[bit7][level-1];
-}
-
-static bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check, u64 pte)
-{
-        return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f);
-}
-
 static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 {
         /*
@@ -3684,13 +3671,7 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
         rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
 
         for (level = root; level >= leaf; level--)
-                /*
-                 * Use a bitwise-OR instead of a logical-OR to aggregate the
-                 * reserved bit and EPT's invalid memtype/XWR checks to avoid
-                 * adding a Jcc in the loop.
-                 */
-                reserved |= __is_bad_mt_xwr(rsvd_check, sptes[level]) |
-                            __is_rsvd_bits_set(rsvd_check, sptes[level], level);
+                reserved |= is_rsvd_spte(rsvd_check, sptes[level], level);
 
         if (reserved) {
                 pr_err("%s: reserved bits set on MMU-present spte, addr 0x%llx, hierarchy:\n",
@@ -3698,7 +3679,7 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
                 for (level = root; level >= leaf; level--)
                         pr_err("------ spte = 0x%llx level = %d, rsvd bits = 0x%llx",
                                sptes[level], level,
-                               rsvd_check->rsvd_bits_mask[(sptes[level] >> 7) & 1][level-1]);
+                               get_rsvd_bits(rsvd_check, sptes[level], level));
         }
 
         return reserved;
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index bca0ba11cccf..7a5ce9314107 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -293,6 +293,38 @@ static inline bool is_dirty_spte(u64 spte)
         return dirty_mask ? spte & dirty_mask : spte & PT_WRITABLE_MASK;
 }
 
+static inline u64 get_rsvd_bits(struct rsvd_bits_validate *rsvd_check, u64 pte,
+                                int level)
+{
+        int bit7 = (pte >> 7) & 1;
+
+        return rsvd_check->rsvd_bits_mask[bit7][level-1];
+}
+
+static inline bool __is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check,
+                                      u64 pte, int level)
+{
+        return pte & get_rsvd_bits(rsvd_check, pte, level);
+}
+
+static inline bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check,
+                                   u64 pte)
+{
+        return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f);
+}
+
+static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check,
+                                         u64 spte, int level)
+{
+        /*
+         * Use a bitwise-OR instead of a logical-OR to aggregate the reserved
+         * bits and EPT's invalid memtype/XWR checks to avoid an extra Jcc
+         * (this is extremely unlikely to be short-circuited as true).
+         */
+        return __is_bad_mt_xwr(rsvd_check, spte) |
+               __is_rsvd_bits_set(rsvd_check, spte, level);
+}
+
 static inline bool spte_can_locklessly_be_made_writable(u64 spte)
 {
         return (spte & shadow_host_writable_mask) &&
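
For anyone who wants to experiment with the check outside the kernel, a minimal userspace sketch follows; struct rsvd_bits_validate, BIT_ULL, and the masks in main() are local stand-ins for illustration (the kernel-style double underscores are dropped here), not the kernel's definitions or real reserved-bit layouts:

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        #define BIT_ULL(n) (1ULL << (n))

        /* Stand-in modeled on the kernel's struct rsvd_bits_validate. */
        struct rsvd_bits_validate {
                uint64_t rsvd_bits_mask[2][5];  /* indexed by [bit7 of PTE][level - 1] */
                uint64_t bad_mt_xwr;            /* bad EPT memtype/XWR combinations */
        };

        static uint64_t get_rsvd_bits(const struct rsvd_bits_validate *rsvd_check,
                                      uint64_t pte, int level)
        {
                int bit7 = (pte >> 7) & 1;

                return rsvd_check->rsvd_bits_mask[bit7][level - 1];
        }

        static bool is_rsvd_bits_set(const struct rsvd_bits_validate *rsvd_check,
                                     uint64_t pte, int level)
        {
                return pte & get_rsvd_bits(rsvd_check, pte, level);
        }

        static bool is_bad_mt_xwr(const struct rsvd_bits_validate *rsvd_check,
                                  uint64_t pte)
        {
                /* The low 6 bits of an EPT entry encode X/W/R and the memtype. */
                return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f);
        }

        static bool is_rsvd_spte(const struct rsvd_bits_validate *rsvd_check,
                                 uint64_t spte, int level)
        {
                /* Bitwise-OR, as in the patch, so neither check short-circuits. */
                return is_bad_mt_xwr(rsvd_check, spte) |
                       is_rsvd_bits_set(rsvd_check, spte, level);
        }

        int main(void)
        {
                /* Made-up layout: treat bit 51 as reserved at level 1. */
                struct rsvd_bits_validate rsvd_check = {
                        .rsvd_bits_mask = { { BIT_ULL(51) }, { BIT_ULL(51) } },
                        .bad_mt_xwr = 0,
                };
                uint64_t good = BIT_ULL(12);
                uint64_t bad = BIT_ULL(12) | BIT_ULL(51);

                printf("good: %d, bad: %d\n",
                       is_rsvd_spte(&rsvd_check, good, 1),
                       is_rsvd_spte(&rsvd_check, bad, 1));
                return 0;
        }

Built with "gcc -O2 -Wall", this prints "good: 0, bad: 1", mirroring how get_mmio_spte() aggregates the per-level results into "reserved".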