author	Avi Kivity <avi@redhat.com>	2010-06-06 14:48:06 +0300
committer	Avi Kivity <avi@redhat.com>	2010-08-02 06:40:20 +0300
commit	a9221dd5ec125fbec1702fae016c6d2ea1a9a3da (patch)
tree	148607c0424c4ec993b55dd85eec69332a775b22
parent	ce061867aa2877605cda96fa8ec7dff15f70a983 (diff)
KVM: MMU: Atomically check for accessed bit when dropping an spte
Currently, in the window between the check for the accessed bit and actually dropping the spte, a vcpu can access the page through the spte and set the bit, which will then be ignored by the mmu. Fix by using an exchange operation to atomically fetch the spte and drop it.

Signed-off-by: Avi Kivity <avi@redhat.com>
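The race and the fix can be illustrated outside the kernel. The following userspace sketch is hypothetical: it uses C11 atomics rather than the kernel's xchg()/cmpxchg64(), and ACCESSED_BIT stands in for shadow_accessed_mask. It shows why a separate check-then-store can lose a concurrently set accessed bit, while a single atomic exchange always returns it.

/* Illustration only, not kernel code. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define ACCESSED_BIT (UINT64_C(1) << 5)	/* stand-in for shadow_accessed_mask */

static _Atomic uint64_t spte;

/* Racy variant: another (vcpu-like) thread can set ACCESSED_BIT between
 * the load and the store, and that update is silently overwritten. */
static uint64_t drop_spte_racy(uint64_t new_spte)
{
	uint64_t old = atomic_load(&spte);	/* check ...     */
	atomic_store(&spte, new_spte);		/* ... then drop */
	return old;				/* may miss a concurrently set bit */
}

/* Atomic variant: fetch and drop in one step, so any bit set before the
 * exchange is always visible in the returned old value. */
static uint64_t drop_spte_atomic(uint64_t new_spte)
{
	return atomic_exchange(&spte, new_spte);
}

int main(void)
{
	/* With no concurrent writer both variants return the bit; the
	 * difference only shows up when another thread sets it between
	 * the racy variant's load and store. */
	atomic_store(&spte, ACCESSED_BIT);
	printf("racy   old spte: %#llx\n",
	       (unsigned long long)drop_spte_racy(0));

	atomic_store(&spte, ACCESSED_BIT);
	printf("atomic old spte: %#llx\n",
	       (unsigned long long)drop_spte_atomic(0));
	return 0;
}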
-rw-r--r--	arch/x86/kvm/mmu.c	28
1 file changed, 21 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index fbdca08b8d8c..ba2efcf2b86e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -288,6 +288,21 @@ static void __set_spte(u64 *sptep, u64 spte)
 #endif
 }
 
+static u64 __xchg_spte(u64 *sptep, u64 new_spte)
+{
+#ifdef CONFIG_X86_64
+	return xchg(sptep, new_spte);
+#else
+	u64 old_spte;
+
+	do {
+		old_spte = *sptep;
+	} while (cmpxchg64(sptep, old_spte, new_spte) != old_spte);
+
+	return old_spte;
+#endif
+}
+
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
 				  struct kmem_cache *base_cache, int min)
 {
@@ -653,18 +668,17 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
 {
 	pfn_t pfn;
+	u64 old_spte;
 
-	if (!is_rmap_spte(*sptep)) {
-		__set_spte(sptep, new_spte);
+	old_spte = __xchg_spte(sptep, new_spte);
+	if (!is_rmap_spte(old_spte))
 		return;
-	}
-	pfn = spte_to_pfn(*sptep);
-	if (*sptep & shadow_accessed_mask)
+	pfn = spte_to_pfn(old_spte);
+	if (old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
-	if (is_writable_pte(*sptep))
+	if (is_writable_pte(old_spte))
 		kvm_set_pfn_dirty(pfn);
 	rmap_remove(kvm, sptep);
-	__set_spte(sptep, new_spte);
 }
 
 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
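The 32-bit path of __xchg_spte() falls back to a cmpxchg64() retry loop because i386 has no plain 64-bit exchange. A userspace sketch of the same two strategies, assuming GCC/Clang __atomic builtins as a stand-in for the kernel primitives:

/* Userspace sketch; not the kernel's xchg()/cmpxchg64() implementation. */
#include <stdint.h>

/* 64-bit path: a native exchange returns the previous value directly. */
uint64_t xchg_spte_native(uint64_t *sptep, uint64_t new_spte)
{
	return __atomic_exchange_n(sptep, new_spte, __ATOMIC_SEQ_CST);
}

/* 32-bit path: emulate the exchange with a compare-and-swap loop that
 * retries until the swap succeeds against an unchanged old value. */
uint64_t xchg_spte_cas(uint64_t *sptep, uint64_t new_spte)
{
	uint64_t old_spte = __atomic_load_n(sptep, __ATOMIC_RELAXED);

	/* On failure, __atomic_compare_exchange_n reloads old_spte with
	 * the current contents, so the loop retries with a fresh value. */
	while (!__atomic_compare_exchange_n(sptep, &old_spte, new_spte,
					    0 /* strong */,
					    __ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
		;

	return old_spte;
}

int main(void)
{
	uint64_t spte = 0x25;			/* arbitrary test value */
	uint64_t a = xchg_spte_native(&spte, 0);
	spte = 0x25;
	uint64_t b = xchg_spte_cas(&spte, 0);
	return (a == 0x25 && b == 0x25 && spte == 0) ? 0 : 1;
}

Either way, the point of the patch is that drop_spte() now inspects the value actually returned by the exchange, so the accessed and dirty bits it propagates with kvm_set_pfn_accessed()/kvm_set_pfn_dirty() are exactly the bits present at the moment the spte was dropped.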