author | Avi Kivity <avi@redhat.com> | 2010-06-06 14:38:12 +0300 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2010-08-02 06:40:18 +0300 |
commit | ce061867aa2877605cda96fa8ec7dff15f70a983 (patch) | |
tree | 690b9723b06c2f62586f5a74121fe16f3e2e1caa /arch/x86 | |
parent | be38d276b0189fa86231fc311428622a1981ad62 (diff) | |
KVM: MMU: Move accessed/dirty bit checks from rmap_remove() to drop_spte()
Since we need to make the check atomic, move it to the place that will
set the new spte.
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86')
-rw-r--r-- | arch/x86/kvm/mmu.c | 19 |
1 file changed, 11 insertions, 8 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 1ad39cf70e18..fbdca08b8d8c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -612,19 +612,11 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 	struct kvm_rmap_desc *desc;
 	struct kvm_rmap_desc *prev_desc;
 	struct kvm_mmu_page *sp;
-	pfn_t pfn;
 	gfn_t gfn;
 	unsigned long *rmapp;
 	int i;
 
-	if (!is_rmap_spte(*spte))
-		return;
 	sp = page_header(__pa(spte));
-	pfn = spte_to_pfn(*spte);
-	if (*spte & shadow_accessed_mask)
-		kvm_set_pfn_accessed(pfn);
-	if (is_writable_pte(*spte))
-		kvm_set_pfn_dirty(pfn);
 	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
 	rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
 	if (!*rmapp) {
@@ -660,6 +652,17 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 
 static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
 {
+	pfn_t pfn;
+
+	if (!is_rmap_spte(*sptep)) {
+		__set_spte(sptep, new_spte);
+		return;
+	}
+	pfn = spte_to_pfn(*sptep);
+	if (*sptep & shadow_accessed_mask)
+		kvm_set_pfn_accessed(pfn);
+	if (is_writable_pte(*sptep))
+		kvm_set_pfn_dirty(pfn);
 	rmap_remove(kvm, sptep);
 	__set_spte(sptep, new_spte);
 }
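The commit message says the accessed/dirty check ultimately has to become atomic with the spte update, which is why it now sits next to __set_spte(). As a rough illustration of what that enables, and not part of this patch, here is a minimal sketch: it assumes a hypothetical xchg-based helper, called atomic_exchange_spte() here, that installs the new spte and returns the old value in one step, so accessed/dirty bits set by hardware right up to the moment of replacement are still observed.

/*
 * Illustrative sketch only -- not from this patch.  atomic_exchange_spte()
 * is a hypothetical stand-in for an xchg-based spte update; the other
 * names follow the kernel code above.
 */
static void drop_spte_atomic_sketch(struct kvm *kvm, u64 *sptep, u64 new_spte)
{
	/* Install the new spte and capture the value actually removed. */
	u64 old_spte = atomic_exchange_spte(sptep, new_spte);

	if (!is_rmap_spte(old_spte))
		return;

	/* Propagate A/D state from the removed value, not from a stale read. */
	if (old_spte & shadow_accessed_mask)
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
	if (is_writable_pte(old_spte))
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));

	rmap_remove(kvm, sptep);
}

With the checks still in rmap_remove(), the spte could change between the test and the later __set_spte(); once both live in drop_spte(), a single atomic exchange can serve the check and the update together.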