author     Ingo Molnar <mingo@kernel.org>  2016-04-19 10:38:52 +0200
committer  Ingo Molnar <mingo@kernel.org>  2016-04-19 10:38:52 +0200
commit     6666ea558b1f4134291c15ac59366f69c2d1f321
tree       99e4204b51e69a12721123d3bb1912f6ca0a2726 /arch/x86/kvm/mmu.c
parent     91ed140d6c1e168b11bbbddac4f6066f40a0c6b5
parent     c3b46c73264b03000d1e18b22f5caf63332547c9
Merge tag 'v4.6-rc4' into x86/asm, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu.c  12
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index bc1e0b65909e..4c6972f9ad1b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -557,8 +557,15 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
 	      !is_writable_pte(new_spte))
 		ret = true;
 
-	if (!shadow_accessed_mask)
+	if (!shadow_accessed_mask) {
+		/*
+		 * We don't set page dirty when dropping non-writable spte.
+		 * So do it now if the new spte is becoming non-writable.
+		 */
+		if (ret)
+			kvm_set_pfn_dirty(spte_to_pfn(old_spte));
 		return ret;
+	}
 
 	/*
 	 * Flush TLB when accessed/dirty bits are changed in the page tables,
@@ -605,7 +612,8 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
 
 	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
-	if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
+	if (old_spte & (shadow_dirty_mask ? shadow_dirty_mask :
+			PT_WRITABLE_MASK))
 		kvm_set_pfn_dirty(pfn);
 	return 1;
 }
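
The behavioural point of both hunks is the same: on hardware without EPT accessed/dirty bits, shadow_dirty_mask is zero, so the writable bit (PT_WRITABLE_MASK) has to stand in for the dirty bit, since any writable spte may have been written through before it is dropped or made read-only. The ternary in the second hunk encodes exactly that fallback. Below is a minimal, self-contained user-space sketch of that decision, not kernel code: the masks are redefined locally with illustrative bit positions, and should_mark_pfn_dirty() is a hypothetical helper introduced only for this example.

/*
 * User-space sketch of the dirty-tracking decision in the patch above.
 * Not kernel code: masks are local, bit positions are illustrative
 * assumptions, and should_mark_pfn_dirty() is a hypothetical helper
 * mirroring the ternary in mmu_spte_clear_track_bits().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PT_WRITABLE_MASK  (1ULL << 1)  /* writable bit of an spte */
#define HW_DIRTY_MASK     (1ULL << 9)  /* hardware dirty bit, when available */

/* Zero when the CPU offers no hardware accessed/dirty bits. */
static uint64_t shadow_dirty_mask;

static bool should_mark_pfn_dirty(uint64_t old_spte)
{
	/*
	 * With hardware A/D bits, trust the dirty bit; without them,
	 * a writable spte is assumed to have been written through.
	 */
	return old_spte & (shadow_dirty_mask ? shadow_dirty_mask
					     : PT_WRITABLE_MASK);
}

int main(void)
{
	uint64_t writable_spte = PT_WRITABLE_MASK;
	uint64_t readonly_spte = 0;

	shadow_dirty_mask = 0;             /* no hardware A/D bits */
	printf("no A/D bits, writable spte:  dirty=%d\n",
	       should_mark_pfn_dirty(writable_spte));
	printf("no A/D bits, read-only spte: dirty=%d\n",
	       should_mark_pfn_dirty(readonly_spte));

	shadow_dirty_mask = HW_DIRTY_MASK; /* hardware dirty bit in use */
	printf("A/D bits, writable but clean spte: dirty=%d\n",
	       should_mark_pfn_dirty(writable_spte));
	return 0;
}

The first hunk applies the same idea to mmu_spte_update(): when a writable spte becomes non-writable on such hardware, the pfn is marked dirty at that point, because once the spte is read-only the writable-bit proxy for dirtiness is gone.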