author		Quentin Perret <qperret@google.com>	2021-03-19 10:01:36 +0000
committer	Marc Zyngier <maz@kernel.org>		2021-03-19 12:01:22 +0000
commit		f60ca2f9321a71ee3d2a7bd620c1827b82ce05f2
tree		9c952b4ee0c020d4dd7f3c484abd27d3e8df168f /arch/arm64/kvm/hyp
parent		a14307f5310c737744641ff8da7a8d491c3c85cd
KVM: arm64: Always zero invalid PTEs
kvm_set_invalid_pte() currently clears only bit 0 of a PTE, because
stage2_map_walk_table_post() needs to be able to follow the anchor. In
preparation for re-using bits [63:1] of invalid PTEs, zero the PTE
entirely, which requires caching the anchor's child pointer upfront.
Suggested-by: Will Deacon <will@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210319100146.1149909-29-qperret@google.com
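
To see what the rename changes in practice: the old helper cleared only KVM_PTE_VALID (bit 0), so anything stored in bits 63:1 of an invalid PTE survived and could later be misread, while the new helper zeroes the whole entry. A minimal userspace sketch of the two behaviours (standalone C, not kernel code; the helper names mirror the patch, and the example PTE value is made up for the demo):

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t kvm_pte_t;

	#define KVM_PTE_VALID	((kvm_pte_t)1)	/* bit 0, the valid bit */

	/* Old behaviour: drop the valid bit, keep bits 63:1. */
	static void set_invalid_pte(kvm_pte_t *ptep)
	{
		*ptep &= ~KVM_PTE_VALID;
	}

	/* New behaviour: zero the entry entirely. */
	static void clear_pte(kvm_pte_t *ptep)
	{
		*ptep = 0;
	}

	int main(void)
	{
		kvm_pte_t a = 0x0000000040000f03ULL;	/* valid PTE with attribute bits */
		kvm_pte_t b = a;

		set_invalid_pte(&a);
		clear_pte(&b);

		printf("old helper leaves %#018llx\n", (unsigned long long)a); /* bits 63:1 intact */
		printf("new helper leaves %#018llx\n", (unsigned long long)b); /* all zero */
		return 0;
	}

With the old helper, the stale upper bits make it impossible to reserve bits [63:1] of invalid PTEs for other uses, which is what this series prepares for.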
Diffstat (limited to 'arch/arm64/kvm/hyp')
-rw-r--r--	arch/arm64/kvm/hyp/pgtable.c | 26 ++++++++++++++++----------
1 file changed, 16 insertions(+), 10 deletions(-)
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 15de1708cfcd..0a674010afb6 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -156,10 +156,9 @@ static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_op
 	return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
 }
 
-static void kvm_set_invalid_pte(kvm_pte_t *ptep)
+static void kvm_clear_pte(kvm_pte_t *ptep)
 {
-	kvm_pte_t pte = *ptep;
-	WRITE_ONCE(*ptep, pte & ~KVM_PTE_VALID);
+	WRITE_ONCE(*ptep, 0);
 }
 
 static void kvm_set_table_pte(kvm_pte_t *ptep, kvm_pte_t *childp,
@@ -444,6 +443,7 @@ struct stage2_map_data {
 	kvm_pte_t			attr;
 
 	kvm_pte_t			*anchor;
+	kvm_pte_t			*childp;
 
 	struct kvm_s2_mmu		*mmu;
 	void				*memcache;
@@ -533,7 +533,7 @@ static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
 		 * There's an existing different valid leaf entry, so perform
		 * break-before-make.
		 */
-		kvm_set_invalid_pte(ptep);
+		kvm_clear_pte(ptep);
 		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, data->mmu, addr, level);
 		mm_ops->put_page(ptep);
 	}
@@ -554,7 +554,8 @@ static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level,
 	if (!kvm_block_mapping_supported(addr, end, data->phys, level))
 		return 0;
 
-	kvm_set_invalid_pte(ptep);
+	data->childp = kvm_pte_follow(*ptep, data->mm_ops);
+	kvm_clear_pte(ptep);
 
 	/*
 	 * Invalidate the whole stage-2, as we may have numerous leaf
@@ -600,7 +601,7 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 	 * will be mapped lazily.
 	 */
 	if (kvm_pte_valid(pte)) {
-		kvm_set_invalid_pte(ptep);
+		kvm_clear_pte(ptep);
 		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, data->mmu, addr, level);
 		mm_ops->put_page(ptep);
 	}
@@ -616,19 +617,24 @@ static int stage2_map_walk_table_post(u64 addr, u64 end, u32 level,
 				      struct stage2_map_data *data)
 {
 	struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
+	kvm_pte_t *childp;
 	int ret = 0;
 
 	if (!data->anchor)
 		return 0;
 
-	mm_ops->put_page(kvm_pte_follow(*ptep, mm_ops));
-	mm_ops->put_page(ptep);
-
 	if (data->anchor == ptep) {
+		childp = data->childp;
 		data->anchor = NULL;
+		data->childp = NULL;
 		ret = stage2_map_walk_leaf(addr, end, level, ptep, data);
+	} else {
+		childp = kvm_pte_follow(*ptep, mm_ops);
 	}
 
+	mm_ops->put_page(childp);
+	mm_ops->put_page(ptep);
+
 	return ret;
 }
 
@@ -737,7 +743,7 @@ static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 	 * block entry and rely on the remaining portions being faulted
 	 * back lazily.
 	 */
-	kvm_set_invalid_pte(ptep);
+	kvm_clear_pte(ptep);
 	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, addr, level);
 	mm_ops->put_page(ptep);
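
The tricky part of the patch is the ordering in the map walker: stage2_map_walk_table_pre() now has to read the child table's address out of the PTE before zeroing it, because by the time stage2_map_walk_table_post() runs, the anchor entry reads as zero and can no longer be followed; the cached data->childp is then the only remaining reference. A self-contained sketch of that pre/post pattern (hypothetical simplified types and helpers, not the kernel API; encoding the child address in bits 63:1 of the entry is an assumption for the demo):

	#include <assert.h>
	#include <stdint.h>
	#include <stdlib.h>

	typedef uint64_t pte_t;

	struct map_data {
		pte_t *anchor;	/* table entry the walk is anchored on */
		pte_t *childp;	/* child table cached before the entry is zeroed */
	};

	/* Follow a table entry to its child table (address in bits 63:1 here). */
	static pte_t *pte_follow(pte_t pte)
	{
		return (pte_t *)(uintptr_t)(pte & ~(pte_t)1);
	}

	/* Pre-order visit: cache the child, then wipe the whole entry. */
	static void walk_table_pre(pte_t *ptep, struct map_data *d)
	{
		d->childp = pte_follow(*ptep);
		*ptep = 0;
		d->anchor = ptep;
	}

	/* Post-order visit: the anchor reads as zero, so only the cache
	 * still knows where its child table lives. */
	static pte_t *walk_table_post(pte_t *ptep, struct map_data *d)
	{
		pte_t *childp = NULL;

		if (d->anchor == ptep) {
			childp = d->childp;
			d->anchor = NULL;
			d->childp = NULL;
		}
		return childp;	/* caller releases its reference to the child */
	}

	int main(void)
	{
		pte_t *child = calloc(512, sizeof(*child));
		pte_t table = (pte_t)(uintptr_t)child | 1;	/* valid table entry */
		struct map_data d = { 0 };

		walk_table_pre(&table, &d);
		assert(table == 0);				/* fully zeroed */
		assert(walk_table_post(&table, &d) == child);	/* child recovered */
		free(child);
		return 0;
	}

Note that in the real walker only the anchor entry is zeroed in the pre-order visit; other table entries are still intact when the post-order visit runs, so they can be followed directly, which is what the new else branch does.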