author     Paul Mackerras <paulus@samba.org>        2011-12-12 12:33:07 +0000
committer  Avi Kivity <avi@redhat.com>              2012-03-05 14:52:37 +0200
commit     06ce2c63d933e347f8a199f123a8a293619ab3d2
tree       455cd4b0e245675f542649fe509797f60adfe76f  /arch/powerpc/include/asm/kvm_book3s_64.h
parent     9d0ef5ea043d1242897d15c71bd1a15da79b4a5d
KVM: PPC: Maintain a doubly-linked list of guest HPTEs for each gfn
This expands the reverse mapping array to contain two links for each
HPTE which are used to link together HPTEs that correspond to the
same guest logical page. Each circular list of HPTEs is anchored
by the rmap array entry for the guest logical page, which is itself
reached through the relevant memslot. Links are 32-bit HPT entry indexes rather than
full 64-bit pointers, to save space. We use 3 of the remaining 32
bits in the rmap array entries as a lock bit, a referenced bit and
a present bit (the present bit is needed since HPTE index 0 is valid).
The bit lock for the rmap chain nests inside the HPTE lock bit.
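As an illustrative sketch of the layout described above (the exact bit
positions and field names are assumptions for exposition; the real
definitions land in arch/powerpc/include/asm/kvm_host.h, which is outside
this diffstat), an rmap array entry and the per-HPTE reverse-map entry
might look like this:

/*
 * Sketch of a 64-bit rmap array entry, consistent with the
 * description above (illustrative, not copied from the patch):
 *
 *   bits 0..31   index of one HPTE on the circular chain
 *   bit  32      present flag (needed because HPTE index 0 is valid)
 *   bit  33      referenced bit
 *   bit  63      lock bit protecting the chain
 */
#define KVMPPC_RMAP_INDEX	0xfffffffful
#define KVMPPC_RMAP_PRESENT	(1ul << 32)
#define KVMPPC_RMAP_REFERENCED	(1ul << 33)
#define KVMPPC_RMAP_LOCK_BIT	63

/* Each HPTE's reverse-map entry carries the two 32-bit links
 * (rather than full 64-bit pointers) mentioned above. */
struct revmap_entry {
	unsigned long	guest_rpte;
	unsigned int	forw, back;
};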
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/powerpc/include/asm/kvm_book3s_64.h')
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h | 18 ++++++++++++++++++
1 file changed, 18 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 18b590d261ff..9508c03e6671 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -113,6 +113,11 @@ static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
 	return 0;				/* error */
 }
 
+static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
+{
+	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
+}
+
 static inline int hpte_cache_flags_ok(unsigned long ptel, unsigned long io_type)
 {
 	unsigned int wimg = ptel & HPTE_R_WIMG;
@@ -139,6 +144,19 @@ static inline unsigned long hpte_cache_bits(unsigned long pte_val)
 #endif
 }
 
+static inline void lock_rmap(unsigned long *rmap)
+{
+	do {
+		while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
+			cpu_relax();
+	} while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
+}
+
+static inline void unlock_rmap(unsigned long *rmap)
+{
+	__clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
+}
+
 static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
 				   unsigned long pagesize)
 {
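To show how the new lock_rmap()/unlock_rmap() helpers fit the locking rule
stated in the commit message (the rmap chain lock nests inside the HPTE
lock bit), here is a minimal sketch of splicing an HPTE into a gfn's
circular chain. The function name chain_hpte and the revmap[] array
parameter are assumptions for illustration, not part of this patch; the
bit masks follow the illustrative layout sketched earlier.

/*
 * Sketch: insert HPTE pte_index into the circular chain anchored at
 * *rmap.  The caller is assumed to already hold the HPTE's own lock
 * bit; the rmap chain lock taken here nests inside it.
 */
static void chain_hpte(struct revmap_entry *revmap, unsigned long *rmap,
		       unsigned int pte_index)
{
	struct revmap_entry *rev = &revmap[pte_index];

	lock_rmap(rmap);
	if (*rmap & KVMPPC_RMAP_PRESENT) {
		/* non-empty chain: splice the new entry in behind the head */
		unsigned int head = *rmap & KVMPPC_RMAP_INDEX;

		rev->forw = head;
		rev->back = revmap[head].back;
		revmap[revmap[head].back].forw = pte_index;
		revmap[head].back = pte_index;
	} else {
		/* first HPTE for this page: a singleton circular list */
		rev->forw = rev->back = pte_index;
		*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | KVMPPC_RMAP_PRESENT |
			pte_index;
	}
	unlock_rmap(rmap);
}

Because the links are HPT indexes rather than pointers, walking or
splicing the chain only ever touches the revmap array and the rmap word,
which is what makes the 32-bit forw/back fields sufficient.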