author      Tang Chen <tangchen@cn.fujitsu.com>      2014-09-24 15:57:58 +0800
committer   Paolo Bonzini <pbonzini@redhat.com>      2014-09-24 14:08:01 +0200
commit      c24ae0dcd3e8695efa43e71704d1fc4bc7e29e9b (patch)
tree        9cd5baf0a56807097bebe6ccea30664e581c61ef /arch/x86
parent      38b9917350cb2946e368ba684cfc33d1672f104e (diff)
kvm: x86: Unpin and remove kvm_arch->apic_access_page
In order to make the APIC access page migratable, stop pinning it in memory. Since the page is no longer pinned, kvm_arch->apic_access_page can be removed as well: when its physical address has to be written into the VMCS, use gfn_to_page() to get the page struct needed by page_to_phys(), and unpin the page again immediately afterwards.

Suggested-by: Gleb Natapov <gleb@kernel.org>
Signed-off-by: Tang Chen <tangchen@cn.fujitsu.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
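For reference, the per-vCPU reload path after this change boils down to the sketch below. It is a condensed view of the x86.c hunks that follow, not extra code from the patch; the wrapper name is illustrative only, kernel context is assumed, and error handling is omitted as in the original.

/* Condensed, illustrative sketch of the patched reload path. */
static void reload_apic_access_page_sketch(struct kvm_vcpu *vcpu)
{
	struct page *page;

	/* Take a temporary reference and resolve the APIC access gfn. */
	page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);

	/* Write the page's current physical address into the VMCS. */
	kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));

	/*
	 * Drop the reference immediately so the page is never pinned;
	 * if it is later migrated or swapped out, the MMU notifier
	 * raises KVM_REQ_APIC_PAGE_RELOAD and this path runs again.
	 */
	put_page(page);
}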
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/kvm_host.h |  2
-rw-r--r--  arch/x86/kvm/vmx.c              |  9
-rw-r--r--  arch/x86/kvm/x86.c              | 22
3 files changed, 24 insertions(+), 9 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 60f9d73c6282..7d603a71ab3a 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -574,7 +574,7 @@ struct kvm_arch {
struct kvm_apic_map *apic_map;
unsigned int tss_addr;
- struct page *apic_access_page;
+ bool apic_access_page_done;
gpa_t wall_clock;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 881d266eb3ae..04fa1b8298c8 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4033,7 +4033,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
int r = 0;
mutex_lock(&kvm->slots_lock);
- if (kvm->arch.apic_access_page)
+ if (kvm->arch.apic_access_page_done)
goto out;
kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
kvm_userspace_mem.flags = 0;
@@ -4049,7 +4049,12 @@ static int alloc_apic_access_page(struct kvm *kvm)
goto out;
}
- kvm->arch.apic_access_page = page;
+ /*
+ * Do not pin the page in memory, so that memory hot-unplug
+ * is able to migrate it.
+ */
+ put_page(page);
+ kvm->arch.apic_access_page_done = true;
out:
mutex_unlock(&kvm->slots_lock);
return r;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c1412f5d93db..6857257f3810 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6028,19 +6028,31 @@ static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
{
+ struct page *page = NULL;
+
if (!kvm_x86_ops->set_apic_access_page_addr)
return;
- vcpu->kvm->arch.apic_access_page = gfn_to_page(vcpu->kvm,
- APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
- kvm_x86_ops->set_apic_access_page_addr(vcpu,
- page_to_phys(vcpu->kvm->arch.apic_access_page));
+ page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
+ kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));
+
+ /*
+ * Do not pin apic access page in memory, the MMU notifier
+ * will call us again if it is migrated or swapped out.
+ */
+ put_page(page);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page);
void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
unsigned long address)
{
+ /*
+ * The physical address of apic access page is stored in the VMCS.
+ * Update it when it becomes invalid.
+ */
+ if (address == gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT))
+ kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
}
/*
@@ -7297,8 +7309,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kfree(kvm->arch.vpic);
kfree(kvm->arch.vioapic);
kvm_free_vcpus(kvm);
- if (kvm->arch.apic_access_page)
- put_page(kvm->arch.apic_access_page);
kfree(rcu_dereference_check(kvm->arch.apic_map, 1));
}