author     Vitaly Kuznetsov <vkuznets@redhat.com>   2021-01-26 14:48:13 +0100
committer  Paolo Bonzini <pbonzini@redhat.com>      2021-02-09 08:17:15 -0500
commit     4592b7eaa87d3525825d4ab2a35308bcec9e5ff9
tree       744f648c1544ecb67be2056d135d3ced51d68ede /arch
parent     f2bc14b69c38b60f201fdf90c08cb2dc8966f331
KVM: x86: hyper-v: Allocate 'struct kvm_vcpu_hv' dynamically
Hyper-V context is only needed for guests which use Hyper-V emulation in
KVM (e.g. Windows/Hyper-V guests). 'struct kvm_vcpu_hv' is, however,
quite big: it accounts for more than 1/4 of the total 'struct
kvm_vcpu_arch', which is itself already quite big. This all looks like a
waste.

Allocate 'struct kvm_vcpu_hv' dynamically. This patch does not bring any
(intentional) functional change as we still allocate the context
unconditionally, but it paves the way to doing that only when needed.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20210126134816.1880136-13-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
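[Editor's note: the conversion follows a common embedded-struct-to-pointer pattern: the large sub-context becomes a separately allocated object with a back-pointer to its owner, and the init path becomes fallible. The sketch below illustrates that pattern in plain user-space C with made-up placeholder names (struct vcpu, struct hv_ctx, hv_ctx_init); it is not the kernel code from this patch.]

    #include <errno.h>
    #include <stdlib.h>

    /* Stand-ins for kvm_vcpu / kvm_vcpu_hv; names are illustrative only. */
    struct vcpu;

    struct hv_ctx {
            struct vcpu *vcpu;      /* back-pointer replaces container_of() */
            char big_state[4096];   /* stands in for the large emulation state */
    };

    struct vcpu {
            struct hv_ctx *hyperv;  /* was: struct hv_ctx hyperv; (embedded) */
    };

    /* Was 'void'; allocation can fail, so init now returns an error code. */
    static int hv_ctx_init(struct vcpu *v)
    {
            struct hv_ctx *ctx = calloc(1, sizeof(*ctx)); /* kzalloc() in-kernel */

            if (!ctx)
                    return -ENOMEM;

            v->hyperv = ctx;
            ctx->vcpu = v;
            return 0;
    }

    static void hv_ctx_uninit(struct vcpu *v)
    {
            free(v->hyperv);        /* kfree() in-kernel */
            v->hyperv = NULL;
    }

    int main(void)
    {
            struct vcpu v = { 0 };

            if (hv_ctx_init(&v))
                    return 1;       /* mirrors the goto-unwind in the caller */
            hv_ctx_uninit(&v);
            return 0;
    }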
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/kvm_host.h |  3
-rw-r--r--  arch/x86/kvm/hyperv.c           | 16
-rw-r--r--  arch/x86/kvm/hyperv.h           | 13
-rw-r--r--  arch/x86/kvm/x86.c              |  7
4 files changed, 27 insertions(+), 12 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b6de568e7476..95eb6e93a4b7 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -519,6 +519,7 @@ struct kvm_vcpu_hv_synic {
 
 /* Hyper-V per vcpu emulation context */
 struct kvm_vcpu_hv {
+	struct kvm_vcpu *vcpu;
 	u32 vp_index;
 	u64 hv_vapic;
 	s64 runtime_offset;
@@ -735,7 +736,7 @@ struct kvm_vcpu_arch {
 	/* used for guest single stepping over the given code position */
 	unsigned long singlestep_rip;
 
-	struct kvm_vcpu_hv hyperv;
+	struct kvm_vcpu_hv *hyperv;
 	struct kvm_vcpu_xen xen;
 
 	cpumask_var_t wbinvd_dirty_mask;
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 10e7ed2aa8fe..333bd200f186 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -837,6 +837,9 @@ void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
 
 	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
 		stimer_cleanup(&hv_vcpu->stimer[i]);
+
+	kfree(hv_vcpu);
+	vcpu->arch.hyperv = NULL;
 }
 
 bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
@@ -886,16 +889,25 @@ static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
 	stimer_prepare_msg(stimer);
 }
 
-void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
+int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
 {
-	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+	struct kvm_vcpu_hv *hv_vcpu;
 	int i;
 
+	hv_vcpu = kzalloc(sizeof(struct kvm_vcpu_hv), GFP_KERNEL_ACCOUNT);
+	if (!hv_vcpu)
+		return -ENOMEM;
+
+	vcpu->arch.hyperv = hv_vcpu;
+	hv_vcpu->vcpu = vcpu;
+
 	synic_init(&hv_vcpu->synic);
 
 	bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
 	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		stimer_init(&hv_vcpu->stimer[i], i);
+
+	return 0;
 }
 
 void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index 57e53a88f5eb..4bc2ab0caa92 100644
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -57,20 +57,19 @@ static inline struct kvm_hv *to_kvm_hv(struct kvm *kvm)
 
 static inline struct kvm_vcpu_hv *to_hv_vcpu(struct kvm_vcpu *vcpu)
 {
-	return &vcpu->arch.hyperv;
+	return vcpu->arch.hyperv;
 }
 
 static inline struct kvm_vcpu *hv_vcpu_to_vcpu(struct kvm_vcpu_hv *hv_vcpu)
 {
-	struct kvm_vcpu_arch *arch;
-
-	arch = container_of(hv_vcpu, struct kvm_vcpu_arch, hyperv);
-	return container_of(arch, struct kvm_vcpu, arch);
+	return hv_vcpu->vcpu;
 }
 
 static inline struct kvm_vcpu_hv_synic *to_hv_synic(struct kvm_vcpu *vcpu)
 {
-	return &vcpu->arch.hyperv.synic;
+	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+
+	return &hv_vcpu->synic;
 }
 
 static inline struct kvm_vcpu *hv_synic_to_vcpu(struct kvm_vcpu_hv_synic *synic)
@@ -101,7 +100,7 @@ int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint);
 void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector);
 int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages);
 
-void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
+int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
 void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu);
 void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7caf95dadc93..a75d01523c4f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10083,11 +10083,12 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	vcpu->arch.pending_external_vector = -1;
 	vcpu->arch.preempted_in_kernel = false;
 
-	kvm_hv_vcpu_init(vcpu);
+	if (kvm_hv_vcpu_init(vcpu))
+		goto free_guest_fpu;
 
 	r = static_call(kvm_x86_vcpu_create)(vcpu);
 	if (r)
-		goto free_guest_fpu;
+		goto free_hv_vcpu;
 
 	vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
 	vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
@@ -10098,6 +10099,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	vcpu_put(vcpu);
 	return 0;
 
+free_hv_vcpu:
+	kvm_hv_vcpu_uninit(vcpu);
 free_guest_fpu:
 	kvm_free_guest_fpu(vcpu);
 free_user_fpu: