author     Vitaly Kuznetsov <vkuznets@redhat.com>   2021-01-26 14:48:11 +0100
committer  Paolo Bonzini <pbonzini@redhat.com>      2021-02-09 08:17:13 -0500
commit     9ff5e0304e949a8a4e584c8c2b11fad9b2e0b133 (patch)
tree       6acd2b3bc2787148343b4cb22ed41b84ae1b12df /arch
parent     72167a9d7da2c295caf1d4a2d58128406786505d (diff)
KVM: x86: hyper-v: Always use to_hv_vcpu() accessor to get to 'struct kvm_vcpu_hv'
As a preparation to allocating Hyper-V context dynamically, make it clear
who's the user of the said context.

No functional change intended.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20210126134816.1880136-11-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
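For context, a minimal sketch of what the to_hv_vcpu() accessor is assumed to
look like at this point in the series (its definition is not part of this
diff): it simply wraps the embedded per-vCPU context that the open-coded
&vcpu->arch.hyperv references used to reach, which is what lets a later patch
move the context to dynamic allocation by changing a single helper.

    static inline struct kvm_vcpu_hv *to_hv_vcpu(struct kvm_vcpu *vcpu)
    {
    	/* Assumed definition: still returns the embedded context here;
    	 * the planned follow-up would switch this to a dynamically
    	 * allocated pointer instead.
    	 */
    	return &vcpu->arch.hyperv;
    }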
Diffstat (limited to 'arch')
 -rw-r--r--  arch/x86/kvm/hyperv.c   | 14
 -rw-r--r--  arch/x86/kvm/hyperv.h   |  4
 -rw-r--r--  arch/x86/kvm/lapic.h    |  2
 -rw-r--r--  arch/x86/kvm/vmx/vmx.c  |  8
 -rw-r--r--  arch/x86/kvm/x86.c      |  4
5 files changed, 21 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index b49e8ca6be06..1482cad2770b 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -191,7 +191,7 @@ static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
{
struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
- struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
hv_vcpu->exit.u.synic.msr = msr;
@@ -294,7 +294,7 @@ static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu)
static void syndbg_exit(struct kvm_vcpu *vcpu, u32 msr)
{
struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
- struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG;
hv_vcpu->exit.u.syndbg.msr = msr;
@@ -840,7 +840,9 @@ void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
{
- if (!(vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+
+ if (!(hv_vcpu->hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
return false;
return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}
@@ -1231,7 +1233,7 @@ static u64 current_task_runtime_100ns(void)
static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
- struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
switch (msr) {
case HV_X64_MSR_VP_INDEX: {
@@ -1394,7 +1396,7 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
bool host)
{
u64 data = 0;
- struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
switch (msr) {
case HV_X64_MSR_VP_INDEX:
@@ -1512,7 +1514,7 @@ static __always_inline unsigned long *sparse_set_to_vcpu_mask(
static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, u64 ingpa, u16 rep_cnt, bool ex)
{
struct kvm *kvm = vcpu->kvm;
- struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
struct hv_tlb_flush_ex flush_ex;
struct hv_tlb_flush flush;
u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index fdb321ba9c3f..be1e3f5d1df6 100644
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -119,7 +119,9 @@ static inline struct kvm_vcpu *hv_stimer_to_vcpu(struct kvm_vcpu_hv_stimer *stim
static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu)
{
- return !bitmap_empty(vcpu->arch.hyperv.stimer_pending_bitmap,
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+
+ return !bitmap_empty(hv_vcpu->stimer_pending_bitmap,
HV_SYNIC_STIMER_COUNT);
}
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 74016d21738d..997c45a5963a 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -6,6 +6,8 @@
#include <linux/kvm_host.h>
+#include "hyperv.h"
+
#define KVM_APIC_INIT 0
#define KVM_APIC_SIPI 1
#define KVM_APIC_LVT_NUM 6
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 8f75374b221b..f83199f4d77f 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6809,12 +6809,14 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
/* All fields are clean at this point */
- if (static_branch_unlikely(&enable_evmcs))
+ if (static_branch_unlikely(&enable_evmcs)) {
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+
current_evmcs->hv_clean_fields |=
HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
- if (static_branch_unlikely(&enable_evmcs))
- current_evmcs->hv_vp_id = vcpu->arch.hyperv.vp_index;
+ current_evmcs->hv_vp_id = hv_vcpu->vp_index;
+ }
/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
if (vmx->host_debugctlmsr)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1f93879c7364..40bd2e3b8f91 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8955,8 +8955,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
goto out;
}
if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) {
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+
vcpu->run->exit_reason = KVM_EXIT_HYPERV;
- vcpu->run->hyperv = vcpu->arch.hyperv.exit;
+ vcpu->run->hyperv = hv_vcpu->exit;
r = 0;
goto out;
}