author     Sean Christopherson <sean.j.christopherson@intel.com>  2018-07-23 12:32:46 -0700
committer  Paolo Bonzini <pbonzini@redhat.com>  2018-08-06 17:59:14 +0200
commit     e920de8507c6c8760c775cd718627e7cbf57c3b7 (patch)
tree       fd891fb14e3044f643a168b8aa619de2651e71ae /arch/x86/kvm
parent     fd1ec7723fbd560f924769b43cbdfe82dfd6a98e (diff)
KVM: vmx: compute need to reload FS/GS/LDT on demand
Remove fs_reload_needed and gs_ldt_reload_needed from host_state and
instead compute whether we need to reload various state at the time we
actually do the reload.  The state that is tracked by the *_reload_needed
variables is not any more volatile than the trackers themselves.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/vmx.c | 18
1 file changed, 5 insertions(+), 13 deletions(-)
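As background for the diff below (not part of the patch itself): the low three bits of a segment selector hold the RPL (bits 1:0) and the TI flag (bit 2, GDT vs. LDT). The VMCS host-state selector fields only accept selectors with RPL = 0 and TI = 0, so a selector with any of those bits set cannot be restored by hardware on VM-exit and must be reloaded by software, which is exactly what the new on-demand checks test with "sel & 7". A minimal standalone sketch of that logic, using illustrative helper names rather than the kernel's own:

/*
 * Illustrative sketch only: mirrors the "compute on demand" checks the
 * patch introduces; the helper names and macro are made up for clarity.
 */
#include <stdbool.h>
#include <stdint.h>

#define SEG_SEL_RPL_TI_MASK 0x7   /* bits 1:0 = RPL, bit 2 = TI */

/* True if the selector cannot be restored via the VMCS host-state field. */
static bool seg_needs_manual_reload(uint16_t sel)
{
	return (sel & SEG_SEL_RPL_TI_MASK) != 0;
}

/* True if GS/LDT must be reloaded after VM-exit (the new combined test). */
static bool gs_ldt_needs_reload(uint16_t ldt_sel, uint16_t gs_sel)
{
	return ldt_sel != 0 || seg_needs_manual_reload(gs_sel);
}

With the two *_reload_needed fields gone, vmx_prepare_switch_to_host() can derive the same answers from the saved selectors themselves, as the hunks below show.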
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a135c91d44f8..c8a583ff7bf2 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -824,8 +824,6 @@ struct vcpu_vmx {
#ifdef CONFIG_X86_64
u16 ds_sel, es_sel;
#endif
- int gs_ldt_reload_needed;
- int fs_reload_needed;
} host_state;
struct {
int vm86_active;
@@ -2681,7 +2679,6 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
* allow segment selectors with cpl > 0 or ti == 1.
*/
vmx->host_state.ldt_sel = kvm_read_ldt();
- vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
#ifdef CONFIG_X86_64
savesegment(ds, vmx->host_state.ds_sel);
@@ -2711,20 +2708,15 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
#endif
vmx->host_state.fs_sel = fs_sel;
- if (!(fs_sel & 7)) {
+ if (!(fs_sel & 7))
vmcs_write16(HOST_FS_SELECTOR, fs_sel);
- vmx->host_state.fs_reload_needed = 0;
- } else {
+ else
vmcs_write16(HOST_FS_SELECTOR, 0);
- vmx->host_state.fs_reload_needed = 1;
- }
vmx->host_state.gs_sel = gs_sel;
if (!(gs_sel & 7))
vmcs_write16(HOST_GS_SELECTOR, gs_sel);
- else {
+ else
vmcs_write16(HOST_GS_SELECTOR, 0);
- vmx->host_state.gs_ldt_reload_needed = 1;
- }
vmcs_writel(HOST_FS_BASE, fs_base);
vmcs_writel(HOST_GS_BASE, gs_base);
@@ -2749,7 +2741,7 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
if (is_long_mode(&vmx->vcpu))
rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif
- if (vmx->host_state.gs_ldt_reload_needed) {
+ if (vmx->host_state.ldt_sel || (vmx->host_state.gs_sel & 7)) {
kvm_load_ldt(vmx->host_state.ldt_sel);
#ifdef CONFIG_X86_64
load_gs_index(vmx->host_state.gs_sel);
@@ -2757,7 +2749,7 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
loadsegment(gs, vmx->host_state.gs_sel);
#endif
}
- if (vmx->host_state.fs_reload_needed)
+ if (vmx->host_state.fs_sel & 7)
loadsegment(fs, vmx->host_state.fs_sel);
#ifdef CONFIG_X86_64
if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {