author     Linus Torvalds <torvalds@linux-foundation.org>  2016-08-06 09:18:21 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-08-06 09:18:21 -0400
commit     80fac0f577a35c437219a2786c1804ab8ca1e998
tree       dfe87e66f937b2b42ba6b87c4538b7e5bea06d07 /arch/x86
parent     4305f42401b29e2e024bd064618faf25aef5cb69
parent     45e11817d5703eceb65a673927a8bc74dc1286d6
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull more KVM updates from Paolo Bonzini:

 - ARM bugfix and MSI injection support

 - x86 nested virt tweak and OOPS fix

 - Simplify pvclock code (vdso bits acked by Andy Lutomirski)

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  nvmx: mark ept single context invalidation as supported
  nvmx: remove comment about missing nested vpid support
  KVM: lapic: fix access preemption timer stuff even if kernel_irqchip=off
  KVM: documentation: fix KVM_CAP_X2APIC_API information
  x86: vdso: use __pvclock_read_cycles
  pvclock: introduce seqcount-like API
  arm64: KVM: Set cpsr before spsr on fault injection
  KVM: arm: vgic-irqfd: Workaround changing kvm_set_routing_entry prototype
  KVM: arm/arm64: Enable MSI routing
  KVM: arm/arm64: Enable irqchip routing
  KVM: Move kvm_setup_default/empty_irq_routing declaration in arch specific header
  KVM: irqchip: Convey devid to kvm_set_msi
  KVM: Add devid in kvm_kernel_irq_routing_entry
  KVM: api: Pass the devid in the msi routing entry
Diffstat (limited to 'arch/x86')

 -rw-r--r--  arch/x86/entry/vdso/vclock_gettime.c  | 25
 -rw-r--r--  arch/x86/include/asm/pvclock.h        | 39
 -rw-r--r--  arch/x86/kernel/pvclock.c             | 17
 -rw-r--r--  arch/x86/kvm/irq.h                    |  3
 -rw-r--r--  arch/x86/kvm/lapic.c                  |  3
 -rw-r--r--  arch/x86/kvm/vmx.c                    | 15

 6 files changed, 47 insertions(+), 55 deletions(-)
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 2f02d23a05ef..94d54d0defa7 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -96,9 +96,8 @@ static notrace cycle_t vread_pvclock(int *mode)
{
const struct pvclock_vcpu_time_info *pvti = &get_pvti0()->pvti;
cycle_t ret;
- u64 tsc, pvti_tsc;
- u64 last, delta, pvti_system_time;
- u32 version, pvti_tsc_to_system_mul, pvti_tsc_shift;
+ u64 last;
+ u32 version;
/*
* Note: The kernel and hypervisor must guarantee that cpu ID
@@ -123,29 +122,15 @@ static notrace cycle_t vread_pvclock(int *mode)
*/
do {
- version = pvti->version;
-
- smp_rmb();
+ version = pvclock_read_begin(pvti);
if (unlikely(!(pvti->flags & PVCLOCK_TSC_STABLE_BIT))) {
*mode = VCLOCK_NONE;
return 0;
}
- tsc = rdtsc_ordered();
- pvti_tsc_to_system_mul = pvti->tsc_to_system_mul;
- pvti_tsc_shift = pvti->tsc_shift;
- pvti_system_time = pvti->system_time;
- pvti_tsc = pvti->tsc_timestamp;
-
- /* Make sure that the version double-check is last. */
- smp_rmb();
- } while (unlikely((version & 1) || version != pvti->version));
-
- delta = tsc - pvti_tsc;
- ret = pvti_system_time +
- pvclock_scale_delta(delta, pvti_tsc_to_system_mul,
- pvti_tsc_shift);
+ ret = __pvclock_read_cycles(pvti);
+ } while (pvclock_read_retry(pvti, version));
/* refer to vread_tsc() comment for rationale */
last = gtod->cycle_last;
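Taken together, the hunks above collapse the open-coded version handshake into the new helpers; consolidated from the '+' lines (no additional code), the post-patch read loop in vread_pvclock() is simply:

	do {
		version = pvclock_read_begin(pvti);

		if (unlikely(!(pvti->flags & PVCLOCK_TSC_STABLE_BIT))) {
			*mode = VCLOCK_NONE;
			return 0;
		}

		ret = __pvclock_read_cycles(pvti);
	} while (pvclock_read_retry(pvti, version));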
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 7c1c89598688..d019f0cc80ec 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -25,6 +25,24 @@ void pvclock_resume(void);
void pvclock_touch_watchdogs(void);
+static __always_inline
+unsigned pvclock_read_begin(const struct pvclock_vcpu_time_info *src)
+{
+ unsigned version = src->version & ~1;
+ /* Make sure that the version is read before the data. */
+ virt_rmb();
+ return version;
+}
+
+static __always_inline
+bool pvclock_read_retry(const struct pvclock_vcpu_time_info *src,
+ unsigned version)
+{
+ /* Make sure that the version is re-read after the data. */
+ virt_rmb();
+ return unlikely(version != src->version);
+}
+
/*
* Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
* yielding a 64-bit result.
@@ -69,23 +87,12 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
}
static __always_inline
-unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
- cycle_t *cycles, u8 *flags)
+cycle_t __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src)
{
- unsigned version;
- cycle_t offset;
- u64 delta;
-
- version = src->version;
- /* Make the latest version visible */
- smp_rmb();
-
- delta = rdtsc_ordered() - src->tsc_timestamp;
- offset = pvclock_scale_delta(delta, src->tsc_to_system_mul,
- src->tsc_shift);
- *cycles = src->system_time + offset;
- *flags = src->flags;
- return version;
+ u64 delta = rdtsc_ordered() - src->tsc_timestamp;
+ cycle_t offset = pvclock_scale_delta(delta, src->tsc_to_system_mul,
+ src->tsc_shift);
+ return src->system_time + offset;
}
struct pvclock_vsyscall_time_info {
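__pvclock_read_cycles() now returns the computed time directly instead of reporting the version and filling out-parameters; the version handshake becomes the caller's job. The arithmetic is the standard pvclock conversion: system_time plus the TSC delta scaled by tsc_shift and the 32.32 fixed-point multiplier tsc_to_system_mul. The body of pvclock_scale_delta() is unchanged and not shown in this diff; a simplified sketch of what it computes (the in-tree helper widens the multiply to 128 bits to avoid overflow, this illustration does not):

	/*
	 * Sketch only: ns = (delta << shift) * mul_frac / 2^32, where
	 * shift may be negative. The real pvclock_scale_delta() uses a
	 * 128-bit intermediate product; this version can overflow.
	 */
	static u64 scale_delta_sketch(u64 delta, u32 mul_frac, int shift)
	{
		if (shift < 0)
			delta >>= -shift;
		else
			delta <<= shift;
		return (delta * mul_frac) >> 32;
	}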
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 06c58ce46762..3599404e3089 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -64,14 +64,9 @@ u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
u8 flags;
do {
- version = src->version;
- /* Make the latest version visible */
- smp_rmb();
-
+ version = pvclock_read_begin(src);
flags = src->flags;
- /* Make sure that the version double-check is last. */
- smp_rmb();
- } while ((src->version & 1) || version != src->version);
+ } while (pvclock_read_retry(src, version));
return flags & valid_flags;
}
@@ -84,10 +79,10 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
u8 flags;
do {
- version = __pvclock_read_cycles(src, &ret, &flags);
- /* Make sure that the version double-check is last. */
- smp_rmb();
- } while ((src->version & 1) || version != src->version);
+ version = pvclock_read_begin(src);
+ ret = __pvclock_read_cycles(src);
+ flags = src->flags;
+ } while (pvclock_read_retry(src, version));
if (unlikely((flags & PVCLOCK_GUEST_STOPPED) != 0)) {
src->flags &= ~PVCLOCK_GUEST_STOPPED;
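Both loops now share the seqcount-style reader protocol. The old explicit '(src->version & 1)' test could be dropped because pvclock_read_begin() masks off the low bit: a reader that starts while an update is in flight snapshots an even value the version field never actually held, so pvclock_read_retry() is guaranteed to request another pass. For illustration, the hypervisor-side update the readers defend against looks conceptually like this (a hypothetical sketch, not code from this diff):

	/* Hypothetical writer: an odd version marks an update in progress. */
	static void pvti_update(struct pvclock_vcpu_time_info *dst,
				const struct pvclock_vcpu_time_info *src)
	{
		dst->version++;			/* now odd: readers must retry */
		virt_wmb();			/* version store before data */
		dst->tsc_timestamp	= src->tsc_timestamp;
		dst->system_time	= src->system_time;
		dst->tsc_to_system_mul	= src->tsc_to_system_mul;
		dst->tsc_shift		= src->tsc_shift;
		dst->flags		= src->flags;
		virt_wmb();			/* data stores before version */
		dst->version++;			/* even again: consistent */
	}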
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index 61ebdc13a29a..035731eb3897 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -120,4 +120,7 @@ void __kvm_migrate_timers(struct kvm_vcpu *vcpu);
int apic_has_pending_timer(struct kvm_vcpu *vcpu);
+int kvm_setup_default_irq_routing(struct kvm *kvm);
+int kvm_setup_empty_irq_routing(struct kvm *kvm);
+
#endif
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 730cf174090a..b62c85229711 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1349,6 +1349,9 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic)
bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
{
+ if (!lapic_in_kernel(vcpu))
+ return false;
+
return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
}
EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
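This is the x86 OOPS fix called out in the merge message: with kernel_irqchip=off the guest has no in-kernel local APIC, vcpu->arch.apic is NULL, and the previously unconditional dereference faulted whenever the preemption-timer code polled kvm_lapic_hv_timer_in_use(). For context, lapic_in_kernel() at the time reduced to roughly a NULL check (an approximation for illustration; the real helper lives in arch/x86/kvm/lapic.h and is not part of this diff):

	/* Approximate shape of lapic_in_kernel() circa this merge. */
	static inline int lapic_in_kernel(struct kvm_vcpu *vcpu)
	{
		return vcpu->arch.apic != NULL;
	}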
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index bc354f003ce1..a45d8580f91e 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2809,12 +2809,8 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
vmx->nested.nested_vmx_ept_caps |=
VMX_EPT_EXECUTE_ONLY_BIT;
vmx->nested.nested_vmx_ept_caps &= vmx_capability.ept;
- /*
- * For nested guests, we don't do anything specific
- * for single context invalidation. Hence, only advertise
- * support for global context invalidation.
- */
- vmx->nested.nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT;
+ vmx->nested.nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
+ VMX_EPT_EXTENT_CONTEXT_BIT;
} else
vmx->nested.nested_vmx_ept_caps = 0;
@@ -2945,7 +2941,6 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
vmx->nested.nested_vmx_secondary_ctls_high);
break;
case MSR_IA32_VMX_EPT_VPID_CAP:
- /* Currently, no nested vpid support */
*pdata = vmx->nested.nested_vmx_ept_caps |
((u64)vmx->nested.nested_vmx_vpid_caps << 32);
break;
@@ -7609,12 +7604,16 @@ static int handle_invept(struct kvm_vcpu *vcpu)
switch (type) {
case VMX_EPT_EXTENT_GLOBAL:
+ /*
+ * TODO: track mappings and invalidate
+ * single context requests appropriately
+ */
+ case VMX_EPT_EXTENT_CONTEXT:
kvm_mmu_sync_roots(vcpu);
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
nested_vmx_succeed(vcpu);
break;
default:
- /* Trap single context invalidation invept calls */
BUG_ON(1);
break;
}
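Single-context invalidation is implemented by deliberately over-invalidating: the VMX_EPT_EXTENT_GLOBAL label now falls through into the same sync-and-flush body, which is architecturally fine because invalidating more than the guest asked for is always permitted. Consolidated from the hunk above, the post-patch switch reads:

	switch (type) {
	case VMX_EPT_EXTENT_GLOBAL:
		/*
		 * TODO: track mappings and invalidate
		 * single context requests appropriately
		 */
	case VMX_EPT_EXTENT_CONTEXT:
		kvm_mmu_sync_roots(vcpu);
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		nested_vmx_succeed(vcpu);
		break;
	default:
		BUG_ON(1);
		break;
	}

This pairs with the nested_vmx_setup_ctls_msrs() hunk earlier in the diff, which advertises VMX_EPT_EXTENT_CONTEXT_BIT to L1 so the guest will actually issue such requests.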