author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2020-07-20 09:31:35 +0200
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2020-07-20 09:31:35 +0200
commit    6bdb486c5a628f7a927c2658166e3a5ef1f883e7 (patch)
tree      4b817231c6f48216bc0deff7075c91b6c3ebdf35 /arch/arm64/kvm
parent    da6d647598a6d182eb6a0344a7b14ae005244399 (diff)
parent    ba47d845d715a010f7b51f6f89bae32845e6acb7 (diff)
Merge 5.8-rc6 into driver-core-next
We need the driver core fixes in here too.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--  arch/arm64/kvm/hyp-init.S      | 11
-rw-r--r--  arch/arm64/kvm/pmu.c           |  7
-rw-r--r--  arch/arm64/kvm/pvtime.c        | 15
-rw-r--r--  arch/arm64/kvm/reset.c         | 10
-rw-r--r--  arch/arm64/kvm/vgic/vgic-v4.c  |  8
5 files changed, 40 insertions(+), 11 deletions(-)
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 6e6ed5581eed..e76c0e89d48e 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -136,11 +136,15 @@ SYM_CODE_START(__kvm_handle_stub_hvc)
1: cmp x0, #HVC_RESET_VECTORS
b.ne 1f
-reset:
+
/*
- * Reset kvm back to the hyp stub. Do not clobber x0-x4 in
- * case we coming via HVC_SOFT_RESTART.
+ * Set the HVC_RESET_VECTORS return code before entering the common
+ * path so that we do not clobber x0-x2 in case we are coming via
+ * HVC_SOFT_RESTART.
*/
+ mov x0, xzr
+reset:
+ /* Reset kvm back to the hyp stub. */
mrs x5, sctlr_el2
mov_q x6, SCTLR_ELx_FLAGS
bic x5, x5, x6 // Clear SCTL_M and etc
@@ -151,7 +155,6 @@ reset:
/* Install stub vectors */
adr_l x5, __hyp_stub_vectors
msr vbar_el2, x5
- mov x0, xzr
eret
1: /* Bad stub call */
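For readers who don't follow EL2 assembly, here is a rough C model of the control flow this hunk establishes. It is a sketch only: handle_stub_hvc(), reset_to_hyp_stub() and jump_to_restart_target() are made-up names, and the real logic must stay in assembly because it runs while the EL2 MMU is being torn down.

#include <asm/virt.h>	/* HVC_SOFT_RESTART, HVC_RESET_VECTORS, HVC_STUB_ERR */

static void reset_to_hyp_stub(void);			/* hypothetical: the "reset:" path */
static void jump_to_restart_target(unsigned long *regs);	/* hypothetical: the final eret */

static void handle_stub_hvc(unsigned long *regs)
{
	if (regs[0] == HVC_SOFT_RESTART) {
		/* regs[0..2] already hold the restart target's arguments
		 * and must survive the common path untouched. */
		reset_to_hyp_stub();
		jump_to_restart_target(regs);	/* does not return */
	} else if (regs[0] == HVC_RESET_VECTORS) {
		/* The return code is now set *before* the shared path,
		 * so the path itself no longer writes x0 at the end. */
		regs[0] = 0;			/* the moved "mov x0, xzr" */
		reset_to_hyp_stub();
	} else {
		regs[0] = HVC_STUB_ERR;		/* bad stub call */
	}
}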
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index b5ae3a5d509e..3c224162b3dd 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -159,7 +159,10 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
}
/*
- * On VHE ensure that only guest events have EL0 counting enabled
+ * On VHE ensure that only guest events have EL0 counting enabled.
+ * This is called from both vcpu_{load,put} and the sysreg handling.
+ * Since the latter is preemptible, special care must be taken to
+ * disable preemption.
*/
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
{
@@ -169,12 +172,14 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
if (!has_vhe())
return;
+ preempt_disable();
host = this_cpu_ptr(&kvm_host_data);
events_guest = host->pmu_events.events_guest;
events_host = host->pmu_events.events_host;
kvm_vcpu_pmu_enable_el0(events_guest);
kvm_vcpu_pmu_disable_el0(events_host);
+ preempt_enable();
}
/*
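The rule the pmu.c hunks apply is generic: a this_cpu_ptr() result is only trustworthy while preemption is off, since the task may otherwise migrate to another CPU between fetching the pointer and dereferencing it. A minimal sketch of the pattern, with placeholder type and helper names (pmu_events_snapshot, program_counters):

#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/types.h>

struct pmu_events_snapshot {			/* placeholder type */
	u32 events_host;
	u32 events_guest;
};

static DEFINE_PER_CPU(struct pmu_events_snapshot, pmu_snapshot);

static void program_counters(u32 guest, u32 host)
{
	/* placeholder: the real code pokes PMCNTENSET_EL0/PMCNTENCLR_EL0 */
}

static void update_el0_counting(void)
{
	struct pmu_events_snapshot *evt;

	preempt_disable();			/* pin the task to this CPU... */
	evt = this_cpu_ptr(&pmu_snapshot);
	/* ...so both reads below are guaranteed to see this CPU's data */
	program_counters(evt->events_guest, evt->events_host);
	preempt_enable();
}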
diff --git a/arch/arm64/kvm/pvtime.c b/arch/arm64/kvm/pvtime.c
index 1e0f4c284888..f7b52ce1557e 100644
--- a/arch/arm64/kvm/pvtime.c
+++ b/arch/arm64/kvm/pvtime.c
@@ -3,6 +3,7 @@
#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
+#include <linux/sched/stat.h>
#include <asm/kvm_mmu.h>
#include <asm/pvclock-abi.h>
@@ -73,6 +74,11 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
return base;
}
+static bool kvm_arm_pvtime_supported(void)
+{
+ return !!sched_info_on();
+}
+
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
@@ -82,7 +88,8 @@ int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
int ret = 0;
int idx;
- if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
+ if (!kvm_arm_pvtime_supported() ||
+ attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
return -ENXIO;
if (get_user(ipa, user))
@@ -110,7 +117,8 @@ int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
u64 __user *user = (u64 __user *)attr->addr;
u64 ipa;
- if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
+ if (!kvm_arm_pvtime_supported() ||
+ attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
return -ENXIO;
ipa = vcpu->arch.steal.base;
@@ -125,7 +133,8 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
{
switch (attr->attr) {
case KVM_ARM_VCPU_PVTIME_IPA:
- return 0;
+ if (kvm_arm_pvtime_supported())
+ return 0;
}
return -ENXIO;
}
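Seen from userspace, the pvtime.c change makes the stolen-time attribute probe as absent when the host lacks scheduler statistics. A sketch of how a VMM might test for it, assuming the standard KVM device-attribute ioctl on the vCPU fd:

#include <errno.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Returns 1 if stolen time can be configured for this vCPU, 0 if the
 * kernel answers ENXIO (which, after this patch, also covers hosts
 * without schedstats).
 */
static int pvtime_supported(int vcpu_fd)
{
	struct kvm_device_attr attr = {
		.group	= KVM_ARM_VCPU_PVTIME_CTRL,
		.attr	= KVM_ARM_VCPU_PVTIME_IPA,
	};

	return ioctl(vcpu_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0;
}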
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index d3b209023727..6ed36be51b4b 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -245,7 +245,7 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
*/
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
- int ret = -EINVAL;
+ int ret;
bool loaded;
u32 pstate;
@@ -269,15 +269,19 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
- if (kvm_vcpu_enable_ptrauth(vcpu))
+ if (kvm_vcpu_enable_ptrauth(vcpu)) {
+ ret = -EINVAL;
goto out;
+ }
}
switch (vcpu->arch.target) {
default:
if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
- if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1))
+ if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1)) {
+ ret = -EINVAL;
goto out;
+ }
pstate = VCPU_RESET_PSTATE_SVC;
} else {
pstate = VCPU_RESET_PSTATE_EL1;
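The reset.c hunks replace a preinitialized catch-all error with an explicit code at each failure site, a common kernel idiom: leaving ret unset means the compiler can warn about any exit path that forgets to assign it, rather than that path silently returning -EINVAL. A condensed sketch of the shape, with precondition() and do_the_reset() as stand-ins:

#include <linux/errno.h>
#include <linux/types.h>

static bool precondition(void);		/* stand-in check */
static int do_the_reset(void);		/* stand-in for the real work */

int reset_example(void)
{
	int ret;			/* deliberately left unset */

	if (!precondition()) {
		ret = -EINVAL;		/* each failure names its own code */
		goto out;
	}

	ret = do_the_reset();		/* success or the callee's error */
out:
	return ret;
}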
diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
index 27ac833e5ec7..b5fa73c9fd35 100644
--- a/arch/arm64/kvm/vgic/vgic-v4.c
+++ b/arch/arm64/kvm/vgic/vgic-v4.c
@@ -90,7 +90,15 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
!irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
disable_irq_nosync(irq);
+ /*
+ * The v4.1 doorbell can fire concurrently with the vPE being
+ * made non-resident. Ensure we only update pending_last
+ * *after* the non-residency sequence has completed.
+ */
+ raw_spin_lock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
+ raw_spin_unlock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
+
kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
kvm_vcpu_kick(vcpu);
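The lock taken here pairs with the GICv4.1 non-residency sequence in the ITS driver, which holds the same per-vPE vpe_lock; without it, the doorbell handler could set pending_last in the middle of that sequence and the update could be lost. A simplified model of the two sides follows; the non-resident half is a paraphrase of the irqchip driver's role, not its actual code.

#include <linux/irqchip/arm-gic-v4.h>	/* struct its_vpe */
#include <linux/spinlock.h>

/* Doorbell side, as in the hunk above: serialize against non-residency. */
static void doorbell_fired(struct its_vpe *vpe)
{
	raw_spin_lock(&vpe->vpe_lock);
	vpe->pending_last = true;	/* ordered after the sequence below */
	raw_spin_unlock(&vpe->vpe_lock);
}

/* Non-resident side: a paraphrased sketch of the ITS driver's role. */
static void make_vpe_non_resident(struct its_vpe *vpe)
{
	raw_spin_lock(&vpe->vpe_lock);
	/* ... tell the redistributor the vPE is no longer resident and
	 * sample its pending state ... */
	raw_spin_unlock(&vpe->vpe_lock);
}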