author     Marc Zyngier <maz@kernel.org>    2021-02-12 14:08:41 +0000
committer  Marc Zyngier <maz@kernel.org>    2021-02-12 14:08:41 +0000
commit     c93199e93e1232b7220482dffa05b7a32a195fe8 (patch)
tree       8af55cb63c2c44c37455f0d79c06c5d92767f775
parent     8cb68a9d147da4630603937e42e52c0b4ea1602e (diff)
parent     8c358b29e0dc69d5ced6acfea4cc3d1dcf10df27 (diff)
Merge branch 'kvm-arm64/pmu-debug-fixes-5.11' into kvmarm-master/next
Signed-off-by: Marc Zyngier <maz@kernel.org>
-rw-r--r--  arch/arm64/include/asm/sysreg.h    3
-rw-r--r--  arch/arm64/kvm/pmu-emul.c         14
-rw-r--r--  arch/arm64/kvm/sys_regs.c         85
3 files changed, 64 insertions, 38 deletions
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 8b5e7e5c3cc8..2fb3f386588c 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -846,7 +846,10 @@
#define ID_DFR0_PERFMON_SHIFT 24
+#define ID_DFR0_PERFMON_8_0 0x3
#define ID_DFR0_PERFMON_8_1 0x4
+#define ID_DFR0_PERFMON_8_4 0x5
+#define ID_DFR0_PERFMON_8_5 0x6
#define ID_ISAR4_SWP_FRAC_SHIFT 28
#define ID_ISAR4_PSR_M_SHIFT 24
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 247422ac78a9..e9ec08b0b070 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -23,11 +23,11 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
static u32 kvm_pmu_event_mask(struct kvm *kvm)
{
switch (kvm->arch.pmuver) {
- case 1: /* ARMv8.0 */
+ case ID_AA64DFR0_PMUVER_8_0:
return GENMASK(9, 0);
- case 4: /* ARMv8.1 */
- case 5: /* ARMv8.4 */
- case 6: /* ARMv8.5 */
+ case ID_AA64DFR0_PMUVER_8_1:
+ case ID_AA64DFR0_PMUVER_8_4:
+ case ID_AA64DFR0_PMUVER_8_5:
return GENMASK(15, 0);
default: /* Shouldn't be here, just for sanity */
WARN_ONCE(1, "Unknown PMU version %d\n", kvm->arch.pmuver);
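
The switch above replaces raw PMU version numbers with the named ID_AA64DFR0_PMUVER_* encodings; the values themselves (1, 4, 5, 6) are unchanged, as the removed comments show. A standalone sketch (plain C, not kernel code) of the event-number widths this selects:

/*
 * Standalone sketch of the event-number widths chosen above. The PMUVER
 * values mirror the ID_AA64DFR0_PMUVER_* encodings named in the hunk
 * (and the raw numbers removed by it).
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

enum { PMUVER_8_0 = 1, PMUVER_8_1 = 4, PMUVER_8_4 = 5, PMUVER_8_5 = 6 };

static uint32_t event_mask(int pmuver)
{
	switch (pmuver) {
	case PMUVER_8_0:
		return GENMASK(9, 0);	/* ARMv8.0: 10-bit event space */
	case PMUVER_8_1:
	case PMUVER_8_4:
	case PMUVER_8_5:
		return GENMASK(15, 0);	/* ARMv8.1+: 16-bit event space */
	default:
		return 0;		/* unknown PMU version */
	}
}

int main(void)
{
	printf("8.0 mask: %#x, 8.4 mask: %#x\n",
	       event_mask(PMUVER_8_0), event_mask(PMUVER_8_4));
	return 0;
}
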
@@ -795,6 +795,12 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
base = 0;
} else {
val = read_sysreg(pmceid1_el0);
+ /*
+ * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
+ * as RAZ
+ */
+ if (vcpu->kvm->arch.pmuver >= ID_AA64DFR0_PMUVER_8_4)
+ val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
base = 32;
}
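
The new PMCEID1_EL0 filtering only matters for ARMv8.4+ PMUs: since PMMIR_EL0 is presented to the guest as RAZ, the STALL_SLOT event must not be advertised either. A minimal sketch of the same masking in plain C, assuming the usual value of the ARMV8_PMUV3_PERFCTR_STALL_SLOT define (0x3F):

/*
 * Sketch (outside the kernel) of the PMCEID1_EL0 filtering added above.
 * PMCEID1_EL0 is a bitmap of common events 32..63; assuming STALL_SLOT
 * is event 0x3F, hiding it means clearing bit 0x3F - 32 = 31 of the
 * value read from hardware.
 */
#include <stdint.h>

#define BIT_ULL(n)			(1ULL << (n))
#define ARMV8_PMUV3_PERFCTR_STALL_SLOT	0x3F	/* assumed event number */

static uint64_t filter_pmceid1(uint64_t hw_val, int pmuver_is_8_4_or_later)
{
	/* PMMIR_EL0 is presented as RAZ, so don't claim STALL_SLOT support */
	if (pmuver_is_8_4_or_later)
		hw_val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
	return hw_val;
}
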
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 7c4f79532406..4f2f1e3145de 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -9,6 +9,7 @@
* Christoffer Dall <c.dall@virtualopensystems.com>
*/
+#include <linux/bitfield.h>
#include <linux/bsearch.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
@@ -700,14 +701,18 @@ static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- u64 pmceid;
+ u64 pmceid, mask, shift;
BUG_ON(p->is_write);
if (pmu_access_el0_disabled(vcpu))
return false;
+ get_access_mask(r, &mask, &shift);
+
pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
+ pmceid &= mask;
+ pmceid >>= shift;
p->regval = pmceid;
@@ -1021,6 +1026,8 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
return true;
}
+#define FEATURE(x) (GENMASK_ULL(x##_SHIFT + 3, x##_SHIFT))
+
/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 read_id_reg(const struct kvm_vcpu *vcpu,
struct sys_reg_desc const *r, bool raz)
@@ -1028,36 +1035,41 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
u32 id = reg_to_encoding(r);
u64 val = raz ? 0 : read_sanitised_ftr_reg(id);
- if (id == SYS_ID_AA64PFR0_EL1) {
+ switch (id) {
+ case SYS_ID_AA64PFR0_EL1:
if (!vcpu_has_sve(vcpu))
- val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
- val &= ~(0xfUL << ID_AA64PFR0_AMU_SHIFT);
- val &= ~(0xfUL << ID_AA64PFR0_CSV2_SHIFT);
- val |= ((u64)vcpu->kvm->arch.pfr0_csv2 << ID_AA64PFR0_CSV2_SHIFT);
- val &= ~(0xfUL << ID_AA64PFR0_CSV3_SHIFT);
- val |= ((u64)vcpu->kvm->arch.pfr0_csv3 << ID_AA64PFR0_CSV3_SHIFT);
- } else if (id == SYS_ID_AA64PFR1_EL1) {
- val &= ~(0xfUL << ID_AA64PFR1_MTE_SHIFT);
- } else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) {
- val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) |
- (0xfUL << ID_AA64ISAR1_API_SHIFT) |
- (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |
- (0xfUL << ID_AA64ISAR1_GPI_SHIFT));
- } else if (id == SYS_ID_AA64DFR0_EL1) {
- u64 cap = 0;
-
- /* Limit guests to PMUv3 for ARMv8.1 */
- if (kvm_vcpu_has_pmu(vcpu))
- cap = ID_AA64DFR0_PMUVER_8_1;
-
+ val &= ~FEATURE(ID_AA64PFR0_SVE);
+ val &= ~FEATURE(ID_AA64PFR0_AMU);
+ val &= ~FEATURE(ID_AA64PFR0_CSV2);
+ val |= FIELD_PREP(FEATURE(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
+ val &= ~FEATURE(ID_AA64PFR0_CSV3);
+ val |= FIELD_PREP(FEATURE(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
+ break;
+ case SYS_ID_AA64PFR1_EL1:
+ val &= ~FEATURE(ID_AA64PFR1_MTE);
+ break;
+ case SYS_ID_AA64ISAR1_EL1:
+ if (!vcpu_has_ptrauth(vcpu))
+ val &= ~(FEATURE(ID_AA64ISAR1_APA) |
+ FEATURE(ID_AA64ISAR1_API) |
+ FEATURE(ID_AA64ISAR1_GPA) |
+ FEATURE(ID_AA64ISAR1_GPI));
+ break;
+ case SYS_ID_AA64DFR0_EL1:
+ /* Limit debug to ARMv8.0 */
+ val &= ~FEATURE(ID_AA64DFR0_DEBUGVER);
+ val |= FIELD_PREP(FEATURE(ID_AA64DFR0_DEBUGVER), 6);
+ /* Limit guests to PMUv3 for ARMv8.4 */
val = cpuid_feature_cap_perfmon_field(val,
- ID_AA64DFR0_PMUVER_SHIFT,
- cap);
- } else if (id == SYS_ID_DFR0_EL1) {
- /* Limit guests to PMUv3 for ARMv8.1 */
+ ID_AA64DFR0_PMUVER_SHIFT,
+ kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_4 : 0);
+ break;
+ case SYS_ID_DFR0_EL1:
+ /* Limit guests to PMUv3 for ARMv8.4 */
val = cpuid_feature_cap_perfmon_field(val,
- ID_DFR0_PERFMON_SHIFT,
- ID_DFR0_PERFMON_8_1);
+ ID_DFR0_PERFMON_SHIFT,
+ kvm_vcpu_has_pmu(vcpu) ? ID_DFR0_PERFMON_8_4 : 0);
+ break;
}
return val;
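
The rewrite above leans on two helpers: FEATURE(x) builds the 4-bit mask for an ID register field from its *_SHIFT define, and FIELD_PREP() places a value into that field. A self-contained sketch of the same mask-and-fill pattern, using CSV2 (bits [59:56] of ID_AA64PFR0_EL1) as the example field and open-coding FIELD_PREP instead of pulling in <linux/bitfield.h>:

/*
 * Sketch of the FEATURE()/FIELD_PREP pattern used in read_id_reg() above.
 * Plain C, not kernel code; only the CSV2 shift value is taken from the
 * arm64 sysreg layout.
 */
#include <stdint.h>

#define GENMASK_ULL(h, l)	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define FEATURE(x)		GENMASK_ULL(x##_SHIFT + 3, x##_SHIFT)

#define ID_AA64PFR0_CSV2_SHIFT	56

static uint64_t set_csv2(uint64_t val, uint64_t csv2)
{
	val &= ~FEATURE(ID_AA64PFR0_CSV2);		/* clear the 4-bit field */
	val |= (csv2 << ID_AA64PFR0_CSV2_SHIFT) &	/* FIELD_PREP equivalent */
	       FEATURE(ID_AA64PFR0_CSV2);
	return val;
}
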
@@ -1493,6 +1505,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
.access = access_pminten, .reg = PMINTENSET_EL1 },
{ PMU_SYS_REG(SYS_PMINTENCLR_EL1),
.access = access_pminten, .reg = PMINTENSET_EL1 },
+ { SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
@@ -1720,7 +1733,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
};
-static bool trap_dbgidr(struct kvm_vcpu *vcpu,
+static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
@@ -1734,7 +1747,7 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu,
p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
(((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
(((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
- | (6 << 16) | (el3 << 14) | (el3 << 12));
+ | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12));
return true;
}
}
@@ -1767,8 +1780,8 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu,
* guest. Revisit this one day, would this principle change.
*/
static const struct sys_reg_desc cp14_regs[] = {
- /* DBGIDR */
- { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
+ /* DBGDIDR */
+ { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
/* DBGDTRRXext */
{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
@@ -1918,8 +1931,8 @@ static const struct sys_reg_desc cp15_regs[] = {
{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
{ Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
- { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
- { Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
+ { AA32(LO), Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
+ { AA32(LO), Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
@@ -1927,6 +1940,10 @@ static const struct sys_reg_desc cp15_regs[] = {
{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },
+ { AA32(HI), Op1( 0), CRn( 9), CRm(14), Op2( 4), access_pmceid },
+ { AA32(HI), Op1( 0), CRn( 9), CRm(14), Op2( 5), access_pmceid },
+ /* PMMIR */
+ { Op1( 0), CRn( 9), CRm(14), Op2( 6), trap_raz_wi },
/* PRRR/MAIR0 */
{ AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
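
With the AA32(LO)/AA32(HI) annotations, one 64-bit PMCEID value now backs both the PMCEID0/1 (low half) and the PMCEID2/3 (high half) AArch32 views, and access_pmceid() applies the mask and shift returned by get_access_mask() before handing the result back. The helper below is a simplified stand-in for that logic, not the kernel implementation:

/*
 * Sketch of how a LO/HI descriptor flag can be turned into a mask and
 * shift for the 32-bit AArch32 views of a 64-bit register value.
 * get_access_mask() here is a hypothetical stand-in for the kernel
 * helper of the same name.
 */
#include <stdint.h>

enum aa32_half { AA32_FULL, AA32_LO, AA32_HI };

static void get_access_mask(enum aa32_half half, uint64_t *mask, uint64_t *shift)
{
	switch (half) {
	case AA32_LO:			/* e.g. PMCEID0 / PMCEID1 */
		*mask = 0xffffffffULL;
		*shift = 0;
		break;
	case AA32_HI:			/* e.g. PMCEID2 / PMCEID3 */
		*mask = 0xffffffffULL << 32;
		*shift = 32;
		break;
	default:			/* full 64-bit AArch64 view */
		*mask = ~0ULL;
		*shift = 0;
	}
}

static uint32_t read_pmceid_view(uint64_t pmceid64, enum aa32_half half)
{
	uint64_t mask, shift;

	get_access_mask(half, &mask, &shift);
	return (uint32_t)((pmceid64 & mask) >> shift);
}
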