author     Oliver Upton  2023-10-30 20:18:23 +0000
committer  Oliver Upton  2023-10-30 20:18:23 +0000
commit     7ff7dfe946ab696a3335a98bdc30210f2b567825 (patch)
tree       ecfc2f2ea6318c8f51a29f8bc89c1e6ade2f58c8 /arch/arm64
parent     d47dcb67fcf619df971f2c6df6ce1a81426917b6 (diff)
parent     ae8d3522e5b7208d238c893a00957864536983dc (diff)
Merge branch kvm-arm64/pmevtyper-filter into kvmarm/next
* kvm-arm64/pmevtyper-filter:
: Fixes to KVM's handling of the PMUv3 exception level filtering bits
:
: - NSH (count at EL2) and M (count at EL3) should be stateful when the
: respective EL is advertised in the ID registers but have no effect on
: event counting.
:
: - NSU and NSK modify the event filtering of EL0 and EL1, respectively.
: Though the kernel may not use these bits, other KVM guests might.
: Implement these bits exactly as written in the pseudocode if EL3 is
: advertised.
KVM: arm64: Add PMU event filter bits required if EL3 is implemented
KVM: arm64: Make PMEVTYPER<n>_EL0.NSH RES0 if EL2 isn't advertised
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
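
For readers unfamiliar with the PMEVTYPER<n>_EL0 filter encoding, here is a minimal standalone C sketch of the EL0/EL1 filter resolution described in the merge message above. It mirrors the (u != nsu) / (p != nsk) logic the diff below feeds into perf_event_attr; the ARMV8_PMU_* constants are redefined locally with the architectural bit positions (P[31], U[30], NSK[29], NSU[28]) only so the example builds on its own, and resolve_filter() is an illustrative helper, not a KVM function.

/*
 * Standalone sketch of the PMEVTYPER exception level filter resolution.
 * Bit positions follow the architectural PMEVTYPER<n>_EL0 layout; they are
 * redefined here purely so this compiles on its own. Illustration only,
 * not the in-kernel code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ARMV8_PMU_EXCLUDE_EL1      (1ULL << 31)   /* P   */
#define ARMV8_PMU_EXCLUDE_EL0      (1ULL << 30)   /* U   */
#define ARMV8_PMU_EXCLUDE_NS_EL1   (1ULL << 29)   /* NSK */
#define ARMV8_PMU_EXCLUDE_NS_EL0   (1ULL << 28)   /* NSU */

/*
 * When EL3 is advertised to the guest, an event is excluded at EL1/EL0 only
 * if the exclude bit and its Non-secure counterpart disagree -- the same XOR
 * the merge applies via attr.exclude_kernel / attr.exclude_user.
 */
static void resolve_filter(uint64_t evtyper, bool *excl_kernel, bool *excl_user)
{
        bool p   = evtyper & ARMV8_PMU_EXCLUDE_EL1;
        bool u   = evtyper & ARMV8_PMU_EXCLUDE_EL0;
        bool nsk = evtyper & ARMV8_PMU_EXCLUDE_NS_EL1;
        bool nsu = evtyper & ARMV8_PMU_EXCLUDE_NS_EL0;

        *excl_kernel = (p != nsk);
        *excl_user   = (u != nsu);
}

int main(void)
{
        bool k, u;

        /* P set alone: EL1 is filtered out for the (non-secure) guest. */
        resolve_filter(ARMV8_PMU_EXCLUDE_EL1, &k, &u);
        printf("P only  -> exclude_kernel=%d exclude_user=%d\n", k, u);

        /* P and NSK both set: the two cancel, so EL1 is counted again. */
        resolve_filter(ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMU_EXCLUDE_NS_EL1, &k, &u);
        printf("P + NSK -> exclude_kernel=%d exclude_user=%d\n", k, u);

        return 0;
}

When EL3 is not advertised, NSK and NSU are RES0, so the XOR degrades to the previous behaviour of excluding an EL whenever its exclude bit is set.
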
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/kvm/pmu-emul.c | 36
-rw-r--r--  arch/arm64/kvm/sys_regs.c |  8
2 files changed, 33 insertions, 11 deletions
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 6b066e04dc5d..e6be14e38cee 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -60,6 +60,23 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm)
 	return __kvm_pmu_event_mask(pmuver);
 }
 
+u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
+{
+	u64 mask = ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMU_EXCLUDE_EL0 |
+		   kvm_pmu_event_mask(kvm);
+	u64 pfr0 = IDREG(kvm, SYS_ID_AA64PFR0_EL1);
+
+	if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL2, pfr0))
+		mask |= ARMV8_PMU_INCLUDE_EL2;
+
+	if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL3, pfr0))
+		mask |= ARMV8_PMU_EXCLUDE_NS_EL0 |
+			ARMV8_PMU_EXCLUDE_NS_EL1 |
+			ARMV8_PMU_EXCLUDE_EL3;
+
+	return mask;
+}
+
 /**
  * kvm_pmc_is_64bit - determine if counter is 64bit
  * @pmc: counter context
@@ -584,6 +601,7 @@ static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc)
 	struct perf_event *event;
 	struct perf_event_attr attr;
 	u64 eventsel, reg, data;
+	bool p, u, nsk, nsu;
 
 	reg = counter_index_to_evtreg(pmc->idx);
 	data = __vcpu_sys_reg(vcpu, reg);
@@ -610,13 +628,18 @@ static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc)
 	    !test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
 		return;
 
+	p = data & ARMV8_PMU_EXCLUDE_EL1;
+	u = data & ARMV8_PMU_EXCLUDE_EL0;
+	nsk = data & ARMV8_PMU_EXCLUDE_NS_EL1;
+	nsu = data & ARMV8_PMU_EXCLUDE_NS_EL0;
+
 	memset(&attr, 0, sizeof(struct perf_event_attr));
 	attr.type = arm_pmu->pmu.type;
 	attr.size = sizeof(attr);
 	attr.pinned = 1;
 	attr.disabled = !kvm_pmu_counter_is_enabled(pmc);
-	attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
-	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
+	attr.exclude_user = (u != nsu);
+	attr.exclude_kernel = (p != nsk);
 	attr.exclude_hv = 1; /* Don't count EL2 events */
 	attr.exclude_host = 1; /* Don't count host events */
 	attr.config = eventsel;
@@ -657,18 +680,13 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 				    u64 select_idx)
 {
 	struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, select_idx);
-	u64 reg, mask;
+	u64 reg;
 
 	if (!kvm_vcpu_has_pmu(vcpu))
 		return;
 
-	mask = ARMV8_PMU_EVTYPE_MASK;
-	mask &= ~ARMV8_PMU_EVTYPE_EVENT;
-	mask |= kvm_pmu_event_mask(vcpu->kvm);
-
 	reg = counter_index_to_evtreg(pmc->idx);
-
-	__vcpu_sys_reg(vcpu, reg) = data & mask;
+	__vcpu_sys_reg(vcpu, reg) = data & kvm_pmu_evtyper_mask(vcpu->kvm);
 
 	kvm_pmu_create_perf_event(pmc);
 }
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 818a52e257ed..4e77d28a4e8b 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -746,8 +746,12 @@ static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 
 static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
+	/* This thing will UNDEF, who cares about the reset value? */
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return 0;
+
 	reset_unknown(vcpu, r);
-	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_EVTYPE_MASK;
+	__vcpu_sys_reg(vcpu, r->reg) &= kvm_pmu_evtyper_mask(vcpu->kvm);
 
 	return __vcpu_sys_reg(vcpu, r->reg);
 }
@@ -988,7 +992,7 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
 		kvm_vcpu_pmu_restore_guest(vcpu);
 	} else {
-		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
+		p->regval = __vcpu_sys_reg(vcpu, reg);
 	}
 
 	return true;
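
As a companion to the diff, the following is a minimal standalone model of the write-mask behaviour introduced by kvm_pmu_evtyper_mask(): the EL2/EL3 filter bits survive an emulated PMEVTYPER<n>_EL0 write only when the corresponding EL is advertised to the guest. struct vm_model, evtyper_mask() and the simplified 16-bit event field are illustrative stand-ins, not KVM's data structures; bit positions follow the architectural PMEVTYPER layout.

/*
 * Standalone model of the PMEVTYPER write mask: EL2/EL3 filter bits are kept
 * only when the corresponding EL is advertised to the guest. Simplified
 * stand-in for kvm_pmu_evtyper_mask(), not the in-kernel code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXCLUDE_EL1    (1ULL << 31)   /* P   */
#define EXCLUDE_EL0    (1ULL << 30)   /* U   */
#define EXCLUDE_NS_EL1 (1ULL << 29)   /* NSK */
#define EXCLUDE_NS_EL0 (1ULL << 28)   /* NSU */
#define INCLUDE_EL2    (1ULL << 27)   /* NSH */
#define EXCLUDE_EL3    (1ULL << 26)   /* M   */
#define EVENT_MASK     0xffffULL      /* simplified evtCount field */

struct vm_model {
        bool has_el2;   /* virtual EL2 advertised in ID_AA64PFR0_EL1 */
        bool has_el3;   /* EL3 advertised in ID_AA64PFR0_EL1 */
};

/* Base bits are always writable; EL2/EL3 bits are added only on demand. */
static uint64_t evtyper_mask(const struct vm_model *vm)
{
        uint64_t mask = EXCLUDE_EL1 | EXCLUDE_EL0 | EVENT_MASK;

        if (vm->has_el2)
                mask |= INCLUDE_EL2;
        if (vm->has_el3)
                mask |= EXCLUDE_NS_EL0 | EXCLUDE_NS_EL1 | EXCLUDE_EL3;

        return mask;
}

int main(void)
{
        struct vm_model plain  = { .has_el2 = false, .has_el3 = false };
        struct vm_model nested = { .has_el2 = true,  .has_el3 = false };
        uint64_t write = EXCLUDE_EL1 | INCLUDE_EL2 | 0x11; /* P, NSH, event 0x11 */

        /* Without a virtual EL2, NSH is RES0 and is stripped on the write. */
        printf("no vEL2: stored %#llx\n",
               (unsigned long long)(write & evtyper_mask(&plain)));

        /* With a virtual EL2 advertised, NSH is kept (stateful). */
        printf("vEL2:    stored %#llx\n",
               (unsigned long long)(write & evtyper_mask(&nested)));

        return 0;
}

Because reads of the emulated register now return the stored value unmasked (see the access_pmu_evtyper hunk), sanitising at write and reset time is what keeps unadvertised filter bits reading as zero.
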