author      Linus Torvalds  2023-02-18 10:10:49 -0800
committer   Linus Torvalds  2023-02-18 10:10:49 -0800
commit      0c2822b116e300ca6e3b7f98623deb760a93a1d2 (patch)
tree        aa767bb4071c6d59c1b53912ee06a2fd2250f560 /arch
parent      0e9fd589e61dace0dcc9848fbf6eb38f16d25f08 (diff)
parent      853e2dac25c15f7431dfe59805de1bada34c96e9 (diff)
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 regression fix from Will Deacon:
 "Apologies for the _extremely_ late pull request here, but we had a
  'perf' (i.e. CPU PMU) regression on the Apple M1 reported on Wednesday
  [1] which was introduced by bd2756811766 ("perf: Rewrite core context
  handling") during the merge window.

  Mark and I looked into this and noticed an additional problem caused by
  the same patch, where the 'CHAIN' event (used to combine two adjacent
  32-bit counters into a single 64-bit counter) was not being filtered
  correctly. Mark posted a series on Thursday [2] which addresses both of
  these regressions and I queued it the same day.

  The changes are small, self-contained and have been confirmed to fix
  the original regression.

  Summary:

   - Fix 'perf' regression for non-standard CPU PMU hardware (i.e.
     Apple M1)"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: perf: reject CHAIN events at creation time
  arm_pmu: fix event CPU filtering
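As context for the CHAIN description above (an illustrative aside, not part of the pull): on ARMv8 PMUs the CHAIN event is programmed on an odd-numbered counter and counts overflows of the preceding even-numbered counter, so the pair behaves as one 64-bit counter. A minimal sketch of how such a pair could be read, with the hypothetical helper read_counter32() standing in for the real per-counter accessors:

/*
 * Illustrative sketch only, not code from this patch. The odd counter
 * (even_idx + 1) holds the overflow count of the even counter; re-reading
 * the high half catches a carry that lands between the two reads.
 */
static u64 chained_counter_read(int even_idx)
{
	u32 hi, lo, hi2;

	do {
		hi  = read_counter32(even_idx + 1);	/* upper 32 bits (overflows) */
		lo  = read_counter32(even_idx);		/* low 32 bits */
		hi2 = read_counter32(even_idx + 1);	/* re-read to detect a carry */
	} while (hi != hi2);

	return ((u64)hi << 32) | lo;
}

Opening the CHAIN event on its own never makes sense, which is why the diff below rejects it at event creation time.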
Diffstat (limited to 'arch')
-rw-r--r--   arch/arm64/kernel/perf_event.c   15
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index a5193f2146a6..dde06c0f97f3 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -1023,12 +1023,6 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
return 0;
}
-static bool armv8pmu_filter(struct pmu *pmu, int cpu)
-{
- struct arm_pmu *armpmu = to_arm_pmu(pmu);
- return !cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus);
-}
-
static void armv8pmu_reset(void *info)
{
struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
@@ -1069,6 +1063,14 @@ static int __armv8_pmuv3_map_event(struct perf_event *event,
&armv8_pmuv3_perf_cache_map,
ARMV8_PMU_EVTYPE_EVENT);
+ /*
+ * CHAIN events only work when paired with an adjacent counter, and it
+ * never makes sense for a user to open one in isolation, as they'll be
+ * rotated arbitrarily.
+ */
+ if (hw_event_id == ARMV8_PMUV3_PERFCTR_CHAIN)
+ return -EINVAL;
+
if (armv8pmu_event_is_64bit(event))
event->hw.flags |= ARMPMU_EVT_64BIT;
@@ -1258,7 +1260,6 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name,
cpu_pmu->stop = armv8pmu_stop;
cpu_pmu->reset = armv8pmu_reset;
cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
- cpu_pmu->filter = armv8pmu_filter;
cpu_pmu->pmu.event_idx = armv8pmu_user_event_idx;
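A note on the filtering hunks above: the deleted armv8pmu_filter() tested smp_processor_id() rather than the cpu argument it was handed, and this diff simply removes the callback. The companion "arm_pmu: fix event CPU filtering" patch presumably moves the filtering into the shared arm_pmu driver (drivers/perf/arm_pmu.c), which sits outside the arch/-limited diffstat and so is not shown here. Under that assumption, a rough sketch of a correct per-CPU filter callback:

/*
 * Sketch only, not the verbatim companion patch: the essential point is
 * that pmu->filter() must test the cpu it is asked about, not the CPU it
 * happens to be running on.
 */
static bool armpmu_filter(struct pmu *pmu, int cpu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);

	/*
	 * Returning true filters the event out on a CPU this PMU does not
	 * cover (matching the polarity of the deleted callback above).
	 */
	return !cpumask_test_cpu(cpu, &armpmu->supported_cpus);
}

The other hunk means that perf_event_open() on a raw CHAIN event now fails with EINVAL at creation time, instead of accepting an unpaired counter that would be rotated arbitrarily.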