author | Linus Torvalds | 2023-10-30 13:44:35 -1000
---|---|---
committer | Linus Torvalds | 2023-10-30 13:44:35 -1000
commit | bceb7accb7b60f9844807c7433af06493ed058b7 (patch)
tree | 1a0dfc1c85b3dabb2cac578df90a79d336b58dc9 /arch/x86
parent | cd063c8b9e1e95560e90bac7816234d8b2ee2897 (diff)
parent | 744940f1921c8feb90e3c4bcc1e153fdd6e10fe2 (diff)
Merge tag 'perf-core-2023-10-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull performance event updates from Ingo Molnar:
- Add AMD Unified Memory Controller (UMC) events introduced with Zen 4
- Simplify & clean up the uncore management code
- Fall back from RDPMC to RDMSR on certain uncore PMUs (see the sketch after this list)
- Improve per-package and cstate event reading
- Extend the Intel ref-cycles event to GP counters
- Fix Intel MTL event constraints
- Improve the Intel hybrid CPU handling code
- Micro-optimize the RAPL code
- Optimize perf_cgroup_switch()
- Improve large AUX area error handling
- Misc fixes and cleanups
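
The RDPMC-to-RDMSR fallback mentioned in the list above is visible in the amd_uncore_read() hunk further down: uncore PMUs without an RDPMC assignment get a negative event_base_rdpmc and are read through their PERF_CTR MSR instead. A condensed, illustrative sketch of that pattern (kernel-style C, assuming the surrounding uncore.c context, i.e. COUNTER_SHIFT and the rdmsrl()/rdpmcl()/local64_*() helpers):

```c
/*
 * Illustrative sketch, not the verbatim patch: if no RDPMC index was
 * assigned (event_base_rdpmc < 0), read the count from the PERF_CTR
 * MSR instead of issuing RDPMC.
 */
static void uncore_read_sketch(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;
	s64 delta;

	prev = local64_read(&hwc->prev_count);

	if (hwc->event_base_rdpmc < 0)		/* no RDPMC assignment */
		rdmsrl(hwc->event_base, new);	/* MSR fallback */
	else
		rdpmcl(hwc->event_base_rdpmc, new);

	local64_set(&hwc->prev_count, new);

	/* the counters are 48 bits wide; shift to sign-extend the delta */
	delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
}
```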
* tag 'perf-core-2023-10-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (26 commits)
perf/x86/amd/uncore: Pass through error code for initialization failures, instead of -ENODEV
perf/x86/amd/uncore: Fix uninitialized return value in amd_uncore_init()
x86/cpu: Fix the AMD Fam 17h, Fam 19h, Zen2 and Zen4 MSR enumerations
perf: Optimize perf_cgroup_switch()
perf/x86/amd/uncore: Add memory controller support
perf/x86/amd/uncore: Add group exclusivity
perf/x86/amd/uncore: Use rdmsr if rdpmc is unavailable
perf/x86/amd/uncore: Move discovery and registration
perf/x86/amd/uncore: Refactor uncore management
perf/core: Allow reading package events from perf_event_read_local
perf/x86/cstate: Allow reading the package statistics from local CPU
perf/x86/intel/pt: Fix kernel-doc comments
perf/x86/rapl: Annotate 'struct rapl_pmus' with __counted_by
perf/core: Rename perf_proc_update_handler() -> perf_event_max_sample_rate_handler(), for readability
perf/x86/rapl: Fix "Using plain integer as NULL pointer" Sparse warning
perf/x86/rapl: Use local64_try_cmpxchg in rapl_event_update()
perf/x86/rapl: Stop doing cpu_relax() in the local64_cmpxchg() loop in rapl_event_update()
perf/core: Bail out early if the request AUX area is out of bound
perf/x86/intel: Extend the ref-cycles event to GP counters
perf/x86/intel: Fix broken fixed event constraints extension
...
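
The rapl_event_update() changes in the list above swap an open-coded local64_cmpxchg() retry loop (with cpu_relax()) for local64_try_cmpxchg(), which reloads the expected value itself on failure. The rapl.c hunks are not part of the excerpt shown here, so the following is only a hedged sketch of the resulting pattern, with the delta and energy-unit scaling elided:

```c
/*
 * Hedged sketch of the try_cmpxchg-based update loop; field names follow
 * the usual hw_perf_event layout, and the scaling that rapl.c applies to
 * the raw energy count is left out.
 */
static u64 rapl_update_sketch(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;

	prev = local64_read(&hwc->prev_count);
	do {
		rdmsrl(event->hw.event_base, new);
		/* on failure, local64_try_cmpxchg() updates 'prev' for us */
	} while (!local64_try_cmpxchg(&hwc->prev_count, &prev, new));

	/* delta computation and RAPL energy-unit scaling elided */
	return new;
}
```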
Diffstat (limited to 'arch/x86')
-rw-r--r-- | arch/x86/events/amd/uncore.c | 1060
-rw-r--r-- | arch/x86/events/core.c | 6
-rw-r--r-- | arch/x86/events/intel/core.c | 475
-rw-r--r-- | arch/x86/events/intel/cstate.c | 3
-rw-r--r-- | arch/x86/events/intel/ds.c | 4
-rw-r--r-- | arch/x86/events/intel/pt.c | 8
-rw-r--r-- | arch/x86/events/perf_event.h | 37
-rw-r--r-- | arch/x86/events/rapl.c | 22
-rw-r--r-- | arch/x86/include/asm/msr-index.h | 10
-rw-r--r-- | arch/x86/include/asm/perf_event.h | 9
10 files changed, 1007 insertions, 627 deletions
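
Before the large amd/uncore.c rewrite below, it helps to know its overall shape: the per-PMU globals are replaced by one struct amd_uncore per uncore type (DF, L3, UMC), each carrying scan/init/move/free callbacks that the CPU hotplug notifiers simply iterate over. A condensed sketch with names taken from the diff (not a drop-in excerpt; types such as amd_uncore_pmu and amd_uncore_info are elided):

```c
/* Condensed from the diff below. */
struct amd_uncore {
	union amd_uncore_info * __percpu info;	/* per-CPU topology/counter info */
	struct amd_uncore_pmu *pmus;		/* PMU instance(s) of this type */
	unsigned int num_pmus;
	bool init_done;
	void (*scan)(struct amd_uncore *uncore, unsigned int cpu);	/* CPUHP starting */
	int  (*init)(struct amd_uncore *uncore, unsigned int cpu);	/* CPUHP online */
	void (*move)(struct amd_uncore *uncore, unsigned int cpu);	/* CPU down prepare */
	void (*free)(struct amd_uncore *uncore, unsigned int cpu);	/* CPU dead */
};

/* Each hotplug callback walks the fixed table of uncore types: */
static int amd_uncore_cpu_online(unsigned int cpu)
{
	struct amd_uncore *uncore;
	int i;

	for (i = 0; i < UNCORE_TYPE_MAX; i++) {
		uncore = &uncores[i];
		if (uncore->init(uncore, cpu))
			break;
	}

	return 0;
}
```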
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c index 83f15fe411b3..4429eac00185 100644 --- a/arch/x86/events/amd/uncore.c +++ b/arch/x86/events/amd/uncore.c @@ -26,57 +26,66 @@ #define RDPMC_BASE_LLC 10 #define COUNTER_SHIFT 16 +#define UNCORE_NAME_LEN 16 +#define UNCORE_GROUP_MAX 256 #undef pr_fmt #define pr_fmt(fmt) "amd_uncore: " fmt static int pmu_version; -static int num_counters_llc; -static int num_counters_nb; -static bool l3_mask; -static HLIST_HEAD(uncore_unused_list); - -struct amd_uncore { - int id; +struct amd_uncore_ctx { int refcnt; int cpu; + struct perf_event **events; + struct hlist_node node; +}; + +struct amd_uncore_pmu { + char name[UNCORE_NAME_LEN]; int num_counters; int rdpmc_base; u32 msr_base; - cpumask_t *active_mask; - struct pmu *pmu; - struct perf_event **events; - struct hlist_node node; + int group; + cpumask_t active_mask; + struct pmu pmu; + struct amd_uncore_ctx * __percpu *ctx; }; -static struct amd_uncore * __percpu *amd_uncore_nb; -static struct amd_uncore * __percpu *amd_uncore_llc; +enum { + UNCORE_TYPE_DF, + UNCORE_TYPE_L3, + UNCORE_TYPE_UMC, -static struct pmu amd_nb_pmu; -static struct pmu amd_llc_pmu; + UNCORE_TYPE_MAX +}; -static cpumask_t amd_nb_active_mask; -static cpumask_t amd_llc_active_mask; +union amd_uncore_info { + struct { + u64 aux_data:32; /* auxiliary data */ + u64 num_pmcs:8; /* number of counters */ + u64 gid:8; /* group id */ + u64 cid:8; /* context id */ + } split; + u64 full; +}; -static bool is_nb_event(struct perf_event *event) -{ - return event->pmu->type == amd_nb_pmu.type; -} +struct amd_uncore { + union amd_uncore_info * __percpu info; + struct amd_uncore_pmu *pmus; + unsigned int num_pmus; + bool init_done; + void (*scan)(struct amd_uncore *uncore, unsigned int cpu); + int (*init)(struct amd_uncore *uncore, unsigned int cpu); + void (*move)(struct amd_uncore *uncore, unsigned int cpu); + void (*free)(struct amd_uncore *uncore, unsigned int cpu); +}; -static bool is_llc_event(struct perf_event *event) -{ - return event->pmu->type == amd_llc_pmu.type; -} +static struct amd_uncore uncores[UNCORE_TYPE_MAX]; -static struct amd_uncore *event_to_amd_uncore(struct perf_event *event) +static struct amd_uncore_pmu *event_to_amd_uncore_pmu(struct perf_event *event) { - if (is_nb_event(event) && amd_uncore_nb) - return *per_cpu_ptr(amd_uncore_nb, event->cpu); - else if (is_llc_event(event) && amd_uncore_llc) - return *per_cpu_ptr(amd_uncore_llc, event->cpu); - - return NULL; + return container_of(event->pmu, struct amd_uncore_pmu, pmu); } static void amd_uncore_read(struct perf_event *event) @@ -91,7 +100,16 @@ static void amd_uncore_read(struct perf_event *event) */ prev = local64_read(&hwc->prev_count); - rdpmcl(hwc->event_base_rdpmc, new); + + /* + * Some uncore PMUs do not have RDPMC assignments. In such cases, + * read counts directly from the corresponding PERF_CTR. 
+ */ + if (hwc->event_base_rdpmc < 0) + rdmsrl(hwc->event_base, new); + else + rdpmcl(hwc->event_base_rdpmc, new); + local64_set(&hwc->prev_count, new); delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT); delta >>= COUNTER_SHIFT; @@ -118,7 +136,7 @@ static void amd_uncore_stop(struct perf_event *event, int flags) hwc->state |= PERF_HES_STOPPED; if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { - amd_uncore_read(event); + event->pmu->read(event); hwc->state |= PERF_HES_UPTODATE; } } @@ -126,15 +144,16 @@ static void amd_uncore_stop(struct perf_event *event, int flags) static int amd_uncore_add(struct perf_event *event, int flags) { int i; - struct amd_uncore *uncore = event_to_amd_uncore(event); + struct amd_uncore_pmu *pmu = event_to_amd_uncore_pmu(event); + struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu); struct hw_perf_event *hwc = &event->hw; /* are we already assigned? */ - if (hwc->idx != -1 && uncore->events[hwc->idx] == event) + if (hwc->idx != -1 && ctx->events[hwc->idx] == event) goto out; - for (i = 0; i < uncore->num_counters; i++) { - if (uncore->events[i] == event) { + for (i = 0; i < pmu->num_counters; i++) { + if (ctx->events[i] == event) { hwc->idx = i; goto out; } @@ -142,8 +161,8 @@ static int amd_uncore_add(struct perf_event *event, int flags) /* if not, take the first available counter */ hwc->idx = -1; - for (i = 0; i < uncore->num_counters; i++) { - if (cmpxchg(&uncore->events[i], NULL, event) == NULL) { + for (i = 0; i < pmu->num_counters; i++) { + if (cmpxchg(&ctx->events[i], NULL, event) == NULL) { hwc->idx = i; break; } @@ -153,23 +172,16 @@ out: if (hwc->idx == -1) return -EBUSY; - hwc->config_base = uncore->msr_base + (2 * hwc->idx); - hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx); - hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx; + hwc->config_base = pmu->msr_base + (2 * hwc->idx); + hwc->event_base = pmu->msr_base + 1 + (2 * hwc->idx); + hwc->event_base_rdpmc = pmu->rdpmc_base + hwc->idx; hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; - /* - * The first four DF counters are accessible via RDPMC index 6 to 9 - * followed by the L3 counters from index 10 to 15. For processors - * with more than four DF counters, the DF RDPMC assignments become - * discontiguous as the additional counters are accessible starting - * from index 16. - */ - if (is_nb_event(event) && hwc->idx >= NUM_COUNTERS_NB) - hwc->event_base_rdpmc += NUM_COUNTERS_L3; + if (pmu->rdpmc_base < 0) + hwc->event_base_rdpmc = -1; if (flags & PERF_EF_START) - amd_uncore_start(event, PERF_EF_RELOAD); + event->pmu->start(event, PERF_EF_RELOAD); return 0; } @@ -177,55 +189,36 @@ out: static void amd_uncore_del(struct perf_event *event, int flags) { int i; - struct amd_uncore *uncore = event_to_amd_uncore(event); + struct amd_uncore_pmu *pmu = event_to_amd_uncore_pmu(event); + struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu); struct hw_perf_event *hwc = &event->hw; - amd_uncore_stop(event, PERF_EF_UPDATE); + event->pmu->stop(event, PERF_EF_UPDATE); - for (i = 0; i < uncore->num_counters; i++) { - if (cmpxchg(&uncore->events[i], event, NULL) == event) + for (i = 0; i < pmu->num_counters; i++) { + if (cmpxchg(&ctx->events[i], event, NULL) == event) break; } hwc->idx = -1; } -/* - * Return a full thread and slice mask unless user - * has provided them - */ -static u64 l3_thread_slice_mask(u64 config) -{ - if (boot_cpu_data.x86 <= 0x18) - return ((config & AMD64_L3_SLICE_MASK) ? 
: AMD64_L3_SLICE_MASK) | - ((config & AMD64_L3_THREAD_MASK) ? : AMD64_L3_THREAD_MASK); - - /* - * If the user doesn't specify a threadmask, they're not trying to - * count core 0, so we enable all cores & threads. - * We'll also assume that they want to count slice 0 if they specify - * a threadmask and leave sliceid and enallslices unpopulated. - */ - if (!(config & AMD64_L3_F19H_THREAD_MASK)) - return AMD64_L3_F19H_THREAD_MASK | AMD64_L3_EN_ALL_SLICES | - AMD64_L3_EN_ALL_CORES; - - return config & (AMD64_L3_F19H_THREAD_MASK | AMD64_L3_SLICEID_MASK | - AMD64_L3_EN_ALL_CORES | AMD64_L3_EN_ALL_SLICES | - AMD64_L3_COREID_MASK); -} - static int amd_uncore_event_init(struct perf_event *event) { - struct amd_uncore *uncore; + struct amd_uncore_pmu *pmu; + struct amd_uncore_ctx *ctx; struct hw_perf_event *hwc = &event->hw; - u64 event_mask = AMD64_RAW_EVENT_MASK_NB; if (event->attr.type != event->pmu->type) return -ENOENT; - if (pmu_version >= 2 && is_nb_event(event)) - event_mask = AMD64_PERFMON_V2_RAW_EVENT_MASK_NB; + if (event->cpu < 0) + return -EINVAL; + + pmu = event_to_amd_uncore_pmu(event); + ctx = *per_cpu_ptr(pmu->ctx, event->cpu); + if (!ctx) + return -ENODEV; /* * NB and Last level cache counters (MSRs) are shared across all cores @@ -235,28 +228,14 @@ static int amd_uncore_event_init(struct perf_event *event) * out. So we do not support sampling and per-thread events via * CAP_NO_INTERRUPT, and we do not enable counter overflow interrupts: */ - hwc->config = event->attr.config & event_mask; + hwc->config = event->attr.config; hwc->idx = -1; - if (event->cpu < 0) - return -EINVAL; - - /* - * SliceMask and ThreadMask need to be set for certain L3 events. - * For other events, the two fields do not affect the count. - */ - if (l3_mask && is_llc_event(event)) - hwc->config |= l3_thread_slice_mask(event->attr.config); - - uncore = event_to_amd_uncore(event); - if (!uncore) - return -ENODEV; - /* * since request can come in to any of the shared cores, we will remap * to a single common cpu. 
*/ - event->cpu = uncore->cpu; + event->cpu = ctx->cpu; return 0; } @@ -278,17 +257,10 @@ static ssize_t amd_uncore_attr_show_cpumask(struct device *dev, struct device_attribute *attr, char *buf) { - cpumask_t *active_mask; - struct pmu *pmu = dev_get_drvdata(dev); + struct pmu *ptr = dev_get_drvdata(dev); + struct amd_uncore_pmu *pmu = container_of(ptr, struct amd_uncore_pmu, pmu); - if (pmu->type == amd_nb_pmu.type) - active_mask = &amd_nb_active_mask; - else if (pmu->type == amd_llc_pmu.type) - active_mask = &amd_llc_active_mask; - else - return 0; - - return cpumap_print_to_pagebuf(true, buf, active_mask); + return cpumap_print_to_pagebuf(true, buf, &pmu->active_mask); } static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL); @@ -315,7 +287,7 @@ static struct device_attribute format_attr_##_var = \ DEFINE_UNCORE_FORMAT_ATTR(event12, event, "config:0-7,32-35"); DEFINE_UNCORE_FORMAT_ATTR(event14, event, "config:0-7,32-35,59-60"); /* F17h+ DF */ DEFINE_UNCORE_FORMAT_ATTR(event14v2, event, "config:0-7,32-37"); /* PerfMonV2 DF */ -DEFINE_UNCORE_FORMAT_ATTR(event8, event, "config:0-7"); /* F17h+ L3 */ +DEFINE_UNCORE_FORMAT_ATTR(event8, event, "config:0-7"); /* F17h+ L3, PerfMonV2 UMC */ DEFINE_UNCORE_FORMAT_ATTR(umask8, umask, "config:8-15"); DEFINE_UNCORE_FORMAT_ATTR(umask12, umask, "config:8-15,24-27"); /* PerfMonV2 DF */ DEFINE_UNCORE_FORMAT_ATTR(coreid, coreid, "config:42-44"); /* F19h L3 */ @@ -325,6 +297,7 @@ DEFINE_UNCORE_FORMAT_ATTR(threadmask2, threadmask, "config:56-57"); /* F19h L DEFINE_UNCORE_FORMAT_ATTR(enallslices, enallslices, "config:46"); /* F19h L3 */ DEFINE_UNCORE_FORMAT_ATTR(enallcores, enallcores, "config:47"); /* F19h L3 */ DEFINE_UNCORE_FORMAT_ATTR(sliceid, sliceid, "config:48-50"); /* F19h L3 */ +DEFINE_UNCORE_FORMAT_ATTR(rdwrmask, rdwrmask, "config:8-9"); /* PerfMonV2 UMC */ /* Common DF and NB attributes */ static struct attribute *amd_uncore_df_format_attr[] = { @@ -341,6 +314,13 @@ static struct attribute *amd_uncore_l3_format_attr[] = { NULL, }; +/* Common UMC attributes */ +static struct attribute *amd_uncore_umc_format_attr[] = { + &format_attr_event8.attr, /* event */ + &format_attr_rdwrmask.attr, /* rdwrmask */ + NULL, +}; + /* F17h unique L3 attributes */ static struct attribute *amd_f17h_uncore_l3_format_attr[] = { &format_attr_slicemask.attr, /* slicemask */ @@ -378,6 +358,11 @@ static struct attribute_group amd_f19h_uncore_l3_format_group = { .is_visible = amd_f19h_uncore_is_visible, }; +static struct attribute_group amd_uncore_umc_format_group = { + .name = "format", + .attrs = amd_uncore_umc_format_attr, +}; + static const struct attribute_group *amd_uncore_df_attr_groups[] = { &amd_uncore_attr_group, &amd_uncore_df_format_group, @@ -396,259 +381,636 @@ static const struct attribute_group *amd_uncore_l3_attr_update[] = { NULL, }; -static struct pmu amd_nb_pmu = { - .task_ctx_nr = perf_invalid_context, - .attr_groups = amd_uncore_df_attr_groups, - .name = "amd_nb", - .event_init = amd_uncore_event_init, - .add = amd_uncore_add, - .del = amd_uncore_del, - .start = amd_uncore_start, - .stop = amd_uncore_stop, - .read = amd_uncore_read, - .capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT, - .module = THIS_MODULE, +static const struct attribute_group *amd_uncore_umc_attr_groups[] = { + &amd_uncore_attr_group, + &amd_uncore_umc_format_group, + NULL, }; -static struct pmu amd_llc_pmu = { - .task_ctx_nr = perf_invalid_context, - .attr_groups = amd_uncore_l3_attr_groups, - .attr_update = amd_uncore_l3_attr_update, - .name = 
"amd_l2", - .event_init = amd_uncore_event_init, - .add = amd_uncore_add, - .del = amd_uncore_del, - .start = amd_uncore_start, - .stop = amd_uncore_stop, - .read = amd_uncore_read, - .capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT, - .module = THIS_MODULE, -}; +static __always_inline +int amd_uncore_ctx_cid(struct amd_uncore *uncore, unsigned int cpu) +{ + union amd_uncore_info *info = per_cpu_ptr(uncore->info, cpu); + return info->split.cid; +} -static struct amd_uncore *amd_uncore_alloc(unsigned int cpu) +static __always_inline +int amd_uncore_ctx_gid(struct amd_uncore *uncore, unsigned int cpu) { - return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL, - cpu_to_node(cpu)); + union amd_uncore_info *info = per_cpu_ptr(uncore->info, cpu); + return info->split.gid; } -static inline struct perf_event ** -amd_uncore_events_alloc(unsigned int num, unsigned int cpu) +static __always_inline +int amd_uncore_ctx_num_pmcs(struct amd_uncore *uncore, unsigned int cpu) { - return kzalloc_node(sizeof(struct perf_event *) * num, GFP_KERNEL, - cpu_to_node(cpu)); + union amd_uncore_info *info = per_cpu_ptr(uncore->info, cpu); + return info->split.num_pmcs; } -static int amd_uncore_cpu_up_prepare(unsigned int cpu) +static void amd_uncore_ctx_free(struct amd_uncore *uncore, unsigned int cpu) { - struct amd_uncore *uncore_nb = NULL, *uncore_llc = NULL; + struct amd_uncore_pmu *pmu; + struct amd_uncore_ctx *ctx; + int i; - if (amd_uncore_nb) { - *per_cpu_ptr(amd_uncore_nb, cpu) = NULL; - uncore_nb = amd_uncore_alloc(cpu); - if (!uncore_nb) - goto fail; - uncore_nb->cpu = cpu; - uncore_nb->num_counters = num_counters_nb; - uncore_nb->rdpmc_base = RDPMC_BASE_NB; - uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL; - uncore_nb->active_mask = &amd_nb_active_mask; - uncore_nb->pmu = &amd_nb_pmu; - uncore_nb->events = amd_uncore_events_alloc(num_counters_nb, cpu); - if (!uncore_nb->events) - goto fail; - uncore_nb->id = -1; - *per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb; + if (!uncore->init_done) + return; + + for (i = 0; i < uncore->num_pmus; i++) { + pmu = &uncore->pmus[i]; + ctx = *per_cpu_ptr(pmu->ctx, cpu); + if (!ctx) + continue; + + if (cpu == ctx->cpu) + cpumask_clear_cpu(cpu, &pmu->active_mask); + + if (!--ctx->refcnt) { + kfree(ctx->events); + kfree(ctx); + } + + *per_cpu_ptr(pmu->ctx, cpu) = NULL; } +} - if (amd_uncore_llc) { - *per_cpu_ptr(amd_uncore_llc, cpu) = NULL; - uncore_llc = amd_uncore_alloc(cpu); - if (!uncore_llc) - goto fail; - uncore_llc->cpu = cpu; - uncore_llc->num_counters = num_counters_llc; - uncore_llc->rdpmc_base = RDPMC_BASE_LLC; - uncore_llc->msr_base = MSR_F16H_L2I_PERF_CTL; - uncore_llc->active_mask = &amd_llc_active_mask; - uncore_llc->pmu = &amd_llc_pmu; - uncore_llc->events = amd_uncore_events_alloc(num_counters_llc, cpu); - if (!uncore_llc->events) - goto fail; - uncore_llc->id = -1; - *per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc; +static int amd_uncore_ctx_init(struct amd_uncore *uncore, unsigned int cpu) +{ + struct amd_uncore_ctx *curr, *prev; + struct amd_uncore_pmu *pmu; + int node, cid, gid, i, j; + + if (!uncore->init_done || !uncore->num_pmus) + return 0; + + cid = amd_uncore_ctx_cid(uncore, cpu); + gid = amd_uncore_ctx_gid(uncore, cpu); + + for (i = 0; i < uncore->num_pmus; i++) { + pmu = &uncore->pmus[i]; + *per_cpu_ptr(pmu->ctx, cpu) = NULL; + curr = NULL; + + /* Check for group exclusivity */ + if (gid != pmu->group) + continue; + + /* Find a sibling context */ + for_each_online_cpu(j) { + if (cpu == j) + continue; + + prev = 
*per_cpu_ptr(pmu->ctx, j); + if (!prev) + continue; + + if (cid == amd_uncore_ctx_cid(uncore, j)) { + curr = prev; + break; + } + } + + /* Allocate context if sibling does not exist */ + if (!curr) { + node = cpu_to_node(cpu); + curr = kzalloc_node(sizeof(*curr), GFP_KERNEL, node); + if (!curr) + goto fail; + + curr->cpu = cpu; + curr->events = kzalloc_node(sizeof(*curr->events) * + pmu->num_counters, + GFP_KERNEL, node); + if (!curr->events) { + kfree(curr); + goto fail; + } + + cpumask_set_cpu(cpu, &pmu->active_mask); + } + + curr->refcnt++; + *per_cpu_ptr(pmu->ctx, cpu) = curr; } return 0; fail: - if (uncore_nb) { - kfree(uncore_nb->events); - kfree(uncore_nb); - } - - if (uncore_llc) { - kfree(uncore_llc->events); - kfree(uncore_llc); - } + amd_uncore_ctx_free(uncore, cpu); return -ENOMEM; } -static struct amd_uncore * -amd_uncore_find_online_sibling(struct amd_uncore *this, - struct amd_uncore * __percpu *uncores) +static void amd_uncore_ctx_move(struct amd_uncore *uncore, unsigned int cpu) { - unsigned int cpu; - struct amd_uncore *that; - - for_each_online_cpu(cpu) { - that = *per_cpu_ptr(uncores, cpu); + struct amd_uncore_ctx *curr, *next; + struct amd_uncore_pmu *pmu; + int i, j; - if (!that) - continue; + if (!uncore->init_done) + return; - if (this == that) + for (i = 0; i < uncore->num_pmus; i++) { + pmu = &uncore->pmus[i]; + curr = *per_cpu_ptr(pmu->ctx, cpu); + if (!curr) continue; - if (this->id == that->id) { - hlist_add_head(&this->node, &uncore_unused_list); - this = that; - break; + /* Migrate to a shared sibling if possible */ + for_each_online_cpu(j) { + next = *per_cpu_ptr(pmu->ctx, j); + if (!next || cpu == j) + continue; + + if (curr == next) { + perf_pmu_migrate_context(&pmu->pmu, cpu, j); + cpumask_clear_cpu(cpu, &pmu->active_mask); + cpumask_set_cpu(j, &pmu->active_mask); + next->cpu = j; + break; + } } } - - this->refcnt++; - return this; } static int amd_uncore_cpu_starting(unsigned int cpu) { - unsigned int eax, ebx, ecx, edx; struct amd_uncore *uncore; + int i; + + for (i = 0; i < UNCORE_TYPE_MAX; i++) { + uncore = &uncores[i]; + uncore->scan(uncore, cpu); + } + + return 0; +} - if (amd_uncore_nb) { - uncore = *per_cpu_ptr(amd_uncore_nb, cpu); - cpuid(0x8000001e, &eax, &ebx, &ecx, &edx); - uncore->id = ecx & 0xff; +static int amd_uncore_cpu_online(unsigned int cpu) +{ + struct amd_uncore *uncore; + int i; - uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb); - *per_cpu_ptr(amd_uncore_nb, cpu) = uncore; + for (i = 0; i < UNCORE_TYPE_MAX; i++) { + uncore = &uncores[i]; + if (uncore->init(uncore, cpu)) + break; } - if (amd_uncore_llc) { - uncore = *per_cpu_ptr(amd_uncore_llc, cpu); - uncore->id = get_llc_id(cpu); + return 0; +} - uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc); - *per_cpu_ptr(amd_uncore_llc, cpu) = uncore; +static int amd_uncore_cpu_down_prepare(unsigned int cpu) +{ + struct amd_uncore *uncore; + int i; + + for (i = 0; i < UNCORE_TYPE_MAX; i++) { + uncore = &uncores[i]; + uncore->move(uncore, cpu); } return 0; } -static void uncore_clean_online(void) +static int amd_uncore_cpu_dead(unsigned int cpu) { struct amd_uncore *uncore; - struct hlist_node *n; + int i; - hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) { - hlist_del(&uncore->node); - kfree(uncore->events); - kfree(uncore); + for (i = 0; i < UNCORE_TYPE_MAX; i++) { + uncore = &uncores[i]; + uncore->free(uncore, cpu); } + + return 0; } -static void uncore_online(unsigned int cpu, - struct amd_uncore * __percpu *uncores) +static int 
amd_uncore_df_event_init(struct perf_event *event) { - struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu); + struct hw_perf_event *hwc = &event->hw; + int ret = amd_uncore_event_init(event); - uncore_clean_online(); + if (ret || pmu_version < 2) + return ret; - if (cpu == uncore->cpu) - cpumask_set_cpu(cpu, uncore->active_mask); + hwc->config = event->attr.config & + (pmu_version >= 2 ? AMD64_PERFMON_V2_RAW_EVENT_MASK_NB : + AMD64_RAW_EVENT_MASK_NB); + + return 0; } -static int amd_uncore_cpu_online(unsigned int cpu) +static int amd_uncore_df_add(struct perf_event *event, int flags) { - if (amd_uncore_nb) - uncore_online(cpu, amd_uncore_nb); + int ret = amd_uncore_add(event, flags & ~PERF_EF_START); + struct hw_perf_event *hwc = &event->hw; + + if (ret) + return ret; + + /* + * The first four DF counters are accessible via RDPMC index 6 to 9 + * followed by the L3 counters from index 10 to 15. For processors + * with more than four DF counters, the DF RDPMC assignments become + * discontiguous as the additional counters are accessible starting + * from index 16. + */ + if (hwc->idx >= NUM_COUNTERS_NB) + hwc->event_base_rdpmc += NUM_COUNTERS_L3; - if (amd_uncore_llc) - uncore_online(cpu, amd_uncore_llc); + /* Delayed start after rdpmc base update */ + if (flags & PERF_EF_START) + amd_uncore_start(event, PERF_EF_RELOAD); return 0; } -static void uncore_down_prepare(unsigned int cpu, - struct amd_uncore * __percpu *uncores) +static +void amd_uncore_df_ctx_scan(struct amd_uncore *uncore, unsigned int cpu) { - unsigned int i; - struct amd_uncore *this = *per_cpu_ptr(uncores, cpu); + union cpuid_0x80000022_ebx ebx; + union amd_uncore_info info; - if (this->cpu != cpu) + if (!boot_cpu_has(X86_FEATURE_PERFCTR_NB)) return; - /* this cpu is going down, migrate to a shared sibling if possible */ - for_each_online_cpu(i) { - struct amd_uncore *that = *per_cpu_ptr(uncores, i); + info.split.aux_data = 0; + info.split.num_pmcs = NUM_COUNTERS_NB; + info.split.gid = 0; + info.split.cid = topology_die_id(cpu); - if (cpu == i) - continue; + if (pmu_version >= 2) { + ebx.full = cpuid_ebx(EXT_PERFMON_DEBUG_FEATURES); + info.split.num_pmcs = ebx.split.num_df_pmc; + } - if (this == that) { - perf_pmu_migrate_context(this->pmu, cpu, i); - cpumask_clear_cpu(cpu, that->active_mask); - cpumask_set_cpu(i, that->active_mask); - that->cpu = i; - break; - } + *per_cpu_ptr(uncore->info, cpu) = info; +} + +static +int amd_uncore_df_ctx_init(struct amd_uncore *uncore, unsigned int cpu) +{ + struct attribute **df_attr = amd_uncore_df_format_attr; + struct amd_uncore_pmu *pmu; + + /* Run just once */ + if (uncore->init_done) + return amd_uncore_ctx_init(uncore, cpu); + + /* No grouping, single instance for a system */ + uncore->pmus = kzalloc(sizeof(*uncore->pmus), GFP_KERNEL); + if (!uncore->pmus) { + uncore->num_pmus = 0; + goto done; } + + /* + * For Family 17h and above, the Northbridge counters are repurposed + * as Data Fabric counters. The PMUs are exported based on family as + * either NB or DF. + */ + pmu = &uncore->pmus[0]; + strscpy(pmu->name, boot_cpu_data.x86 >= 0x17 ? 
"amd_df" : "amd_nb", + sizeof(pmu->name)); + pmu->num_counters = amd_uncore_ctx_num_pmcs(uncore, cpu); + pmu->msr_base = MSR_F15H_NB_PERF_CTL; + pmu->rdpmc_base = RDPMC_BASE_NB; + pmu->group = amd_uncore_ctx_gid(uncore, cpu); + + if (pmu_version >= 2) { + *df_attr++ = &format_attr_event14v2.attr; + *df_attr++ = &format_attr_umask12.attr; + } else if (boot_cpu_data.x86 >= 0x17) { + *df_attr = &format_attr_event14.attr; + } + + pmu->ctx = alloc_percpu(struct amd_uncore_ctx *); + if (!pmu->ctx) + goto done; + + pmu->pmu = (struct pmu) { + .task_ctx_nr = perf_invalid_context, + .attr_groups = amd_uncore_df_attr_groups, + .name = pmu->name, + .event_init = amd_uncore_df_event_init, + .add = amd_uncore_df_add, + .del = amd_uncore_del, + .start = amd_uncore_start, + .stop = amd_uncore_stop, + .read = amd_uncore_read, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT, + .module = THIS_MODULE, + }; + + if (perf_pmu_register(&pmu->pmu, pmu->pmu.name, -1)) { + free_percpu(pmu->ctx); + pmu->ctx = NULL; + goto done; + } + + pr_info("%d %s%s counters detected\n", pmu->num_counters, + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? "HYGON " : "", + pmu->pmu.name); + + uncore->num_pmus = 1; + +done: + uncore->init_done = true; + + return amd_uncore_ctx_init(uncore, cpu); } -static int amd_uncore_cpu_down_prepare(unsigned int cpu) +static int amd_uncore_l3_event_init(struct perf_event *event) { - if (amd_uncore_nb) - uncore_down_prepare(cpu, amd_uncore_nb); + int ret = amd_uncore_event_init(event); + struct hw_perf_event *hwc = &event->hw; + u64 config = event->attr.config; + u64 mask; - if (amd_uncore_llc) - uncore_down_prepare(cpu, amd_uncore_llc); + hwc->config = config & AMD64_RAW_EVENT_MASK_NB; + + /* + * SliceMask and ThreadMask need to be set for certain L3 events. + * For other events, the two fields do not affect the count. + */ + if (ret || boot_cpu_data.x86 < 0x17) + return ret; + + mask = config & (AMD64_L3_F19H_THREAD_MASK | AMD64_L3_SLICEID_MASK | + AMD64_L3_EN_ALL_CORES | AMD64_L3_EN_ALL_SLICES | + AMD64_L3_COREID_MASK); + + if (boot_cpu_data.x86 <= 0x18) + mask = ((config & AMD64_L3_SLICE_MASK) ? : AMD64_L3_SLICE_MASK) | + ((config & AMD64_L3_THREAD_MASK) ? : AMD64_L3_THREAD_MASK); + + /* + * If the user doesn't specify a ThreadMask, they're not trying to + * count core 0, so we enable all cores & threads. + * We'll also assume that they want to count slice 0 if they specify + * a ThreadMask and leave SliceId and EnAllSlices unpopulated. 
+ */ + else if (!(config & AMD64_L3_F19H_THREAD_MASK)) + mask = AMD64_L3_F19H_THREAD_MASK | AMD64_L3_EN_ALL_SLICES | + AMD64_L3_EN_ALL_CORES; + + hwc->config |= mask; return 0; } -static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores) +static +void amd_uncore_l3_ctx_scan(struct amd_uncore *uncore, unsigned int cpu) { - struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu); + union amd_uncore_info info; - if (cpu == uncore->cpu) - cpumask_clear_cpu(cpu, uncore->active_mask); + if (!boot_cpu_has(X86_FEATURE_PERFCTR_LLC)) + return; + + info.split.aux_data = 0; + info.split.num_pmcs = NUM_COUNTERS_L2; + info.split.gid = 0; + info.split.cid = get_llc_id(cpu); - if (!--uncore->refcnt) { - kfree(uncore->events); - kfree(uncore); + if (boot_cpu_data.x86 >= 0x17) + info.split.num_pmcs = NUM_COUNTERS_L3; + + *per_cpu_ptr(uncore->info, cpu) = info; +} + +static +int amd_uncore_l3_ctx_init(struct amd_uncore *uncore, unsigned int cpu) +{ + struct attribute **l3_attr = amd_uncore_l3_format_attr; + struct amd_uncore_pmu *pmu; + + /* Run just once */ + if (uncore->init_done) + return amd_uncore_ctx_init(uncore, cpu); + + /* No grouping, single instance for a system */ + uncore->pmus = kzalloc(sizeof(*uncore->pmus), GFP_KERNEL); + if (!uncore->pmus) { + uncore->num_pmus = 0; + goto done; } - *per_cpu_ptr(uncores, cpu) = NULL; + /* + * For Family 17h and above, L3 cache counters are available instead + * of L2 cache counters. The PMUs are exported based on family as + * either L2 or L3. + */ + pmu = &uncore->pmus[0]; + strscpy(pmu->name, boot_cpu_data.x86 >= 0x17 ? "amd_l3" : "amd_l2", + sizeof(pmu->name)); + pmu->num_counters = amd_uncore_ctx_num_pmcs(uncore, cpu); + pmu->msr_base = MSR_F16H_L2I_PERF_CTL; + pmu->rdpmc_base = RDPMC_BASE_LLC; + pmu->group = amd_uncore_ctx_gid(uncore, cpu); + + if (boot_cpu_data.x86 >= 0x17) { + *l3_attr++ = &format_attr_event8.attr; + *l3_attr++ = &format_attr_umask8.attr; + *l3_attr++ = boot_cpu_data.x86 >= 0x19 ? + &format_attr_threadmask2.attr : + &format_attr_threadmask8.attr; + } + + pmu->ctx = alloc_percpu(struct amd_uncore_ctx *); + if (!pmu->ctx) + goto done; + + pmu->pmu = (struct pmu) { + .task_ctx_nr = perf_invalid_context, + .attr_groups = amd_uncore_l3_attr_groups, + .attr_update = amd_uncore_l3_attr_update, + .name = pmu->name, + .event_init = amd_uncore_l3_event_init, + .add = amd_uncore_add, + .del = amd_uncore_del, + .start = amd_uncore_start, + .stop = amd_uncore_stop, + .read = amd_uncore_read, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT, + .module = THIS_MODULE, + }; + + if (perf_pmu_register(&pmu->pmu, pmu->pmu.name, -1)) { + free_percpu(pmu->ctx); + pmu->ctx = NULL; + goto done; + } + + pr_info("%d %s%s counters detected\n", pmu->num_counters, + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? 
"HYGON " : "", + pmu->pmu.name); + + uncore->num_pmus = 1; + +done: + uncore->init_done = true; + + return amd_uncore_ctx_init(uncore, cpu); } -static int amd_uncore_cpu_dead(unsigned int cpu) +static int amd_uncore_umc_event_init(struct perf_event *event) { - if (amd_uncore_nb) - uncore_dead(cpu, amd_uncore_nb); + struct hw_perf_event *hwc = &event->hw; + int ret = amd_uncore_event_init(event); + + if (ret) + return ret; - if (amd_uncore_llc) - uncore_dead(cpu, amd_uncore_llc); + hwc->config = event->attr.config & AMD64_PERFMON_V2_RAW_EVENT_MASK_UMC; return 0; } -static int __init amd_uncore_init(void) +static void amd_uncore_umc_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + if (flags & PERF_EF_RELOAD) + wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count)); + + hwc->state = 0; + wrmsrl(hwc->config_base, (hwc->config | AMD64_PERFMON_V2_ENABLE_UMC)); + perf_event_update_userpage(event); +} + +static +void amd_uncore_umc_ctx_scan(struct amd_uncore *uncore, unsigned int cpu) { - struct attribute **df_attr = amd_uncore_df_format_attr; - struct attribute **l3_attr = amd_uncore_l3_format_attr; union cpuid_0x80000022_ebx ebx; + union amd_uncore_info info; + unsigned int eax, ecx, edx; + + if (pmu_version < 2) + return; + + cpuid(EXT_PERFMON_DEBUG_FEATURES, &eax, &ebx.full, &ecx, &edx); + info.split.aux_data = ecx; /* stash active mask */ + info.split.num_pmcs = ebx.split.num_umc_pmc; + info.split.gid = topology_die_id(cpu); + info.split.cid = topology_die_id(cpu); + *per_cpu_ptr(uncore->info, cpu) = info; +} + +static +int amd_uncore_umc_ctx_init(struct amd_uncore *uncore, unsigned int cpu) +{ + DECLARE_BITMAP(gmask, UNCORE_GROUP_MAX) = { 0 }; + u8 group_num_pmus[UNCORE_GROUP_MAX] = { 0 }; + u8 group_num_pmcs[UNCORE_GROUP_MAX] = { 0 }; + union amd_uncore_info info; + struct amd_uncore_pmu *pmu; + int index = 0, gid, i; + + if (pmu_version < 2) + return 0; + + /* Run just once */ + if (uncore->init_done) + return amd_uncore_ctx_init(uncore, cpu); + + /* Find unique groups */ + for_each_online_cpu(i) { + info = *per_cpu_ptr(uncore->info, i); + gid = info.split.gid; + if (test_bit(gid, gmask)) + continue; + + __set_bit(gid, gmask); + group_num_pmus[gid] = hweight32(info.split.aux_data); + group_num_pmcs[gid] = info.split.num_pmcs; + uncore->num_pmus += group_num_pmus[gid]; + } + + uncore->pmus = kzalloc(sizeof(*uncore->pmus) * uncore->num_pmus, + GFP_KERNEL); + if (!uncore->pmus) { + uncore->num_pmus = 0; + goto done; + } + + for_each_set_bit(gid, gmask, UNCORE_GROUP_MAX) { + for (i = 0; i < group_num_pmus[gid]; i++) { + pmu = &uncore->pmus[index]; + snprintf(pmu->name, sizeof(pmu->name), "amd_umc_%d", index); + pmu->num_counters = group_num_pmcs[gid] / group_num_pmus[gid]; + pmu->msr_base = MSR_F19H_UMC_PERF_CTL + i * pmu->num_counters * 2; + pmu->rdpmc_base = -1; + pmu->group = gid; + + pmu->ctx = alloc_percpu(struct amd_uncore_ctx *); + if (!pmu->ctx) + goto done; + + pmu->pmu = (struct pmu) { + .task_ctx_nr = perf_invalid_context, + .attr_groups = amd_uncore_umc_attr_groups, + .name = pmu->name, + .event_init = amd_uncore_umc_event_init, + .add = amd_uncore_add, + .del = amd_uncore_del, + .start = amd_uncore_umc_start, + .stop = amd_uncore_stop, + .read = amd_uncore_read, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT, + .module = THIS_MODULE, + }; + + if (perf_pmu_register(&pmu->pmu, pmu->pmu.name, -1)) { + free_percpu(pmu->ctx); + pmu->ctx = NULL; + goto done; + } + + pr_info("%d %s counters detected\n", 
pmu->num_counters, + pmu->pmu.name); + + index++; + } + } + +done: + uncore->num_pmus = index; + uncore->init_done = true; + + return amd_uncore_ctx_init(uncore, cpu); +} + +static struct amd_uncore uncores[UNCORE_TYPE_MAX] = { + /* UNCORE_TYPE_DF */ + { + .scan = amd_uncore_df_ctx_scan, + .init = amd_uncore_df_ctx_init, + .move = amd_uncore_ctx_move, + .free = amd_uncore_ctx_free, + }, + /* UNCORE_TYPE_L3 */ + { + .scan = amd_uncore_l3_ctx_scan, + .init = amd_uncore_l3_ctx_init, + .move = amd_uncore_ctx_move, + .free = amd_uncore_ctx_free, + }, + /* UNCORE_TYPE_UMC */ + { + .scan = amd_uncore_umc_ctx_scan, + .init = amd_uncore_umc_ctx_init, + .move = amd_uncore_ctx_move, + .free = amd_uncore_ctx_free, + }, +}; + +static int __init amd_uncore_init(void) +{ + struct amd_uncore *uncore; int ret = -ENODEV; + int i; if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) @@ -660,125 +1022,91 @@ static int __init amd_uncore_init(void) if (boot_cpu_has(X86_FEATURE_PERFMON_V2)) pmu_version = 2; - num_counters_nb = NUM_COUNTERS_NB; - num_counters_llc = NUM_COUNTERS_L2; - if (boot_cpu_data.x86 >= 0x17) { - /* - * For F17h and above, the Northbridge counters are - * repurposed as Data Fabric counters. Also, L3 - * counters are supported too. The PMUs are exported - * based on family as either L2 or L3 and NB or DF. - */ - num_counters_llc = NUM_COUNTERS_L3; - amd_nb_pmu.name = "amd_df"; - amd_llc_pmu.name = "amd_l3"; - l3_mask = true; - } + for (i = 0; i < UNCORE_TYPE_MAX; i++) { + uncore = &uncores[i]; - if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) { - if (pmu_version >= 2) { - *df_attr++ = &format_attr_event14v2.attr; - *df_attr++ = &format_attr_umask12.attr; - } else if (boot_cpu_data.x86 >= 0x17) { - *df_attr = &format_attr_event14.attr; - } + BUG_ON(!uncore->scan); + BUG_ON(!uncore->init); + BUG_ON(!uncore->move); + BUG_ON(!uncore->free); - amd_uncore_nb = alloc_percpu(struct amd_uncore *); - if (!amd_uncore_nb) { + uncore->info = alloc_percpu(union amd_uncore_info); + if (!uncore->info) { ret = -ENOMEM; - goto fail_nb; - } - ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1); - if (ret) - goto fail_nb; - - if (pmu_version >= 2) { - ebx.full = cpuid_ebx(EXT_PERFMON_DEBUG_FEATURES); - num_counters_nb = ebx.split.num_df_pmc; - } - - pr_info("%d %s %s counters detected\n", num_counters_nb, - boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? "HYGON" : "", - amd_nb_pmu.name); - - ret = 0; - } - - if (boot_cpu_has(X86_FEATURE_PERFCTR_LLC)) { - if (boot_cpu_data.x86 >= 0x19) { - *l3_attr++ = &format_attr_event8.attr; - *l3_attr++ = &format_attr_umask8.attr; - *l3_attr++ = &format_attr_threadmask2.attr; - } else if (boot_cpu_data.x86 >= 0x17) { - *l3_attr++ = &format_attr_event8.attr; - *l3_attr++ = &format_attr_umask8.attr; - *l3_attr++ = &format_attr_threadmask8.attr; - } - - amd_uncore_llc = alloc_percpu(struct amd_uncore *); - if (!amd_uncore_llc) { - ret = -ENOMEM; - goto fail_llc; + goto fail; } - ret = perf_pmu_register(&amd_llc_pmu, amd_llc_pmu.name, -1); - if (ret) - goto fail_llc; - - pr_info("%d %s %s counters detected\n", num_counters_llc, - boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? "HYGON" : "", - amd_llc_pmu.name); - ret = 0; - } + }; /* * Install callbacks. Core will call them for each online cpu. 
*/ - if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP, - "perf/x86/amd/uncore:prepare", - amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead)) - goto fail_llc; - - if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, - "perf/x86/amd/uncore:starting", - amd_uncore_cpu_starting, NULL)) + ret = cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP, + "perf/x86/amd/uncore:prepare", + NULL, amd_uncore_cpu_dead); + if (ret) + goto fail; + + ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, + "perf/x86/amd/uncore:starting", + amd_uncore_cpu_starting, NULL); + if (ret) goto fail_prep; - if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE, - "perf/x86/amd/uncore:online", - amd_uncore_cpu_online, - amd_uncore_cpu_down_prepare)) + + ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE, + "perf/x86/amd/uncore:online", + amd_uncore_cpu_online, + amd_uncore_cpu_down_prepare); + if (ret) goto fail_start; + return 0; fail_start: cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING); fail_prep: cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP); -fail_llc: - if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) - perf_pmu_unregister(&amd_nb_pmu); - free_percpu(amd_uncore_llc); -fail_nb: - free_percpu(amd_uncore_nb); +fail: + for (i = 0; i < UNCORE_TYPE_MAX; i++) { + uncore = &uncores[i]; + if (uncore->info) { + free_percpu(uncore->info); + uncore->info = NULL; + } + } return ret; } static void __exit amd_uncore_exit(void) { + struct amd_uncore *uncore; + struct amd_uncore_pmu *pmu; + int i, j; + cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE); cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING); cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP); - if (boot_cpu_has(X86_FEATURE_PERFCTR_LLC)) { - perf_pmu_unregister(&amd_llc_pmu); - free_percpu(amd_uncore_llc); - amd_uncore_llc = NULL; - } + for (i = 0; i < UNCORE_TYPE_MAX; i++) { + uncore = &uncores[i]; + if (!uncore->info) + continue; + + free_percpu(uncore->info); + uncore->info = NULL; + + for (j = 0; j < uncore->num_pmus; j++) { + pmu = &uncore->pmus[j]; + if (!pmu->ctx) + continue; + + perf_pmu_unregister(&pmu->pmu); + free_percpu(pmu->ctx); + pmu->ctx = NULL; + } - if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) { - perf_pmu_unregister(&amd_nb_pmu); - free_percpu(amd_uncore_nb); - amd_uncore_nb = NULL; + kfree(uncore->pmus); + uncore->pmus = NULL; } } diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 185f902e5f28..40ad1425ffa2 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -1887,9 +1887,9 @@ ssize_t events_hybrid_sysfs_show(struct device *dev, str = pmu_attr->event_str; for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) { - if (!(x86_pmu.hybrid_pmu[i].cpu_type & pmu_attr->pmu_type)) + if (!(x86_pmu.hybrid_pmu[i].pmu_type & pmu_attr->pmu_type)) continue; - if (x86_pmu.hybrid_pmu[i].cpu_type & pmu->cpu_type) { + if (x86_pmu.hybrid_pmu[i].pmu_type & pmu->pmu_type) { next_str = strchr(str, ';'); if (next_str) return snprintf(page, next_str - str + 1, "%s", str); @@ -2169,7 +2169,7 @@ static int __init init_hw_perf_events(void) hybrid_pmu->pmu.capabilities |= PERF_PMU_CAP_EXTENDED_HW_TYPE; err = perf_pmu_register(&hybrid_pmu->pmu, hybrid_pmu->name, - (hybrid_pmu->cpu_type == hybrid_big) ? PERF_TYPE_RAW : -1); + (hybrid_pmu->pmu_type == hybrid_big) ? 
PERF_TYPE_RAW : -1); if (err) break; } diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index fa355d3658a6..a08f794a0e79 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -211,6 +211,14 @@ static struct event_constraint intel_slm_event_constraints[] __read_mostly = EVENT_CONSTRAINT_END }; +static struct event_constraint intel_grt_event_constraints[] __read_mostly = { + FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ + FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ + FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */ + FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */ + EVENT_CONSTRAINT_END +}; + static struct event_constraint intel_skl_event_constraints[] = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ @@ -299,7 +307,7 @@ static struct extra_reg intel_icl_extra_regs[] __read_mostly = { EVENT_EXTRA_END }; -static struct extra_reg intel_spr_extra_regs[] __read_mostly = { +static struct extra_reg intel_glc_extra_regs[] __read_mostly = { INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0), INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1), INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), @@ -309,11 +317,12 @@ static struct extra_reg intel_spr_extra_regs[] __read_mostly = { EVENT_EXTRA_END }; -static struct event_constraint intel_spr_event_constraints[] = { +static struct event_constraint intel_glc_event_constraints[] = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ + FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */ FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */ METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0), METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1), @@ -349,7 +358,7 @@ static struct event_constraint intel_spr_event_constraints[] = { EVENT_CONSTRAINT_END }; -static struct extra_reg intel_gnr_extra_regs[] __read_mostly = { +static struct extra_reg intel_rwc_extra_regs[] __read_mostly = { INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0), INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1), INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), @@ -473,7 +482,7 @@ static u64 intel_pmu_event_map(int hw_event) return intel_perfmon_event_map[hw_event]; } -static __initconst const u64 spr_hw_cache_event_ids +static __initconst const u64 glc_hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = @@ -552,7 +561,7 @@ static __initconst const u64 spr_hw_cache_event_ids }, }; -static __initconst const u64 spr_hw_cache_extra_regs +static __initconst const u64 glc_hw_cache_extra_regs [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = @@ -2556,16 +2565,6 @@ static int icl_set_topdown_event_period(struct perf_event *event) return 0; } -static int adl_set_topdown_event_period(struct perf_event *event) -{ - struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); - - if (pmu->cpu_type != hybrid_big) - return 0; - - return icl_set_topdown_event_period(event); -} - DEFINE_STATIC_CALL(intel_pmu_set_topdown_event_period, x86_perf_event_set_period); static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx) @@ 
-2708,16 +2707,6 @@ static u64 icl_update_topdown_event(struct perf_event *event) x86_pmu.num_topdown_events - 1); } -static u64 adl_update_topdown_event(struct perf_event *event) -{ - struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); - - if (pmu->cpu_type != hybrid_big) - return 0; - - return icl_update_topdown_event(event); -} - DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, x86_perf_event_update); static void intel_pmu_read_topdown_event(struct perf_event *event) @@ -3869,7 +3858,7 @@ static inline bool require_mem_loads_aux_event(struct perf_event *event) return false; if (is_hybrid()) - return hybrid_pmu(event->pmu)->cpu_type == hybrid_big; + return hybrid_pmu(event->pmu)->pmu_type == hybrid_big; return true; } @@ -4273,7 +4262,7 @@ icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx, } static struct event_constraint * -spr_get_event_constraints(struct cpu_hw_events *cpuc, int idx, +glc_get_event_constraints(struct cpu_hw_events *cpuc, int idx, struct perf_event *event) { struct event_constraint *c; @@ -4361,9 +4350,9 @@ adl_get_event_constraints(struct cpu_hw_events *cpuc, int idx, { struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); - if (pmu->cpu_type == hybrid_big) - return spr_get_event_constraints(cpuc, idx, event); - else if (pmu->cpu_type == hybrid_small) + if (pmu->pmu_type == hybrid_big) + return glc_get_event_constraints(cpuc, idx, event); + else if (pmu->pmu_type == hybrid_small) return tnt_get_event_constraints(cpuc, idx, event); WARN_ON(1); @@ -4409,7 +4398,7 @@ rwc_get_event_constraints(struct cpu_hw_events *cpuc, int idx, { struct event_constraint *c; - c = spr_get_event_constraints(cpuc, idx, event); + c = glc_get_event_constraints(cpuc, idx, event); /* The Retire Latency is not supported by the fixed counter 0. 
*/ if (event->attr.precise_ip && @@ -4433,9 +4422,9 @@ mtl_get_event_constraints(struct cpu_hw_events *cpuc, int idx, { struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); - if (pmu->cpu_type == hybrid_big) + if (pmu->pmu_type == hybrid_big) return rwc_get_event_constraints(cpuc, idx, event); - if (pmu->cpu_type == hybrid_small) + if (pmu->pmu_type == hybrid_small) return cmt_get_event_constraints(cpuc, idx, event); WARN_ON(1); @@ -4446,18 +4435,18 @@ static int adl_hw_config(struct perf_event *event) { struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); - if (pmu->cpu_type == hybrid_big) + if (pmu->pmu_type == hybrid_big) return hsw_hw_config(event); - else if (pmu->cpu_type == hybrid_small) + else if (pmu->pmu_type == hybrid_small) return intel_pmu_hw_config(event); WARN_ON(1); return -EOPNOTSUPP; } -static u8 adl_get_hybrid_cpu_type(void) +static enum hybrid_cpu_type adl_get_hybrid_cpu_type(void) { - return hybrid_big; + return HYBRID_INTEL_CORE; } /* @@ -4490,7 +4479,7 @@ static void nhm_limit_period(struct perf_event *event, s64 *left) *left = max(*left, 32LL); } -static void spr_limit_period(struct perf_event *event, s64 *left) +static void glc_limit_period(struct perf_event *event, s64 *left) { if (event->attr.precise_ip == 3) *left = max(*left, 128LL); @@ -4618,6 +4607,23 @@ static void intel_pmu_check_num_counters(int *num_counters, int *num_counters_fixed, u64 *intel_ctrl, u64 fixed_mask); +static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints, + int num_counters, + int num_counters_fixed, + u64 intel_ctrl); + +static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs); + +static inline bool intel_pmu_broken_perf_cap(void) +{ + /* The Perf Metric (Bit 15) is always cleared */ + if ((boot_cpu_data.x86_model == INTEL_FAM6_METEORLAKE) || + (boot_cpu_data.x86_model == INTEL_FAM6_METEORLAKE_L)) + return true; + + return false; +} + static void update_pmu_cap(struct x86_hybrid_pmu *pmu) { unsigned int sub_bitmaps = cpuid_eax(ARCH_PERFMON_EXT_LEAF); @@ -4628,27 +4634,83 @@ static void update_pmu_cap(struct x86_hybrid_pmu *pmu) &eax, &ebx, &ecx, &edx); pmu->num_counters = fls(eax); pmu->num_counters_fixed = fls(ebx); - intel_pmu_check_num_counters(&pmu->num_counters, &pmu->num_counters_fixed, - &pmu->intel_ctrl, ebx); + } + + + if (!intel_pmu_broken_perf_cap()) { + /* Perf Metric (Bit 15) and PEBS via PT (Bit 16) are hybrid enumeration */ + rdmsrl(MSR_IA32_PERF_CAPABILITIES, pmu->intel_cap.capabilities); } } -static bool init_hybrid_pmu(int cpu) +static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu) +{ + intel_pmu_check_num_counters(&pmu->num_counters, &pmu->num_counters_fixed, + &pmu->intel_ctrl, (1ULL << pmu->num_counters_fixed) - 1); + pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters); + pmu->unconstrained = (struct event_constraint) + __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1, + 0, pmu->num_counters, 0, 0); + + if (pmu->intel_cap.perf_metrics) + pmu->intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS; + else + pmu->intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS); + + if (pmu->intel_cap.pebs_output_pt_available) + pmu->pmu.capabilities |= PERF_PMU_CAP_AUX_OUTPUT; + else + pmu->pmu.capabilities |= ~PERF_PMU_CAP_AUX_OUTPUT; + + intel_pmu_check_event_constraints(pmu->event_constraints, + pmu->num_counters, + pmu->num_counters_fixed, + pmu->intel_ctrl); + + intel_pmu_check_extra_regs(pmu->extra_regs); +} + +static struct x86_hybrid_pmu *find_hybrid_pmu_for_cpu(void) { - struct 
cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); u8 cpu_type = get_this_hybrid_cpu_type(); - struct x86_hybrid_pmu *pmu = NULL; int i; - if (!cpu_type && x86_pmu.get_hybrid_cpu_type) - cpu_type = x86_pmu.get_hybrid_cpu_type(); + /* + * This is running on a CPU model that is known to have hybrid + * configurations. But the CPU told us it is not hybrid, shame + * on it. There should be a fixup function provided for these + * troublesome CPUs (->get_hybrid_cpu_type). + */ + if (cpu_type == HYBRID_INTEL_NONE) { + if (x86_pmu.get_hybrid_cpu_type) + cpu_type = x86_pmu.get_hybrid_cpu_type(); + else + return NULL; + } + /* + * This essentially just maps between the 'hybrid_cpu_type' + * and 'hybrid_pmu_type' enums: + */ for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) { - if (x86_pmu.hybrid_pmu[i].cpu_type == cpu_type) { - pmu = &x86_pmu.hybrid_pmu[i]; - break; - } + enum hybrid_pmu_type pmu_type = x86_pmu.hybrid_pmu[i].pmu_type; + + if (cpu_type == HYBRID_INTEL_CORE && + pmu_type == hybrid_big) + return &x86_pmu.hybrid_pmu[i]; + if (cpu_type == HYBRID_INTEL_ATOM && + pmu_type == hybrid_small) + return &x86_pmu.hybrid_pmu[i]; } + + return NULL; +} + +static bool init_hybrid_pmu(int cpu) +{ + struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); + struct x86_hybrid_pmu *pmu = find_hybrid_pmu_for_cpu(); + if (WARN_ON_ONCE(!pmu || (pmu->pmu.type == -1))) { cpuc->pmu = NULL; return false; @@ -4661,6 +4723,8 @@ static bool init_hybrid_pmu(int cpu) if (this_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT)) update_pmu_cap(pmu); + intel_pmu_check_hybrid_pmus(pmu); + if (!check_hw_exists(&pmu->pmu, pmu->num_counters, pmu->num_counters_fixed)) return false; @@ -5337,14 +5401,14 @@ static struct attribute *icl_tsx_events_attrs[] = { EVENT_ATTR_STR(mem-stores, mem_st_spr, "event=0xcd,umask=0x2"); EVENT_ATTR_STR(mem-loads-aux, mem_ld_aux, "event=0x03,umask=0x82"); -static struct attribute *spr_events_attrs[] = { +static struct attribute *glc_events_attrs[] = { EVENT_PTR(mem_ld_hsw), EVENT_PTR(mem_st_spr), EVENT_PTR(mem_ld_aux), NULL, }; -static struct attribute *spr_td_events_attrs[] = { +static struct attribute *glc_td_events_attrs[] = { EVENT_PTR(slots), EVENT_PTR(td_retiring), EVENT_PTR(td_bad_spec), @@ -5357,7 +5421,7 @@ static struct attribute *spr_td_events_attrs[] = { NULL, }; -static struct attribute *spr_tsx_events_attrs[] = { +static struct attribute *glc_tsx_events_attrs[] = { EVENT_PTR(tx_start), EVENT_PTR(tx_abort), EVENT_PTR(tx_commit), @@ -5699,7 +5763,7 @@ static bool is_attr_for_this_pmu(struct kobject *kobj, struct attribute *attr) struct perf_pmu_events_hybrid_attr *pmu_attr = container_of(attr, struct perf_pmu_events_hybrid_attr, attr.attr); - return pmu->cpu_type & pmu_attr->pmu_type; + return pmu->pmu_type & pmu_attr->pmu_type; } static umode_t hybrid_events_is_visible(struct kobject *kobj, @@ -5736,7 +5800,7 @@ static umode_t hybrid_format_is_visible(struct kobject *kobj, container_of(attr, struct perf_pmu_format_hybrid_attr, attr.attr); int cpu = hybrid_find_supported_cpu(pmu); - return (cpu >= 0) && (pmu->cpu_type & pmu_attr->pmu_type) ? attr->mode : 0; + return (cpu >= 0) && (pmu->pmu_type & pmu_attr->pmu_type) ? 
attr->mode : 0; } static struct attribute_group hybrid_group_events_td = { @@ -5880,40 +5944,105 @@ static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs) } } -static void intel_pmu_check_hybrid_pmus(u64 fixed_mask) +static const struct { enum hybrid_pmu_type id; char *name; } intel_hybrid_pmu_type_map[] __initconst = { + { hybrid_small, "cpu_atom" }, + { hybrid_big, "cpu_core" }, +}; + +static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus) { + unsigned long pmus_mask = pmus; struct x86_hybrid_pmu *pmu; - int i; + int idx = 0, bit; - for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) { - pmu = &x86_pmu.hybrid_pmu[i]; + x86_pmu.num_hybrid_pmus = hweight_long(pmus_mask); + x86_pmu.hybrid_pmu = kcalloc(x86_pmu.num_hybrid_pmus, + sizeof(struct x86_hybrid_pmu), + GFP_KERNEL); + if (!x86_pmu.hybrid_pmu) + return -ENOMEM; - intel_pmu_check_num_counters(&pmu->num_counters, - &pmu->num_counters_fixed, - &pmu->intel_ctrl, - fixed_mask); + static_branch_enable(&perf_is_hybrid); + x86_pmu.filter = intel_pmu_filter; - if (pmu->intel_cap.perf_metrics) { - pmu->intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS; - pmu->intel_ctrl |= INTEL_PMC_MSK_FIXED_SLOTS; + for_each_set_bit(bit, &pmus_mask, ARRAY_SIZE(intel_hybrid_pmu_type_map)) { + pmu = &x86_pmu.hybrid_pmu[idx++]; + pmu->pmu_type = intel_hybrid_pmu_type_map[bit].id; + pmu->name = intel_hybrid_pmu_type_map[bit].name; + + pmu->num_counters = x86_pmu.num_counters; + pmu->num_counters_fixed = x86_pmu.num_counters_fixed; + pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters); + pmu->unconstrained = (struct event_constraint) + __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1, + 0, pmu->num_counters, 0, 0); + + pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities; + if (pmu->pmu_type & hybrid_small) { + pmu->intel_cap.perf_metrics = 0; + pmu->intel_cap.pebs_output_pt_available = 1; + pmu->mid_ack = true; + } else if (pmu->pmu_type & hybrid_big) { + pmu->intel_cap.perf_metrics = 1; + pmu->intel_cap.pebs_output_pt_available = 0; + pmu->late_ack = true; } + } - if (pmu->intel_cap.pebs_output_pt_available) - pmu->pmu.capabilities |= PERF_PMU_CAP_AUX_OUTPUT; + return 0; +} - intel_pmu_check_event_constraints(pmu->event_constraints, - pmu->num_counters, - pmu->num_counters_fixed, - pmu->intel_ctrl); +static __always_inline void intel_pmu_ref_cycles_ext(void) +{ + if (!(x86_pmu.events_maskl & (INTEL_PMC_MSK_FIXED_REF_CYCLES >> INTEL_PMC_IDX_FIXED))) + intel_perfmon_event_map[PERF_COUNT_HW_REF_CPU_CYCLES] = 0x013c; +} - intel_pmu_check_extra_regs(pmu->extra_regs); - } +static __always_inline void intel_pmu_init_glc(struct pmu *pmu) +{ + x86_pmu.late_ack = true; + x86_pmu.limit_period = glc_limit_period; + x86_pmu.pebs_aliases = NULL; + x86_pmu.pebs_prec_dist = true; + x86_pmu.pebs_block = true; + x86_pmu.flags |= PMU_FL_HAS_RSP_1; + x86_pmu.flags |= PMU_FL_NO_HT_SHARING; + x86_pmu.flags |= PMU_FL_INSTR_LATENCY; + x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04); + x86_pmu.lbr_pt_coexist = true; + x86_pmu.num_topdown_events = 8; + static_call_update(intel_pmu_update_topdown_event, + &icl_update_topdown_event); + static_call_update(intel_pmu_set_topdown_event_period, + &icl_set_topdown_event_period); + + memcpy(hybrid_var(pmu, hw_cache_event_ids), glc_hw_cache_event_ids, sizeof(hw_cache_event_ids)); + memcpy(hybrid_var(pmu, hw_cache_extra_regs), glc_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); + hybrid(pmu, event_constraints) = intel_glc_event_constraints; + hybrid(pmu, 
pebs_constraints) = intel_glc_pebs_event_constraints; + + intel_pmu_ref_cycles_ext(); } -static __always_inline bool is_mtl(u8 x86_model) +static __always_inline void intel_pmu_init_grt(struct pmu *pmu) { - return (x86_model == INTEL_FAM6_METEORLAKE) || - (x86_model == INTEL_FAM6_METEORLAKE_L); + x86_pmu.mid_ack = true; + x86_pmu.limit_period = glc_limit_period; + x86_pmu.pebs_aliases = NULL; + x86_pmu.pebs_prec_dist = true; + x86_pmu.pebs_block = true; + x86_pmu.lbr_pt_coexist = true; + x86_pmu.flags |= PMU_FL_HAS_RSP_1; + x86_pmu.flags |= PMU_FL_INSTR_LATENCY; + + memcpy(hybrid_var(pmu, hw_cache_event_ids), glp_hw_cache_event_ids, sizeof(hw_cache_event_ids)); + memcpy(hybrid_var(pmu, hw_cache_extra_regs), tnt_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); + hybrid_var(pmu, hw_cache_event_ids)[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; + hybrid(pmu, event_constraints) = intel_grt_event_constraints; + hybrid(pmu, pebs_constraints) = intel_grt_pebs_event_constraints; + hybrid(pmu, extra_regs) = intel_grt_extra_regs; + + intel_pmu_ref_cycles_ext(); } __init int intel_pmu_init(void) @@ -6194,28 +6323,10 @@ __init int intel_pmu_init(void) break; case INTEL_FAM6_ATOM_GRACEMONT: - x86_pmu.mid_ack = true; - memcpy(hw_cache_event_ids, glp_hw_cache_event_ids, - sizeof(hw_cache_event_ids)); - memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs, - sizeof(hw_cache_extra_regs)); - hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; - - x86_pmu.event_constraints = intel_slm_event_constraints; - x86_pmu.pebs_constraints = intel_grt_pebs_event_constraints; - x86_pmu.extra_regs = intel_grt_extra_regs; - - x86_pmu.pebs_aliases = NULL; - x86_pmu.pebs_prec_dist = true; - x86_pmu.pebs_block = true; - x86_pmu.lbr_pt_coexist = true; - x86_pmu.flags |= PMU_FL_HAS_RSP_1; - x86_pmu.flags |= PMU_FL_INSTR_LATENCY; - + intel_pmu_init_grt(NULL); intel_pmu_pebs_data_source_grt(); x86_pmu.pebs_latency_data = adl_latency_data_small; x86_pmu.get_event_constraints = tnt_get_event_constraints; - x86_pmu.limit_period = spr_limit_period; td_attr = tnt_events_attrs; mem_attr = grt_mem_attrs; extra_attr = nhm_format_attr; @@ -6225,28 +6336,11 @@ __init int intel_pmu_init(void) case INTEL_FAM6_ATOM_CRESTMONT: case INTEL_FAM6_ATOM_CRESTMONT_X: - x86_pmu.mid_ack = true; - memcpy(hw_cache_event_ids, glp_hw_cache_event_ids, - sizeof(hw_cache_event_ids)); - memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs, - sizeof(hw_cache_extra_regs)); - hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; - - x86_pmu.event_constraints = intel_slm_event_constraints; - x86_pmu.pebs_constraints = intel_grt_pebs_event_constraints; + intel_pmu_init_grt(NULL); x86_pmu.extra_regs = intel_cmt_extra_regs; - - x86_pmu.pebs_aliases = NULL; - x86_pmu.pebs_prec_dist = true; - x86_pmu.lbr_pt_coexist = true; - x86_pmu.pebs_block = true; - x86_pmu.flags |= PMU_FL_HAS_RSP_1; - x86_pmu.flags |= PMU_FL_INSTR_LATENCY; - intel_pmu_pebs_data_source_cmt(); x86_pmu.pebs_latency_data = mtl_latency_data_small; x86_pmu.get_event_constraints = cmt_get_event_constraints; - x86_pmu.limit_period = spr_limit_period; td_attr = cmt_events_attrs; mem_attr = grt_mem_attrs; extra_attr = cmt_format_attr; @@ -6563,44 +6657,23 @@ __init int intel_pmu_init(void) case INTEL_FAM6_SAPPHIRERAPIDS_X: case INTEL_FAM6_EMERALDRAPIDS_X: x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX; - x86_pmu.extra_regs = intel_spr_extra_regs; + x86_pmu.extra_regs = intel_glc_extra_regs; fallthrough; case INTEL_FAM6_GRANITERAPIDS_X: case INTEL_FAM6_GRANITERAPIDS_D: - pmem = true; - 
x86_pmu.late_ack = true; - memcpy(hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(hw_cache_event_ids)); - memcpy(hw_cache_extra_regs, spr_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); - - x86_pmu.event_constraints = intel_spr_event_constraints; - x86_pmu.pebs_constraints = intel_spr_pebs_event_constraints; + intel_pmu_init_glc(NULL); if (!x86_pmu.extra_regs) - x86_pmu.extra_regs = intel_gnr_extra_regs; - x86_pmu.limit_period = spr_limit_period; + x86_pmu.extra_regs = intel_rwc_extra_regs; x86_pmu.pebs_ept = 1; - x86_pmu.pebs_aliases = NULL; - x86_pmu.pebs_prec_dist = true; - x86_pmu.pebs_block = true; - x86_pmu.flags |= PMU_FL_HAS_RSP_1; - x86_pmu.flags |= PMU_FL_NO_HT_SHARING; - x86_pmu.flags |= PMU_FL_INSTR_LATENCY; - x86_pmu.hw_config = hsw_hw_config; - x86_pmu.get_event_constraints = spr_get_event_constraints; + x86_pmu.get_event_constraints = glc_get_event_constraints; extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? hsw_format_attr : nhm_format_attr; extra_skl_attr = skl_format_attr; - mem_attr = spr_events_attrs; - td_attr = spr_td_events_attrs; - tsx_attr = spr_tsx_events_attrs; - x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04); - x86_pmu.lbr_pt_coexist = true; - intel_pmu_pebs_data_source_skl(pmem); - x86_pmu.num_topdown_events = 8; - static_call_update(intel_pmu_update_topdown_event, - &icl_update_topdown_event); - static_call_update(intel_pmu_set_topdown_event_period, - &icl_set_topdown_event_period); + mem_attr = glc_events_attrs; + td_attr = glc_td_events_attrs; + tsx_attr = glc_tsx_events_attrs; + intel_pmu_pebs_data_source_skl(true); pr_cont("Sapphire Rapids events, "); name = "sapphire_rapids"; break; @@ -6610,47 +6683,17 @@ __init int intel_pmu_init(void) case INTEL_FAM6_RAPTORLAKE: case INTEL_FAM6_RAPTORLAKE_P: case INTEL_FAM6_RAPTORLAKE_S: - case INTEL_FAM6_METEORLAKE: - case INTEL_FAM6_METEORLAKE_L: /* * Alder Lake has 2 types of CPU, core and atom. * * Initialize the common PerfMon capabilities here. */ - x86_pmu.hybrid_pmu = kcalloc(X86_HYBRID_NUM_PMUS, - sizeof(struct x86_hybrid_pmu), - GFP_KERNEL); - if (!x86_pmu.hybrid_pmu) - return -ENOMEM; - static_branch_enable(&perf_is_hybrid); - x86_pmu.num_hybrid_pmus = X86_HYBRID_NUM_PMUS; + intel_pmu_init_hybrid(hybrid_big_small); - x86_pmu.pebs_aliases = NULL; - x86_pmu.pebs_prec_dist = true; - x86_pmu.pebs_block = true; - x86_pmu.flags |= PMU_FL_HAS_RSP_1; - x86_pmu.flags |= PMU_FL_NO_HT_SHARING; - x86_pmu.flags |= PMU_FL_INSTR_LATENCY; - x86_pmu.lbr_pt_coexist = true; x86_pmu.pebs_latency_data = adl_latency_data_small; - x86_pmu.num_topdown_events = 8; - static_call_update(intel_pmu_update_topdown_event, - &adl_update_topdown_event); - static_call_update(intel_pmu_set_topdown_event_period, - &adl_set_topdown_event_period); - - x86_pmu.filter = intel_pmu_filter; x86_pmu.get_event_constraints = adl_get_event_constraints; x86_pmu.hw_config = adl_hw_config; - x86_pmu.limit_period = spr_limit_period; x86_pmu.get_hybrid_cpu_type = adl_get_hybrid_cpu_type; - /* - * The rtm_abort_event is used to check whether to enable GPRs - * for the RTM abort event. Atom doesn't have the RTM abort - * event. There is no harmful to set it in the common - * x86_pmu.rtm_abort_event. 
- */ - x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04); td_attr = adl_hybrid_events_attrs; mem_attr = adl_hybrid_mem_attrs; @@ -6660,9 +6703,7 @@ __init int intel_pmu_init(void) /* Initialize big core specific PerfMon capabilities.*/ pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX]; - pmu->name = "cpu_core"; - pmu->cpu_type = hybrid_big; - pmu->late_ack = true; + intel_pmu_init_glc(&pmu->pmu); if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) { pmu->num_counters = x86_pmu.num_counters + 2; pmu->num_counters_fixed = x86_pmu.num_counters_fixed + 1; @@ -6687,54 +6728,45 @@ __init int intel_pmu_init(void) pmu->unconstrained = (struct event_constraint) __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1, 0, pmu->num_counters, 0, 0); - pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities; - pmu->intel_cap.perf_metrics = 1; - pmu->intel_cap.pebs_output_pt_available = 0; + pmu->extra_regs = intel_glc_extra_regs; + + /* Initialize Atom core specific PerfMon capabilities.*/ + pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX]; + intel_pmu_init_grt(&pmu->pmu); + + x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX; + intel_pmu_pebs_data_source_adl(); + pr_cont("Alderlake Hybrid events, "); + name = "alderlake_hybrid"; + break; + + case INTEL_FAM6_METEORLAKE: + case INTEL_FAM6_METEORLAKE_L: + intel_pmu_init_hybrid(hybrid_big_small); - memcpy(pmu->hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids)); - memcpy(pmu->hw_cache_extra_regs, spr_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs)); - pmu->event_constraints = intel_spr_event_constraints; - pmu->pebs_constraints = intel_spr_pebs_event_constraints; - pmu->extra_regs = intel_spr_extra_regs; + x86_pmu.pebs_latency_data = mtl_latency_data_small; + x86_pmu.get_event_constraints = mtl_get_event_constraints; + x86_pmu.hw_config = adl_hw_config; + + td_attr = adl_hybrid_events_attrs; + mem_attr = mtl_hybrid_mem_attrs; + tsx_attr = adl_hybrid_tsx_attrs; + extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? + mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr; + + /* Initialize big core specific PerfMon capabilities.*/ + pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX]; + intel_pmu_init_glc(&pmu->pmu); + pmu->extra_regs = intel_rwc_extra_regs; /* Initialize Atom core specific PerfMon capabilities.*/ pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX]; - pmu->name = "cpu_atom"; - pmu->cpu_type = hybrid_small; - pmu->mid_ack = true; - pmu->num_counters = x86_pmu.num_counters; - pmu->num_counters_fixed = x86_pmu.num_counters_fixed; - pmu->max_pebs_events = x86_pmu.max_pebs_events; - pmu->unconstrained = (struct event_constraint) - __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1, - 0, pmu->num_counters, 0, 0); - pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities; - pmu->intel_cap.perf_metrics = 0; - pmu->intel_cap.pebs_output_pt_available = 1; - - memcpy(pmu->hw_cache_event_ids, glp_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids)); - memcpy(pmu->hw_cache_extra_regs, tnt_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs)); - pmu->hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; - pmu->event_constraints = intel_slm_event_constraints; - pmu->pebs_constraints = intel_grt_pebs_event_constraints; - pmu->extra_regs = intel_grt_extra_regs; - if (is_mtl(boot_cpu_data.x86_model)) { - x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].extra_regs = intel_gnr_extra_regs; - x86_pmu.pebs_latency_data = mtl_latency_data_small; - extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 
- mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr; - mem_attr = mtl_hybrid_mem_attrs; - intel_pmu_pebs_data_source_mtl(); - x86_pmu.get_event_constraints = mtl_get_event_constraints; - pmu->extra_regs = intel_cmt_extra_regs; - pr_cont("Meteorlake Hybrid events, "); - name = "meteorlake_hybrid"; - } else { - x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX; - intel_pmu_pebs_data_source_adl(); - pr_cont("Alderlake Hybrid events, "); - name = "alderlake_hybrid"; - } + intel_pmu_init_grt(&pmu->pmu); + pmu->extra_regs = intel_cmt_extra_regs; + + intel_pmu_pebs_data_source_mtl(); + pr_cont("Meteorlake Hybrid events, "); + name = "meteorlake_hybrid"; break; default: @@ -6846,9 +6878,6 @@ __init int intel_pmu_init(void) if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS; - if (is_hybrid()) - intel_pmu_check_hybrid_pmus((u64)fixed_mask); - if (x86_pmu.intel_cap.pebs_timing_info) x86_pmu.flags |= PMU_FL_RETIRE_LATENCY; diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index 96fffb2d521d..cbeb6d2bf5b4 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -336,6 +336,9 @@ static int cstate_pmu_event_init(struct perf_event *event) cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX); if (!(pkg_msr_mask & (1 << cfg))) return -EINVAL; + + event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG; + event->hw.event_base = pkg_msr[cfg].msr; cpu = cpumask_any_and(&cstate_pkg_cpu_mask, topology_die_cpumask(event->cpu)); diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index eb8dd8b8a1e8..bf97ab904d40 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -261,7 +261,7 @@ static u64 __adl_latency_data_small(struct perf_event *event, u64 status, { u64 val; - WARN_ON_ONCE(hybrid_pmu(event->pmu)->cpu_type == hybrid_big); + WARN_ON_ONCE(hybrid_pmu(event->pmu)->pmu_type == hybrid_big); dse &= PERF_PEBS_DATA_SOURCE_MASK; val = hybrid_var(event->pmu, pebs_data_source)[dse]; @@ -1058,7 +1058,7 @@ struct event_constraint intel_icl_pebs_event_constraints[] = { EVENT_CONSTRAINT_END }; -struct event_constraint intel_spr_pebs_event_constraints[] = { +struct event_constraint intel_glc_pebs_event_constraints[] = { INTEL_FLAGS_UEVENT_CONSTRAINT(0x100, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */ INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index 42a55794004a..8e2a12235e62 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -736,6 +736,7 @@ static bool topa_table_full(struct topa *topa) /** * topa_insert_pages() - create a list of ToPA tables * @buf: PT buffer being initialized. + * @cpu: CPU on which to allocate. * @gfp: Allocation flags. * * This initializes a list of ToPA tables with entries from @@ -1207,8 +1208,11 @@ static void pt_buffer_fini_topa(struct pt_buffer *buf) /** * pt_buffer_init_topa() - initialize ToPA table for pt buffer * @buf: PT buffer. - * @size: Total size of all regions within this ToPA. + * @cpu: CPU on which to allocate. + * @nr_pages: No. of pages to allocate. * @gfp: Allocation flags. + * + * Return: 0 on success or error code. */ static int pt_buffer_init_topa(struct pt_buffer *buf, int cpu, unsigned long nr_pages, gfp_t gfp) @@ -1281,7 +1285,7 @@ out: /** * pt_buffer_setup_aux() - set up topa tables for a PT buffer - * @cpu: Cpu on which to allocate, -1 means current. 
+ * @event: Performance event * @pages: Array of pointers to buffer pages passed from perf core. * @nr_pages: Number of pages in the buffer. * @snapshot: If this is a snapshot/overwrite counter. diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index c8ba2be7585d..53dd5d495ba6 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -652,10 +652,29 @@ enum { #define PERF_PEBS_DATA_SOURCE_MAX 0x10 #define PERF_PEBS_DATA_SOURCE_MASK (PERF_PEBS_DATA_SOURCE_MAX - 1) +enum hybrid_cpu_type { + HYBRID_INTEL_NONE, + HYBRID_INTEL_ATOM = 0x20, + HYBRID_INTEL_CORE = 0x40, +}; + +enum hybrid_pmu_type { + not_hybrid, + hybrid_small = BIT(0), + hybrid_big = BIT(1), + + hybrid_big_small = hybrid_big | hybrid_small, /* only used for matching */ +}; + +#define X86_HYBRID_PMU_ATOM_IDX 0 +#define X86_HYBRID_PMU_CORE_IDX 1 + +#define X86_HYBRID_NUM_PMUS 2 + struct x86_hybrid_pmu { struct pmu pmu; const char *name; - u8 cpu_type; + enum hybrid_pmu_type pmu_type; cpumask_t supported_cpus; union perf_capabilities intel_cap; u64 intel_ctrl; @@ -721,18 +740,6 @@ extern struct static_key_false perf_is_hybrid; __Fp; \ }) -enum hybrid_pmu_type { - hybrid_big = 0x40, - hybrid_small = 0x20, - - hybrid_big_small = hybrid_big | hybrid_small, -}; - -#define X86_HYBRID_PMU_ATOM_IDX 0 -#define X86_HYBRID_PMU_CORE_IDX 1 - -#define X86_HYBRID_NUM_PMUS 2 - /* * struct x86_pmu - generic x86 pmu */ @@ -940,7 +947,7 @@ struct x86_pmu { */ int num_hybrid_pmus; struct x86_hybrid_pmu *hybrid_pmu; - u8 (*get_hybrid_cpu_type) (void); + enum hybrid_cpu_type (*get_hybrid_cpu_type) (void); }; struct x86_perf_task_context_opt { @@ -1521,7 +1528,7 @@ extern struct event_constraint intel_skl_pebs_event_constraints[]; extern struct event_constraint intel_icl_pebs_event_constraints[]; -extern struct event_constraint intel_spr_pebs_event_constraints[]; +extern struct event_constraint intel_glc_pebs_event_constraints[]; struct event_constraint *intel_pebs_constraints(struct perf_event *event); diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c index 1579429846cc..8d98d468b976 100644 --- a/arch/x86/events/rapl.c +++ b/arch/x86/events/rapl.c @@ -115,7 +115,7 @@ struct rapl_pmu { struct rapl_pmus { struct pmu pmu; unsigned int maxdie; - struct rapl_pmu *pmus[]; + struct rapl_pmu *pmus[] __counted_by(maxdie); }; enum rapl_unit_quirk { @@ -179,15 +179,11 @@ static u64 rapl_event_update(struct perf_event *event) s64 delta, sdelta; int shift = RAPL_CNTR_WIDTH; -again: prev_raw_count = local64_read(&hwc->prev_count); - rdmsrl(event->hw.event_base, new_raw_count); - - if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, - new_raw_count) != prev_raw_count) { - cpu_relax(); - goto again; - } + do { + rdmsrl(event->hw.event_base, new_raw_count); + } while (!local64_try_cmpxchg(&hwc->prev_count, + &prev_raw_count, new_raw_count)); /* * Now we have the new raw value and have updated the prev @@ -537,11 +533,11 @@ static struct perf_msr intel_rapl_spr_msrs[] = { * - want to use same event codes across both architectures */ static struct perf_msr amd_rapl_msrs[] = { - [PERF_RAPL_PP0] = { 0, &rapl_events_cores_group, 0, false, 0 }, + [PERF_RAPL_PP0] = { 0, &rapl_events_cores_group, NULL, false, 0 }, [PERF_RAPL_PKG] = { MSR_AMD_PKG_ENERGY_STATUS, &rapl_events_pkg_group, test_msr, false, RAPL_MSR_MASK }, - [PERF_RAPL_RAM] = { 0, &rapl_events_ram_group, 0, false, 0 }, - [PERF_RAPL_PP1] = { 0, &rapl_events_gpu_group, 0, false, 0 }, - [PERF_RAPL_PSYS] = { 0, &rapl_events_psys_group, 0, false, 0 }, + 
+	[PERF_RAPL_RAM]  = { 0, &rapl_events_ram_group,  NULL, false, 0 },
+	[PERF_RAPL_PP1]  = { 0, &rapl_events_gpu_group,  NULL, false, 0 },
+	[PERF_RAPL_PSYS] = { 0, &rapl_events_psys_group, NULL, false, 0 },
 };
 
 static int rapl_cpu_offline(unsigned int cpu)
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 61705dd0e94a..e3fa9cecd599 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -638,12 +638,16 @@
 #define MSR_AMD64_LBR_SELECT			0xc000010e
 
 /* Zen4 */
-#define MSR_ZEN4_BP_CFG			0xc001102e
+#define MSR_ZEN4_BP_CFG				0xc001102e
 #define MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT	5
 
+/* Fam 19h MSRs */
+#define MSR_F19H_UMC_PERF_CTL			0xc0010800
+#define MSR_F19H_UMC_PERF_CTR			0xc0010801
+
 /* Zen 2 */
-#define MSR_ZEN2_SPECTRAL_CHICKEN	0xc00110e3
-#define MSR_ZEN2_SPECTRAL_CHICKEN_BIT	BIT_ULL(1)
+#define MSR_ZEN2_SPECTRAL_CHICKEN		0xc00110e3
+#define MSR_ZEN2_SPECTRAL_CHICKEN_BIT		BIT_ULL(1)
 
 /* Fam 17h MSRs */
 #define MSR_F17H_IRPERF				0xc00000e9
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 85a9fd5a3ec3..2618ec7c3d1d 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -112,6 +112,13 @@
 	(AMD64_PERFMON_V2_EVENTSEL_EVENT_NB	|	\
 	 AMD64_PERFMON_V2_EVENTSEL_UMASK_NB)
 
+#define AMD64_PERFMON_V2_ENABLE_UMC			BIT_ULL(31)
+#define AMD64_PERFMON_V2_EVENTSEL_EVENT_UMC		GENMASK_ULL(7, 0)
+#define AMD64_PERFMON_V2_EVENTSEL_RDWRMASK_UMC		GENMASK_ULL(9, 8)
+#define AMD64_PERFMON_V2_RAW_EVENT_MASK_UMC	\
+	(AMD64_PERFMON_V2_EVENTSEL_EVENT_UMC	|	\
+	 AMD64_PERFMON_V2_EVENTSEL_RDWRMASK_UMC)
+
 #define AMD64_NUM_COUNTERS				4
 #define AMD64_NUM_COUNTERS_CORE				6
 #define AMD64_NUM_COUNTERS_NB				4
@@ -232,6 +239,8 @@ union cpuid_0x80000022_ebx {
 		unsigned int	lbr_v2_stack_sz:6;
 		/* Number of Data Fabric Counters */
 		unsigned int	num_df_pmc:6;
+		/* Number of Unified Memory Controller Counters */
+		unsigned int	num_umc_pmc:6;
 	} split;
 	unsigned int	full;
 };
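
A few illustrative notes on selected hunks follow; each is a stand-alone user-space C sketch, not kernel code, and any names not present in the diff are invented for illustration.

The new intel_pmu_init_hybrid() in events/intel/core.c sizes x86_pmu.hybrid_pmu with hweight_long() and then walks the requested PMU types with for_each_set_bit(), taking each sub-PMU's name from intel_hybrid_pmu_type_map. A minimal sketch of that selection walk, with the kernel helpers replaced by plain loops:

#include <stdio.h>

enum hybrid_pmu_type {
	hybrid_small = 1 << 0,
	hybrid_big   = 1 << 1,
	hybrid_big_small = hybrid_big | hybrid_small,
};

/* Same shape as intel_hybrid_pmu_type_map in the diff: bit number == map index. */
static const struct { enum hybrid_pmu_type id; const char *name; } type_map[] = {
	{ hybrid_small, "cpu_atom" },
	{ hybrid_big,   "cpu_core" },
};

/* Walk the set bits of 'pmus' in map order, as for_each_set_bit() would. */
static void init_hybrid(unsigned long pmus)
{
	int idx = 0;

	for (unsigned int bit = 0; bit < sizeof(type_map) / sizeof(type_map[0]); bit++) {
		if (!(pmus & (1UL << bit)))
			continue;
		printf("hybrid_pmu[%d]: type %#x -> %s\n",
		       idx++, (unsigned int)type_map[bit].id, type_map[bit].name);
	}
}

int main(void)
{
	init_hybrid(hybrid_big_small);	/* registers cpu_atom, then cpu_core */
	return 0;
}

Meteor Lake reuses exactly this path, which is why its model IDs move out of the shared Alder Lake case into their own case block above.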
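
In perf_event.h the per-PMU u8 cpu_type becomes enum hybrid_cpu_type, whose HYBRID_INTEL_ATOM (0x20) and HYBRID_INTEL_CORE (0x40) values are the core-type codes that hybrid Intel parts report in bits 31:24 of CPUID leaf 0x1A EAX. A user-space check of the core type of whichever CPU the thread happens to be running on (pin the thread first if a specific core matters) might look like:

#include <cpuid.h>
#include <stdio.h>

/* Values match enum hybrid_cpu_type in the diff (CPUID.1A:EAX[31:24]). */
#define CORE_TYPE_ATOM	0x20
#define CORE_TYPE_CORE	0x40

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 0x1A: Hybrid Information Enumeration. EAX is 0 on non-hybrid parts. */
	if (!__get_cpuid_count(0x1a, 0, &eax, &ebx, &ecx, &edx) || !eax) {
		puts("not a hybrid part (or leaf 0x1A unsupported)");
		return 0;
	}

	switch (eax >> 24) {
	case CORE_TYPE_ATOM:
		puts("running on an Atom (E-core)");
		break;
	case CORE_TYPE_CORE:
		puts("running on a Core (P-core)");
		break;
	default:
		printf("unknown core type %#x\n", eax >> 24);
	}
	return 0;
}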
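
In events/rapl.c, rapl_event_update() drops the open-coded local64_cmpxchg()/cpu_relax() retry loop in favour of local64_try_cmpxchg(), which writes the currently stored value back into the expected-value variable when it fails. The sketch below is a user-space analogue of that pattern using C11 atomics rather than the kernel's local64_t API; the MSR read is faked, and the sign-extension by counter width done by the real function is omitted:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the per-event previous-count field (local64_t in the kernel). */
static _Atomic uint64_t prev_count;

/* Simulated hardware counter read (rdmsrl() of the energy-status MSR in the kernel). */
static uint64_t read_hw_counter(void)
{
	static uint64_t fake = 1000;
	return fake += 37;
}

static uint64_t event_update(void)
{
	uint64_t prev = atomic_load(&prev_count);
	uint64_t new;

	/*
	 * On failure, compare_exchange refreshes 'prev' with the value it
	 * found, so the loop simply re-reads the counter and retries; that
	 * is the shape local64_try_cmpxchg() gives the kernel function.
	 */
	do {
		new = read_hw_counter();
	} while (!atomic_compare_exchange_weak(&prev_count, &prev, new));

	return new - prev;	/* delta since the previous update */
}

int main(void)
{
	printf("delta = %llu\n", (unsigned long long)event_update());
	return 0;
}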
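
The same file annotates the flexible pmus[] array of struct rapl_pmus with __counted_by(maxdie), telling FORTIFY_SOURCE and the array-bounds sanitizer which member holds the run-time element count. A stand-alone sketch of the annotation with a no-op fallback for toolchains that lack it; the struct and field names here are made up:

#include <stdio.h>
#include <stdlib.h>

#ifdef __has_attribute
# if __has_attribute(counted_by)
#  define counted_by(member) __attribute__((counted_by(member)))
# endif
#endif
#ifndef counted_by
# define counted_by(member)	/* older compilers: annotation is a no-op */
#endif

struct pmu_group {
	unsigned int maxdie;			/* element count used for bounds checks */
	long *pmus[] counted_by(maxdie);	/* flexible array, sized at allocation  */
};

int main(void)
{
	unsigned int n = 4;
	struct pmu_group *g = malloc(sizeof(*g) + n * sizeof(g->pmus[0]));

	if (!g)
		return 1;
	g->maxdie = n;			/* set the count before touching pmus[] */
	for (unsigned int i = 0; i < n; i++)
		g->pmus[i] = NULL;
	printf("allocated %u slots\n", g->maxdie);
	free(g);
	return 0;
}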
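
perf_event.h also gains event-select masks for the new AMD UMC PMU: bits 7:0 select the event, bits 9:8 carry a read/write mask, and bit 31 enables the counter. The helper below shows how a raw config would be reduced to those architecturally defined bits; umc_perf_ctl() and the short UMC_* names are invented, only the bit positions come from the AMD64_PERFMON_V2_*_UMC definitions in the diff:

#include <stdint.h>
#include <stdio.h>

#define UMC_ENABLE		(1ULL << 31)	/* AMD64_PERFMON_V2_ENABLE_UMC        */
#define UMC_EVENT_MASK		0x00ffULL	/* GENMASK_ULL(7, 0): event select    */
#define UMC_RDWR_MASK		0x0300ULL	/* GENMASK_ULL(9, 8): read/write mask */
#define UMC_RAW_EVENT_MASK	(UMC_EVENT_MASK | UMC_RDWR_MASK)

/* Keep only the defined event bits of a raw config, then set the enable bit. */
static uint64_t umc_perf_ctl(uint64_t raw_config)
{
	return (raw_config & UMC_RAW_EVENT_MASK) | UMC_ENABLE;
}

int main(void)
{
	/* e.g. event 0x0a with both read and write requests selected */
	printf("PERF_CTL = %#llx\n", (unsigned long long)umc_perf_ctl(0x030a));
	return 0;
}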
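
Finally, union cpuid_0x80000022_ebx grows a num_umc_pmc field, so the number of UMC counters is enumerated through CPUID leaf 0x80000022 (AMD Extended Performance Monitoring and Debug) alongside the core and Data Fabric counters. A user-space decode of that leaf; the bitfield layout mirrors the header, with the num_core_pmc field above the hunk context assumed from the existing definition:

#include <cpuid.h>
#include <stdio.h>

/* Mirrors union cpuid_0x80000022_ebx from the diff (low 22 bits). */
union ext_perfmon_ebx {
	struct {
		unsigned int num_core_pmc:4;
		unsigned int lbr_v2_stack_sz:6;
		unsigned int num_df_pmc:6;
		unsigned int num_umc_pmc:6;	/* new in this series */
	} split;
	unsigned int full;
};

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	union ext_perfmon_ebx b;

	if (!__get_cpuid_count(0x80000022, 0, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0x80000022 not supported");
		return 0;
	}

	b.full = ebx;
	printf("core PMCs: %u, DF PMCs: %u, UMC PMCs: %u\n",
	       b.split.num_core_pmc, b.split.num_df_pmc, b.split.num_umc_pmc);
	return 0;
}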