path: root/kernel/events
author    Namhyung Kim <namhyung@kernel.org>       2022-09-08 14:41:03 -0700
committer Peter Zijlstra <peterz@infradead.org>    2022-09-13 15:03:22 +0200
commit    16817ad7e8b31728b44ff9f17d8d894ed8a450d0 (patch)
tree      5d73d416cb0e2fe4b2f8f4c1a283ef3fff01a90b /kernel/events
parent    3749d33e510c3dc695b3a5886b706310890d7ebd (diff)
perf/bpf: Always use perf callchains if exist
If the perf_event has PERF_SAMPLE_CALLCHAIN, BPF can use it for the stack
trace. The problematic cases like PEBS and IBS are already handled in the
PMU driver, which fills the callchain info into the sample data. For the
others, we can call perf_callchain() before invoking the BPF handler.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220908214104.3851807-2-namhyung@kernel.org
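[Editor's note: for illustration only, not part of this commit. A minimal
sketch of the BPF side this change serves: using bpf_get_stackid() (or
bpf_get_stack()) in a perf_event program is what sets prog->call_get_stack,
the flag the new code below checks before bpf_prog_run(). Map and function
names here are hypothetical.]

#include <linux/bpf.h>
#include <linux/perf_event.h>
#include <linux/bpf_perf_event.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, PERF_MAX_STACK_DEPTH * sizeof(__u64));
	__uint(max_entries, 1024);
} stackmap SEC(".maps");

SEC("perf_event")
int on_sample(struct bpf_perf_event_data *ctx)
{
	/* With this patch, if the event requested PERF_SAMPLE_CALLCHAIN
	 * but the PMU did not fill it in (no PEBS/IBS), the kernel calls
	 * perf_callchain() first, so this helper sees a valid callchain. */
	bpf_get_stackid(ctx, &stackmap, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";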
Diffstat (limited to 'kernel/events')
-rw-r--r--  kernel/events/core.c  12
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index c98ecf3e09ba..7da5515082f9 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -10000,8 +10000,16 @@ static void bpf_overflow_handler(struct perf_event *event,
 		goto out;
 	rcu_read_lock();
 	prog = READ_ONCE(event->prog);
-	if (prog)
+	if (prog) {
+		if (prog->call_get_stack &&
+		    (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) &&
+		    !(data->sample_flags & PERF_SAMPLE_CALLCHAIN)) {
+			data->callchain = perf_callchain(event, regs);
+			data->sample_flags |= PERF_SAMPLE_CALLCHAIN;
+		}
+
 		ret = bpf_prog_run(prog, &ctx);
+	}
 	rcu_read_unlock();
 out:
 	__this_cpu_dec(bpf_prog_active);
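[Editor's note: a hypothetical userspace sketch, not part of this patch,
showing the setup the hunk above serves: a sampling event that requests
PERF_SAMPLE_CALLCHAIN, to which a BPF handler is then attached via libbpf's
bpf_program__attach_perf_event(). Names and values are illustrative.]

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <bpf/libbpf.h>

/* Open a CPU-cycles sampling event that asks for callchains, so the
 * precise_ip sanity check in the next hunk passes. */
static int open_sampling_event(void)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.size		= sizeof(attr),
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.freq		= 1,
		.sample_freq	= 997,
		.sample_type	= PERF_SAMPLE_CALLCHAIN,
	};
	return syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
}

/* Usage (error handling omitted):
 *	int fd = open_sampling_event();
 *	struct bpf_link *link = bpf_program__attach_perf_event(prog, fd);
 */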
@@ -10027,7 +10035,7 @@ static int perf_event_set_bpf_handler(struct perf_event *event,
 
 	if (event->attr.precise_ip &&
 	    prog->call_get_stack &&
-	    (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY) ||
+	    (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) ||
 	     event->attr.exclude_callchain_kernel ||
 	     event->attr.exclude_callchain_user)) {
 		/*