| | | |
|---|---|---|
| author | Daniel T. Lee | 2023-08-18 18:01:17 +0900 |
| committer | Greg Kroah-Hartman | 2023-09-13 09:42:34 +0200 |
| commit | 7984c381bbc1c4522d23a7c08886340376a6c5c5 (patch) | |
| tree | eb6bb9b9ceb1bffe799717242303591120f3b060 /samples | |
| parent | c813db76bc1531ae33d9d748ef9d211d802ae580 (diff) | |
samples/bpf: fix broken map lookup probe
[ Upstream commit d93a7cf6ca2cfcd7de5d06f753ce8d5e863316ac ]
In commit 7c4cd051add3 ("bpf: Fix syscall's stackmap lookup
potential deadlock"), a potential deadlock issue was addressed, which
resulted in *_map_lookup_elem no longer triggering BPF programs
(prior to the lookup, bpf_disable_instrumentation() is used).

To resolve the broken map lookup probe on "htab_map_lookup_elem",
this commit introduces an alternative approach: it utilizes
"bpf_map_copy_value" and applies a filter on map_type specifically
for the hash table.
Signed-off-by: Daniel T. Lee <danieltimlee@gmail.com>
Fixes: 7c4cd051add3 ("bpf: Fix syscall's stackmap lookup potential deadlock")
Link: https://lore.kernel.org/r/20230818090119.477441-8-danieltimlee@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Diffstat (limited to 'samples')
-rw-r--r-- | samples/bpf/tracex6_kern.c | 17 |
1 file changed, 15 insertions(+), 2 deletions(-)
```diff
diff --git a/samples/bpf/tracex6_kern.c b/samples/bpf/tracex6_kern.c
index acad5712d8b4..fd602c2774b8 100644
--- a/samples/bpf/tracex6_kern.c
+++ b/samples/bpf/tracex6_kern.c
@@ -2,6 +2,8 @@
 #include <linux/version.h>
 #include <uapi/linux/bpf.h>
 #include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
 
 struct {
 	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
@@ -45,13 +47,24 @@ int bpf_prog1(struct pt_regs *ctx)
 	return 0;
 }
 
-SEC("kprobe/htab_map_lookup_elem")
-int bpf_prog2(struct pt_regs *ctx)
+/*
+ * Since *_map_lookup_elem can't be expected to trigger bpf programs
+ * due to potential deadlocks (bpf_disable_instrumentation), this bpf
+ * program will be attached to bpf_map_copy_value (which is called
+ * from map_lookup_elem) and will only filter the hashtable type.
+ */
+SEC("kprobe/bpf_map_copy_value")
+int BPF_KPROBE(bpf_prog2, struct bpf_map *map)
 {
 	u32 key = bpf_get_smp_processor_id();
 	struct bpf_perf_event_value *val, buf;
+	enum bpf_map_type type;
 	int error;
 
+	type = BPF_CORE_READ(map, map_type);
+	if (type != BPF_MAP_TYPE_HASH)
+		return 0;
+
 	error = bpf_perf_event_read_value(&counters, key, &buf, sizeof(buf));
 	if (error)
 		return 0;
```
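The patch only touches the BPF program itself; it still has to be loaded and attached from user space. Below is a minimal sketch, not part of this patch and not the sample's actual loader, of how a kprobe program such as bpf_prog2 could be attached with libbpf. The object file name "tracex6_kern.o" and the simplified flow are assumptions for illustration.

```c
/*
 * Illustrative only: a minimal libbpf loader for the kprobe program
 * above. The object file name and overall flow are assumptions, not
 * the sample's real user-space counterpart.
 */
#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	struct bpf_link *link;
	int err = 1;

	/* Open and load the compiled BPF object (assumed file name). */
	obj = bpf_object__open_file("tracex6_kern.o", NULL);
	if (libbpf_get_error(obj)) {
		fprintf(stderr, "failed to open BPF object\n");
		return err;
	}
	if (bpf_object__load(obj)) {
		fprintf(stderr, "failed to load BPF object\n");
		goto cleanup;
	}

	/* Find the program defined with BPF_KPROBE(bpf_prog2, ...). */
	prog = bpf_object__find_program_by_name(obj, "bpf_prog2");
	if (!prog) {
		fprintf(stderr, "bpf_prog2 not found\n");
		goto cleanup;
	}

	/* SEC("kprobe/bpf_map_copy_value") names the symbol to probe. */
	link = bpf_program__attach(prog);
	if (libbpf_get_error(link)) {
		fprintf(stderr, "failed to attach kprobe\n");
		goto cleanup;
	}

	/* ... trigger hash-map lookups and read out results here ... */

	bpf_link__destroy(link);
	err = 0;
cleanup:
	bpf_object__close(obj);
	return err;
}
```

Because the SEC("kprobe/bpf_map_copy_value") annotation already names the probed symbol, bpf_program__attach() can resolve the attach point on its own; the map_type check inside the BPF program then keeps the probe from reacting to lookups on every map type that passes through bpf_map_copy_value.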