author    | Linus Torvalds | 2022-04-03 12:26:01 -0700
committer | Linus Torvalds | 2022-04-03 12:26:01 -0700
commit    | 09bb8856d4a7cf3128dedd79cd07d75bbf4a9f04
tree      | d0ae1e3042793fe397ad65f5fdb05bde32de31e9 /kernel
parent    | 34a53ff911eb30629baad788fbed892f711bdd3e
parent    | 5cfff569cab8bf544bab62c911c5d6efd5af5e05
Merge tag 'trace-v5.18-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull more tracing updates from Steven Rostedt:
- Rename the staging files to give them some meaning; just stage1,
  stage2, etc., does not show what they are for
- Check for NULL from allocation in bootconfig (a sketch of the
  pattern follows this list)
- Hold event_mutex across the dyn_event call in user events (a
  condensed sketch follows the diff below)
- Mark user events as BROKEN (while the API is reworked)
- Remove eBPF updates from user events
- Remove user events from the uapi header to keep it from being installed
- Make ftrace_graph_is_dead() inline, as it is called from hot paths,
  and convert it into a static branch (see the sketch after this list)
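
The bootconfig fix itself lands in fs/proc/bootconfig.c, so this
kernel/-limited diffstat does not show it. A minimal sketch of the
pattern, assuming the allocation in question is the key scratch buffer
(the variable and constant names here are assumptions, not confirmed by
this page):

	/* Sketch: bail out with -ENOMEM instead of dereferencing NULL */
	key = kzalloc(XBC_KEYLEN_MAX, GFP_KERNEL);
	if (!key)
		return -ENOMEM;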
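
For the static-branch conversion, a minimal sketch of the technique,
assuming the new inline wrapper in include/linux/ftrace.h mirrors the
DEFINE_STATIC_KEY_FALSE() visible in the diff below (the header side is
outside this kernel/-limited diffstat):

	#include <linux/jump_label.h>

	/* Defined once in fgraph.c; declared in a header for inlining. */
	DEFINE_STATIC_KEY_FALSE(kill_ftrace_graph);

	/* Hot path: compiles to a patched nop until the key is enabled,
	 * so the check costs nothing in the common case. */
	static inline bool ftrace_graph_is_dead(void)
	{
		return static_branch_unlikely(&kill_ftrace_graph);
	}

	/* Slow path, on severe error: rewrites the branch sites at runtime. */
	void ftrace_graph_stop(void)
	{
		static_branch_enable(&kill_ftrace_graph);
	}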
* tag 'trace-v5.18-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
tracing: Move user_events.h temporarily out of include/uapi
ftrace: Make ftrace_graph_is_dead() a static branch
tracing: Set user_events to BROKEN
tracing/user_events: Remove eBPF interfaces
tracing/user_events: Hold event_mutex during dyn_event_add
proc: bootconfig: Add null pointer check
tracing: Rename the staging files for trace_events
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/trace/fgraph.c            | 17
-rw-r--r-- | kernel/trace/trace_events_user.c | 81
2 files changed, 10 insertions, 88 deletions
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index 19028e072cdb..8f4fb328133a 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -7,6 +7,7 @@
  *
  * Highly modified by Steven Rostedt (VMware).
  */
+#include <linux/jump_label.h>
 #include <linux/suspend.h>
 #include <linux/ftrace.h>
 #include <linux/slab.h>
@@ -23,25 +24,13 @@
 #define ASSIGN_OPS_HASH(opsname, val)
 #endif

-static bool kill_ftrace_graph;
+DEFINE_STATIC_KEY_FALSE(kill_ftrace_graph);
 int ftrace_graph_active;

 /* Both enabled by default (can be cleared by function_graph tracer flags */
 static bool fgraph_sleep_time = true;

 /**
- * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
- *
- * ftrace_graph_stop() is called when a severe error is detected in
- * the function graph tracing. This function is called by the critical
- * paths of function graph to keep those paths from doing any more harm.
- */
-bool ftrace_graph_is_dead(void)
-{
-	return kill_ftrace_graph;
-}
-
-/**
  * ftrace_graph_stop - set to permanently disable function graph tracing
  *
  * In case of an error int function graph tracing, this is called
@@ -51,7 +40,7 @@ bool ftrace_graph_is_dead(void)
  */
 void ftrace_graph_stop(void)
 {
-	kill_ftrace_graph = true;
+	static_branch_enable(&kill_ftrace_graph);
 }

 /* Add a function return address to the trace stack on thread info.*/
diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c
index 68d62bfac12f..706e1686b5eb 100644
--- a/kernel/trace/trace_events_user.c
+++ b/kernel/trace/trace_events_user.c
@@ -47,9 +47,6 @@
 #define MAX_FIELD_ARRAY_SIZE 1024
 #define MAX_FIELD_ARG_NAME 256

-#define MAX_BPF_COPY_SIZE PAGE_SIZE
-#define MAX_STACK_BPF_DATA 512
-
 static char *register_page_data;

 static DEFINE_MUTEX(reg_mutex);
@@ -410,19 +407,6 @@ parse:
			   type[0] != 'u', FILTER_OTHER);
 }

-static void user_event_parse_flags(struct user_event *user, char *flags)
-{
-	char *flag;
-
-	if (flags == NULL)
-		return;
-
-	while ((flag = strsep(&flags, ",")) != NULL) {
-		if (strcmp(flag, "BPF_ITER") == 0)
-			user->flags |= FLAG_BPF_ITER;
-	}
-}
-
 static int user_event_parse_fields(struct user_event *user, char *args)
 {
	char *field;
@@ -718,64 +702,14 @@ discard:
 }

 #ifdef CONFIG_PERF_EVENTS
-static void user_event_bpf(struct user_event *user, struct iov_iter *i)
-{
-	struct user_bpf_context context;
-	struct user_bpf_iter bpf_i;
-	char fast_data[MAX_STACK_BPF_DATA];
-	void *temp = NULL;
-
-	if ((user->flags & FLAG_BPF_ITER) && iter_is_iovec(i)) {
-		/* Raw iterator */
-		context.data_type = USER_BPF_DATA_ITER;
-		context.data_len = i->count;
-		context.iter = &bpf_i;
-
-		bpf_i.iov_offset = i->iov_offset;
-		bpf_i.iov = i->iov;
-		bpf_i.nr_segs = i->nr_segs;
-	} else if (i->nr_segs == 1 && iter_is_iovec(i)) {
-		/* Single buffer from user */
-		context.data_type = USER_BPF_DATA_USER;
-		context.data_len = i->count;
-		context.udata = i->iov->iov_base + i->iov_offset;
-	} else {
-		/* Multi buffer from user */
-		struct iov_iter copy = *i;
-		size_t copy_size = min_t(size_t, i->count, MAX_BPF_COPY_SIZE);
-
-		context.data_type = USER_BPF_DATA_KERNEL;
-		context.kdata = fast_data;
-
-		if (unlikely(copy_size > sizeof(fast_data))) {
-			temp = kmalloc(copy_size, GFP_NOWAIT);
-
-			if (temp)
-				context.kdata = temp;
-			else
-				copy_size = sizeof(fast_data);
-		}
-
-		context.data_len = copy_nofault(context.kdata,
-						copy_size, &copy);
-	}
-
-	trace_call_bpf(&user->call, &context);
-
-	kfree(temp);
-}
-
 /*
- * Writes the user supplied payload out to perf ring buffer or eBPF program.
+ * Writes the user supplied payload out to perf ring buffer.
  */
 static void user_event_perf(struct user_event *user, struct iov_iter *i,
			    void *tpdata, bool *faulted)
 {
	struct hlist_head *perf_head;

-	if (bpf_prog_array_valid(&user->call))
-		user_event_bpf(user, i);
-
	perf_head = this_cpu_ptr(user->call.perf_events);

	if (perf_head && !hlist_empty(perf_head)) {
@@ -1141,8 +1075,6 @@ static int user_event_parse(char *name, char *args, char *flags,

	user->tracepoint.name = name;

-	user_event_parse_flags(user, flags);
-
	ret = user_event_parse_fields(user, args);

	if (ret)
@@ -1170,11 +1102,11 @@ static int user_event_parse(char *name, char *args, char *flags,
 #endif

	mutex_lock(&event_mutex);
+
	ret = user_event_trace_register(user);
-	mutex_unlock(&event_mutex);

	if (ret)
-		goto put_user;
+		goto put_user_lock;

	user->index = index;

@@ -1186,8 +1118,12 @@ static int user_event_parse(char *name, char *args, char *flags,
	set_bit(user->index, page_bitmap);
	hash_add(register_table, &user->node, key);

+	mutex_unlock(&event_mutex);
+
	*newuser = user;
	return 0;
+put_user_lock:
+	mutex_unlock(&event_mutex);
 put_user:
	user_event_destroy_fields(user);
	user_event_destroy_validators(user);
@@ -1580,9 +1516,6 @@ static int user_seq_show(struct seq_file *m, void *p)
			busy++;
		}

-		if (flags & FLAG_BPF_ITER)
-			seq_puts(m, " FLAG:BPF_ITER");
-
		seq_puts(m, "\n");
		active++;
	}
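
Condensed from the user_event_parse() hunks above, the locking change
keeps event_mutex held until the event is published, with a new label
to unlock on the error path (a sketch assembled from the diff;
surrounding code elided):

	mutex_lock(&event_mutex);

	ret = user_event_trace_register(user);

	if (ret)
		goto put_user_lock;

	/* ... index assignment, set_bit(), hash_add() happen locked ... */

	mutex_unlock(&event_mutex);

	*newuser = user;
	return 0;

	put_user_lock:
		mutex_unlock(&event_mutex);
	put_user:
		/* destroy fields and validators, free the event */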