Diffstat (limited to 'kernel/trace')
35 files changed, 472 insertions, 423 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 9a27f146fa1c..c042a455afc6 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -47,6 +47,11 @@ config HAVE_FENTRY
 	help
 	  Arch supports the gcc options -pg with -mfentry
 
+config HAVE_NOP_MCOUNT
+	bool
+	help
+	  Arch supports the gcc options -pg with -mrecord-mcount and -mnop-mcount
+
 config HAVE_C_RECORDMCOUNT
 	bool
 	help
@@ -82,6 +87,15 @@ config RING_BUFFER_ALLOW_SWAP
 	 Allow the use of ring_buffer_swap_cpu.
 	 Adds a very slight overhead to tracing when enabled.
 
+config PREEMPTIRQ_TRACEPOINTS
+	bool
+	depends on TRACE_PREEMPT_TOGGLE || TRACE_IRQFLAGS
+	select TRACING
+	default y
+	help
+	  Create preempt/irq toggle tracepoints if needed, so that other parts
+	  of the kernel can use them to generate or add hooks to them.
+
 # All tracer options should select GENERIC_TRACER. For those options that are
 # enabled by all tracers (context switch and event tracer) they select TRACING.
 # This allows those options to appear when no other tracer is selected. But the
@@ -155,18 +169,20 @@ config FUNCTION_GRAPH_TRACER
 	  the return value. This is done by setting the current return
 	  address on the current task structure into a stack of calls.
 
+config TRACE_PREEMPT_TOGGLE
+	bool
+	help
+	  Enables hooks which will be called when preemption is first disabled,
+	  and last enabled.
+
 config PREEMPTIRQ_EVENTS
 	bool "Enable trace events for preempt and irq disable/enable"
 	select TRACE_IRQFLAGS
-	depends on DEBUG_PREEMPT || !PROVE_LOCKING
-	depends on TRACING
+	select TRACE_PREEMPT_TOGGLE if PREEMPT
+	select GENERIC_TRACER
 	default n
 	help
 	  Enable tracing of disable and enable events for preemption and irqs.
-	  For tracing preempt disable/enable events, DEBUG_PREEMPT must be
-	  enabled. For tracing irq disable/enable events, PROVE_LOCKING must
-	  be disabled.
 
 config IRQSOFF_TRACER
 	bool "Interrupts-off Latency Tracer"
@@ -203,6 +219,7 @@ config PREEMPT_TRACER
 	select RING_BUFFER_ALLOW_SWAP
 	select TRACER_SNAPSHOT
 	select TRACER_SNAPSHOT_PER_CPU_SWAP
+	select TRACE_PREEMPT_TOGGLE
 	help
 	  This option measures the time spent in preemption-off critical
 	  sections, with microsecond accuracy.
@@ -456,6 +473,26 @@ config KPROBE_EVENTS
 	  This option is also required by perf-probe subcommand of perf tools.
 	  If you want to use perf tools, this option is strongly recommended.
 
+config KPROBE_EVENTS_ON_NOTRACE
+	bool "Do NOT protect notrace function from kprobe events"
+	depends on KPROBE_EVENTS
+	depends on KPROBES_ON_FTRACE
+	default n
+	help
+	  This is only for the developers who want to debug ftrace itself
+	  using kprobe events.
+
+	  If kprobes can use ftrace instead of breakpoint, ftrace related
+	  functions are protected from kprobe-events to prevent an infinite
+	  recursion or any unexpected execution path which leads to a kernel
+	  crash.
+
+	  This option disables such protection and allows you to put kprobe
+	  events on ftrace functions for debugging ftrace by itself.
+	  Note that this might let you shoot yourself in the foot.
+
+	  If unsure, say N.
+
 config UPROBE_EVENTS
 	bool "Enable uprobes-based dynamic events"
 	depends on ARCH_SUPPORTS_UPROBES
@@ -687,6 +724,21 @@ config RING_BUFFER_STARTUP_TEST
 
 	 If unsure, say N
 
+config PREEMPTIRQ_DELAY_TEST
+	tristate "Preempt / IRQ disable delay thread to test latency tracers"
+	depends on m
+	help
+	  Select this option to build a test module that can help test latency
+	  tracers by executing a preempt or irq disable section with a user
+	  configurable delay. The module busy waits for the duration of the
+	  critical section.
+
+	  For example, the following invocation forces a one-time irq-disabled
+	  critical section for 500us:
+	  modprobe preemptirq_delay_test test_mode=irq delay=500000
+
+	  If unsure, say N.
+
 config TRACE_EVAL_MAP_FILE
 	bool "Show eval mappings for trace events"
 	depends on TRACING
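The new PREEMPTIRQ_TRACEPOINTS option exists so that code elsewhere in the kernel can attach probes to the preempt/irq toggle events. A minimal sketch of such a consumer, assuming CONFIG_PREEMPTIRQ_TRACEPOINTS=y and the (ip, parent_ip) tracepoint prototype from <trace/events/preemptirq.h>; this module is hypothetical and not part of the diff:

	/* Hypothetical built-in consumer: count irqs-off critical sections. */
	#include <linux/atomic.h>
	#include <linux/init.h>
	#include <linux/tracepoint.h>
	#include <trace/events/preemptirq.h>

	static atomic_long_t irqoff_sections;

	static void probe_irq_disable(void *data, unsigned long ip,
				      unsigned long parent_ip)
	{
		atomic_long_inc(&irqoff_sections);	/* runs at every irqs-off entry */
	}

	static int __init irqoff_count_init(void)
	{
		/* tracepoint_probe_register() semantics: 0 on success */
		return register_trace_irq_disable(probe_irq_disable, NULL);
	}
	late_initcall(irqoff_count_init);

Because the probe fires with interrupts already disabled, anything hooked this way has to stay lock-free and short.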
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index e2538c7638d4..98d53b39a8ee 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -13,6 +13,11 @@ obj-y += trace_selftest_dynamic.o
 endif
 endif
 
+ifdef CONFIG_FTRACE_STARTUP_TEST
+CFLAGS_trace_kprobe_selftest.o = $(CC_FLAGS_FTRACE)
+obj-$(CONFIG_KPROBE_EVENTS) += trace_kprobe_selftest.o
+endif
+
 # If unlikely tracing is enabled, do not trace these files
 ifdef CONFIG_TRACING_BRANCHES
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
@@ -33,9 +38,10 @@ obj-$(CONFIG_TRACING) += trace_seq.o
 obj-$(CONFIG_TRACING) += trace_stat.o
 obj-$(CONFIG_TRACING) += trace_printk.o
 obj-$(CONFIG_TRACING_MAP) += tracing_map.o
+obj-$(CONFIG_PREEMPTIRQ_DELAY_TEST) += preemptirq_delay_test.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
 obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
-obj-$(CONFIG_PREEMPTIRQ_EVENTS) += trace_irqsoff.o
+obj-$(CONFIG_PREEMPTIRQ_TRACEPOINTS) += trace_preemptirq.o
 obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index b951aa1fac61..2a24a59e99c5 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
  */
 #include <linux/kernel.h>
 #include <linux/blkdev.h>
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 0ae6829804bc..08fcfe440c63 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1,9 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
  * Copyright (c) 2016 Facebook
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
  */
 #include <linux/kernel.h>
 #include <linux/types.h>
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index caf9cbf35816..f536f601bd46 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Infrastructure for profiling code inserted by 'gcc -pg'.
  *
@@ -157,30 +158,6 @@ static inline void ftrace_ops_init(struct ftrace_ops *ops)
 #endif
 }
 
-/**
- * ftrace_nr_registered_ops - return number of ops registered
- *
- * Returns the number of ftrace_ops registered and tracing functions
- */
-int ftrace_nr_registered_ops(void)
-{
-	struct ftrace_ops *ops;
-	int cnt = 0;
-
-	mutex_lock(&ftrace_lock);
-
-	for (ops = rcu_dereference_protected(ftrace_ops_list,
-					     lockdep_is_held(&ftrace_lock));
-	     ops != &ftrace_list_end;
-	     ops = rcu_dereference_protected(ops->next,
-					     lockdep_is_held(&ftrace_lock)))
-		cnt++;
-
-	mutex_unlock(&ftrace_lock);
-
-	return cnt;
-}
-
 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
 			    struct ftrace_ops *op, struct pt_regs *regs)
 {
@@ -313,11 +290,6 @@ static void update_ftrace_function(void)
 	ftrace_trace_function = func;
 }
 
-int using_ftrace_ops_list_func(void)
-{
-	return ftrace_trace_function == ftrace_ops_list_func;
-}
-
 static void add_ftrace_ops(struct ftrace_ops __rcu **list,
 			   struct ftrace_ops *ops)
 {
@@ -1049,8 +1021,6 @@ static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
 	}
 #endif /* CONFIG_FUNCTION_PROFILER */
 
-static struct pid * const ftrace_swapper_pid = &init_struct_pid;
-
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static int ftrace_graph_active;
 #else
@@ -2927,22 +2897,22 @@ ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
 {
 	/* If ops isn't enabled, ignore it */
 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
-		return 0;
+		return false;
 
 	/* If ops traces all then it includes this function */
 	if (ops_traces_mod(ops))
-		return 1;
+		return true;
 
 	/* The function must be in the filter */
 	if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
 	    !__ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
-		return 0;
+		return false;
 
 	/* If in notrace hash, we ignore it too */
 	if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
-		return 0;
+		return false;
 
-	return 1;
+	return true;
 }
 
 static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
@@ -2981,12 +2951,14 @@ static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
 			p = &pg->records[i];
 			p->flags = rec_flags;
 
+#ifndef CC_USING_NOP_MCOUNT
 			/*
 			 * Do the initial record conversion from mcount jump
 			 * to the NOP instructions.
 			 */
 			if (!ftrace_code_disable(mod, p))
 				break;
+#endif
 
 			update_cnt++;
 		}
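The #ifndef CC_USING_NOP_MCOUNT guard above is the functional core of the HAVE_NOP_MCOUNT option from the Kconfig hunk: when the compiler already emitted NOPs at the instrumentation sites, the boot-time mcount-to-NOP conversion has nothing left to patch. As an illustration only, with the flag behavior inferred from the option names and assuming an architecture and gcc that support them:

	/*
	 * sketch.c, built as:  gcc -pg -mfentry -mrecord-mcount -mnop-mcount -c sketch.c
	 *
	 * -pg -mfentry     : instrument each function entry for tracing
	 * -mrecord-mcount  : record each call site's address in the
	 *                    __mcount_loc section that ftrace scans at boot
	 * -mnop-mcount     : emit the call site as a same-sized NOP up front,
	 *                    which is why ftrace_code_disable() is skipped here
	 */
	void traced_function(void)
	{
		/* entry begins with a NOP that ftrace can later patch into a
		 * call to the tracer, instead of a real mcount/fentry call */
	}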
diff --git a/kernel/trace/preemptirq_delay_test.c b/kernel/trace/preemptirq_delay_test.c
new file mode 100644
index 000000000000..f704390db9fc
--- /dev/null
+++ b/kernel/trace/preemptirq_delay_test.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Preempt / IRQ disable delay thread to test latency tracers
+ *
+ * Copyright (C) 2018 Joel Fernandes (Google) <joel@joelfernandes.org>
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/ktime.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/string.h>
+
+static ulong delay = 100;
+static char test_mode[10] = "irq";
+
+module_param_named(delay, delay, ulong, S_IRUGO);
+module_param_string(test_mode, test_mode, 10, S_IRUGO);
+MODULE_PARM_DESC(delay, "Period in microseconds (100 us default)");
+MODULE_PARM_DESC(test_mode, "Mode of the test such as preempt or irq (default irq)");
+
+static void busy_wait(ulong time)
+{
+	ktime_t start, end;
+
+	start = ktime_get();
+	do {
+		end = ktime_get();
+		if (kthread_should_stop())
+			break;
+	} while (ktime_to_ns(ktime_sub(end, start)) < (time * 1000));
+}
+
+static int preemptirq_delay_run(void *data)
+{
+	unsigned long flags;
+
+	if (!strcmp(test_mode, "irq")) {
+		local_irq_save(flags);
+		busy_wait(delay);
+		local_irq_restore(flags);
+	} else if (!strcmp(test_mode, "preempt")) {
+		preempt_disable();
+		busy_wait(delay);
+		preempt_enable();
+	}
+
+	return 0;
+}
+
+static int __init preemptirq_delay_init(void)
+{
+	char task_name[50];
+	struct task_struct *test_task;
+
+	snprintf(task_name, sizeof(task_name), "%s_test", test_mode);
+
+	test_task = kthread_run(preemptirq_delay_run, NULL, task_name);
+	return PTR_ERR_OR_ZERO(test_task);
+}
+
+static void __exit preemptirq_delay_exit(void)
+{
+	return;
+}
+
+module_init(preemptirq_delay_init)
+module_exit(preemptirq_delay_exit)
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 0b0b688ea166..1d92d4a982fd 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Generic ring buffer
  *
@@ -3221,7 +3222,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_on);
  *
  * Returns true if the ring buffer is in a state that it accepts writes.
  */
-int ring_buffer_record_is_on(struct ring_buffer *buffer)
+bool ring_buffer_record_is_on(struct ring_buffer *buffer)
 {
 	return !atomic_read(&buffer->record_disabled);
 }
@@ -3237,7 +3238,7 @@ int ring_buffer_record_is_on(struct ring_buffer *buffer)
  * ring_buffer_record_disable(), as that is a temporary disabling of
  * the ring buffer.
  */
-int ring_buffer_record_is_set_on(struct ring_buffer *buffer)
+bool ring_buffer_record_is_set_on(struct ring_buffer *buffer)
 {
 	return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
 }
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index 68ee79afe31c..ffba6789c0e2 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * ring buffer tester and benchmark
  *
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 176debd3481b..bf6f1d70484d 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * ring buffer based function tracer
  *
@@ -1087,7 +1088,7 @@ void disable_trace_on_warning(void)
  *
  * Shows real state of the ring buffer if it is enabled or not.
  */
-int tracer_tracing_is_on(struct trace_array *tr)
+bool tracer_tracing_is_on(struct trace_array *tr)
 {
 	if (tr->trace_buffer.buffer)
 		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
@@ -7628,7 +7629,9 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
 	if (buffer) {
 		mutex_lock(&trace_types_lock);
-		if (val) {
+		if (!!val == tracer_tracing_is_on(tr)) {
+			val = 0; /* do nothing */
+		} else if (val) {
 			tracer_tracing_on(tr);
 			if (tr->current_trace->start)
 				tr->current_trace->start(tr);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index f8f86231ad90..3b8c0e24ab30 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0
 #ifndef _LINUX_KERNEL_TRACE_H
 #define _LINUX_KERNEL_TRACE_H
 
@@ -594,7 +594,7 @@ void tracing_reset_current(int cpu);
 void tracing_reset_all_online_cpus(void);
 int tracing_open_generic(struct inode *inode, struct file *filp);
 bool tracing_is_disabled(void);
-int tracer_tracing_is_on(struct trace_array *tr);
+bool tracer_tracing_is_on(struct trace_array *tr);
 void tracer_tracing_on(struct trace_array *tr);
 void tracer_tracing_off(struct trace_array *tr);
 struct dentry *trace_create_file(const char *name,
@@ -937,7 +937,6 @@ void ftrace_destroy_function_files(struct trace_array *tr);
 void ftrace_init_global_array_ops(struct trace_array *tr);
 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
 void ftrace_reset_array_ops(struct trace_array *tr);
-int using_ftrace_ops_list_func(void);
 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
 void ftrace_init_tracefs_toplevel(struct trace_array *tr,
 				  struct dentry *d_tracer);
@@ -1533,9 +1532,6 @@ extern int event_trigger_init(struct event_trigger_ops *ops,
 extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
 					      int trigger_enable);
 extern void update_cond_flag(struct trace_event_file *file);
-extern void unregister_trigger(char *glob, struct event_trigger_ops *ops,
-			       struct event_trigger_data *test,
-			       struct trace_event_file *file);
 extern int set_trigger_filter(char *filter_str,
 			      struct event_trigger_data *trigger_data,
 			      struct trace_event_file *file);
@@ -1831,6 +1827,21 @@ static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
 }
 #endif
 
+#ifdef CONFIG_PREEMPT_TRACER
+void tracer_preempt_on(unsigned long a0, unsigned long a1);
+void tracer_preempt_off(unsigned long a0, unsigned long a1);
+#else
+static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
+static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
+#endif
+#ifdef CONFIG_IRQSOFF_TRACER
+void tracer_hardirqs_on(unsigned long a0, unsigned long a1);
+void tracer_hardirqs_off(unsigned long a0, unsigned long a1);
+#else
+static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { }
+static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
+#endif
+
 extern struct trace_iterator *tracepoint_print_iter;
 
 #endif /* _LINUX_KERNEL_TRACE_H */
diff --git a/kernel/trace/trace_benchmark.h b/kernel/trace/trace_benchmark.h
index be1d86ff753d..79e6fbe5b365 100644
--- a/kernel/trace/trace_benchmark.h
+++ b/kernel/trace/trace_benchmark.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM benchmark
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index d8a188e0418a..aaf6793ededa 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * tracing clocks
  *
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index 1d67464ed95e..06bb2fd9a56c 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0
 /*
  * This file defines the trace event structures that go into the ring
  * buffer directly. They are created via macros so that changes for them
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index c79193e598f5..69a3fe926e8c 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * trace event based perf event profiling/tracing
  *
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 14ff4ff3caab..f94be0c2827b 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * event tracer
  *
@@ -239,7 +240,7 @@ bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
 	struct trace_array_cpu *data;
 	struct trace_pid_list *pid_list;
 
-	pid_list = rcu_dereference_sched(tr->filtered_pids);
+	pid_list = rcu_dereference_raw(tr->filtered_pids);
 	if (!pid_list)
 		return false;
 
@@ -512,7 +513,7 @@ event_filter_pid_sched_process_exit(void *data, struct task_struct *task)
 	struct trace_pid_list *pid_list;
 	struct trace_array *tr = data;
 
-	pid_list = rcu_dereference_sched(tr->filtered_pids);
+	pid_list = rcu_dereference_raw(tr->filtered_pids);
 	trace_filter_add_remove_task(pid_list, NULL, task);
 }
 
@@ -636,7 +637,7 @@ static void __ftrace_clear_event_pids(struct trace_array *tr)
 	rcu_assign_pointer(tr->filtered_pids, NULL);
 
 	/* Wait till all users are no longer using pid filtering */
-	synchronize_sched();
+	tracepoint_synchronize_unregister();
 
 	trace_free_pid_list(pid_list);
 }
@@ -1622,7 +1623,7 @@ ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
 	}
 
 	if (filtered_pids) {
-		synchronize_sched();
+		tracepoint_synchronize_unregister();
 		trace_free_pid_list(filtered_pids);
 	} else if (pid_list) {
 		/*
@@ -3036,8 +3037,8 @@ int event_trace_del_tracer(struct trace_array *tr)
 	/* Disable any running events */
 	__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
 
-	/* Access to events are within rcu_read_lock_sched() */
-	synchronize_sched();
+	/* Make sure no more events are being executed */
+	tracepoint_synchronize_unregister();
 
 	down_write(&trace_event_sem);
 	__trace_remove_event_dirs(tr);
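From here on one change recurs: each synchronize_sched() that guarded the freeing of a filter or pid list becomes tracepoint_synchronize_unregister(), because tracepoint handlers may now run under SRCU rather than only with preemption disabled. A condensed restatement of the idiom, with a hypothetical helper name:

	/* Hypothetical sketch of the synchronization pattern used above. */
	static void replace_and_free(struct trace_array *tr,
				     struct trace_pid_list *new_list)
	{
		struct trace_pid_list *old;

		old = rcu_dereference_protected(tr->filtered_pids,
						lockdep_is_held(&event_mutex));
		rcu_assign_pointer(tr->filtered_pids, new_list);

		/*
		 * Waits for both classic (sched) RCU readers and the SRCU
		 * readers that tracepoints may now use; a bare
		 * synchronize_sched() only covered the former.
		 */
		tracepoint_synchronize_unregister();

		trace_free_pid_list(old);
	}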
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 893a206bcba4..84a65173b1e9 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * trace_events_filter - generic event filtering
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
  * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
  */
 
@@ -899,7 +886,8 @@ int filter_match_preds(struct event_filter *filter, void *rec)
 	if (!filter)
 		return 1;
 
-	prog = rcu_dereference_sched(filter->prog);
+	/* Protected by either SRCU(tracepoint_srcu) or preempt_disable */
+	prog = rcu_dereference_raw(filter->prog);
 	if (!prog)
 		return 1;
 
@@ -1626,10 +1614,10 @@ static int process_system_preds(struct trace_subsystem_dir *dir,
 
 	/*
 	 * The calls can still be using the old filters.
-	 * Do a synchronize_sched() to ensure all calls are
+	 * Do a tracepoint_synchronize_unregister() to ensure all calls are
 	 * done with them before we free them.
 	 */
-	synchronize_sched();
+	tracepoint_synchronize_unregister();
 	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
 		__free_filter(filter_item->filter);
 		list_del(&filter_item->list);
@@ -1648,7 +1636,7 @@ static int process_system_preds(struct trace_subsystem_dir *dir,
 	kfree(filter);
 	/* If any call succeeded, we still need to sync */
 	if (!fail)
-		synchronize_sched();
+		tracepoint_synchronize_unregister();
 	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
 		__free_filter(filter_item->filter);
 		list_del(&filter_item->list);
@@ -1790,7 +1778,7 @@ int apply_event_filter(struct trace_event_file *file, char *filter_string)
 		event_clear_filter(file);
 
 		/* Make sure the filter is not being used */
-		synchronize_sched();
+		tracepoint_synchronize_unregister();
 		__free_filter(filter);
 
 		return 0;
@@ -1817,7 +1805,7 @@ int apply_event_filter(struct trace_event_file *file, char *filter_string)
 
 		if (tmp) {
 			/* Make sure the call is done with the filter */
-			synchronize_sched();
+			tracepoint_synchronize_unregister();
 			__free_filter(tmp);
 		}
 	}
@@ -1847,7 +1835,7 @@ int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
 		filter = system->filter;
 		system->filter = NULL;
 		/* Ensure all filters are no longer used */
-		synchronize_sched();
+		tracepoint_synchronize_unregister();
 		filter_free_subsystem_filters(dir, tr);
 		__free_filter(filter);
 		goto out_unlock;
diff --git a/kernel/trace/trace_events_filter_test.h b/kernel/trace/trace_events_filter_test.h
index 39d7ef4f57cb..e651dfbd345e 100644
--- a/kernel/trace/trace_events_filter_test.h
+++ b/kernel/trace/trace_events_filter_test.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM test
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index aae18af94c94..85f6b01431c7 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * trace_events_hist - trace event hist triggers
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
 */
 
@@ -5141,7 +5132,7 @@ static void hist_clear(struct event_trigger_data *data)
 	if (data->name)
 		pause_named_trigger(data);
 
-	synchronize_sched();
+	tracepoint_synchronize_unregister();
 
 	tracing_map_clear(hist_data->map);
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 5dea177cef53..2152d1e530cb 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * trace_events_trigger - trace event triggers
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
 */
 
@@ -34,7 +21,9 @@ void trigger_data_free(struct event_trigger_data *data)
 	if (data->cmd_ops->set_filter)
 		data->cmd_ops->set_filter(NULL, data, NULL);
 
-	synchronize_sched(); /* make sure current triggers exit before free */
+	/* make sure current triggers exit before free */
+	tracepoint_synchronize_unregister();
+
 	kfree(data);
 }
 
@@ -579,9 +568,9 @@ out:
  * Usually used directly as the @unreg method in event command
  * implementations.
  */
-void unregister_trigger(char *glob, struct event_trigger_ops *ops,
-			struct event_trigger_data *test,
-			struct trace_event_file *file)
+static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
+			       struct event_trigger_data *test,
+			       struct trace_event_file *file)
 {
 	struct event_trigger_data *data;
 	bool unregistered = false;
@@ -752,7 +741,7 @@ int set_trigger_filter(char *filter_str,
 
 	if (tmp) {
 		/* Make sure the call is done with the filter */
-		synchronize_sched();
+		tracepoint_synchronize_unregister();
 		free_event_filter(tmp);
 	}
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index d7c8e4ec3d9d..1e6db9cbe4dc 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * trace_hwlatdetect.c - A simple Hardware Latency detector.
 *
@@ -35,9 +36,6 @@
  *
  * Includes useful feedback from Clark Williams <clark@redhat.com>
  *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
 */
 #include <linux/kthread.h>
 #include <linux/tracefs.h>
@@ -354,6 +352,9 @@ static int start_kthread(struct trace_array *tr)
 	struct task_struct *kthread;
 	int next_cpu;
 
+	if (WARN_ON(hwlat_kthread))
+		return 0;
+
 	/* Just pick the first CPU on first iteration */
 	current_mask = &save_cpumask;
 	get_online_cpus();
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 03ecb4465ee4..b7357f9f82a3 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * trace irqs off critical timings
  *
@@ -16,7 +17,6 @@
 
 #include "trace.h"
 
-#define CREATE_TRACE_POINTS
 #include <trace/events/preemptirq.h>
 
 #if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
@@ -41,12 +41,12 @@ static int start_irqsoff_tracer(struct trace_array *tr, int graph);
 
 #ifdef CONFIG_PREEMPT_TRACER
 static inline int
-preempt_trace(void)
+preempt_trace(int pc)
 {
-	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
+	return ((trace_type & TRACER_PREEMPT_OFF) && pc);
 }
 #else
-# define preempt_trace() (0)
+# define preempt_trace(pc) (0)
 #endif
 
 #ifdef CONFIG_IRQSOFF_TRACER
@@ -367,7 +367,7 @@ out:
 }
 
 static inline void
-start_critical_timing(unsigned long ip, unsigned long parent_ip)
+start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
 {
 	int cpu;
 	struct trace_array *tr = irqsoff_trace;
@@ -395,7 +395,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
 
 	local_save_flags(flags);
 
-	__trace_function(tr, ip, parent_ip, flags, preempt_count());
+	__trace_function(tr, ip, parent_ip, flags, pc);
 
 	per_cpu(tracing_cpu, cpu) = 1;
 
@@ -403,7 +403,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
 }
 
 static inline void
-stop_critical_timing(unsigned long ip, unsigned long parent_ip)
+stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
 {
 	int cpu;
 	struct trace_array *tr = irqsoff_trace;
@@ -429,7 +429,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
 	atomic_inc(&data->disabled);
 
 	local_save_flags(flags);
-	__trace_function(tr, ip, parent_ip, flags, preempt_count());
+	__trace_function(tr, ip, parent_ip, flags, pc);
 	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
 	data->critical_start = 0;
 	atomic_dec(&data->disabled);
@@ -438,77 +438,21 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
 /* start and stop critical timings used for stoppage (in idle) */
 void start_critical_timings(void)
 {
-	if (preempt_trace() || irq_trace())
-		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
+	int pc = preempt_count();
+
+	if (preempt_trace(pc) || irq_trace())
+		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
 }
 EXPORT_SYMBOL_GPL(start_critical_timings);
 
 void stop_critical_timings(void)
 {
-	if (preempt_trace() || irq_trace())
-		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
-}
-EXPORT_SYMBOL_GPL(stop_critical_timings);
-
-#ifdef CONFIG_IRQSOFF_TRACER
-#ifdef CONFIG_PROVE_LOCKING
-void time_hardirqs_on(unsigned long a0, unsigned long a1)
-{
-	if (!preempt_trace() && irq_trace())
-		stop_critical_timing(a0, a1);
-}
-
-void time_hardirqs_off(unsigned long a0, unsigned long a1)
-{
-	if (!preempt_trace() && irq_trace())
-		start_critical_timing(a0, a1);
-}
-
-#else /* !CONFIG_PROVE_LOCKING */
-
-/*
- * We are only interested in hardirq on/off events:
- */
-static inline void tracer_hardirqs_on(void)
-{
-	if (!preempt_trace() && irq_trace())
-		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
-}
+	int pc = preempt_count();
 
-static inline void tracer_hardirqs_off(void)
-{
-	if (!preempt_trace() && irq_trace())
-		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
-}
-
-static inline void tracer_hardirqs_on_caller(unsigned long caller_addr)
-{
-	if (!preempt_trace() && irq_trace())
-		stop_critical_timing(CALLER_ADDR0, caller_addr);
-}
-
-static inline void tracer_hardirqs_off_caller(unsigned long caller_addr)
-{
-	if (!preempt_trace() && irq_trace())
-		start_critical_timing(CALLER_ADDR0, caller_addr);
-}
-
-#endif /* CONFIG_PROVE_LOCKING */
-#endif /* CONFIG_IRQSOFF_TRACER */
-
-#ifdef CONFIG_PREEMPT_TRACER
-static inline void tracer_preempt_on(unsigned long a0, unsigned long a1)
-{
-	if (preempt_trace() && !irq_trace())
-		stop_critical_timing(a0, a1);
-}
-
-static inline void tracer_preempt_off(unsigned long a0, unsigned long a1)
-{
-	if (preempt_trace() && !irq_trace())
-		start_critical_timing(a0, a1);
+	if (preempt_trace(pc) || irq_trace())
+		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
 }
-#endif /* CONFIG_PREEMPT_TRACER */
+EXPORT_SYMBOL_GPL(stop_critical_timings);
 
 #ifdef CONFIG_FUNCTION_TRACER
 static bool function_enabled;
@@ -634,7 +578,7 @@ static int __irqsoff_tracer_init(struct trace_array *tr)
 	return 0;
 }
 
-static void irqsoff_tracer_reset(struct trace_array *tr)
+static void __irqsoff_tracer_reset(struct trace_array *tr)
 {
 	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
 	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
@@ -659,12 +603,37 @@ static void irqsoff_tracer_stop(struct trace_array *tr)
 }
 
 #ifdef CONFIG_IRQSOFF_TRACER
+/*
+ * We are only interested in hardirq on/off events:
+ */
+void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
+{
+	unsigned int pc = preempt_count();
+
+	if (!preempt_trace(pc) && irq_trace())
+		stop_critical_timing(a0, a1, pc);
+}
+
+void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
+{
+	unsigned int pc = preempt_count();
+
+	if (!preempt_trace(pc) && irq_trace())
+		start_critical_timing(a0, a1, pc);
+}
+
 static int irqsoff_tracer_init(struct trace_array *tr)
 {
 	trace_type = TRACER_IRQS_OFF;
 
 	return __irqsoff_tracer_init(tr);
 }
+
+static void irqsoff_tracer_reset(struct trace_array *tr)
+{
+	__irqsoff_tracer_reset(tr);
+}
+
 static struct tracer irqsoff_tracer __read_mostly =
 {
 	.name		= "irqsoff",
@@ -684,12 +653,25 @@ static struct tracer irqsoff_tracer __read_mostly =
 	.allow_instances = true,
 	.use_max_tr	= true,
 };
-# define register_irqsoff(trace) register_tracer(&trace)
-#else
-# define register_irqsoff(trace) do { } while (0)
-#endif
+#endif /* CONFIG_IRQSOFF_TRACER */
 
 #ifdef CONFIG_PREEMPT_TRACER
+void tracer_preempt_on(unsigned long a0, unsigned long a1)
+{
+	int pc = preempt_count();
+
+	if (preempt_trace(pc) && !irq_trace())
+		stop_critical_timing(a0, a1, pc);
+}
+
+void tracer_preempt_off(unsigned long a0, unsigned long a1)
+{
+	int pc = preempt_count();
+
+	if (preempt_trace(pc) && !irq_trace())
+		start_critical_timing(a0, a1, pc);
+}
+
 static int preemptoff_tracer_init(struct trace_array *tr)
 {
 	trace_type = TRACER_PREEMPT_OFF;
@@ -697,11 +679,16 @@ static int preemptoff_tracer_init(struct trace_array *tr)
 	return __irqsoff_tracer_init(tr);
 }
 
+static void preemptoff_tracer_reset(struct trace_array *tr)
+{
+	__irqsoff_tracer_reset(tr);
+}
+
 static struct tracer preemptoff_tracer __read_mostly =
 {
 	.name		= "preemptoff",
 	.init		= preemptoff_tracer_init,
-	.reset		= irqsoff_tracer_reset,
+	.reset		= preemptoff_tracer_reset,
 	.start		= irqsoff_tracer_start,
 	.stop		= irqsoff_tracer_stop,
 	.print_max	= true,
@@ -716,13 +703,9 @@ static struct tracer preemptoff_tracer __read_mostly =
 	.allow_instances = true,
 	.use_max_tr	= true,
 };
-# define register_preemptoff(trace) register_tracer(&trace)
-#else
-# define register_preemptoff(trace) do { } while (0)
-#endif
+#endif /* CONFIG_PREEMPT_TRACER */
 
-#if defined(CONFIG_IRQSOFF_TRACER) && \
-	defined(CONFIG_PREEMPT_TRACER)
+#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
 
 static int preemptirqsoff_tracer_init(struct trace_array *tr)
 {
@@ -731,11 +714,16 @@ static int preemptirqsoff_tracer_init(struct trace_array *tr)
 	return __irqsoff_tracer_init(tr);
 }
 
+static void preemptirqsoff_tracer_reset(struct trace_array *tr)
+{
+	__irqsoff_tracer_reset(tr);
+}
+
 static struct tracer preemptirqsoff_tracer __read_mostly =
 {
 	.name		= "preemptirqsoff",
 	.init		= preemptirqsoff_tracer_init,
-	.reset		= irqsoff_tracer_reset,
+	.reset		= preemptirqsoff_tracer_reset,
 	.start		= irqsoff_tracer_start,
 	.stop		= irqsoff_tracer_stop,
 	.print_max	= true,
@@ -750,115 +738,21 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
 	.allow_instances = true,
 	.use_max_tr	= true,
 };
-
-# define register_preemptirqsoff(trace) register_tracer(&trace)
-#else
-# define register_preemptirqsoff(trace) do { } while (0)
 #endif
 
 __init static int init_irqsoff_tracer(void)
 {
-	register_irqsoff(irqsoff_tracer);
-	register_preemptoff(preemptoff_tracer);
-	register_preemptirqsoff(preemptirqsoff_tracer);
-
-	return 0;
-}
-core_initcall(init_irqsoff_tracer);
-#endif /* IRQSOFF_TRACER || PREEMPTOFF_TRACER */
-
-#ifndef CONFIG_IRQSOFF_TRACER
-static inline void tracer_hardirqs_on(void) { }
-static inline void tracer_hardirqs_off(void) { }
-static inline void tracer_hardirqs_on_caller(unsigned long caller_addr) { }
-static inline void tracer_hardirqs_off_caller(unsigned long caller_addr) { }
+#ifdef CONFIG_IRQSOFF_TRACER
+	register_tracer(&irqsoff_tracer);
 #endif
-
-#ifndef CONFIG_PREEMPT_TRACER
-static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
-static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
+#ifdef CONFIG_PREEMPT_TRACER
	register_tracer(&preemptoff_tracer);
 #endif
-
-#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PROVE_LOCKING)
-/* Per-cpu variable to prevent redundant calls when IRQs already off */
-static DEFINE_PER_CPU(int, tracing_irq_cpu);
-
-void trace_hardirqs_on(void)
-{
-	if (!this_cpu_read(tracing_irq_cpu))
-		return;
-
-	trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
-	tracer_hardirqs_on();
-
-	this_cpu_write(tracing_irq_cpu, 0);
-}
-EXPORT_SYMBOL(trace_hardirqs_on);
-
-void trace_hardirqs_off(void)
-{
-	if (this_cpu_read(tracing_irq_cpu))
-		return;
-
-	this_cpu_write(tracing_irq_cpu, 1);
-
-	trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
-	tracer_hardirqs_off();
-}
-EXPORT_SYMBOL(trace_hardirqs_off);
-
-__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
-{
-	if (!this_cpu_read(tracing_irq_cpu))
-		return;
-
-	trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
-	tracer_hardirqs_on_caller(caller_addr);
-
-	this_cpu_write(tracing_irq_cpu, 0);
-}
-EXPORT_SYMBOL(trace_hardirqs_on_caller);
-
-__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
-{
-	if (this_cpu_read(tracing_irq_cpu))
-		return;
-
-	this_cpu_write(tracing_irq_cpu, 1);
-
-	trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
-	tracer_hardirqs_off_caller(caller_addr);
-}
-EXPORT_SYMBOL(trace_hardirqs_off_caller);
-
-/*
- * Stubs:
- */
-
-void trace_softirqs_on(unsigned long ip)
-{
-}
-
-void trace_softirqs_off(unsigned long ip)
-{
-}
-
-inline void print_irqtrace_events(struct task_struct *curr)
-{
-}
+#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
+	register_tracer(&preemptirqsoff_tracer);
 #endif
 
-#if defined(CONFIG_PREEMPT_TRACER) || \
-	(defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
-void trace_preempt_on(unsigned long a0, unsigned long a1)
-{
-	trace_preempt_enable_rcuidle(a0, a1);
-	tracer_preempt_on(a0, a1);
-}
-
-void trace_preempt_off(unsigned long a0, unsigned long a1)
-{
-	trace_preempt_disable_rcuidle(a0, a1);
-	tracer_preempt_off(a0, a1);
+	return 0;
 }
-#endif
+core_initcall(init_irqsoff_tracer);
+#endif /* IRQSOFF_TRACER || PREEMPTOFF_TRACER */
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index e9d99463e5df..2bfb2bbeb3d2 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Kprobes-based tracing events
  *
  * Created by Masami Hiramatsu <mhiramat@redhat.com>
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
 #define pr_fmt(fmt)	"trace_kprobe: " fmt
 
@@ -23,6 +12,7 @@
 #include <linux/rculist.h>
 #include <linux/error-injection.h>
 
+#include "trace_kprobe_selftest.h"
 #include "trace_probe.h"
 
 #define KPROBE_EVENT_SYSTEM "kprobes"
@@ -87,6 +77,23 @@ static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
 	return nhit;
 }
 
+/* Return 0 if it fails to find the symbol address */
+static nokprobe_inline
+unsigned long trace_kprobe_address(struct trace_kprobe *tk)
+{
+	unsigned long addr;
+
+	if (tk->symbol) {
+		addr = (unsigned long)
+			kallsyms_lookup_name(trace_kprobe_symbol(tk));
+		if (addr)
+			addr += tk->rp.kp.offset;
+	} else {
+		addr = (unsigned long)tk->rp.kp.addr;
+	}
+	return addr;
+}
+
 bool trace_kprobe_on_func_entry(struct trace_event_call *call)
 {
 	struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
@@ -99,16 +106,8 @@ bool trace_kprobe_on_func_entry(struct trace_event_call *call)
 bool trace_kprobe_error_injectable(struct trace_event_call *call)
 {
 	struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
-	unsigned long addr;
 
-	if (tk->symbol) {
-		addr = (unsigned long)
-			kallsyms_lookup_name(trace_kprobe_symbol(tk));
-		addr += tk->rp.kp.offset;
-	} else {
-		addr = (unsigned long)tk->rp.kp.addr;
-	}
-	return within_error_injection_list(addr);
+	return within_error_injection_list(trace_kprobe_address(tk));
 }
 
 static int register_kprobe_event(struct trace_kprobe *tk);
@@ -393,6 +392,20 @@ static struct trace_kprobe *find_trace_kprobe(const char *event,
 	return NULL;
 }
 
+static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
+{
+	int ret = 0;
+
+	if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
+		if (trace_kprobe_is_return(tk))
+			ret = enable_kretprobe(&tk->rp);
+		else
+			ret = enable_kprobe(&tk->rp.kp);
+	}
+
+	return ret;
+}
+
 /*
  * Enable trace_probe
  * if the file is NULL, enable "perf" handler, or enable "trace" handler.
 */
@@ -400,7 +413,7 @@ static struct trace_kprobe *find_trace_kprobe(const char *event,
 static int
 enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
 {
-	struct event_file_link *link = NULL;
+	struct event_file_link *link;
 	int ret = 0;
 
 	if (file) {
@@ -414,26 +427,18 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
 		list_add_tail_rcu(&link->list, &tk->tp.files);
 
 		tk->tp.flags |= TP_FLAG_TRACE;
-	} else
-		tk->tp.flags |= TP_FLAG_PROFILE;
-
-	if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
-		if (trace_kprobe_is_return(tk))
-			ret = enable_kretprobe(&tk->rp);
-		else
-			ret = enable_kprobe(&tk->rp.kp);
-	}
-
-	if (ret) {
-		if (file) {
-			/* Notice the if is true on not WARN() */
-			if (!WARN_ON_ONCE(!link))
-				list_del_rcu(&link->list);
+		ret = __enable_trace_kprobe(tk);
+		if (ret) {
+			list_del_rcu(&link->list);
 			kfree(link);
 			tk->tp.flags &= ~TP_FLAG_TRACE;
-		} else {
-			tk->tp.flags &= ~TP_FLAG_PROFILE;
 		}
+
+	} else {
+		tk->tp.flags |= TP_FLAG_PROFILE;
+		ret = __enable_trace_kprobe(tk);
+		if (ret)
+			tk->tp.flags &= ~TP_FLAG_PROFILE;
 	}
  out:
 	return ret;
@@ -498,6 +503,22 @@ disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
 	return ret;
 }
 
+#if defined(CONFIG_KPROBES_ON_FTRACE) && \
+	!defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
+static bool within_notrace_func(struct trace_kprobe *tk)
+{
+	unsigned long offset, size, addr;
+
+	addr = trace_kprobe_address(tk);
+	if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
+		return false;
+
+	return !ftrace_location_range(addr - offset, addr - offset + size);
+}
+#else
+#define within_notrace_func(tk)	(false)
+#endif
+
 /* Internal register function - just handle k*probes and flags */
 static int __register_trace_kprobe(struct trace_kprobe *tk)
 {
@@ -506,6 +527,12 @@ static int __register_trace_kprobe(struct trace_kprobe *tk)
 	if (trace_probe_is_registered(&tk->tp))
 		return -EINVAL;
 
+	if (within_notrace_func(tk)) {
+		pr_warn("Could not probe notrace function %s\n",
+			trace_kprobe_symbol(tk));
+		return -EINVAL;
+	}
+
 	for (i = 0; i < tk->tp.nr_args; i++)
 		traceprobe_update_arg(&tk->tp.args[i]);
 
@@ -1547,17 +1574,6 @@ fs_initcall(init_kprobe_trace);
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
 
-/*
- * The "__used" keeps gcc from removing the function symbol
- * from the kallsyms table. 'noinline' makes sure that there
- * isn't an inlined version used by the test method below
- */
-static __used __init noinline int
-kprobe_trace_selftest_target(int a1, int a2, int a3, int a4, int a5, int a6)
-{
-	return a1 + a2 + a3 + a4 + a5 + a6;
-}
-
 static __init struct trace_event_file *
 find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
 {
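The within_notrace_func() check above is what the KPROBE_EVENTS_ON_NOTRACE option toggles: unless that option is set, a kprobe event placed inside a function ftrace cannot trace is refused at registration with -EINVAL and the "Could not probe notrace function" warning. A standalone restatement of the same range test, with made-up numbers; within_notrace_func() above is the authoritative form:

	/* Sketch: true if addr lies in a function with no ftrace patch site.
	 * Example: addr = 0xffffffff81100040, kallsyms reports offset 0x40 and
	 * size 0x120, so the enclosing function spans 0xffffffff81100000 to
	 * 0xffffffff81100120. A notrace function has no mcount/fentry record
	 * there, ftrace_location_range() returns 0, and we report true. */
	#include <linux/ftrace.h>
	#include <linux/kallsyms.h>

	static bool probe_hits_notrace(unsigned long addr)
	{
		unsigned long offset, size;

		if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
			return false;	/* unknown symbol: let kprobes decide */

		return !ftrace_location_range(addr - offset, addr - offset + size);
	}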
diff --git a/kernel/trace/trace_kprobe_selftest.c b/kernel/trace/trace_kprobe_selftest.c
new file mode 100644
index 000000000000..16548ee4c8c6
--- /dev/null
+++ b/kernel/trace/trace_kprobe_selftest.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Function used during the kprobe self test. This function is in a separate
+ * compilation unit so it can be compiled with CC_FLAGS_FTRACE to ensure that
+ * it can be probed by the selftests.
+ */
+int kprobe_trace_selftest_target(int a1, int a2, int a3, int a4, int a5, int a6)
+{
+	return a1 + a2 + a3 + a4 + a5 + a6;
+}
diff --git a/kernel/trace/trace_kprobe_selftest.h b/kernel/trace/trace_kprobe_selftest.h
new file mode 100644
index 000000000000..c4fc7268ba7c
--- /dev/null
+++ b/kernel/trace/trace_kprobe_selftest.h
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Function used during the kprobe self test. This function is in a separate
+ * compilation unit so it can be compiled with CC_FLAGS_FTRACE to ensure that
+ * it can be probed by the selftests.
+ */
+int kprobe_trace_selftest_target(int a1, int a2, int a3, int a4, int a5, int a6);
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 1c8e30fda46a..6e6cc64faa38 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * trace_output.c
  *
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h
index dbba03ed96de..2f742b74e7e6 100644
--- a/kernel/trace/trace_output.h
+++ b/kernel/trace/trace_output.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0
 #ifndef __TRACE_EVENTS_H
 #define __TRACE_EVENTS_H
diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c
new file mode 100644
index 000000000000..71f553cceb3c
--- /dev/null
+++ b/kernel/trace/trace_preemptirq.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * preemptoff and irqoff tracepoints
+ *
+ * Copyright (C) Joel Fernandes (Google) <joel@joelfernandes.org>
+ */
+
+#include <linux/kallsyms.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/ftrace.h>
+#include "trace.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/preemptirq.h>
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+/* Per-cpu variable to prevent redundant calls when IRQs already off */
+static DEFINE_PER_CPU(int, tracing_irq_cpu);
+
+void trace_hardirqs_on(void)
+{
+	if (this_cpu_read(tracing_irq_cpu)) {
+		if (!in_nmi())
+			trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
+		this_cpu_write(tracing_irq_cpu, 0);
+	}
+
+	lockdep_hardirqs_on(CALLER_ADDR0);
+}
+EXPORT_SYMBOL(trace_hardirqs_on);
+
+void trace_hardirqs_off(void)
+{
+	if (!this_cpu_read(tracing_irq_cpu)) {
+		this_cpu_write(tracing_irq_cpu, 1);
+		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
+		if (!in_nmi())
+			trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+	}
+
+	lockdep_hardirqs_off(CALLER_ADDR0);
+}
+EXPORT_SYMBOL(trace_hardirqs_off);
+
+__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
+{
+	if (this_cpu_read(tracing_irq_cpu)) {
+		if (!in_nmi())
+			trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
+		tracer_hardirqs_on(CALLER_ADDR0, caller_addr);
+		this_cpu_write(tracing_irq_cpu, 0);
+	}
+
+	lockdep_hardirqs_on(CALLER_ADDR0);
+}
+EXPORT_SYMBOL(trace_hardirqs_on_caller);
+
+__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
+{
+	if (!this_cpu_read(tracing_irq_cpu)) {
+		this_cpu_write(tracing_irq_cpu, 1);
+		tracer_hardirqs_off(CALLER_ADDR0, caller_addr);
+		if (!in_nmi())
+			trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
+	}
+
+	lockdep_hardirqs_off(CALLER_ADDR0);
+}
+EXPORT_SYMBOL(trace_hardirqs_off_caller);
+#endif /* CONFIG_TRACE_IRQFLAGS */
+
+#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
+
+void trace_preempt_on(unsigned long a0, unsigned long a1)
+{
+	if (!in_nmi())
+		trace_preempt_enable_rcuidle(a0, a1);
+	tracer_preempt_on(a0, a1);
+}
+
+void trace_preempt_off(unsigned long a0, unsigned long a1)
+{
+	if (!in_nmi())
+		trace_preempt_disable_rcuidle(a0, a1);
+	tracer_preempt_off(a0, a1);
+}
+#endif
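One property of this new file worth spelling out: the per-CPU tracing_irq_cpu flag means only the outermost IRQ state transition produces an event, so nested irqs-off regions collapse into a single disable/enable pair. A hypothetical illustration, not from the patch:

	/* Illustration only: nested irqs-off sections and the events emitted. */
	#include <linux/irqflags.h>

	static void nested_irqoff_example(void)
	{
		unsigned long flags;

		local_irq_disable();      /* flag 0 -> 1: irq_disable event emitted */
		local_irq_save(flags);    /* trace_hardirqs_off() again: suppressed */
		local_irq_restore(flags); /* IRQs stay off: still suppressed */
		local_irq_enable();       /* flag 1 -> 0: irq_enable event emitted */
	}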
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index 50f44b7b2b32..b0875b327f5c 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * trace binary printk
  *
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index daf54bda4dc8..e99c3ce7aa65 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Common code for probe-based Dynamic events.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
 * This code was copied from kernel/trace/trace_kprobe.c written by
 * Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
 *
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index 75daff22ccea..5f52668e165d 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Common header file for probe-based Dynamic events.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
 * This code was copied from kernel/trace/trace_kprobe.h written by
 * Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
 *
diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
index e694c9f9efa4..6b1c562ffdaf 100644
--- a/kernel/trace/trace_seq.c
+++ b/kernel/trace/trace_seq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * trace_seq.c
  *
diff --git a/kernel/trace/trace_stat.h b/kernel/trace/trace_stat.h
index 76d30b4ebe83..8786d17caf49 100644
--- a/kernel/trace/trace_stat.h
+++ b/kernel/trace/trace_stat.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0
 #ifndef __TRACE_STAT_H
 #define __TRACE_STAT_H
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index bf89a51e740d..e696667da29a 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * uprobes-based tracing events
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */
@@ -952,7 +940,7 @@ probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
 		list_del_rcu(&link->list);
 		/* synchronize with u{,ret}probe_trace_func */
-		synchronize_sched();
+		synchronize_rcu();
 		kfree(link);
 
 		if (!list_empty(&tu->tp.files))
diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
index 752d8042bad4..9a1c22310323 100644
--- a/kernel/trace/tracing_map.c
+++ b/kernel/trace/tracing_map.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * tracing_map - lock-free map for tracing
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
 *
 * tracing_map implementation inspired by lock-free map algorithms
diff --git a/kernel/trace/tracing_map.h b/kernel/trace/tracing_map.h
index 053eb92b2d31..a6de61fc22de 100644
--- a/kernel/trace/tracing_map.h
+++ b/kernel/trace/tracing_map.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0
 #ifndef __TRACING_MAP_H
 #define __TRACING_MAP_H
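A practical way to exercise much of this series at once, assuming a kernel with PREEMPTIRQ_DELAY_TEST=m and the irqsoff tracer built in: select the tracer by writing irqsoff to current_tracer under the tracefs mount (commonly /sys/kernel/debug/tracing), load the module with the invocation from the Kconfig help (modprobe preemptirq_delay_test test_mode=irq delay=500000), then read tracing_max_latency, which should report a maximum latency near the requested 500 us, attributed to the module's busy-wait loop.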