author    Steven Rostedt (Red Hat)  2013-03-12 11:49:18 -0400
committer Steven Rostedt            2013-03-15 00:36:01 -0400
commit    77fd5c15e3216b901be69047ca43b05ae9099951 (patch)
tree      1790b9bacd1d371cf03902efaf99cdc9d08abdec
parent    3209cff4490bee55fd2bc1d087cb8ecf2a686a88 (diff)
tracing: Add snapshot trigger to function probes
 echo 'schedule:snapshot:1' > /debug/tracing/set_ftrace_filter

This will cause the scheduler to trigger a snapshot the next time it's
called (you can use any function that's not called by NMI).

Even though it triggers only once, you still need to remove it with:

 echo '!schedule:snapshot:0' > /debug/tracing/set_ftrace_filter

The :1 can be left off for the first command:

 echo 'schedule:snapshot' > /debug/tracing/set_ftrace_filter

But this will cause all calls to schedule to trigger a snapshot. This
must be removed without the ':0', since adding a "count" is a different
operation (internally):

 echo '!schedule:snapshot' > /debug/tracing/set_ftrace_filter

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
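For reference, a full session with this trigger might look like the sketch
below. It assumes debugfs is mounted at /debug as in the examples above
(on most systems it is /sys/kernel/debug) and that the kernel was built
with CONFIG_TRACER_SNAPSHOT and CONFIG_DYNAMIC_FTRACE:

 # cd /debug/tracing
 # echo 'schedule:snapshot:1' > set_ftrace_filter
 # cat set_ftrace_filter      # probe is listed as "schedule:snapshot:count=1"
 # cat snapshot               # read the snapshot buffer once the trigger has fired
 # echo '!schedule:snapshot:0' > set_ftrace_filter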
 kernel/trace/trace.c | 111 +++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 110 insertions(+), 1 deletion(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 906049c0af90..c5b844621562 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -5086,7 +5086,114 @@ static const struct file_operations tracing_dyn_info_fops = {
.read = tracing_read_dyn_info,
.llseek = generic_file_llseek,
};
-#endif
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
+static void
+ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
+{
+ tracing_snapshot();
+}
+
+static void
+ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
+{
+ unsigned long *count = (long *)data;
+
+ if (!*count)
+ return;
+
+ if (*count != -1)
+ (*count)--;
+
+ tracing_snapshot();
+}
+
+static int
+ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
+ struct ftrace_probe_ops *ops, void *data)
+{
+ long count = (long)data;
+
+ seq_printf(m, "%ps:", (void *)ip);
+
+ seq_printf(m, "snapshot");
+
+ if (count == -1)
+ seq_printf(m, ":unlimited\n");
+ else
+ seq_printf(m, ":count=%ld\n", count);
+
+ return 0;
+}
+
+static struct ftrace_probe_ops snapshot_probe_ops = {
+ .func = ftrace_snapshot,
+ .print = ftrace_snapshot_print,
+};
+
+static struct ftrace_probe_ops snapshot_count_probe_ops = {
+ .func = ftrace_count_snapshot,
+ .print = ftrace_snapshot_print,
+};
+
+static int
+ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
+ char *glob, char *cmd, char *param, int enable)
+{
+ struct ftrace_probe_ops *ops;
+ void *count = (void *)-1;
+ char *number;
+ int ret;
+
+ /* hash funcs only work with set_ftrace_filter */
+ if (!enable)
+ return -EINVAL;
+
+ ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
+
+ if (glob[0] == '!') {
+ unregister_ftrace_function_probe_func(glob+1, ops);
+ return 0;
+ }
+
+ if (!param)
+ goto out_reg;
+
+ number = strsep(&param, ":");
+
+ if (!strlen(number))
+ goto out_reg;
+
+ /*
+ * We use the callback data field (which is a pointer)
+ * as our counter.
+ */
+ ret = kstrtoul(number, 0, (unsigned long *)&count);
+ if (ret)
+ return ret;
+
+ out_reg:
+ ret = register_ftrace_function_probe(glob, ops, count);
+
+ if (ret >= 0)
+ alloc_snapshot(&global_trace);
+
+ return ret < 0 ? ret : 0;
+}
+
+static struct ftrace_func_command ftrace_snapshot_cmd = {
+ .name = "snapshot",
+ .func = ftrace_trace_snapshot_callback,
+};
+
+static int register_snapshot_cmd(void)
+{
+ return register_ftrace_command(&ftrace_snapshot_cmd);
+}
+#else
+static inline int register_snapshot_cmd(void) { return 0; }
+#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
{
@@ -6076,6 +6183,8 @@ __init static int tracer_alloc_buffers(void)
trace_set_options(&global_trace, option);
}
+ register_snapshot_cmd();
+
return 0;
out_free_cpumask: