Diffstat (limited to 'tools/perf/builtin-sched.c')
-rw-r--r-- | tools/perf/builtin-sched.c | 200 |
1 file changed, 101 insertions(+), 99 deletions(-)
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 5177964943e7..fb8b5f83b4a0 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -2,11 +2,14 @@
 #include "perf.h"
 
 #include "util/util.h"
+#include "util/evlist.h"
 #include "util/cache.h"
+#include "util/evsel.h"
 #include "util/symbol.h"
 #include "util/thread.h"
 #include "util/header.h"
 #include "util/session.h"
+#include "util/tool.h"
 
 #include "util/parse-options.h"
 #include "util/trace-event.h"
@@ -19,7 +22,7 @@
 #include <pthread.h>
 #include <math.h>
 
-static char const *input_name = "perf.data";
+static const char *input_name;
 
 static char default_sort_order[] = "avg, max, switch, runtime";
 static const char *sort_order = default_sort_order;
@@ -723,21 +726,21 @@ struct trace_migrate_task_event {
 
 struct trace_sched_handler {
        void (*switch_event)(struct trace_switch_event *,
-                            struct perf_session *,
+                            struct machine *,
                             struct event *,
                             int cpu,
                             u64 timestamp,
                             struct thread *thread);
 
       void (*runtime_event)(struct trace_runtime_event *,
-                            struct perf_session *,
+                            struct machine *,
                             struct event *,
                             int cpu,
                             u64 timestamp,
                             struct thread *thread);
 
       void (*wakeup_event)(struct trace_wakeup_event *,
-                           struct perf_session *,
+                           struct machine *,
                            struct event *,
                            int cpu,
                            u64 timestamp,
@@ -750,7 +753,7 @@ struct trace_sched_handler {
                            struct thread *thread);
 
       void (*migrate_task_event)(struct trace_migrate_task_event *,
-                                 struct perf_session *session,
+                                 struct machine *machine,
                                  struct event *,
                                  int cpu,
                                  u64 timestamp,
@@ -760,7 +763,7 @@
 
 static void
 replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
-                   struct perf_session *session __used,
+                   struct machine *machine __used,
                    struct event *event,
                    int cpu __used,
                    u64 timestamp __used,
@@ -787,7 +790,7 @@ static u64 cpu_last_switched[MAX_CPUS];
 
 static void
 replay_switch_event(struct trace_switch_event *switch_event,
-                   struct perf_session *session __used,
+                   struct machine *machine __used,
                    struct event *event,
                    int cpu,
                    u64 timestamp,
@@ -1021,7 +1024,7 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
 
 static void
 latency_switch_event(struct trace_switch_event *switch_event,
-                    struct perf_session *session,
+                    struct machine *machine,
                     struct event *event __used,
                     int cpu,
                     u64 timestamp,
@@ -1045,8 +1048,8 @@ latency_switch_event(struct trace_switch_event *switch_event,
               die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
 
 
-      sched_out = perf_session__findnew(session, switch_event->prev_pid);
-      sched_in = perf_session__findnew(session, switch_event->next_pid);
+      sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
+      sched_in = machine__findnew_thread(machine, switch_event->next_pid);
 
       out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
       if (!out_events) {
@@ -1074,13 +1077,13 @@ latency_switch_event(struct trace_switch_event *switch_event,
 
 static void
 latency_runtime_event(struct trace_runtime_event *runtime_event,
-                    struct perf_session *session,
+                    struct machine *machine,
                     struct event *event __used,
                     int cpu,
                     u64 timestamp,
                     struct thread *this_thread __used)
 {
-      struct thread *thread = perf_session__findnew(session, runtime_event->pid);
+      struct thread *thread = machine__findnew_thread(machine, runtime_event->pid);
       struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
 
       BUG_ON(cpu >= MAX_CPUS || cpu < 0);
@@ -1097,7 +1100,7 @@ latency_runtime_event(struct trace_runtime_event *runtime_event,
 
 static void
 latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
-                    struct perf_session *session,
+                    struct machine *machine,
                     struct event *__event __used,
                     int cpu __used,
                     u64 timestamp,
@@ -1111,7 +1114,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
       if (!wakeup_event->success)
               return;
 
-      wakee = perf_session__findnew(session, wakeup_event->pid);
+      wakee = machine__findnew_thread(machine, wakeup_event->pid);
       atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
       if (!atoms) {
               thread_atoms_insert(wakee);
@@ -1145,7 +1148,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
 
 static void
 latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
-                          struct perf_session *session,
+                          struct machine *machine,
                           struct event *__event __used,
                           int cpu __used,
                           u64 timestamp,
@@ -1161,7 +1164,7 @@ latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
       if (profile_cpu == -1)
               return;
 
-      migrant = perf_session__findnew(session, migrate_task_event->pid);
+      migrant = machine__findnew_thread(machine, migrate_task_event->pid);
       atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
       if (!atoms) {
               thread_atoms_insert(migrant);
@@ -1356,12 +1359,13 @@ static void sort_lat(void)
 static struct trace_sched_handler *trace_handler;
 
 static void
-process_sched_wakeup_event(void *data, struct perf_session *session,
+process_sched_wakeup_event(struct perf_tool *tool __used,
                           struct event *event,
-                          int cpu __used,
-                          u64 timestamp __used,
-                          struct thread *thread __used)
+                          struct perf_sample *sample,
+                          struct machine *machine,
+                          struct thread *thread)
 {
+      void *data = sample->raw_data;
       struct trace_wakeup_event wakeup_event;
 
       FILL_COMMON_FIELDS(wakeup_event, event, data);
@@ -1373,8 +1377,8 @@ process_sched_wakeup_event(void *data, struct perf_session *session,
       FILL_FIELD(wakeup_event, cpu, event, data);
 
       if (trace_handler->wakeup_event)
-              trace_handler->wakeup_event(&wakeup_event, session, event,
-                                          cpu, timestamp, thread);
+              trace_handler->wakeup_event(&wakeup_event, machine, event,
+                                          sample->cpu, sample->time, thread);
 }
 
 /*
@@ -1392,7 +1396,7 @@ static char next_shortname2 = '0';
 
 static void
 map_switch_event(struct trace_switch_event *switch_event,
-                struct perf_session *session,
+                struct machine *machine,
                 struct event *event __used,
                 int this_cpu,
                 u64 timestamp,
@@ -1420,8 +1424,8 @@ map_switch_event(struct trace_switch_event *switch_event,
               die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
 
 
-      sched_out = perf_session__findnew(session, switch_event->prev_pid);
-      sched_in = perf_session__findnew(session, switch_event->next_pid);
+      sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
+      sched_in = machine__findnew_thread(machine, switch_event->next_pid);
 
       curr_thread[this_cpu] = sched_in;
 
@@ -1469,14 +1473,15 @@ map_switch_event(struct trace_switch_event *switch_event,
       }
 }
 
-
 static void
-process_sched_switch_event(void *data, struct perf_session *session,
+process_sched_switch_event(struct perf_tool *tool __used,
                           struct event *event,
-                          int this_cpu,
-                          u64 timestamp __used,
-                          struct thread *thread __used)
+                          struct perf_sample *sample,
+                          struct machine *machine,
+                          struct thread *thread)
 {
+      int this_cpu = sample->cpu;
+      void *data = sample->raw_data;
       struct trace_switch_event switch_event;
 
       FILL_COMMON_FIELDS(switch_event, event, data);
@@ -1498,19 +1503,20 @@ process_sched_switch_event(void *data, struct perf_session *session,
                       nr_context_switch_bugs++;
       }
       if (trace_handler->switch_event)
-              trace_handler->switch_event(&switch_event, session, event,
-                                          this_cpu, timestamp, thread);
+              trace_handler->switch_event(&switch_event, machine, event,
+                                          this_cpu, sample->time, thread);
 
       curr_pid[this_cpu] = switch_event.next_pid;
 }
 
 static void
-process_sched_runtime_event(void *data, struct perf_session *session,
-                           struct event *event,
-                           int cpu __used,
-                           u64 timestamp __used,
-                           struct thread *thread __used)
+process_sched_runtime_event(struct perf_tool *tool __used,
+                           struct event *event,
+                           struct perf_sample *sample,
+                           struct machine *machine,
+                           struct thread *thread)
 {
+      void *data = sample->raw_data;
       struct trace_runtime_event runtime_event;
 
       FILL_ARRAY(runtime_event, comm, event, data);
@@ -1519,16 +1525,18 @@ process_sched_runtime_event(void *data, struct perf_session *session,
       FILL_FIELD(runtime_event, vruntime, event, data);
 
       if (trace_handler->runtime_event)
-              trace_handler->runtime_event(&runtime_event, session, event, cpu, timestamp, thread);
+              trace_handler->runtime_event(&runtime_event, machine, event,
+                                           sample->cpu, sample->time, thread);
 }
 
 static void
-process_sched_fork_event(void *data,
+process_sched_fork_event(struct perf_tool *tool __used,
                         struct event *event,
-                        int cpu __used,
-                        u64 timestamp __used,
-                        struct thread *thread __used)
+                        struct perf_sample *sample,
+                        struct machine *machine __used,
+                        struct thread *thread)
 {
+      void *data = sample->raw_data;
       struct trace_fork_event fork_event;
 
       FILL_COMMON_FIELDS(fork_event, event, data);
@@ -1540,13 +1548,14 @@ process_sched_fork_event(void *data,
 
       if (trace_handler->fork_event)
               trace_handler->fork_event(&fork_event, event,
-                                        cpu, timestamp, thread);
+                                        sample->cpu, sample->time, thread);
 }
 
 static void
-process_sched_exit_event(struct event *event,
-                        int cpu __used,
-                        u64 timestamp __used,
+process_sched_exit_event(struct perf_tool *tool __used,
+                        struct event *event,
+                        struct perf_sample *sample __used,
+                        struct machine *machine __used,
                         struct thread *thread __used)
 {
       if (verbose)
@@ -1554,12 +1563,13 @@ process_sched_exit_event(struct event *event,
 }
 
 static void
-process_sched_migrate_task_event(void *data, struct perf_session *session,
-                                struct event *event,
-                                int cpu __used,
-                                u64 timestamp __used,
-                                struct thread *thread __used)
+process_sched_migrate_task_event(struct perf_tool *tool __used,
+                                struct event *event,
+                                struct perf_sample *sample,
+                                struct machine *machine,
+                                struct thread *thread)
 {
+      void *data = sample->raw_data;
       struct trace_migrate_task_event migrate_task_event;
 
       FILL_COMMON_FIELDS(migrate_task_event, event, data);
@@ -1570,67 +1580,47 @@ process_sched_migrate_task_event(void *data, struct perf_session *session,
       FILL_FIELD(migrate_task_event, cpu, event, data);
 
       if (trace_handler->migrate_task_event)
-              trace_handler->migrate_task_event(&migrate_task_event, session,
-                                                event, cpu, timestamp, thread);
+              trace_handler->migrate_task_event(&migrate_task_event, machine,
+                                                event, sample->cpu,
+                                                sample->time, thread);
 }
 
-static void process_raw_event(union perf_event *raw_event __used,
-                             struct perf_session *session, void *data, int cpu,
-                             u64 timestamp, struct thread *thread)
-{
-      struct event *event;
-      int type;
-
-
-      type = trace_parse_common_type(data);
-      event = trace_find_event(type);
-
-      if (!strcmp(event->name, "sched_switch"))
-              process_sched_switch_event(data, session, event, cpu, timestamp, thread);
-      if (!strcmp(event->name, "sched_stat_runtime"))
-              process_sched_runtime_event(data, session, event, cpu, timestamp, thread);
-      if (!strcmp(event->name, "sched_wakeup"))
-              process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
-      if (!strcmp(event->name, "sched_wakeup_new"))
-              process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
-      if (!strcmp(event->name, "sched_process_fork"))
-              process_sched_fork_event(data, event, cpu, timestamp, thread);
-      if (!strcmp(event->name, "sched_process_exit"))
-              process_sched_exit_event(event, cpu, timestamp, thread);
-      if (!strcmp(event->name, "sched_migrate_task"))
-              process_sched_migrate_task_event(data, session, event, cpu, timestamp, thread);
-}
+typedef void (*tracepoint_handler)(struct perf_tool *tool, struct event *event,
+                                   struct perf_sample *sample,
+                                   struct machine *machine,
+                                   struct thread *thread);
 
-static int process_sample_event(union perf_event *event,
-                               struct perf_sample *sample,
-                               struct perf_evsel *evsel __used,
-                               struct perf_session *session)
+static int perf_sched__process_tracepoint_sample(struct perf_tool *tool,
+                                                 union perf_event *event __used,
+                                                 struct perf_sample *sample,
+                                                 struct perf_evsel *evsel,
+                                                 struct machine *machine)
 {
-      struct thread *thread;
-
-      if (!(session->sample_type & PERF_SAMPLE_RAW))
-              return 0;
+      struct thread *thread = machine__findnew_thread(machine, sample->pid);
 
-      thread = perf_session__findnew(session, sample->pid);
       if (thread == NULL) {
-              pr_debug("problem processing %d event, skipping it.\n",
-                       event->header.type);
+              pr_debug("problem processing %s event, skipping it.\n",
+                       evsel->name);
               return -1;
       }
 
-      dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
+      evsel->hists.stats.total_period += sample->period;
+      hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
 
-      if (profile_cpu != -1 && profile_cpu != (int)sample->cpu)
-              return 0;
+      if (evsel->handler.func != NULL) {
+              tracepoint_handler f = evsel->handler.func;
 
-      process_raw_event(event, session, sample->raw_data, sample->cpu,
-                        sample->time, thread);
+              if (evsel->handler.data == NULL)
+                      evsel->handler.data = trace_find_event(evsel->attr.config);
+
+              f(tool, evsel->handler.data, sample, machine, thread);
+      }
 
       return 0;
 }
 
-static struct perf_event_ops event_ops = {
-      .sample = process_sample_event,
+static struct perf_tool perf_sched = {
+      .sample = perf_sched__process_tracepoint_sample,
       .comm = perf_event__process_comm,
       .lost = perf_event__process_lost,
       .fork = perf_event__process_task,
@@ -1640,13 +1630,25 @@ static void read_events(bool destroy, struct perf_session **psession)
 {
       int err = -EINVAL;
+      const struct perf_evsel_str_handler handlers[] = {
+              { "sched:sched_switch",       process_sched_switch_event, },
+              { "sched:sched_stat_runtime", process_sched_runtime_event, },
+              { "sched:sched_wakeup",       process_sched_wakeup_event, },
+              { "sched:sched_wakeup_new",   process_sched_wakeup_event, },
+              { "sched:sched_process_fork", process_sched_fork_event, },
+              { "sched:sched_process_exit", process_sched_exit_event, },
+              { "sched:sched_migrate_task", process_sched_migrate_task_event, },
+      };
       struct perf_session *session = perf_session__new(input_name, O_RDONLY,
-                                                       0, false, &event_ops);
+                                                       0, false, &perf_sched);
       if (session == NULL)
               die("No Memory");
 
+      err = perf_evlist__set_tracepoints_handlers_array(session->evlist, handlers);
+      assert(err == 0);
+
       if (perf_session__has_traces(session, "record -R")) {
-              err = perf_session__process_events(session, &event_ops);
+              err = perf_session__process_events(session, &perf_sched);
               if (err)
                       die("Failed to process events, error %d", err);
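The substance of the change is in the last two hunks: the old process_raw_event() strcmp() chain, run once per sample, is replaced by handlers that read_events() attaches to each tracepoint up front, so the per-sample callback only chases a function pointer. Below is a minimal, self-contained sketch of that dispatch pattern; the types and names used here (sample_t, evsel_t, set_handlers, deliver_sample) are simplified stand-ins for illustration, not the actual perf structures or APIs.

/*
 * Sketch only: attach a handler to each "event selector" once by name,
 * then dispatch each sample through the stored pointer with no string
 * comparison on the hot path.
 */
#include <stdio.h>
#include <string.h>

typedef struct {
	unsigned int cpu;
	unsigned long long time;
} sample_t;

typedef void (*tracepoint_handler)(sample_t *sample);

typedef struct {
	const char *name;              /* e.g. "sched:sched_switch" */
	tracepoint_handler handler;    /* resolved once at setup time */
} evsel_t;

struct str_handler {
	const char *name;
	tracepoint_handler handler;
};

static void handle_sched_switch(sample_t *sample)
{
	printf("sched_switch on cpu %u at %llu\n", sample->cpu, sample->time);
}

static void handle_sched_wakeup(sample_t *sample)
{
	printf("sched_wakeup on cpu %u at %llu\n", sample->cpu, sample->time);
}

/* Resolve each event's handler by tracepoint name, once, up front. */
static void set_handlers(evsel_t *evlist, int nr,
			 const struct str_handler *handlers, int nr_handlers)
{
	for (int i = 0; i < nr; i++)
		for (int j = 0; j < nr_handlers; j++)
			if (!strcmp(evlist[i].name, handlers[j].name)) {
				evlist[i].handler = handlers[j].handler;
				break;
			}
}

/* Per-sample path: just an indirect call through the stored pointer. */
static void deliver_sample(evsel_t *evsel, sample_t *sample)
{
	if (evsel->handler)
		evsel->handler(sample);
}

int main(void)
{
	evsel_t evlist[] = {
		{ .name = "sched:sched_switch" },
		{ .name = "sched:sched_wakeup" },
	};
	const struct str_handler handlers[] = {
		{ "sched:sched_switch", handle_sched_switch },
		{ "sched:sched_wakeup", handle_sched_wakeup },
	};
	sample_t sample = { .cpu = 1, .time = 123456789ULL };

	set_handlers(evlist, 2, handlers, 2);
	deliver_sample(&evlist[0], &sample);	/* calls handle_sched_switch */
	deliver_sample(&evlist[1], &sample);	/* calls handle_sched_wakeup */
	return 0;
}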