author     David Ahern                 2013-02-05 14:12:42 -0700
committer  Arnaldo Carvalho de Melo    2013-02-06 18:09:26 -0300
commit     0479b8b9cf4377df5d2c81506ce93326c31eff40 (patch)
tree       dff61dc7107f5b6563915b02347597489687fc1c /tools
parent     5936f54d6ca2857d81188dcdff8c61b8fc482f53 (diff)
perf evlist: Make event_copy local to mmaps
I am getting segfaults *after* the time sorting of perf samples where
the event type is off the charts:
(gdb) bt
#0  0x0807b1b2 in hists__inc_nr_events (hists=0x80a99c4, type=1163281902) at util/hist.c:1225
#1  0x08070795 in perf_session_deliver_event (session=0x80a9b90, event=0xf7a6aff8, sample=0xffffc318, tool=0xffffc520,
    file_offset=0) at util/session.c:884
#2  0x0806f9b9 in flush_sample_queue (s=0x80a9b90, tool=0xffffc520) at util/session.c:555
#3  0x0806fc53 in process_finished_round (tool=0xffffc520, event=0x0, session=0x80a9b90) at util/session.c:645
This is bizarre because the event has already been processed once --
before it was added to the samples queue -- and the event was found to
be sane at that time.
There seem to be 2 causes:
1. perf_evlist__mmap_read updates the read location even though there
are outstanding references to events sitting in the mmap buffers via the
ordered samples queue.
2. There is a single evlist->event_copy shared across all evlist entries.
event_copy is the scratch buffer used to handle an event wrapping at the
mmap buffer boundary (a minimal sketch of this wrap handling follows the list).
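
To make the second cause concrete, here is a minimal, self-contained userspace
sketch of the wrap handling that event_copy exists for. It is not the perf
code; the names (struct ring, read_wrapped) and the ring size are made up for
illustration. An event that straddles the end of the ring buffer is reassembled
by copying its pieces into a scratch buffer. If that scratch buffer were shared
by every ring -- as the single evlist->event_copy is today -- reading a second
ring could overwrite a wrapped event from the first while the ordered samples
queue still holds a pointer to it; a per-ring scratch buffer, as this patch
introduces, avoids that.

  /*
   * Minimal userspace sketch (not the perf code itself) of the wrap-around
   * copy that event_copy is used for.  An event straddling the end of a ring
   * buffer is memcpy'd piecewise into a scratch buffer so the caller sees one
   * contiguous record.
   */
  #include <stdio.h>
  #include <string.h>

  #define RING_SIZE 16            /* power of two, like the real mmap mask + 1 */

  struct ring {
          unsigned char data[RING_SIZE];
          unsigned int  mask;                   /* RING_SIZE - 1 */
          unsigned char event_copy[RING_SIZE];  /* per-ring scratch, as in the patch */
  };

  /* Copy 'len' bytes starting at 'offset', handling wrap at the ring end. */
  static void *read_wrapped(struct ring *r, unsigned int offset, unsigned int len)
  {
          unsigned char *dst = r->event_copy;   /* was one shared buffer before */

          while (len) {
                  unsigned int cpy = r->mask + 1 - (offset & r->mask);

                  if (cpy > len)
                          cpy = len;
                  memcpy(dst, &r->data[offset & r->mask], cpy);
                  offset += cpy;
                  dst    += cpy;
                  len    -= cpy;
          }
          return r->event_copy;
  }

  int main(void)
  {
          struct ring a = { .mask = RING_SIZE - 1 };
          struct ring b = { .mask = RING_SIZE - 1 };

          memcpy(a.data, "AAAAAAAAAAAAAAAA", RING_SIZE);
          memcpy(b.data, "BBBBBBBBBBBBBBBB", RING_SIZE);

          /* An 8-byte "event" starting 4 bytes before the end of ring A wraps. */
          char *ev_a = read_wrapped(&a, RING_SIZE - 4, 8);
          /* Reading ring B no longer clobbers ev_a: the scratch is per ring. */
          read_wrapped(&b, RING_SIZE - 4, 8);

          printf("%.8s\n", ev_a);               /* still "AAAAAAAA" */
          return 0;
  }

Built with e.g. gcc -std=c99, this prints AAAAAAAA; if the scratch buffer were
a single shared global instead, the same program would print BBBBBBBB, which is
the shape of the corruption described above.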
This patch addresses the second problem by making event_copy local to
each perf_mmap. With this change my highly repeatable use case no longer
fails.
The first problem is much more complicated and will be the subject of a
future patch.
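
For reference, here is a similarly simplified sketch of that first problem,
under the assumption that the ordered samples queue keeps raw pointers into the
ring while the reader keeps advancing its read position. The names (take_event,
produce) are illustrative, not perf's API: once the consumer has told the
producer a slot is reusable, any deferred pointer into that slot can be
overwritten before the queue is flushed.

  /*
   * Sketch of problem 1: the consumer hands out a pointer into the ring and
   * immediately marks the slot reusable, yet the caller defers processing.
   */
  #include <stdio.h>
  #include <string.h>

  #define SLOTS 4

  static char ring[SLOTS][8];
  static unsigned int head, tail;  /* producer writes at head, consumer reads at tail */

  static char *take_event(void)
  {
          char *ev = ring[tail % SLOTS];
          tail++;                  /* slot is marked reusable right away ...   */
          return ev;               /* ... yet the caller keeps this pointer    */
  }

  static void produce(const char *msg)
  {
          strncpy(ring[head % SLOTS], msg, sizeof(ring[0]) - 1);
          head++;
  }

  int main(void)
  {
          char *queued[SLOTS];

          produce("ev-0");
          queued[0] = take_event();        /* deferred: processed "later" */

          /* Enough new events arrive to lap the ring before the flush. */
          for (int i = 1; i <= SLOTS; i++) {
                  char name[8];
                  snprintf(name, sizeof(name), "ev-%d", i);
                  produce(name);
          }

          printf("%s\n", queued[0]);       /* prints "ev-4": the queued event was overwritten */
          return 0;
  }

In the real ring the kernel only reuses space behind data_tail, so advancing
the tail past records that the ordered samples queue still references is what
makes this kind of reuse possible.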
Signed-off-by: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1360098762-61827-1-git-send-email-dsahern@gmail.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools')
-rw-r--r--  tools/perf/perf.h        | 26
-rw-r--r--  tools/perf/util/evlist.c |  4
-rw-r--r--  tools/perf/util/evlist.h | 29
3 files changed, 30 insertions, 29 deletions
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 8f3bf388e414..c2206c87fc9f 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -103,32 +103,6 @@
 #include "util/types.h"
 #include <stdbool.h>
 
-struct perf_mmap {
-	void *base;
-	int mask;
-	unsigned int prev;
-};
-
-static inline unsigned int perf_mmap__read_head(struct perf_mmap *mm)
-{
-	struct perf_event_mmap_page *pc = mm->base;
-	int head = pc->data_head;
-	rmb();
-	return head;
-}
-
-static inline void perf_mmap__write_tail(struct perf_mmap *md,
-					 unsigned long tail)
-{
-	struct perf_event_mmap_page *pc = md->base;
-
-	/*
-	 * ensure all reads are done before we write the tail out.
-	 */
-	/* mb(); */
-	pc->data_tail = tail;
-}
-
 /*
  * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
  * counters in the current task.
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index ecf123e080bb..bc4ad7977438 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -375,7 +375,7 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
 		if ((old & md->mask) + size != ((old + size) & md->mask)) {
 			unsigned int offset = old;
 			unsigned int len = min(sizeof(*event), size), cpy;
-			void *dst = &evlist->event_copy;
+			void *dst = &md->event_copy;
 
 			do {
 				cpy = min(md->mask + 1 - (offset & md->mask), len);
@@ -385,7 +385,7 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
 				len -= cpy;
 			} while (len);
 
-			event = &evlist->event_copy;
+			event = &md->event_copy;
 		}
 
 		old += size;
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 73579a25a93e..2dd07bd60b4f 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -17,6 +17,13 @@ struct perf_record_opts;
 #define PERF_EVLIST__HLIST_BITS 8
 #define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
 
+struct perf_mmap {
+	void *base;
+	int mask;
+	unsigned int prev;
+	union perf_event event_copy;
+};
+
 struct perf_evlist {
 	struct list_head entries;
 	struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
@@ -30,7 +37,6 @@ struct perf_evlist {
 		pid_t pid;
 	} workload;
 	bool overwrite;
-	union perf_event event_copy;
 	struct perf_mmap *mmap;
 	struct pollfd *pollfd;
 	struct thread_map *threads;
@@ -136,4 +142,25 @@ static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist)
 }
 
 size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp);
+
+static inline unsigned int perf_mmap__read_head(struct perf_mmap *mm)
+{
+	struct perf_event_mmap_page *pc = mm->base;
+	int head = pc->data_head;
+	rmb();
+	return head;
+}
+
+static inline void perf_mmap__write_tail(struct perf_mmap *md,
+					 unsigned long tail)
+{
+	struct perf_event_mmap_page *pc = md->base;
+
+	/*
+	 * ensure all reads are done before we write the tail out.
+	 */
+	/* mb(); */
+	pc->data_tail = tail;
+}
+
 #endif /* __PERF_EVLIST_H */