author     Alexey Budankov              2018-11-06 12:07:19 +0300
committer  Arnaldo Carvalho de Melo     2018-12-17 14:55:11 -0300
commit     93f20c0fe3e86088ec041e47755367c4bed584c6 (patch)
tree       108d59a511a822fa122db8371b2b49e243b885e9 /tools/perf/util
parent     d3d1af6f011a553a00d2bda90b2700c0d56bd8f7 (diff)
perf record: Extend trace writing to multi AIO
Multi AIO trace writing allows caching more kernel data in userspace
memory, postponing trace writing for the sake of higher overall
profiling data throughput. It can be seen as an extension of the kernel
data buffer into userspace memory.
With an --aio option value different from 0 (the default value is 1),
the tool can cache progressively more data in user space while
delegating the spill to AIO, as in the usage sketch below.
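As a usage sketch (the exact option spelling here is an assumption, not
taken from this commit):

    $ perf record --aio=4 -e cycles -- sleep 5

would ask for four AIO control blocks and data buffers per mapped ring,
so up to four trace writes can be in flight at once.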
Caching more data this way avoids suspending in record__aio_sync()
between calls to record__mmap_read_evlist() and increases profiling
data throughput at the cost of userspace memory.
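The core idea can be illustrated with a minimal standalone sketch,
independent of the perf sources: keep a pool of AIO control blocks and
reuse whichever one has completed, blocking only when every write is
still in flight. The names aio_pool and pick_free_slot below are
hypothetical, not from this patch.

    /* Minimal sketch of cblock reuse; build with -lrt on glibc. */
    #include <aio.h>
    #include <errno.h>

    struct aio_pool {
    	struct aiocb	*cblocks;	/* one control block per in-flight write */
    	int		 nr_cblocks;	/* pool size, cf. the --aio option value */
    };

    /* Return the index of a reusable cblock, or -1 if all are busy. */
    static int pick_free_slot(struct aio_pool *pool)
    {
    	int i;

    	for (i = 0; i < pool->nr_cblocks; i++) {
    		/* aio_fildes == -1 marks a never-used or reclaimed slot */
    		if (pool->cblocks[i].aio_fildes == -1)
    			return i;
    		/* a completed request can be reclaimed and reused */
    		if (aio_error(&pool->cblocks[i]) != EINPROGRESS) {
    			pool->cblocks[i].aio_fildes = -1;
    			return i;
    		}
    	}
    	return -1;	/* all writes still in flight: caller must wait */
    }

With nr_cblocks == 1 this degenerates to the previous behavior, where
the reader must synchronize on the single outstanding write before it
can drain the kernel buffer again.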
Signed-off-by: Alexey Budankov <alexey.budankov@linux.intel.com>
Reviewed-by: Jiri Olsa <jolsa@redhat.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/050bb053-e7f3-aa83-fde7-f27ff90be7f6@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/perf/util')
-rw-r--r--  tools/perf/util/mmap.c  64
-rw-r--r--  tools/perf/util/mmap.h   9
2 files changed, 48 insertions(+), 25 deletions(-)
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 61aa381d05d0..ab30555d2afc 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -156,28 +156,50 @@ void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __mayb
 #ifdef HAVE_AIO_SUPPORT
 static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
 {
-	int delta_max;
+	int delta_max, i, prio;
 
 	map->aio.nr_cblocks = mp->nr_cblocks;
 	if (map->aio.nr_cblocks) {
-		map->aio.data = malloc(perf_mmap__mmap_len(map));
+		map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *));
+		if (!map->aio.aiocb) {
+			pr_debug2("failed to allocate aiocb for data buffer, error %m\n");
+			return -1;
+		}
+		map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb));
+		if (!map->aio.cblocks) {
+			pr_debug2("failed to allocate cblocks for data buffer, error %m\n");
+			return -1;
+		}
+		map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *));
 		if (!map->aio.data) {
 			pr_debug2("failed to allocate data buffer, error %m\n");
 			return -1;
 		}
-		/*
-		 * Use cblock.aio_fildes value different from -1
-		 * to denote started aio write operation on the
-		 * cblock so it requires explicit record__aio_sync()
-		 * call prior the cblock may be reused again.
-		 */
-		map->aio.cblock.aio_fildes = -1;
-		/*
-		 * Allocate cblock with max priority delta to
-		 * have faster aio write system calls.
-		 */
 		delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
-		map->aio.cblock.aio_reqprio = delta_max;
+		for (i = 0; i < map->aio.nr_cblocks; ++i) {
+			map->aio.data[i] = malloc(perf_mmap__mmap_len(map));
+			if (!map->aio.data[i]) {
+				pr_debug2("failed to allocate data buffer area, error %m");
+				return -1;
+			}
+			/*
+			 * Use cblock.aio_fildes value different from -1
+			 * to denote started aio write operation on the
+			 * cblock so it requires explicit record__aio_sync()
+			 * call prior the cblock may be reused again.
+			 */
+			map->aio.cblocks[i].aio_fildes = -1;
+			/*
+			 * Allocate cblocks with priority delta to have
+			 * faster aio write system calls because queued requests
+			 * are kept in separate per-prio queues and adding
+			 * a new request will iterate thru shorter per-prio
+			 * list. Blocks with numbers higher than
+			 * _SC_AIO_PRIO_DELTA_MAX go with priority 0.
+			 */
+			prio = delta_max - i;
+			map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
+		}
 	}
 
 	return 0;
@@ -189,7 +211,7 @@ static void perf_mmap__aio_munmap(struct perf_mmap *map)
 	zfree(&map->aio.data);
 }
 
-int perf_mmap__aio_push(struct perf_mmap *md, void *to,
+int perf_mmap__aio_push(struct perf_mmap *md, void *to, int idx,
 		    int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off),
 		    off_t *off)
 {
@@ -204,7 +226,7 @@ int perf_mmap__aio_push(struct perf_mmap *md, void *to,
 		return (rc == -EAGAIN) ? 0 : -1;
 
 	/*
-	 * md->base data is copied into md->data buffer to
+	 * md->base data is copied into md->data[idx] buffer to
 	 * release space in the kernel buffer as fast as possible,
 	 * thru perf_mmap__consume() below.
 	 *
@@ -226,20 +248,20 @@ int perf_mmap__aio_push(struct perf_mmap *md, void *to,
 		buf = &data[md->start & md->mask];
 		size = md->mask + 1 - (md->start & md->mask);
 		md->start += size;
-		memcpy(md->aio.data, buf, size);
+		memcpy(md->aio.data[idx], buf, size);
 		size0 = size;
 	}
 
 	buf = &data[md->start & md->mask];
 	size = md->end - md->start;
 	md->start += size;
-	memcpy(md->aio.data + size0, buf, size);
+	memcpy(md->aio.data[idx] + size0, buf, size);
 
 	/*
-	 * Increment md->refcount to guard md->data buffer
+	 * Increment md->refcount to guard md->data[idx] buffer
 	 * from premature deallocation because md object can be
 	 * released earlier than aio write request started
-	 * on mmap->data is complete.
+	 * on mmap->data[idx] is complete.
 	 *
 	 * perf_mmap__put() is done at record__aio_complete()
 	 * after started request completion.
@@ -249,7 +271,7 @@ int perf_mmap__aio_push(struct perf_mmap *md, void *to,
 	md->prev = head;
 	perf_mmap__consume(md);
 
-	rc = push(to, &md->aio.cblock, md->aio.data, size0 + size, *off);
+	rc = push(to, &md->aio.cblocks[idx], md->aio.data[idx], size0 + size, *off);
 	if (!rc) {
 		*off += size0 + size;
 	} else {
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index b99213ba11b5..aeb6942fdb00 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -32,8 +32,9 @@ struct perf_mmap {
 	char		 event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
 #ifdef HAVE_AIO_SUPPORT
 	struct {
-		void		 *data;
-		struct aiocb	 cblock;
+		void		 **data;
+		struct aiocb	 *cblocks;
+		struct aiocb	 **aiocb;
 		int		 nr_cblocks;
 	} aio;
 #endif
@@ -97,11 +98,11 @@ union perf_event *perf_mmap__read_event(struct perf_mmap *map);
 int perf_mmap__push(struct perf_mmap *md, void *to,
 		    int push(struct perf_mmap *map, void *to, void *buf, size_t size));
 #ifdef HAVE_AIO_SUPPORT
-int perf_mmap__aio_push(struct perf_mmap *md, void *to,
+int perf_mmap__aio_push(struct perf_mmap *md, void *to, int idx,
 		    int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off),
 		    off_t *off);
 #else
-static inline int perf_mmap__aio_push(struct perf_mmap *md __maybe_unused, void *to __maybe_unused,
+static inline int perf_mmap__aio_push(struct perf_mmap *md __maybe_unused, void *to __maybe_unused, int idx __maybe_unused,
 	int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off) __maybe_unused,
 	off_t *off __maybe_unused)
 {
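The per-cblock priority assignment above (prio = delta_max - i, clamped
at 0) is meant to spread requests across short per-priority queues.
A minimal sketch of submitting one buffer under that scheme follows;
submit_write, record_fd, buf, size and slot are hypothetical names, not
part of the patch:

    /* Sketch of an AIO submit using the patch's priority-delta scheme. */
    #include <aio.h>
    #include <string.h>
    #include <unistd.h>

    static int submit_write(struct aiocb *cb, int record_fd,
    			void *buf, size_t size, off_t off, int slot)
    {
    	/* sysconf() may return -1; a real caller should check that */
    	long delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
    	long prio = delta_max - slot;

    	memset(cb, 0, sizeof(*cb));
    	cb->aio_fildes = record_fd;	/* != -1 now marks the slot busy */
    	cb->aio_buf = buf;
    	cb->aio_nbytes = size;
    	cb->aio_offset = off;
    	/* slots beyond _SC_AIO_PRIO_DELTA_MAX fall back to priority 0 */
    	cb->aio_reqprio = prio >= 0 ? (int)prio : 0;

    	return aio_write(cb);	/* 0 if queued, -1 with errno set */
    }

The extra aiocb pointer array added to struct perf_mmap gives callers a
contiguous list of control blocks to pass to synchronization calls such
as aio_suspend(), rather than waiting on one hard-coded cblock.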