Diffstat (limited to 'tools/perf'): 88 files changed, 1858 insertions, 1009 deletions
diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile index db11478e30b4..42261a9b280e 100644 --- a/tools/perf/Documentation/Makefile +++ b/tools/perf/Documentation/Makefile @@ -47,7 +47,8 @@ man5dir=$(mandir)/man5 man7dir=$(mandir)/man7 ASCIIDOC=asciidoc -ASCIIDOC_EXTRA = --unsafe +ASCIIDOC_EXTRA = --unsafe -f asciidoc.conf +ASCIIDOC_HTML = xhtml11 MANPAGE_XSL = manpage-normal.xsl XMLTO_EXTRA = INSTALL?=install @@ -55,6 +56,14 @@ RM ?= rm -f DOC_REF = origin/man HTML_REF = origin/html +ifdef USE_ASCIIDOCTOR +ASCIIDOC = asciidoctor +ASCIIDOC_EXTRA = -a compat-mode +ASCIIDOC_EXTRA += -I. -rasciidoctor-extensions +ASCIIDOC_EXTRA += -a mansource="perf" -a manmanual="perf Manual" +ASCIIDOC_HTML = xhtml5 +endif + infodir?=$(prefix)/share/info MAKEINFO=makeinfo INSTALL_INFO=install-info @@ -73,10 +82,12 @@ ifeq ($(_tmp_tool_path),) missing_tools = $(ASCIIDOC) endif +ifndef USE_ASCIIDOCTOR _tmp_tool_path := $(call get-executable,$(XMLTO)) ifeq ($(_tmp_tool_path),) missing_tools += $(XMLTO) endif +endif # # For asciidoc ... @@ -264,9 +275,17 @@ clean: $(MAN_HTML): $(OUTPUT)%.html : %.txt $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \ - $(ASCIIDOC) -b xhtml11 -d manpage -f asciidoc.conf \ + $(ASCIIDOC) -b $(ASCIIDOC_HTML) -d manpage \ + $(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \ + mv $@+ $@ + +ifdef USE_ASCIIDOCTOR +$(OUTPUT)%.1 $(OUTPUT)%.5 $(OUTPUT)%.7 : $(OUTPUT)%.txt + $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \ + $(ASCIIDOC) -b manpage -d manpage \ $(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \ mv $@+ $@ +endif $(OUTPUT)%.1 $(OUTPUT)%.5 $(OUTPUT)%.7 : $(OUTPUT)%.xml $(QUIET_XMLTO)$(RM) $@ && \ @@ -274,7 +293,7 @@ $(OUTPUT)%.1 $(OUTPUT)%.5 $(OUTPUT)%.7 : $(OUTPUT)%.xml $(OUTPUT)%.xml : %.txt $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \ - $(ASCIIDOC) -b docbook -d manpage -f asciidoc.conf \ + $(ASCIIDOC) -b docbook -d manpage \ $(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \ mv $@+ $@ @@ -321,13 +340,13 @@ howto-index.txt: howto-index.sh $(wildcard howto/*.txt) mv $@+ $@ $(patsubst %,%.html,$(ARTICLES)) : %.html : %.txt - $(QUIET_ASCIIDOC)$(ASCIIDOC) -b xhtml11 $*.txt + $(QUIET_ASCIIDOC)$(ASCIIDOC) -b $(ASCIIDOC_HTML) $*.txt WEBDOC_DEST = /pub/software/tools/perf/docs $(patsubst %.txt,%.html,$(wildcard howto/*.txt)): %.html : %.txt $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \ - sed -e '1,/^$$/d' $< | $(ASCIIDOC) -b xhtml11 - >$@+ && \ + sed -e '1,/^$$/d' $< | $(ASCIIDOC) -b $(ASCIIDOC_HTML) - >$@+ && \ mv $@+ $@ # UNIMPLEMENTED diff --git a/tools/perf/Documentation/asciidoctor-extensions.rb b/tools/perf/Documentation/asciidoctor-extensions.rb new file mode 100644 index 000000000000..d148fe95c0c4 --- /dev/null +++ b/tools/perf/Documentation/asciidoctor-extensions.rb @@ -0,0 +1,29 @@ +require 'asciidoctor' +require 'asciidoctor/extensions' + +module Perf + module Documentation + class LinkPerfProcessor < Asciidoctor::Extensions::InlineMacroProcessor + use_dsl + + named :chrome + + def process(parent, target, attrs) + if parent.document.basebackend? 'html' + %(<a href="#{target}.html">#{target}(#{attrs[1]})</a>\n) + elsif parent.document.basebackend? 'manpage' + "#{target}(#{attrs[1]})" + elsif parent.document.basebackend? 
'docbook' + "<citerefentry>\n" \ + "<refentrytitle>#{target}</refentrytitle>" \ + "<manvolnum>#{attrs[1]}</manvolnum>\n" \ + "</citerefentry>\n" + end + end + end + end +end + +Asciidoctor::Extensions.register do + inline_macro Perf::Documentation::LinkPerfProcessor, :linkperf +end diff --git a/tools/perf/Documentation/perf-buildid-cache.txt b/tools/perf/Documentation/perf-buildid-cache.txt index 73c2650bd0db..f6de0952ff3c 100644 --- a/tools/perf/Documentation/perf-buildid-cache.txt +++ b/tools/perf/Documentation/perf-buildid-cache.txt @@ -48,6 +48,9 @@ OPTIONS --purge=:: Purge all cached binaries including older caches which have specified path from the cache. +-P:: +--purge-all:: + Purge all cached binaries. This will flush out entire cache. -M:: --missing=:: List missing build ids in the cache for the specified file. @@ -59,7 +62,9 @@ OPTIONS exactly same build-id, that is replaced by new one. It can be used to update kallsyms and kernel dso to vmlinux in order to support annotation. - +-l:: +--list:: + List all valid binaries from cache. -v:: --verbose:: Be more verbose. diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt index e6c3b4e555c2..3a822f308e6d 100644 --- a/tools/perf/Documentation/perf-stat.txt +++ b/tools/perf/Documentation/perf-stat.txt @@ -116,6 +116,22 @@ Do not aggregate counts across all monitored CPUs. print counts using a CSV-style output to make it easy to import directly into spreadsheets. Columns are separated by the string specified in SEP. +--table:: Display time for each run (-r option), in a table format, e.g.: + + $ perf stat --null -r 5 --table perf bench sched pipe + + Performance counter stats for 'perf bench sched pipe' (5 runs): + + # Table of individual measurements: + 5.189 (-0.293) # + 5.189 (-0.294) # + 5.186 (-0.296) # + 5.663 (+0.181) ## + 6.186 (+0.703) #### + + # Final result: + 5.483 +- 0.198 seconds time elapsed ( +- 3.62% ) + -G name:: --cgroup name:: monitor only in the container (cgroup) called "name". 
This option is available only diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index ae7dc46e8f8a..b5ac356ba323 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -885,6 +885,8 @@ endif # Among the variables below, these: # perfexecdir +# perf_include_dir +# perf_examples_dir # template_dir # mandir # infodir @@ -904,6 +906,8 @@ bindir = $(abspath $(prefix)/$(bindir_relative)) mandir = share/man infodir = share/info perfexecdir = libexec/perf-core +perf_include_dir = lib/include/perf +perf_examples_dir = lib/examples/perf sharedir = $(prefix)/share template_dir = share/perf-core/templates STRACE_GROUPS_DIR = share/perf-core/strace/groups @@ -934,6 +938,8 @@ bindir_SQ = $(subst ','\'',$(bindir)) mandir_SQ = $(subst ','\'',$(mandir)) infodir_SQ = $(subst ','\'',$(infodir)) perfexecdir_SQ = $(subst ','\'',$(perfexecdir)) +perf_include_dir_SQ = $(subst ','\'',$(perf_include_dir)) +perf_examples_dir_SQ = $(subst ','\'',$(perf_examples_dir)) template_dir_SQ = $(subst ','\'',$(template_dir)) htmldir_SQ = $(subst ','\'',$(htmldir)) tipdir_SQ = $(subst ','\'',$(tipdir)) @@ -944,14 +950,20 @@ srcdir_SQ = $(subst ','\'',$(srcdir)) ifneq ($(filter /%,$(firstword $(perfexecdir))),) perfexec_instdir = $(perfexecdir) +perf_include_instdir = $(perf_include_dir) +perf_examples_instdir = $(perf_examples_dir) STRACE_GROUPS_INSTDIR = $(STRACE_GROUPS_DIR) tip_instdir = $(tipdir) else perfexec_instdir = $(prefix)/$(perfexecdir) +perf_include_instdir = $(prefix)/$(perf_include_dir) +perf_examples_instdir = $(prefix)/$(perf_examples_dir) STRACE_GROUPS_INSTDIR = $(prefix)/$(STRACE_GROUPS_DIR) tip_instdir = $(prefix)/$(tipdir) endif perfexec_instdir_SQ = $(subst ','\'',$(perfexec_instdir)) +perf_include_instdir_SQ = $(subst ','\'',$(perf_include_instdir)) +perf_examples_instdir_SQ = $(subst ','\'',$(perf_examples_instdir)) STRACE_GROUPS_INSTDIR_SQ = $(subst ','\'',$(STRACE_GROUPS_INSTDIR)) tip_instdir_SQ = $(subst ','\'',$(tip_instdir)) @@ -999,6 +1011,8 @@ $(call detected_var,ETC_PERFCONFIG_SQ) $(call detected_var,STRACE_GROUPS_DIR_SQ) $(call detected_var,prefix_SQ) $(call detected_var,perfexecdir_SQ) +$(call detected_var,perf_include_dir_SQ) +$(call detected_var,perf_examples_dir_SQ) $(call detected_var,tipdir_SQ) $(call detected_var,srcdir_SQ) $(call detected_var,LIBDIR) diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 83e453de36f8..ecc9fc952655 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -767,6 +767,16 @@ ifndef NO_JVMTI endif $(call QUIET_INSTALL, libexec) \ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' +ifndef NO_LIBBPF + $(call QUIET_INSTALL, lib) \ + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf' + $(call QUIET_INSTALL, include/bpf) \ + $(INSTALL) include/bpf/*.h '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf' + $(call QUIET_INSTALL, lib) \ + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf' + $(call QUIET_INSTALL, examples/bpf) \ + $(INSTALL) examples/bpf/*.c '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf' +endif $(call QUIET_INSTALL, perf-archive) \ $(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' $(call QUIET_INSTALL, perf-with-kcore) \ diff --git a/tools/perf/arch/arm/tests/dwarf-unwind.c b/tools/perf/arch/arm/tests/dwarf-unwind.c index 8cb347760233..9a0242e74cfc 100644 --- a/tools/perf/arch/arm/tests/dwarf-unwind.c +++ b/tools/perf/arch/arm/tests/dwarf-unwind.c @@ -25,7 +25,7 @@ static int sample_ustack(struct perf_sample *sample, sp = 
(unsigned long) regs[PERF_REG_ARM_SP]; - map = map_groups__find(thread->mg, MAP__VARIABLE, (u64) sp); + map = map_groups__find(thread->mg, (u64)sp); if (!map) { pr_debug("failed to get stack map\n"); free(buf); diff --git a/tools/perf/arch/arm64/tests/dwarf-unwind.c b/tools/perf/arch/arm64/tests/dwarf-unwind.c index e907f0f4c20c..5522ce384723 100644 --- a/tools/perf/arch/arm64/tests/dwarf-unwind.c +++ b/tools/perf/arch/arm64/tests/dwarf-unwind.c @@ -25,7 +25,7 @@ static int sample_ustack(struct perf_sample *sample, sp = (unsigned long) regs[PERF_REG_ARM64_SP]; - map = map_groups__find(thread->mg, MAP__VARIABLE, (u64) sp); + map = map_groups__find(thread->mg, (u64)sp); if (!map) { pr_debug("failed to get stack map\n"); free(buf); diff --git a/tools/perf/arch/powerpc/tests/dwarf-unwind.c b/tools/perf/arch/powerpc/tests/dwarf-unwind.c index 30cbbd6d5be0..5f39efef0856 100644 --- a/tools/perf/arch/powerpc/tests/dwarf-unwind.c +++ b/tools/perf/arch/powerpc/tests/dwarf-unwind.c @@ -26,7 +26,7 @@ static int sample_ustack(struct perf_sample *sample, sp = (unsigned long) regs[PERF_REG_POWERPC_R1]; - map = map_groups__find(thread->mg, MAP__VARIABLE, (u64) sp); + map = map_groups__find(thread->mg, (u64)sp); if (!map) { pr_debug("failed to get stack map\n"); free(buf); diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c index 0c370f81e002..3598b8b75d27 100644 --- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c +++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c @@ -248,8 +248,7 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain) ip = chain->ips[2]; - thread__find_addr_location(thread, PERF_RECORD_MISC_USER, - MAP__FUNCTION, ip, &al); + thread__find_symbol(thread, PERF_RECORD_MISC_USER, ip, &al); if (al.map) dso = al.map->dso; diff --git a/tools/perf/arch/x86/tests/dwarf-unwind.c b/tools/perf/arch/x86/tests/dwarf-unwind.c index 95036c7a59e8..7879df34569a 100644 --- a/tools/perf/arch/x86/tests/dwarf-unwind.c +++ b/tools/perf/arch/x86/tests/dwarf-unwind.c @@ -26,7 +26,7 @@ static int sample_ustack(struct perf_sample *sample, sp = (unsigned long) regs[PERF_REG_X86_SP]; - map = map_groups__find(thread->mg, MAP__VARIABLE, (u64) sp); + map = map_groups__find(thread->mg, (u64)sp); if (!map) { pr_debug("failed to get stack map\n"); free(buf); diff --git a/tools/perf/arch/x86/util/Build b/tools/perf/arch/x86/util/Build index f95e6f46ef0d..844b8f335532 100644 --- a/tools/perf/arch/x86/util/Build +++ b/tools/perf/arch/x86/util/Build @@ -4,6 +4,8 @@ libperf-y += pmu.o libperf-y += kvm-stat.o libperf-y += perf_regs.o libperf-y += group.o +libperf-y += machine.o +libperf-y += event.o libperf-$(CONFIG_DWARF) += dwarf-regs.o libperf-$(CONFIG_BPF_PROLOGUE) += dwarf-regs.o diff --git a/tools/perf/arch/x86/util/event.c b/tools/perf/arch/x86/util/event.c new file mode 100644 index 000000000000..675a0213044d --- /dev/null +++ b/tools/perf/arch/x86/util/event.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/types.h> +#include <linux/string.h> + +#include "../../util/machine.h" +#include "../../util/tool.h" +#include "../../util/map.h" +#include "../../util/util.h" +#include "../../util/debug.h" + +#if defined(__x86_64__) + +int perf_event__synthesize_extra_kmaps(struct perf_tool *tool, + perf_event__handler_t process, + struct machine *machine) +{ + int rc = 0; + struct map *pos; + struct map_groups *kmaps = &machine->kmaps; + struct maps *maps = &kmaps->maps; + union perf_event *event = 
zalloc(sizeof(event->mmap) + + machine->id_hdr_size); + + if (!event) { + pr_debug("Not enough memory synthesizing mmap event " + "for extra kernel maps\n"); + return -1; + } + + for (pos = maps__first(maps); pos; pos = map__next(pos)) { + struct kmap *kmap; + size_t size; + + if (!__map__is_extra_kernel_map(pos)) + continue; + + kmap = map__kmap(pos); + + size = sizeof(event->mmap) - sizeof(event->mmap.filename) + + PERF_ALIGN(strlen(kmap->name) + 1, sizeof(u64)) + + machine->id_hdr_size; + + memset(event, 0, size); + + event->mmap.header.type = PERF_RECORD_MMAP; + + /* + * kernel uses 0 for user space maps, see kernel/perf_event.c + * __perf_event_mmap + */ + if (machine__is_host(machine)) + event->header.misc = PERF_RECORD_MISC_KERNEL; + else + event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; + + event->mmap.header.size = size; + + event->mmap.start = pos->start; + event->mmap.len = pos->end - pos->start; + event->mmap.pgoff = pos->pgoff; + event->mmap.pid = machine->pid; + + strlcpy(event->mmap.filename, kmap->name, PATH_MAX); + + if (perf_tool__process_synth_event(tool, event, machine, + process) != 0) { + rc = -1; + break; + } + } + + free(event); + return rc; +} + +#endif diff --git a/tools/perf/arch/x86/util/machine.c b/tools/perf/arch/x86/util/machine.c new file mode 100644 index 000000000000..4520ac53caa9 --- /dev/null +++ b/tools/perf/arch/x86/util/machine.c @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/types.h> +#include <linux/string.h> +#include <stdlib.h> + +#include "../../util/machine.h" +#include "../../util/map.h" +#include "../../util/symbol.h" +#include "../../util/sane_ctype.h" + +#include <symbol/kallsyms.h> + +#if defined(__x86_64__) + +struct extra_kernel_map_info { + int cnt; + int max_cnt; + struct extra_kernel_map *maps; + bool get_entry_trampolines; + u64 entry_trampoline; +}; + +static int add_extra_kernel_map(struct extra_kernel_map_info *mi, u64 start, + u64 end, u64 pgoff, const char *name) +{ + if (mi->cnt >= mi->max_cnt) { + void *buf; + size_t sz; + + mi->max_cnt = mi->max_cnt ? 
mi->max_cnt * 2 : 32; + sz = sizeof(struct extra_kernel_map) * mi->max_cnt; + buf = realloc(mi->maps, sz); + if (!buf) + return -1; + mi->maps = buf; + } + + mi->maps[mi->cnt].start = start; + mi->maps[mi->cnt].end = end; + mi->maps[mi->cnt].pgoff = pgoff; + strlcpy(mi->maps[mi->cnt].name, name, KMAP_NAME_LEN); + + mi->cnt += 1; + + return 0; +} + +static int find_extra_kernel_maps(void *arg, const char *name, char type, + u64 start) +{ + struct extra_kernel_map_info *mi = arg; + + if (!mi->entry_trampoline && kallsyms2elf_binding(type) == STB_GLOBAL && + !strcmp(name, "_entry_trampoline")) { + mi->entry_trampoline = start; + return 0; + } + + if (is_entry_trampoline(name)) { + u64 end = start + page_size; + + return add_extra_kernel_map(mi, start, end, 0, name); + } + + return 0; +} + +int machine__create_extra_kernel_maps(struct machine *machine, + struct dso *kernel) +{ + struct extra_kernel_map_info mi = { .cnt = 0, }; + char filename[PATH_MAX]; + int ret; + int i; + + machine__get_kallsyms_filename(machine, filename, PATH_MAX); + + if (symbol__restricted_filename(filename, "/proc/kallsyms")) + return 0; + + ret = kallsyms__parse(filename, &mi, find_extra_kernel_maps); + if (ret) + goto out_free; + + if (!mi.entry_trampoline) + goto out_free; + + for (i = 0; i < mi.cnt; i++) { + struct extra_kernel_map *xm = &mi.maps[i]; + + xm->pgoff = mi.entry_trampoline; + ret = machine__create_extra_kernel_map(machine, kernel, xm); + if (ret) + goto out_free; + } + + machine->trampolines_mapped = mi.cnt; +out_free: + free(mi.maps); + return ret; +} + +#endif diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 51709a961496..da5704240239 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -45,6 +45,7 @@ struct perf_annotate { bool print_line; bool skip_missing; bool has_br_stack; + bool group_set; const char *sym_hist_filter; const char *cpu_list; DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); @@ -228,7 +229,7 @@ static int perf_evsel__add_sample(struct perf_evsel *evsel, */ if (al->sym != NULL) { rb_erase(&al->sym->rb_node, - &al->map->dso->symbols[al->map->type]); + &al->map->dso->symbols); symbol__delete(al->sym); dso__reset_find_symbol_cache(al->map->dso); } @@ -508,6 +509,9 @@ int cmd_annotate(int argc, const char **argv) "Don't shorten the displayed pathnames"), OPT_BOOLEAN(0, "skip-missing", &annotate.skip_missing, "Skip symbols that cannot be annotated"), + OPT_BOOLEAN_SET(0, "group", &symbol_conf.event_group, + &annotate.group_set, + "Show event group information together"), OPT_STRING('C', "cpu", &annotate.cpu_list, "cpu", "list of cpus to profile"), OPT_CALLBACK(0, "symfs", NULL, "directory", "Look for files with symbols relative to this directory", @@ -570,6 +574,9 @@ int cmd_annotate(int argc, const char **argv) annotate.has_br_stack = perf_header__has_feat(&annotate.session->header, HEADER_BRANCH_STACK); + if (annotate.group_set) + perf_evlist__force_leader(annotate.session->evlist); + ret = symbol__annotation_init(); if (ret < 0) goto out_delete; diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c index 41db2cba77eb..115110a4796a 100644 --- a/tools/perf/builtin-buildid-cache.c +++ b/tools/perf/builtin-buildid-cache.c @@ -25,6 +25,7 @@ #include "util/session.h" #include "util/symbol.h" #include "util/time-utils.h" +#include "util/probe-file.h" static int build_id_cache__kcore_buildid(const char *proc_dir, char *sbuildid) { @@ -239,6 +240,34 @@ out: return err; } +static int 
build_id_cache__purge_all(void) +{ + struct strlist *list; + struct str_node *pos; + int err = 0; + char *buf; + + list = build_id_cache__list_all(false); + if (!list) { + pr_debug("Failed to get buildids: -%d\n", errno); + return -EINVAL; + } + + strlist__for_each_entry(pos, list) { + buf = build_id_cache__origname(pos->s); + err = build_id_cache__remove_s(pos->s); + pr_debug("Removing %s (%s): %s\n", buf, pos->s, + err ? "FAIL" : "Ok"); + free(buf); + if (err) + break; + } + strlist__delete(list); + + pr_debug("Purged all: %s\n", err ? "FAIL" : "Ok"); + return err; +} + static bool dso__missing_buildid_cache(struct dso *dso, int parm __maybe_unused) { char filename[PATH_MAX]; @@ -297,6 +326,26 @@ static int build_id_cache__update_file(const char *filename, struct nsinfo *nsi) return err; } +static int build_id_cache__show_all(void) +{ + struct strlist *bidlist; + struct str_node *nd; + char *buf; + + bidlist = build_id_cache__list_all(true); + if (!bidlist) { + pr_debug("Failed to get buildids: -%d\n", errno); + return -1; + } + strlist__for_each_entry(nd, bidlist) { + buf = build_id_cache__origname(nd->s); + fprintf(stdout, "%s %s\n", nd->s, buf); + free(buf); + } + strlist__delete(bidlist); + return 0; +} + int cmd_buildid_cache(int argc, const char **argv) { struct strlist *list; @@ -304,6 +353,9 @@ int cmd_buildid_cache(int argc, const char **argv) int ret = 0; int ns_id = -1; bool force = false; + bool list_files = false; + bool opts_flag = false; + bool purge_all = false; char const *add_name_list_str = NULL, *remove_name_list_str = NULL, *purge_name_list_str = NULL, @@ -327,6 +379,8 @@ int cmd_buildid_cache(int argc, const char **argv) "file(s) to remove"), OPT_STRING('p', "purge", &purge_name_list_str, "file list", "file(s) to remove (remove old caches too)"), + OPT_BOOLEAN('P', "purge-all", &purge_all, "purge all cached files"), + OPT_BOOLEAN('l', "list", &list_files, "list all cached files"), OPT_STRING('M', "missing", &missing_filename, "file", "to find missing build ids in the cache"), OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), @@ -344,11 +398,20 @@ int cmd_buildid_cache(int argc, const char **argv) argc = parse_options(argc, argv, buildid_cache_options, buildid_cache_usage, 0); - if (argc || (!add_name_list_str && !kcore_filename && - !remove_name_list_str && !purge_name_list_str && - !missing_filename && !update_name_list_str)) + opts_flag = add_name_list_str || kcore_filename || + remove_name_list_str || purge_name_list_str || + missing_filename || update_name_list_str || + purge_all; + + if (argc || !(list_files || opts_flag)) usage_with_options(buildid_cache_usage, buildid_cache_options); + /* -l is exclusive. It can not be used with other options. */ + if (list_files && opts_flag) { + usage_with_options_msg(buildid_cache_usage, + buildid_cache_options, "-l is exclusive.\n"); + } + if (ns_id > 0) nsi = nsinfo__new(ns_id); @@ -366,6 +429,11 @@ int cmd_buildid_cache(int argc, const char **argv) setup_pager(); + if (list_files) { + ret = build_id_cache__show_all(); + goto out; + } + if (add_name_list_str) { list = strlist__new(add_name_list_str, NULL); if (list) { @@ -420,6 +488,13 @@ int cmd_buildid_cache(int argc, const char **argv) } } + if (purge_all) { + if (build_id_cache__purge_all()) { + pr_warning("Couldn't remove some caches. 
Error: %s.\n", + str_error_r(errno, sbuf, sizeof(sbuf))); + } + } + if (missing_filename) ret = build_id_cache__fprintf_missing(session, stdout); diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c index 40fe919bbcf3..a3b346359ba0 100644 --- a/tools/perf/builtin-inject.c +++ b/tools/perf/builtin-inject.c @@ -440,9 +440,7 @@ static int perf_event__inject_buildid(struct perf_tool *tool, goto repipe; } - thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, &al); - - if (al.map != NULL) { + if (thread__find_map(thread, sample->cpumode, sample->ip, &al)) { if (!al.map->dso->hit) { al.map->dso->hit = 1; if (map__load(al.map) >= 0) { diff --git a/tools/perf/builtin-kallsyms.c b/tools/perf/builtin-kallsyms.c index bcfb363112d3..90d1a2305b72 100644 --- a/tools/perf/builtin-kallsyms.c +++ b/tools/perf/builtin-kallsyms.c @@ -27,7 +27,7 @@ static int __cmd_kallsyms(int argc, const char **argv) for (i = 0; i < argc; ++i) { struct map *map; - struct symbol *symbol = machine__find_kernel_function_by_name(machine, argv[i], &map); + struct symbol *symbol = machine__find_kernel_symbol_by_name(machine, argv[i], &map); if (symbol == NULL) { printf("%s: not found\n", argv[i]); diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index ae11e4c3516a..54d3f21b0e62 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -1004,7 +1004,7 @@ static void __print_slab_result(struct rb_root *root, if (is_caller) { addr = data->call_site; if (!raw_ip) - sym = machine__find_kernel_function(machine, addr, &map); + sym = machine__find_kernel_symbol(machine, addr, &map); } else addr = data->ptr; @@ -1068,7 +1068,7 @@ static void __print_page_alloc_result(struct perf_session *session, int n_lines) char *caller = buf; data = rb_entry(next, struct page_stat, node); - sym = machine__find_kernel_function(machine, data->callsite, &map); + sym = machine__find_kernel_symbol(machine, data->callsite, &map); if (sym) caller = sym->name; else @@ -1110,7 +1110,7 @@ static void __print_page_caller_result(struct perf_session *session, int n_lines char *caller = buf; data = rb_entry(next, struct page_stat, node); - sym = machine__find_kernel_function(machine, data->callsite, &map); + sym = machine__find_kernel_symbol(machine, data->callsite, &map); if (sym) caller = sym->name; else diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 0f198f6d9b77..ad978e3ee2b8 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -194,20 +194,11 @@ out: return err; } -/* - * Events in data file are not collect in groups, but we still want - * the group display. Set the artificial group and set the leader's - * forced_leader flag to notify the display code. - */ static void setup_forced_leader(struct report *report, struct perf_evlist *evlist) { - if (report->group_set && !evlist->nr_groups) { - struct perf_evsel *leader = perf_evlist__first(evlist); - - perf_evlist__set_leader(evlist); - leader->forced_leader = true; - } + if (report->group_set) + perf_evlist__force_leader(evlist); } static int process_feature_event(struct perf_tool *tool, @@ -523,12 +514,9 @@ static void report__warn_kptr_restrict(const struct report *rep) "As no suitable kallsyms nor vmlinux was found, kernel samples\n" "can't be resolved."; - if (kernel_map) { - const struct dso *kdso = kernel_map->dso; - if (!RB_EMPTY_ROOT(&kdso->symbols[MAP__FUNCTION])) { - desc = "If some relocation was applied (e.g. 
" - "kexec) symbols may be misresolved."; - } + if (kernel_map && map__has_symbols(kernel_map)) { + desc = "If some relocation was applied (e.g. " + "kexec) symbols may be misresolved."; } ui__warning( @@ -718,10 +706,7 @@ static size_t maps__fprintf_task(struct maps *maps, int indent, FILE *fp) static int map_groups__fprintf_task(struct map_groups *mg, int indent, FILE *fp) { - int printed = 0, i; - for (i = 0; i < MAP__NR_TYPES; ++i) - printed += maps__fprintf_task(&mg->maps[i], indent, fp); - return printed; + return maps__fprintf_task(&mg->maps, indent, fp); } static void task__print_level(struct task *task, FILE *fp, int level) diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index e0a9845b6cbc..cefc8813e91e 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -153,8 +153,8 @@ static struct { .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP | - PERF_OUTPUT_SYM | PERF_OUTPUT_DSO | - PERF_OUTPUT_PERIOD, + PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET | + PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD, .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT, }, @@ -165,8 +165,9 @@ static struct { .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP | - PERF_OUTPUT_SYM | PERF_OUTPUT_DSO | - PERF_OUTPUT_PERIOD | PERF_OUTPUT_BPF_OUTPUT, + PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET | + PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD | + PERF_OUTPUT_BPF_OUTPUT, .invalid_fields = PERF_OUTPUT_TRACE, }, @@ -185,10 +186,10 @@ static struct { .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP | - PERF_OUTPUT_SYM | PERF_OUTPUT_DSO | - PERF_OUTPUT_PERIOD | PERF_OUTPUT_ADDR | - PERF_OUTPUT_DATA_SRC | PERF_OUTPUT_WEIGHT | - PERF_OUTPUT_PHYS_ADDR, + PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET | + PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD | + PERF_OUTPUT_ADDR | PERF_OUTPUT_DATA_SRC | + PERF_OUTPUT_WEIGHT | PERF_OUTPUT_PHYS_ADDR, .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT, }, @@ -199,8 +200,8 @@ static struct { .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP | - PERF_OUTPUT_SYM | PERF_OUTPUT_DSO | - PERF_OUTPUT_PERIOD, + PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET | + PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD, .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT, }, @@ -211,8 +212,8 @@ static struct { .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP | - PERF_OUTPUT_SYM | PERF_OUTPUT_DSO | - PERF_OUTPUT_SYNTH, + PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET | + PERF_OUTPUT_DSO | PERF_OUTPUT_SYNTH, .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT, }, @@ -544,6 +545,7 @@ static int perf_session__check_output_opt(struct perf_session *session) if (attr->sample_type & PERF_SAMPLE_CALLCHAIN) { output[j].fields |= PERF_OUTPUT_IP; output[j].fields |= PERF_OUTPUT_SYM; + output[j].fields |= PERF_OUTPUT_SYMOFFSET; output[j].fields |= PERF_OUTPUT_DSO; set_print_ip_opts(attr); goto out; @@ -717,8 +719,8 @@ static int perf_sample__fprintf_brstack(struct perf_sample *sample, if (PRINT_FIELD(DSO)) { memset(&alf, 0, sizeof(alf)); memset(&alt, 0, sizeof(alt)); - thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, from, &alf); - thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, to, &alt); + thread__find_map(thread, 
sample->cpumode, from, &alf); + thread__find_map(thread, sample->cpumode, to, &alt); } printed += fprintf(fp, " 0x%"PRIx64, from); @@ -764,13 +766,8 @@ static int perf_sample__fprintf_brstacksym(struct perf_sample *sample, from = br->entries[i].from; to = br->entries[i].to; - thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, from, &alf); - if (alf.map) - alf.sym = map__find_symbol(alf.map, alf.addr); - - thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, to, &alt); - if (alt.map) - alt.sym = map__find_symbol(alt.map, alt.addr); + thread__find_symbol(thread, sample->cpumode, from, &alf); + thread__find_symbol(thread, sample->cpumode, to, &alt); printed += symbol__fprintf_symname_offs(alf.sym, &alf, fp); if (PRINT_FIELD(DSO)) { @@ -814,12 +811,12 @@ static int perf_sample__fprintf_brstackoff(struct perf_sample *sample, from = br->entries[i].from; to = br->entries[i].to; - thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, from, &alf); - if (alf.map && !alf.map->dso->adjust_symbols) + if (thread__find_map(thread, sample->cpumode, from, &alf) && + !alf.map->dso->adjust_symbols) from = map__map_ip(alf.map, from); - thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, to, &alt); - if (alt.map && !alt.map->dso->adjust_symbols) + if (thread__find_map(thread, sample->cpumode, to, &alt) && + !alt.map->dso->adjust_symbols) to = map__map_ip(alt.map, to); printed += fprintf(fp, " 0x%"PRIx64, from); @@ -882,8 +879,7 @@ static int grab_bb(u8 *buffer, u64 start, u64 end, return 0; } - thread__find_addr_map(thread, *cpumode, MAP__FUNCTION, start, &al); - if (!al.map || !al.map->dso) { + if (!thread__find_map(thread, *cpumode, start, &al) || !al.map->dso) { pr_debug("\tcannot resolve %" PRIx64 "-%" PRIx64 "\n", start, end); return 0; } @@ -933,10 +929,8 @@ static int ip__fprintf_sym(uint64_t addr, struct thread *thread, memset(&al, 0, sizeof(al)); - thread__find_addr_map(thread, cpumode, MAP__FUNCTION, addr, &al); - if (!al.map) - thread__find_addr_map(thread, cpumode, MAP__VARIABLE, - addr, &al); + thread__find_map(thread, cpumode, addr, &al); + if ((*lastsym) && al.addr >= (*lastsym)->start && al.addr < (*lastsym)->end) return 0; diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index f17dc601b0f3..a4f662a462c6 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -164,6 +164,7 @@ static bool forever = false; static bool metric_only = false; static bool force_metric_only = false; static bool no_merge = false; +static bool walltime_run_table = false; static struct timespec ref_time; static struct cpu_map *aggr_map; static aggr_get_id_t aggr_get_id; @@ -173,6 +174,7 @@ static const char *output_name; static int output_fd; static int print_free_counters_hint; static int print_mixed_hw_group_error; +static u64 *walltime_run; struct perf_stat { bool record; @@ -569,7 +571,7 @@ static struct perf_evsel *perf_evsel__reset_weak_group(struct perf_evsel *evsel) return leader; } -static int __run_perf_stat(int argc, const char **argv) +static int __run_perf_stat(int argc, const char **argv, int run_idx) { int interval = stat_config.interval; int times = stat_config.times; @@ -752,6 +754,9 @@ try_again: t1 = rdclock(); + if (walltime_run_table) + walltime_run[run_idx] = t1 - t0; + update_stats(&walltime_nsecs_stats, t1 - t0); /* @@ -766,7 +771,7 @@ try_again: return WEXITSTATUS(status); } -static int run_perf_stat(int argc, const char **argv) +static int run_perf_stat(int argc, const char **argv, int run_idx) { int ret; @@ -779,7 +784,7 
@@ static int run_perf_stat(int argc, const char **argv) if (sync_run) sync(); - ret = __run_perf_stat(argc, argv); + ret = __run_perf_stat(argc, argv, run_idx); if (ret) return ret; @@ -1764,19 +1769,67 @@ static void print_header(int argc, const char **argv) } } +static int get_precision(double num) +{ + if (num > 1) + return 0; + + return lround(ceil(-log10(num))); +} + +static void print_table(FILE *output, int precision, double avg) +{ + char tmp[64]; + int idx, indent = 0; + + scnprintf(tmp, 64, " %17.*f", precision, avg); + while (tmp[indent] == ' ') + indent++; + + fprintf(output, "%*s# Table of individual measurements:\n", indent, ""); + + for (idx = 0; idx < run_count; idx++) { + double run = (double) walltime_run[idx] / NSEC_PER_SEC; + int h, n = 1 + abs((int) (100.0 * (run - avg)/run) / 5); + + fprintf(output, " %17.*f (%+.*f) ", + precision, run, precision, run - avg); + + for (h = 0; h < n; h++) + fprintf(output, "#"); + + fprintf(output, "\n"); + } + + fprintf(output, "\n%*s# Final result:\n", indent, ""); +} + static void print_footer(void) { + double avg = avg_stats(&walltime_nsecs_stats) / NSEC_PER_SEC; FILE *output = stat_config.output; int n; if (!null_run) fprintf(output, "\n"); - fprintf(output, " %17.9f seconds time elapsed", - avg_stats(&walltime_nsecs_stats) / NSEC_PER_SEC); - if (run_count > 1) { - fprintf(output, " "); - print_noise_pct(stddev_stats(&walltime_nsecs_stats), - avg_stats(&walltime_nsecs_stats)); + + if (run_count == 1) { + fprintf(output, " %17.9f seconds time elapsed", avg); + } else { + double sd = stddev_stats(&walltime_nsecs_stats) / NSEC_PER_SEC; + /* + * Display at most 2 more significant + * digits than the stddev inaccuracy. + */ + int precision = get_precision(sd) + 2; + + if (walltime_run_table) + print_table(output, precision, avg); + + fprintf(output, " %17.*f +- %.*f seconds time elapsed", + precision, avg, precision, sd); + + print_noise_pct(sd, avg); } fprintf(output, "\n\n"); @@ -1952,6 +2005,8 @@ static const struct option stat_options[] = { "be more verbose (show counter open errors, etc)"), OPT_INTEGER('r', "repeat", &run_count, "repeat command and print average + stddev (max: 100, forever: 0)"), + OPT_BOOLEAN(0, "table", &walltime_run_table, + "display details about each run (only with -r option)"), OPT_BOOLEAN('n', "null", &null_run, "null run - dont start any counters"), OPT_INCR('d', "detailed", &detailed_run, @@ -2843,6 +2898,13 @@ int cmd_stat(int argc, const char **argv) goto out; } + if (walltime_run_table && run_count <= 1) { + fprintf(stderr, "--table is only supported with -r\n"); + parse_options_usage(stat_usage, stat_options, "r", 1); + parse_options_usage(NULL, stat_options, "table", 0); + goto out; + } + if (output_fd < 0) { fprintf(stderr, "argument to --log-fd must be a > 0\n"); parse_options_usage(stat_usage, stat_options, "log-fd", 0); @@ -2897,6 +2959,14 @@ int cmd_stat(int argc, const char **argv) run_count = 1; } + if (walltime_run_table) { + walltime_run = zalloc(run_count * sizeof(walltime_run[0])); + if (!walltime_run) { + pr_err("failed to setup -r option"); + goto out; + } + } + if ((stat_config.aggr_mode == AGGR_THREAD) && !target__has_task(&target)) { if (!target.system_wide || target.cpu_list) { @@ -3012,7 +3082,7 @@ int cmd_stat(int argc, const char **argv) fprintf(output, "[ perf stat: executing run #%d ... 
]\n", run_idx + 1); - status = run_perf_stat(argc, argv); + status = run_perf_stat(argc, argv, run_idx); if (forever && status != -1) { print_counters(NULL, argc, argv); perf_stat__reset_stats(); @@ -3060,6 +3130,8 @@ int cmd_stat(int argc, const char **argv) perf_stat__exit_aggr_mode(); perf_evlist__free_stats(evsel_list); out: + free(walltime_run); + if (smi_cost && smi_reset) sysfs__write_int(FREEZE_ON_SMI_PATH, 0); diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c index 813698a9b8c7..a827919c6263 100644 --- a/tools/perf/builtin-timechart.c +++ b/tools/perf/builtin-timechart.c @@ -533,12 +533,8 @@ static const char *cat_backtrace(union perf_event *event, } tal.filtered = 0; - thread__find_addr_location(al.thread, cpumode, - MAP__FUNCTION, ip, &tal); - - if (tal.sym) - fprintf(f, "..... %016" PRIx64 " %s\n", ip, - tal.sym->name); + if (thread__find_symbol(al.thread, cpumode, ip, &tal)) + fprintf(f, "..... %016" PRIx64 " %s\n", ip, tal.sym->name); else fprintf(f, "..... %016" PRIx64 "\n", ip); } diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index f39bd60d2708..7a349fcd3864 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -742,7 +742,7 @@ static void perf_event__process_sample(struct perf_tool *tool, "Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n" "Check /proc/sys/kernel/kptr_restrict.\n\n" "Kernel%s samples will not be resolved.\n", - al.map && !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ? + al.map && map__has_symbols(al.map) ? " modules" : ""); if (use_browser <= 0) sleep(5); @@ -750,7 +750,7 @@ static void perf_event__process_sample(struct perf_tool *tool, machine->kptr_restrict_warned = true; } - if (al.sym == NULL) { + if (al.sym == NULL && al.map != NULL) { const char *msg = "Kernel samples will not be resolved.\n"; /* * As we do lazy loading of symtabs we only will know if the @@ -764,8 +764,7 @@ static void perf_event__process_sample(struct perf_tool *tool, * invalid --vmlinux ;-) */ if (!machine->kptr_restrict_warned && !top->vmlinux_warned && - al.map == machine->vmlinux_maps[MAP__FUNCTION] && - RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) { + __map__is_kernel(al.map) && map__has_symbols(al.map)) { if (symbol_conf.vmlinux_name) { char serr[256]; dso__strerror_load(al.map->dso, serr, sizeof(serr)); @@ -1265,7 +1264,7 @@ int cmd_top(int argc, const char **argv) .proc_map_timeout = 500, .overwrite = 1, }, - .max_stack = sysctl_perf_event_max_stack, + .max_stack = sysctl__max_stack(), .sym_pcnt_filter = 5, .nr_threads_synthesize = UINT_MAX, }; diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 3ad17ee89403..560aed7da36a 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -2024,8 +2024,7 @@ static int trace__pgfault(struct trace *trace, if (trace->summary_only) goto out; - thread__find_addr_location(thread, sample->cpumode, MAP__FUNCTION, - sample->ip, &al); + thread__find_symbol(thread, sample->cpumode, sample->ip, &al); trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output); @@ -2037,12 +2036,10 @@ static int trace__pgfault(struct trace *trace, fprintf(trace->output, "] => "); - thread__find_addr_location(thread, sample->cpumode, MAP__VARIABLE, - sample->addr, &al); + thread__find_symbol(thread, sample->cpumode, sample->addr, &al); if (!al.map) { - thread__find_addr_location(thread, sample->cpumode, - MAP__FUNCTION, sample->addr, &al); + thread__find_symbol(thread, sample->cpumode, sample->addr, &al); if 
(al.map) map_type = 'x'; @@ -3165,7 +3162,7 @@ int cmd_trace(int argc, const char **argv) mmap_pages_user_set = false; if (trace.max_stack == UINT_MAX) { - trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl_perf_event_max_stack; + trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack(); max_stack_user_set = false; } diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh index 9aff89bc7535..10f333e2e825 100755 --- a/tools/perf/check-headers.sh +++ b/tools/perf/check-headers.sh @@ -55,22 +55,26 @@ include/uapi/asm-generic/ioctls.h include/uapi/asm-generic/mman-common.h ' -check () { - file=$1 +check_2 () { + file1=$1 + file2=$2 shift - opts= - while [ -n "$*" ]; do - opts="$opts \"$1\"" - shift - done + shift - cmd="diff $opts ../$file ../../$file > /dev/null" + cmd="diff $* $file1 $file2 > /dev/null" - test -f ../../$file && + test -f $file2 && eval $cmd || echo "Warning: Kernel ABI header at 'tools/$file' differs from latest version at '$file'" >&2 } +check () { + file=$1 + + shift + + check_2 ../$file ../../$file $* +} # Check if we have the kernel headers (tools/perf/../../include), else # we're probably on a detached tarball, so no point in trying to check @@ -83,7 +87,7 @@ for i in $HEADERS; do done # diff with extra ignore lines -check arch/x86/lib/memcpy_64.S -I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" -check arch/x86/lib/memset_64.S -I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" -check include/uapi/asm-generic/mman.h -I "^#include <\(uapi/\)*asm-generic/mman-common.h>" -check include/uapi/linux/mman.h -I "^#include <\(uapi/\)*asm/mman.h>" +check arch/x86/lib/memcpy_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"' +check arch/x86/lib/memset_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"' +check include/uapi/asm-generic/mman.h '-I "^#include <\(uapi/\)*asm-generic/mman-common.h>"' +check include/uapi/linux/mman.h '-I "^#include <\(uapi/\)*asm/mman.h>"' diff --git a/tools/perf/examples/bpf/5sec.c b/tools/perf/examples/bpf/5sec.c new file mode 100644 index 000000000000..b9c203219691 --- /dev/null +++ b/tools/perf/examples/bpf/5sec.c @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + Description: + + . Disable strace like syscall tracing (--no-syscalls), or try tracing + just some (-e *sleep). + + . Attach a filter function to a kernel function, returning when it should + be considered, i.e. appear on the output. + + . Run it system wide, so that any sleep of >= 5 seconds and < than 6 + seconds gets caught. + + . Ask for callgraphs using DWARF info, so that userspace can be unwound + + . While this is running, run something like "sleep 5s". + + . If we decide to add tv_nsec as well, then it becomes: + + int probe(hrtimer_nanosleep, rqtp->tv_sec rqtp->tv_nsec)(void *ctx, int err, long sec, long nsec) + + I.e. 
add where it comes from (rqtp->tv_nsec) and where it will be + accessible in the function body (nsec) + + # perf trace --no-syscalls -e tools/perf/examples/bpf/5sec.c/call-graph=dwarf/ + 0.000 perf_bpf_probe:func:(ffffffff9811b5f0) tv_sec=5 + hrtimer_nanosleep ([kernel.kallsyms]) + __x64_sys_nanosleep ([kernel.kallsyms]) + do_syscall_64 ([kernel.kallsyms]) + entry_SYSCALL_64 ([kernel.kallsyms]) + __GI___nanosleep (/usr/lib64/libc-2.26.so) + rpl_nanosleep (/usr/bin/sleep) + xnanosleep (/usr/bin/sleep) + main (/usr/bin/sleep) + __libc_start_main (/usr/lib64/libc-2.26.so) + _start (/usr/bin/sleep) + ^C# + + Copyright (C) 2018 Red Hat, Inc., Arnaldo Carvalho de Melo <acme@redhat.com> +*/ + +#include <bpf.h> + +int probe(hrtimer_nanosleep, rqtp->tv_sec)(void *ctx, int err, long sec) +{ + return sec == 5; +} + +license(GPL); diff --git a/tools/perf/examples/bpf/empty.c b/tools/perf/examples/bpf/empty.c new file mode 100644 index 000000000000..3776d26db9e7 --- /dev/null +++ b/tools/perf/examples/bpf/empty.c @@ -0,0 +1,3 @@ +#include <bpf.h> + +license(GPL); diff --git a/tools/perf/include/bpf/bpf.h b/tools/perf/include/bpf/bpf.h new file mode 100644 index 000000000000..dd764ad5efdf --- /dev/null +++ b/tools/perf/include/bpf/bpf.h @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0 +#ifndef _PERF_BPF_H +#define _PERF_BPF_H +#define SEC(NAME) __attribute__((section(NAME), used)) + +#define probe(function, vars) \ + SEC(#function "=" #function " " #vars) function + +#define license(name) \ +char _license[] SEC("license") = #name; \ +int _version SEC("version") = LINUX_VERSION_CODE; + +#endif /* _PERF_BPF_H */ diff --git a/tools/perf/perf.c b/tools/perf/perf.c index 20a08cb32332..51c81509a315 100644 --- a/tools/perf/perf.c +++ b/tools/perf/perf.c @@ -238,7 +238,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged) (*argc)--; } else if (strstarts(cmd, CMD_DEBUGFS_DIR)) { tracing_path_set(cmd + strlen(CMD_DEBUGFS_DIR)); - fprintf(stderr, "dir: %s\n", tracing_path); + fprintf(stderr, "dir: %s\n", tracing_path_mount()); if (envchanged) *envchanged = 1; } else if (!strcmp(cmd, "--list-cmds")) { @@ -421,22 +421,11 @@ void pthread__unblock_sigwinch(void) pthread_sigmask(SIG_UNBLOCK, &set, NULL); } -#ifdef _SC_LEVEL1_DCACHE_LINESIZE -#define cache_line_size(cacheline_sizep) *cacheline_sizep = sysconf(_SC_LEVEL1_DCACHE_LINESIZE) -#else -static void cache_line_size(int *cacheline_sizep) -{ - if (sysfs__read_int("devices/system/cpu/cpu0/cache/index0/coherency_line_size", cacheline_sizep)) - pr_debug("cannot determine cache line size"); -} -#endif - int main(int argc, const char **argv) { int err; const char *cmd; char sbuf[STRERR_BUFSIZE]; - int value; /* libsubcmd init */ exec_cmd_init("perf", PREFIX, PERF_EXEC_PATH, EXEC_PATH_ENVIRONMENT); @@ -444,13 +433,6 @@ int main(int argc, const char **argv) /* The page_size is placed in util object. 
*/ page_size = sysconf(_SC_PAGE_SIZE); - cache_line_size(&cacheline_size); - - if (sysctl__read_int("kernel/perf_event_max_stack", &value) == 0) - sysctl_perf_event_max_stack = value; - - if (sysctl__read_int("kernel/perf_event_max_contexts_per_stack", &value) == 0) - sysctl_perf_event_max_contexts_per_stack = value; cmd = extract_argv0_path(argv[0]); if (!cmd) @@ -458,15 +440,11 @@ int main(int argc, const char **argv) srandom(time(NULL)); - perf_config__init(); err = perf_config(perf_default_config, NULL); if (err) return err; set_buildid_dir(NULL); - /* get debugfs/tracefs mount point from /proc/mounts */ - tracing_path_mount(); - /* * "perf-xxxx" is the same as "perf xxxx", but we obviously: * diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index cac8f8889bc3..2bde505e2e7e 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c @@ -654,6 +654,15 @@ static int perf_test__list(int argc, const char **argv) continue; pr_info("%2d: %s\n", i, t->desc); + + if (t->subtest.get_nr) { + int subn = t->subtest.get_nr(); + int subi; + + for (subi = 0; subi < subn; subi++) + pr_info("%2d:%1d: %s\n", i, subi + 1, + t->subtest.get_desc(subi)); + } } perf_test__list_shell(argc, argv, i); diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c index 99936352df4f..afa4ce21ba7c 100644 --- a/tools/perf/tests/code-reading.c +++ b/tools/perf/tests/code-reading.c @@ -236,14 +236,13 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode, pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr); - thread__find_addr_map(thread, cpumode, MAP__FUNCTION, addr, &al); - if (!al.map || !al.map->dso) { + if (!thread__find_map(thread, cpumode, addr, &al) || !al.map->dso) { if (cpumode == PERF_RECORD_MISC_HYPERVISOR) { pr_debug("Hypervisor address can not be resolved - skipping\n"); return 0; } - pr_debug("thread__find_addr_map failed\n"); + pr_debug("thread__find_map failed\n"); return -1; } diff --git a/tools/perf/tests/hists_common.c b/tools/perf/tests/hists_common.c index f7c5b613d667..b889a28fd80b 100644 --- a/tools/perf/tests/hists_common.c +++ b/tools/perf/tests/hists_common.c @@ -131,20 +131,20 @@ struct machine *setup_fake_machine(struct machines *machines) goto out; /* emulate dso__load() */ - dso__set_loaded(dso, MAP__FUNCTION); + dso__set_loaded(dso); for (k = 0; k < fake_symbols[i].nr_syms; k++) { struct symbol *sym; struct fake_sym *fsym = &fake_symbols[i].syms[k]; sym = symbol__new(fsym->start, fsym->length, - STB_GLOBAL, fsym->name); + STB_GLOBAL, STT_FUNC, fsym->name); if (sym == NULL) { dso__put(dso); goto out; } - symbols__insert(&dso->symbols[MAP__FUNCTION], sym); + symbols__insert(&dso->symbols, sym); } dso__put(dso); diff --git a/tools/perf/tests/mmap-thread-lookup.c b/tools/perf/tests/mmap-thread-lookup.c index 868d82b501f4..b1af2499a3c9 100644 --- a/tools/perf/tests/mmap-thread-lookup.c +++ b/tools/perf/tests/mmap-thread-lookup.c @@ -188,9 +188,8 @@ static int mmap_events(synth_cb synth) pr_debug("looking for map %p\n", td->map); - thread__find_addr_map(thread, - PERF_RECORD_MISC_USER, MAP__FUNCTION, - (unsigned long) (td->map + 1), &al); + thread__find_map(thread, PERF_RECORD_MISC_USER, + (unsigned long) (td->map + 1), &al); thread__put(thread); @@ -218,7 +217,7 @@ static int mmap_events(synth_cb synth) * perf_event__synthesize_threads (global) * * We test we can find all memory maps via: - * thread__find_addr_map + * thread__find_map * * by using all thread objects. 
*/ diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c index 18b06444f230..b9ebe15afb13 100644 --- a/tools/perf/tests/parse-events.c +++ b/tools/perf/tests/parse-events.c @@ -1309,18 +1309,26 @@ static int test__checkevent_config_cache(struct perf_evlist *evlist) return 0; } +static int test__intel_pt(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel = perf_evlist__first(evlist); + + TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "intel_pt//u") == 0); + return 0; +} + static int count_tracepoints(void) { struct dirent *events_ent; DIR *events_dir; int cnt = 0; - events_dir = opendir(tracing_events_path); + events_dir = tracing_events__opendir(); TEST_ASSERT_VAL("Can't open events dir", events_dir); while ((events_ent = readdir(events_dir))) { - char sys_path[PATH_MAX]; + char *sys_path; struct dirent *sys_ent; DIR *sys_dir; @@ -1331,8 +1339,8 @@ static int count_tracepoints(void) || !strcmp(events_ent->d_name, "header_page")) continue; - scnprintf(sys_path, PATH_MAX, "%s/%s", - tracing_events_path, events_ent->d_name); + sys_path = get_events_file(events_ent->d_name); + TEST_ASSERT_VAL("Can't get sys path", sys_path); sys_dir = opendir(sys_path); TEST_ASSERT_VAL("Can't open sys dir", sys_dir); @@ -1348,6 +1356,7 @@ static int count_tracepoints(void) } closedir(sys_dir); + put_events_file(sys_path); } closedir(events_dir); @@ -1637,6 +1646,11 @@ static struct evlist_test test__events[] = { .check = test__checkevent_config_cache, .id = 51, }, + { + .name = "intel_pt//u", + .check = test__intel_pt, + .id = 52, + }, }; static struct evlist_test test__events_pmu[] = { diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh index ee86473643be..650b208f700f 100755 --- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh +++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh @@ -16,18 +16,18 @@ nm -g $libc 2>/dev/null | fgrep -q inet_pton || exit 254 trace_libc_inet_pton_backtrace() { idx=0 expected[0]="ping[][0-9 \.:]+probe_libc:inet_pton: \([[:xdigit:]]+\)" - expected[1]=".*inet_pton[[:space:]]\($libc|inlined\)$" + expected[1]=".*inet_pton\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" case "$(uname -m)" in s390x) eventattr='call-graph=dwarf,max-stack=4' - expected[2]="gaih_inet.*[[:space:]]\($libc|inlined\)$" - expected[3]="(__GI_)?getaddrinfo[[:space:]]\($libc|inlined\)$" - expected[4]="main[[:space:]]\(.*/bin/ping.*\)$" + expected[2]="gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" + expected[3]="(__GI_)?getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" + expected[4]="main\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" ;; *) eventattr='max-stack=3' - expected[2]="getaddrinfo[[:space:]]\($libc\)$" - expected[3]=".*\(.*/bin/ping.*\)$" + expected[2]="getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" + expected[3]=".*\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" ;; esac diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c index 1e5adb65632a..7691980b7df1 100644 --- a/tools/perf/tests/vmlinux-kallsyms.c +++ b/tools/perf/tests/vmlinux-kallsyms.c @@ -19,8 +19,7 @@ int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest struct symbol *sym; struct map *kallsyms_map, *vmlinux_map, *map; struct machine kallsyms, vmlinux; - enum map_type type = MAP__FUNCTION; - struct maps *maps = &vmlinux.kmaps.maps[type]; + struct maps *maps = machine__kernel_maps(&vmlinux); u64 mem_start, mem_end; bool header_printed; 
@@ -56,7 +55,7 @@ int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest * be compacted against the list of modules found in the "vmlinux" * code and with the one got from /proc/modules from the "kallsyms" code. */ - if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type) <= 0) { + if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms") <= 0) { pr_debug("dso__load_kallsyms "); goto out; } @@ -94,7 +93,7 @@ int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest * maps__reloc_vmlinux will notice and set proper ->[un]map_ip routines * to fixup the symbols. */ - if (machine__load_vmlinux_path(&vmlinux, type) <= 0) { + if (machine__load_vmlinux_path(&vmlinux) <= 0) { pr_debug("Couldn't find a vmlinux that matches the kernel running on this machine, skipping test\n"); err = TEST_SKIP; goto out; @@ -108,7 +107,7 @@ int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest * in the kallsyms dso. For the ones that are in both, check its names and * end addresses too. */ - for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) { + map__for_each_symbol(vmlinux_map, sym, nd) { struct symbol *pair, *first_pair; sym = rb_entry(nd, struct symbol, rb_node); @@ -119,8 +118,7 @@ int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest mem_start = vmlinux_map->unmap_ip(vmlinux_map, sym->start); mem_end = vmlinux_map->unmap_ip(vmlinux_map, sym->end); - first_pair = machine__find_kernel_symbol(&kallsyms, type, - mem_start, NULL); + first_pair = machine__find_kernel_symbol(&kallsyms, mem_start, NULL); pair = first_pair; if (pair && UM(pair->start) == mem_start) { @@ -149,7 +147,7 @@ next_pair: */ continue; } else { - pair = machine__find_kernel_symbol_by_name(&kallsyms, type, sym->name, NULL); + pair = machine__find_kernel_symbol_by_name(&kallsyms, sym->name, NULL); if (pair) { if (UM(pair->start) == mem_start) goto next_pair; @@ -183,7 +181,7 @@ next_pair: * so use the short name, less descriptive but the same ("[kernel]" in * both cases. */ - pair = map_groups__find_by_name(&kallsyms.kmaps, type, + pair = map_groups__find_by_name(&kallsyms.kmaps, (map->dso->kernel ? 
map->dso->short_name : map->dso->name)); @@ -206,7 +204,7 @@ next_pair: mem_start = vmlinux_map->unmap_ip(vmlinux_map, map->start); mem_end = vmlinux_map->unmap_ip(vmlinux_map, map->end); - pair = map_groups__find(&kallsyms.kmaps, type, mem_start); + pair = map_groups__find(&kallsyms.kmaps, mem_start); if (pair == NULL || pair->priv) continue; @@ -228,7 +226,7 @@ next_pair: header_printed = false; - maps = &kallsyms.kmaps.maps[type]; + maps = machine__kernel_maps(&kallsyms); for (map = maps__first(maps); map; map = map__next(map)) { if (!map->priv) { diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index 3781d74088a7..8be40fa903aa 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c @@ -695,6 +695,7 @@ static int annotate_browser__run(struct annotate_browser *browser, "O Bump offset level (jump targets -> +call -> all -> cycle thru)\n" "s Toggle source code view\n" "t Circulate percent, total period, samples view\n" + "c Show min/max cycle\n" "/ Search string\n" "k Toggle line numbers\n" "P Print to [symbol_name].annotation file.\n" @@ -791,6 +792,13 @@ show_sup_ins: notes->options->show_total_period = true; annotation__update_column_widths(notes); continue; + case 'c': + if (notes->options->show_minmax_cycle) + notes->options->show_minmax_cycle = false; + else + notes->options->show_minmax_cycle = true; + annotation__update_column_widths(notes); + continue; case K_LEFT: case K_ESC: case 'q': diff --git a/tools/perf/ui/browsers/map.c b/tools/perf/ui/browsers/map.c index e03fa75f108a..5b8b8c637686 100644 --- a/tools/perf/ui/browsers/map.c +++ b/tools/perf/ui/browsers/map.c @@ -104,7 +104,7 @@ int map__browse(struct map *map) { struct map_browser mb = { .b = { - .entries = &map->dso->symbols[map->type], + .entries = &map->dso->symbols, .refresh = ui_browser__rb_tree_refresh, .seek = ui_browser__rb_tree_seek, .write = map_browser__write, diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c index 6832fcb2e6ff..c1eb476da91b 100644 --- a/tools/perf/ui/stdio/hist.c +++ b/tools/perf/ui/stdio/hist.c @@ -819,8 +819,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows, } if (h->ms.map == NULL && verbose > 1) { - __map_groups__fprintf_maps(h->thread->mg, - MAP__FUNCTION, fp); + map_groups__fprintf(h->thread->mg, fp); fprintf(fp, "%.10s end\n", graph_dotted_line); } } diff --git a/tools/perf/util/Build b/tools/perf/util/Build index 8052373bcd6a..5d4c45b76895 100644 --- a/tools/perf/util/Build +++ b/tools/perf/util/Build @@ -152,6 +152,8 @@ libperf-y += perf-hooks.o libperf-$(CONFIG_CXX) += c++/ CFLAGS_config.o += -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))" +CFLAGS_llvm-utils.o += -DPERF_INCLUDE_DIR="BUILD_STR($(perf_include_dir_SQ))" + # avoid compiler warnings in 32-bit mode CFLAGS_genelf_debug.o += -Wno-packed diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 5d74a30fe00f..71897689dacf 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -760,6 +760,15 @@ static int __symbol__account_cycles(struct annotation *notes, ch[offset].num_aggr++; ch[offset].cycles_aggr += cycles; + if (cycles > ch[offset].cycles_max) + ch[offset].cycles_max = cycles; + + if (ch[offset].cycles_min) { + if (cycles && cycles < ch[offset].cycles_min) + ch[offset].cycles_min = cycles; + } else + ch[offset].cycles_min = cycles; + if (!have_start && ch[offset].have_start) return 0; if (ch[offset].num) { @@ -953,8 +962,11 @@ void annotation__compute_ipc(struct annotation 
*notes, size_t size) if (ch->have_start) annotation__count_and_fill(notes, ch->start, offset, ch); al = notes->offsets[offset]; - if (al && ch->num_aggr) + if (al && ch->num_aggr) { al->cycles = ch->cycles_aggr / ch->num_aggr; + al->cycles_max = ch->cycles_max; + al->cycles_min = ch->cycles_min; + } notes->have_cycles = true; } } @@ -1953,6 +1965,7 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map, u64 len; int width = symbol_conf.show_total_period ? 12 : 8; int graph_dotted_len; + char buf[512]; filename = strdup(dso->long_name); if (!filename) @@ -1965,8 +1978,11 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map, len = symbol__size(sym); - if (perf_evsel__is_group_event(evsel)) + if (perf_evsel__is_group_event(evsel)) { width *= evsel->nr_members; + perf_evsel__group_desc(evsel, buf, sizeof(buf)); + evsel_name = buf; + } graph_dotted_len = printf(" %-*.*s| Source code & Disassembly of %s for %s (%" PRIu64 " samples)\n", width, width, symbol_conf.show_total_period ? "Period" : @@ -2486,13 +2502,38 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati else obj__printf(obj, "%*s ", ANNOTATION__IPC_WIDTH - 1, "IPC"); - if (al->cycles) - obj__printf(obj, "%*" PRIu64 " ", + if (!notes->options->show_minmax_cycle) { + if (al->cycles) + obj__printf(obj, "%*" PRIu64 " ", ANNOTATION__CYCLES_WIDTH - 1, al->cycles); - else if (!show_title) - obj__printf(obj, "%*s", ANNOTATION__CYCLES_WIDTH, " "); - else - obj__printf(obj, "%*s ", ANNOTATION__CYCLES_WIDTH - 1, "Cycle"); + else if (!show_title) + obj__printf(obj, "%*s", + ANNOTATION__CYCLES_WIDTH, " "); + else + obj__printf(obj, "%*s ", + ANNOTATION__CYCLES_WIDTH - 1, + "Cycle"); + } else { + if (al->cycles) { + char str[32]; + + scnprintf(str, sizeof(str), + "%" PRIu64 "(%" PRIu64 "/%" PRIu64 ")", + al->cycles, al->cycles_min, + al->cycles_max); + + obj__printf(obj, "%*s ", + ANNOTATION__MINMAX_CYCLES_WIDTH - 1, + str); + } else if (!show_title) + obj__printf(obj, "%*s", + ANNOTATION__MINMAX_CYCLES_WIDTH, + " "); + else + obj__printf(obj, "%*s ", + ANNOTATION__MINMAX_CYCLES_WIDTH - 1, + "Cycle(min/max)"); + } } obj__printf(obj, " "); diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index f28a9e43421d..5080b6dd98b8 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -61,6 +61,7 @@ bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2); #define ANNOTATION__IPC_WIDTH 6 #define ANNOTATION__CYCLES_WIDTH 6 +#define ANNOTATION__MINMAX_CYCLES_WIDTH 19 struct annotation_options { bool hide_src_code, @@ -69,7 +70,8 @@ struct annotation_options { show_linenr, show_nr_jumps, show_nr_samples, - show_total_period; + show_total_period, + show_minmax_cycle; u8 offset_level; }; @@ -105,6 +107,8 @@ struct annotation_line { int jump_sources; float ipc; u64 cycles; + u64 cycles_max; + u64 cycles_min; size_t privsize; char *path; u32 idx; @@ -186,6 +190,8 @@ struct cyc_hist { u64 start; u64 cycles; u64 cycles_aggr; + u64 cycles_max; + u64 cycles_min; u32 num; u32 num_aggr; u8 have_start; @@ -239,6 +245,9 @@ struct annotation { static inline int annotation__cycles_width(struct annotation *notes) { + if (notes->have_cycles && notes->options->show_minmax_cycle) + return ANNOTATION__IPC_WIDTH + ANNOTATION__MINMAX_CYCLES_WIDTH; + return notes->have_cycles ? 
ANNOTATION__IPC_WIDTH + ANNOTATION__CYCLES_WIDTH : 0; } diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c index 857de69a5361..d056447520a2 100644 --- a/tools/perf/util/auxtrace.c +++ b/tools/perf/util/auxtrace.c @@ -1679,7 +1679,7 @@ struct sym_args { static bool kern_sym_match(struct sym_args *args, const char *name, char type) { /* A function with the same name, and global or the n'th found or any */ - return symbol_type__is_a(type, MAP__FUNCTION) && + return kallsyms__is_function(type) && !strcmp(name, args->name) && ((args->global && isupper(type)) || (args->selected && ++(args->cnt) == args->idx) || @@ -1784,7 +1784,7 @@ static int find_entire_kern_cb(void *arg, const char *name __maybe_unused, { struct sym_args *args = arg; - if (!symbol_type__is_a(type, MAP__FUNCTION)) + if (!kallsyms__is_function(type)) return 0; if (!args->started) { @@ -1915,7 +1915,7 @@ static void print_duplicate_syms(struct dso *dso, const char *sym_name) pr_err("Multiple symbols with name '%s'\n", sym_name); - sym = dso__first_symbol(dso, MAP__FUNCTION); + sym = dso__first_symbol(dso); while (sym) { if (dso_sym_match(sym, sym_name, &cnt, -1)) { pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n", @@ -1945,7 +1945,7 @@ static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start, *start = 0; *size = 0; - sym = dso__first_symbol(dso, MAP__FUNCTION); + sym = dso__first_symbol(dso); while (sym) { if (*start) { if (!*size) @@ -1972,8 +1972,8 @@ static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start, static int addr_filter__entire_dso(struct addr_filter *filt, struct dso *dso) { - struct symbol *first_sym = dso__first_symbol(dso, MAP__FUNCTION); - struct symbol *last_sym = dso__last_symbol(dso, MAP__FUNCTION); + struct symbol *first_sym = dso__first_symbol(dso); + struct symbol *last_sym = dso__last_symbol(dso); if (!first_sym || !last_sym) { pr_err("Failed to determine filter for %s\nNo symbols found.\n", diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c index 537eadd81914..04b1d53e4bf9 100644 --- a/tools/perf/util/build-id.c +++ b/tools/perf/util/build-id.c @@ -47,9 +47,7 @@ int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused, return -1; } - thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, &al); - - if (al.map != NULL) + if (thread__find_map(thread, sample->cpumode, sample->ip, &al)) al.map->dso->hit = 1; thread__put(thread); diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c index 84eb9393c7db..5ac157056cdf 100644 --- a/tools/perf/util/config.c +++ b/tools/perf/util/config.c @@ -707,6 +707,14 @@ struct perf_config_set *perf_config_set__new(void) return set; } +static int perf_config__init(void) +{ + if (config_set == NULL) + config_set = perf_config_set__new(); + + return config_set == NULL; +} + int perf_config(config_fn_t fn, void *data) { int ret = 0; @@ -714,7 +722,7 @@ int perf_config(config_fn_t fn, void *data) struct perf_config_section *section; struct perf_config_item *item; - if (config_set == NULL) + if (config_set == NULL && perf_config__init()) return -1; perf_config_set__for_each_entry(config_set, section, item) { @@ -735,12 +743,6 @@ int perf_config(config_fn_t fn, void *data) return ret; } -void perf_config__init(void) -{ - if (config_set == NULL) - config_set = perf_config_set__new(); -} - void perf_config__exit(void) { perf_config_set__delete(config_set); diff --git a/tools/perf/util/config.h b/tools/perf/util/config.h index baf82bf227ac..bd0a5897c76a 100644 --- a/tools/perf/util/config.h 
+++ b/tools/perf/util/config.h @@ -38,7 +38,6 @@ struct perf_config_set *perf_config_set__new(void); void perf_config_set__delete(struct perf_config_set *set); int perf_config_set__collect(struct perf_config_set *set, const char *file_name, const char *var, const char *value); -void perf_config__init(void); void perf_config__exit(void); void perf_config__refresh(void); diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c index bf16dc9ee507..822ba915d144 100644 --- a/tools/perf/util/cs-etm.c +++ b/tools/perf/util/cs-etm.c @@ -270,9 +270,7 @@ static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u64 address, thread = etmq->etm->unknown_thread; } - thread__find_addr_map(thread, cpumode, MAP__FUNCTION, address, &al); - - if (!al.map || !al.map->dso) + if (!thread__find_map(thread, cpumode, address, &al) || !al.map->dso) return 0; if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR && diff --git a/tools/perf/util/db-export.c b/tools/perf/util/db-export.c index b0c2b5c5d337..7123746edcf4 100644 --- a/tools/perf/util/db-export.c +++ b/tools/perf/util/db-export.c @@ -247,9 +247,9 @@ static int db_ids_from_al(struct db_export *dbe, struct addr_location *al, *dso_db_id = dso->db_id; if (!al->sym) { - al->sym = symbol__new(al->addr, 0, 0, "unknown"); + al->sym = symbol__new(al->addr, 0, 0, 0, "unknown"); if (al->sym) - dso__insert_symbol(dso, al->map->type, al->sym); + dso__insert_symbol(dso, al->sym); } if (al->sym) { @@ -315,8 +315,7 @@ static struct call_path *call_path_from_sample(struct db_export *dbe, al.addr = node->ip; if (al.map && !al.sym) - al.sym = dso__find_symbol(al.map->dso, MAP__FUNCTION, - al.addr); + al.sym = dso__find_symbol(al.map->dso, al.addr); db_ids_from_al(dbe, &al, &dso_db_id, &sym_db_id, &offset); diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c index 36ef45b2e89d..cdfc2e5f55f5 100644 --- a/tools/perf/util/dso.c +++ b/tools/perf/util/dso.c @@ -1014,7 +1014,7 @@ struct map *dso__new_map(const char *name) struct dso *dso = dso__new(name); if (dso) - map = map__new2(0, dso, MAP__FUNCTION); + map = map__new2(0, dso); return map; } @@ -1176,19 +1176,19 @@ int dso__name_len(const struct dso *dso) return dso->short_name_len; } -bool dso__loaded(const struct dso *dso, enum map_type type) +bool dso__loaded(const struct dso *dso) { - return dso->loaded & (1 << type); + return dso->loaded; } -bool dso__sorted_by_name(const struct dso *dso, enum map_type type) +bool dso__sorted_by_name(const struct dso *dso) { - return dso->sorted_by_name & (1 << type); + return dso->sorted_by_name; } -void dso__set_sorted_by_name(struct dso *dso, enum map_type type) +void dso__set_sorted_by_name(struct dso *dso) { - dso->sorted_by_name |= (1 << type); + dso->sorted_by_name = true; } struct dso *dso__new(const char *name) @@ -1196,12 +1196,10 @@ struct dso *dso__new(const char *name) struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1); if (dso != NULL) { - int i; strcpy(dso->name, name); dso__set_long_name(dso, dso->name, false); dso__set_short_name(dso, dso->name, false); - for (i = 0; i < MAP__NR_TYPES; ++i) - dso->symbols[i] = dso->symbol_names[i] = RB_ROOT; + dso->symbols = dso->symbol_names = RB_ROOT; dso->data.cache = RB_ROOT; dso->inlined_nodes = RB_ROOT; dso->srclines = RB_ROOT; @@ -1231,8 +1229,6 @@ struct dso *dso__new(const char *name) void dso__delete(struct dso *dso) { - int i; - if (!RB_EMPTY_NODE(&dso->rb_node)) pr_err("DSO %s is still in rbtree when being deleted!\n", dso->long_name); @@ -1240,8 +1236,7 @@ void dso__delete(struct dso *dso) /* free 
inlines first, as they reference symbols */ inlines__tree_delete(&dso->inlined_nodes); srcline__tree_delete(&dso->srclines); - for (i = 0; i < MAP__NR_TYPES; ++i) - symbols__delete(&dso->symbols[i]); + symbols__delete(&dso->symbols); if (dso->short_name_allocated) { zfree((char **)&dso->short_name); @@ -1451,9 +1446,7 @@ size_t __dsos__fprintf(struct list_head *head, FILE *fp) size_t ret = 0; list_for_each_entry(pos, head, node) { - int i; - for (i = 0; i < MAP__NR_TYPES; ++i) - ret += dso__fprintf(pos, i, fp); + ret += dso__fprintf(pos, fp); } return ret; @@ -1467,18 +1460,17 @@ size_t dso__fprintf_buildid(struct dso *dso, FILE *fp) return fprintf(fp, "%s", sbuild_id); } -size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp) +size_t dso__fprintf(struct dso *dso, FILE *fp) { struct rb_node *nd; size_t ret = fprintf(fp, "dso: %s (", dso->short_name); if (dso->short_name != dso->long_name) ret += fprintf(fp, "%s, ", dso->long_name); - ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type], - dso__loaded(dso, type) ? "" : "NOT "); + ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT "); ret += dso__fprintf_buildid(dso, fp); ret += fprintf(fp, ")\n"); - for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) { + for (nd = rb_first(&dso->symbols); nd; nd = rb_next(nd)) { struct symbol *pos = rb_entry(nd, struct symbol, rb_node); ret += symbol__fprintf(pos, fp); } diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h index c229dbe0277a..ef69de2e69ea 100644 --- a/tools/perf/util/dso.h +++ b/tools/perf/util/dso.h @@ -140,14 +140,14 @@ struct dso { struct list_head node; struct rb_node rb_node; /* rbtree node sorted by long name */ struct rb_root *root; /* root of rbtree that rb_node is in */ - struct rb_root symbols[MAP__NR_TYPES]; - struct rb_root symbol_names[MAP__NR_TYPES]; + struct rb_root symbols; + struct rb_root symbol_names; struct rb_root inlined_nodes; struct rb_root srclines; struct { u64 addr; struct symbol *symbol; - } last_find_result[MAP__NR_TYPES]; + } last_find_result; void *a2l; char *symsrc_filename; unsigned int a2l_fails; @@ -164,8 +164,8 @@ struct dso { u8 short_name_allocated:1; u8 long_name_allocated:1; u8 is_64_bit:1; - u8 sorted_by_name; - u8 loaded; + bool sorted_by_name; + bool loaded; u8 rel; u8 build_id[BUILD_ID_SIZE]; u64 text_offset; @@ -202,14 +202,13 @@ struct dso { * @dso: the 'struct dso *' in which symbols itereated * @pos: the 'struct symbol *' to use as a loop cursor * @n: the 'struct rb_node *' to use as a temporary storage - * @type: the 'enum map_type' type of symbols */ -#define dso__for_each_symbol(dso, pos, n, type) \ - symbols__for_each_entry(&(dso)->symbols[(type)], pos, n) +#define dso__for_each_symbol(dso, pos, n) \ + symbols__for_each_entry(&(dso)->symbols, pos, n) -static inline void dso__set_loaded(struct dso *dso, enum map_type type) +static inline void dso__set_loaded(struct dso *dso) { - dso->loaded |= (1 << type); + dso->loaded = true; } struct dso *dso__new(const char *name); @@ -231,11 +230,16 @@ static inline void __dso__zput(struct dso **dso) #define dso__zput(dso) __dso__zput(&dso) -bool dso__loaded(const struct dso *dso, enum map_type type); +bool dso__loaded(const struct dso *dso); -bool dso__sorted_by_name(const struct dso *dso, enum map_type type); -void dso__set_sorted_by_name(struct dso *dso, enum map_type type); -void dso__sort_by_name(struct dso *dso, enum map_type type); +static inline bool dso__has_symbols(const struct dso *dso) +{ + return !RB_EMPTY_ROOT(&dso->symbols); +} + +bool 
dso__sorted_by_name(const struct dso *dso); +void dso__set_sorted_by_name(struct dso *dso); +void dso__sort_by_name(struct dso *dso); void dso__set_build_id(struct dso *dso, void *build_id); bool dso__build_id_equal(const struct dso *dso, u8 *build_id); @@ -349,9 +353,8 @@ size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp, size_t __dsos__fprintf(struct list_head *head, FILE *fp); size_t dso__fprintf_buildid(struct dso *dso, FILE *fp); -size_t dso__fprintf_symbols_by_name(struct dso *dso, - enum map_type type, FILE *fp); -size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp); +size_t dso__fprintf_symbols_by_name(struct dso *dso, FILE *fp); +size_t dso__fprintf(struct dso *dso, FILE *fp); static inline bool dso__is_vmlinux(struct dso *dso) { diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c index 4c842762e3f2..59f38c7693f8 100644 --- a/tools/perf/util/env.c +++ b/tools/perf/util/env.c @@ -93,6 +93,37 @@ int perf_env__read_cpu_topology_map(struct perf_env *env) return 0; } +static int perf_env__read_arch(struct perf_env *env) +{ + struct utsname uts; + + if (env->arch) + return 0; + + if (!uname(&uts)) + env->arch = strdup(uts.machine); + + return env->arch ? 0 : -ENOMEM; +} + +static int perf_env__read_nr_cpus_avail(struct perf_env *env) +{ + if (env->nr_cpus_avail == 0) + env->nr_cpus_avail = cpu__max_present_cpu(); + + return env->nr_cpus_avail ? 0 : -ENOENT; +} + +const char *perf_env__raw_arch(struct perf_env *env) +{ + return env && !perf_env__read_arch(env) ? env->arch : "unknown"; +} + +int perf_env__nr_cpus_avail(struct perf_env *env) +{ + return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0; +} + void cpu_cache_level__free(struct cpu_cache_level *cache) { free(cache->type); diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h index c4ef2e523367..1f3ccc368530 100644 --- a/tools/perf/util/env.h +++ b/tools/perf/util/env.h @@ -76,4 +76,7 @@ int perf_env__read_cpu_topology_map(struct perf_env *env); void cpu_cache_level__free(struct cpu_cache_level *cache); const char *perf_env__arch(struct perf_env *env); +const char *perf_env__raw_arch(struct perf_env *env); +int perf_env__nr_cpus_avail(struct perf_env *env); + #endif /* __PERF_ENV_H */ diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 98ff3a6a3d50..0c8ecf0c78a4 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -88,10 +88,10 @@ static const char *perf_ns__name(unsigned int id) return perf_ns__names[id]; } -static int perf_tool__process_synth_event(struct perf_tool *tool, - union perf_event *event, - struct machine *machine, - perf_event__handler_t process) +int perf_tool__process_synth_event(struct perf_tool *tool, + union perf_event *event, + struct machine *machine, + perf_event__handler_t process) { struct perf_sample synth_sample = { .pid = -1, @@ -464,8 +464,7 @@ int perf_event__synthesize_modules(struct perf_tool *tool, { int rc = 0; struct map *pos; - struct map_groups *kmaps = &machine->kmaps; - struct maps *maps = &kmaps->maps[MAP__FUNCTION]; + struct maps *maps = machine__kernel_maps(machine); union perf_event *event = zalloc((sizeof(event->mmap) + machine->id_hdr_size)); if (event == NULL) { @@ -488,7 +487,7 @@ int perf_event__synthesize_modules(struct perf_tool *tool, for (pos = maps__first(maps); pos; pos = map__next(pos)) { size_t size; - if (__map__is_kernel(pos)) + if (!__map__is_kmodule(pos)) continue; size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64)); @@ -869,7 +868,7 @@ static int find_symbol_cb(void *arg, 
const char *name, char type, * Must be a function or at least an alias, as in PARISC64, where "_text" is * an 'A' to the same address as "_stext". */ - if (!(symbol_type__is_a(type, MAP__FUNCTION) || + if (!(kallsyms__is_function(type) || type == 'A') || strcmp(name, args->name)) return 0; @@ -889,9 +888,16 @@ int kallsyms__get_function_start(const char *kallsyms_filename, return 0; } -int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, - perf_event__handler_t process, - struct machine *machine) +int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused, + perf_event__handler_t process __maybe_unused, + struct machine *machine __maybe_unused) +{ + return 0; +} + +static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool, + perf_event__handler_t process, + struct machine *machine) { size_t size; struct map *map = machine__kernel_map(machine); @@ -944,6 +950,19 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, return err; } +int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, + perf_event__handler_t process, + struct machine *machine) +{ + int err; + + err = __perf_event__synthesize_kernel_mmap(tool, process, machine); + if (err < 0) + return err; + + return perf_event__synthesize_extra_kmaps(tool, process, machine); +} + int perf_event__synthesize_thread_map2(struct perf_tool *tool, struct thread_map *threads, perf_event__handler_t process, @@ -1489,9 +1508,8 @@ int perf_event__process(struct perf_tool *tool __maybe_unused, return machine__process_event(machine, event, sample); } -void thread__find_addr_map(struct thread *thread, u8 cpumode, - enum map_type type, u64 addr, - struct addr_location *al) +struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr, + struct addr_location *al) { struct map_groups *mg = thread->mg; struct machine *machine = mg->machine; @@ -1505,7 +1523,7 @@ void thread__find_addr_map(struct thread *thread, u8 cpumode, if (machine == NULL) { al->map = NULL; - return; + return NULL; } if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) { @@ -1533,10 +1551,10 @@ void thread__find_addr_map(struct thread *thread, u8 cpumode, !perf_host) al->filtered |= (1 << HIST_FILTER__HOST); - return; + return NULL; } try_again: - al->map = map_groups__find(mg, type, al->addr); + al->map = map_groups__find(mg, al->addr); if (al->map == NULL) { /* * If this is outside of all known maps, and is a negative @@ -1563,17 +1581,17 @@ try_again: map__load(al->map); al->addr = al->map->map_ip(al->map, al->addr); } + + return al->map; } -void thread__find_addr_location(struct thread *thread, - u8 cpumode, enum map_type type, u64 addr, - struct addr_location *al) +struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode, + u64 addr, struct addr_location *al) { - thread__find_addr_map(thread, cpumode, type, addr, al); - if (al->map != NULL) + al->sym = NULL; + if (thread__find_map(thread, cpumode, addr, al)) al->sym = map__find_symbol(al->map, al->addr); - else - al->sym = NULL; + return al->sym; } /* @@ -1590,7 +1608,7 @@ int machine__resolve(struct machine *machine, struct addr_location *al, return -1; dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid); - thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, al); + thread__find_map(thread, sample->cpumode, sample->ip, al); dump_printf(" ...... dso: %s\n", al->map ? al->map->dso->long_name : al->level == 'H' ? 
"[hypervisor]" : "<not found>"); @@ -1669,10 +1687,7 @@ bool sample_addr_correlates_sym(struct perf_event_attr *attr) void thread__resolve(struct thread *thread, struct addr_location *al, struct perf_sample *sample) { - thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->addr, al); - if (!al->map) - thread__find_addr_map(thread, sample->cpumode, MAP__VARIABLE, - sample->addr, al); + thread__find_map(thread, sample->cpumode, sample->addr, al); al->cpu = sample->cpu; al->sym = NULL; diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 0f794744919c..bfa60bcafbde 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -750,6 +750,10 @@ int perf_event__process_exit(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine); +int perf_tool__process_synth_event(struct perf_tool *tool, + union perf_event *event, + struct machine *machine, + perf_event__handler_t process); int perf_event__process(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, @@ -796,6 +800,10 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool, bool mmap_data, unsigned int proc_map_timeout); +int perf_event__synthesize_extra_kmaps(struct perf_tool *tool, + perf_event__handler_t process, + struct machine *machine); + size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp); size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp); size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp); diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index a59281d64368..e7a4b31a84fb 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -1795,3 +1795,18 @@ bool perf_evlist__exclude_kernel(struct perf_evlist *evlist) return true; } + +/* + * Events in data file are not collect in groups, but we still want + * the group display. Set the artificial group and set the leader's + * forced_leader flag to notify the display code. 
+ */ +void perf_evlist__force_leader(struct perf_evlist *evlist) +{ + if (!evlist->nr_groups) { + struct perf_evsel *leader = perf_evlist__first(evlist); + + perf_evlist__set_leader(evlist); + leader->forced_leader = true; + } +} diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index 6c41b2f78713..dc66436add98 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -309,4 +309,7 @@ struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist, union perf_event *event); bool perf_evlist__exclude_kernel(struct perf_evlist *evlist); + +void perf_evlist__force_leader(struct perf_evlist *evlist); + #endif /* __PERF_EVLIST_H */ diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 4cd2cf93f726..150db5ed7400 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -2862,7 +2862,7 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target, return scnprintf(msg, size, "Not enough memory to setup event with callchain.\n" "Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n" - "Hint: Current value: %d", sysctl_perf_event_max_stack); + "Hint: Current value: %d", sysctl__max_stack()); break; case ENODEV: if (target->cpu_list) diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c index c540d47583e7..aafbe54fd3fa 100644 --- a/tools/perf/util/genelf.c +++ b/tools/perf/util/genelf.c @@ -114,7 +114,7 @@ gen_build_id(struct buildid_note *note, fd = open("/dev/urandom", O_RDONLY); if (fd == -1) - err(1, "cannot access /dev/urandom for builid"); + err(1, "cannot access /dev/urandom for buildid"); sret = read(fd, note->build_id, sz); diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c index 72db2744876d..7f0c83b6332b 100644 --- a/tools/perf/util/intel-bts.c +++ b/tools/perf/util/intel-bts.c @@ -335,8 +335,7 @@ static int intel_bts_get_next_insn(struct intel_bts_queue *btsq, u64 ip) if (!thread) return -1; - thread__find_addr_map(thread, cpumode, MAP__FUNCTION, ip, &al); - if (!al.map || !al.map->dso) + if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso) goto out_put; len = dso__data_read_addr(al.map->dso, al.map, machine, ip, buf, diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c index 0effaff57020..492986a25ef6 100644 --- a/tools/perf/util/intel-pt.c +++ b/tools/perf/util/intel-pt.c @@ -442,8 +442,7 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn, } while (1) { - thread__find_addr_map(thread, cpumode, MAP__FUNCTION, *ip, &al); - if (!al.map || !al.map->dso) + if (!thread__find_map(thread, cpumode, *ip, &al) || !al.map->dso) return -EINVAL; if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR && @@ -596,8 +595,7 @@ static int __intel_pt_pgd_ip(uint64_t ip, void *data) if (!thread) return -EINVAL; - thread__find_addr_map(thread, cpumode, MAP__FUNCTION, ip, &al); - if (!al.map || !al.map->dso) + if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso) return -EINVAL; offset = al.map->map_ip(al.map, ip); @@ -1565,7 +1563,7 @@ static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip) if (map__load(map)) return 0; - start = dso__first_symbol(map->dso, MAP__FUNCTION); + start = dso__first_symbol(map->dso); for (sym = start; sym; sym = dso__next_symbol(sym)) { if (sym->binding == STB_GLOBAL && diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c index 1cca0a2fa641..976e658e38dc 100644 --- a/tools/perf/util/llvm-utils.c +++ b/tools/perf/util/llvm-utils.c @@ -14,11 +14,12 @@ #include "config.h" 
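A sketch of how a report-style tool is expected to call the perf_evlist__force_leader() helper added above; the setup_group_display() caller and its group_requested flag are assumptions for illustration only.

        #include <stdbool.h>
        #include "util/evlist.h"

        /* Hypothetical caller: force a display-only group when the user asked
         * for the group view but the perf.data file has no event groups. */
        static void setup_group_display(struct perf_evlist *evlist, bool group_requested)
        {
                if (group_requested)
                        perf_evlist__force_leader(evlist);  /* no-op when real groups exist */
        }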
#include "util.h" #include <sys/wait.h> +#include <subcmd/exec-cmd.h> #define CLANG_BPF_CMD_DEFAULT_TEMPLATE \ "$CLANG_EXEC -D__KERNEL__ -D__NR_CPUS__=$NR_CPUS "\ "-DLINUX_VERSION_CODE=$LINUX_VERSION_CODE " \ - "$CLANG_OPTIONS $KERNEL_INC_OPTIONS " \ + "$CLANG_OPTIONS $KERNEL_INC_OPTIONS $PERF_BPF_INC_OPTIONS " \ "-Wno-unused-value -Wno-pointer-sign " \ "-working-directory $WORKING_DIR " \ "-c \"$CLANG_SOURCE\" -target bpf -O2 -o -" @@ -212,7 +213,7 @@ version_notice(void) " \t\thttp://llvm.org/apt\n\n" " \tIf you are using old version of clang, change 'clang-bpf-cmd-template'\n" " \toption in [llvm] section of ~/.perfconfig to:\n\n" -" \t \"$CLANG_EXEC $CLANG_OPTIONS $KERNEL_INC_OPTIONS \\\n" +" \t \"$CLANG_EXEC $CLANG_OPTIONS $KERNEL_INC_OPTIONS $PERF_BPF_INC_OPTIONS \\\n" " \t -working-directory $WORKING_DIR -c $CLANG_SOURCE \\\n" " \t -emit-llvm -o - | /path/to/llc -march=bpf -filetype=obj -o -\"\n" " \t(Replace /path/to/llc with path to your llc)\n\n" @@ -431,9 +432,11 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf, const char *clang_opt = llvm_param.clang_opt; char clang_path[PATH_MAX], abspath[PATH_MAX], nr_cpus_avail_str[64]; char serr[STRERR_BUFSIZE]; - char *kbuild_dir = NULL, *kbuild_include_opts = NULL; + char *kbuild_dir = NULL, *kbuild_include_opts = NULL, + *perf_bpf_include_opts = NULL; const char *template = llvm_param.clang_bpf_cmd_template; - char *command_echo, *command_out; + char *command_echo = NULL, *command_out; + char *perf_include_dir = system_path(PERF_INCLUDE_DIR); if (path[0] != '-' && realpath(path, abspath) == NULL) { err = errno; @@ -471,12 +474,14 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf, snprintf(linux_version_code_str, sizeof(linux_version_code_str), "0x%x", kernel_version); - + if (asprintf(&perf_bpf_include_opts, "-I%s/bpf", perf_include_dir) < 0) + goto errout; force_set_env("NR_CPUS", nr_cpus_avail_str); force_set_env("LINUX_VERSION_CODE", linux_version_code_str); force_set_env("CLANG_EXEC", clang_path); force_set_env("CLANG_OPTIONS", clang_opt); force_set_env("KERNEL_INC_OPTIONS", kbuild_include_opts); + force_set_env("PERF_BPF_INC_OPTIONS", perf_bpf_include_opts); force_set_env("WORKING_DIR", kbuild_dir ? 
: "."); /* @@ -512,6 +517,8 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf, free(command_out); free(kbuild_dir); free(kbuild_include_opts); + free(perf_bpf_include_opts); + free(perf_include_dir); if (!p_obj_buf) free(obj_buf); @@ -526,6 +533,8 @@ errout: free(kbuild_dir); free(kbuild_include_opts); free(obj_buf); + free(perf_bpf_include_opts); + free(perf_include_dir); if (p_obj_buf) *p_obj_buf = NULL; if (p_obj_buf_sz) diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 32d50492505d..e7b4a8b513f2 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -24,6 +24,7 @@ #include "sane_ctype.h" #include <symbol/kallsyms.h> +#include <linux/mman.h> static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock); @@ -81,8 +82,7 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid) machine->kptr_restrict_warned = false; machine->comm_exec = false; machine->kernel_start = 0; - - memset(machine->vmlinux_maps, 0, sizeof(machine->vmlinux_maps)); + machine->vmlinux_map = NULL; machine->root_dir = strdup(root_dir); if (machine->root_dir == NULL) @@ -137,13 +137,11 @@ struct machine *machine__new_kallsyms(void) struct machine *machine = machine__new_host(); /* * FIXME: - * 1) MAP__FUNCTION will go away when we stop loading separate maps for - * functions and data objects. - * 2) We should switch to machine__load_kallsyms(), i.e. not explicitely + * 1) We should switch to machine__load_kallsyms(), i.e. not explicitely * ask for not using the kcore parsing code, once this one is fixed * to create a map per module. */ - if (machine && machine__load_kallsyms(machine, "/proc/kallsyms", MAP__FUNCTION) <= 0) { + if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) { machine__delete(machine); machine = NULL; } @@ -673,8 +671,7 @@ struct map *machine__findnew_module_map(struct machine *machine, u64 start, if (kmod_path__parse_name(&m, filename)) return NULL; - map = map_groups__find_by_name(&machine->kmaps, MAP__FUNCTION, - m.name); + map = map_groups__find_by_name(&machine->kmaps, m.name); if (map) { /* * If the map's dso is an offline module, give dso__load() @@ -689,7 +686,7 @@ struct map *machine__findnew_module_map(struct machine *machine, u64 start, if (dso == NULL) goto out; - map = map__new2(start, dso, MAP__FUNCTION); + map = map__new2(start, dso); if (map == NULL) goto out; @@ -810,8 +807,8 @@ struct process_args { u64 start; }; -static void machine__get_kallsyms_filename(struct machine *machine, char *buf, - size_t bufsz) +void machine__get_kallsyms_filename(struct machine *machine, char *buf, + size_t bufsz) { if (machine__is_default_guest(machine)) scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms); @@ -854,65 +851,171 @@ static int machine__get_running_kernel_start(struct machine *machine, return 0; } +int machine__create_extra_kernel_map(struct machine *machine, + struct dso *kernel, + struct extra_kernel_map *xm) +{ + struct kmap *kmap; + struct map *map; + + map = map__new2(xm->start, kernel); + if (!map) + return -1; + + map->end = xm->end; + map->pgoff = xm->pgoff; + + kmap = map__kmap(map); + + kmap->kmaps = &machine->kmaps; + strlcpy(kmap->name, xm->name, KMAP_NAME_LEN); + + map_groups__insert(&machine->kmaps, map); + + pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n", + kmap->name, map->start, map->end); + + map__put(map); + + return 0; +} + +static u64 find_entry_trampoline(struct dso *dso) +{ + /* Duplicates are removed so lookup all 
aliases */ + const char *syms[] = { + "_entry_trampoline", + "__entry_trampoline_start", + "entry_SYSCALL_64_trampoline", + }; + struct symbol *sym = dso__first_symbol(dso); + unsigned int i; + + for (; sym; sym = dso__next_symbol(sym)) { + if (sym->binding != STB_GLOBAL) + continue; + for (i = 0; i < ARRAY_SIZE(syms); i++) { + if (!strcmp(sym->name, syms[i])) + return sym->start; + } + } + + return 0; +} + +/* + * These values can be used for kernels that do not have symbols for the entry + * trampolines in kallsyms. + */ +#define X86_64_CPU_ENTRY_AREA_PER_CPU 0xfffffe0000000000ULL +#define X86_64_CPU_ENTRY_AREA_SIZE 0x2c000 +#define X86_64_ENTRY_TRAMPOLINE 0x6000 + +/* Map x86_64 PTI entry trampolines */ +int machine__map_x86_64_entry_trampolines(struct machine *machine, + struct dso *kernel) +{ + struct map_groups *kmaps = &machine->kmaps; + struct maps *maps = &kmaps->maps; + int nr_cpus_avail, cpu; + bool found = false; + struct map *map; + u64 pgoff; + + /* + * In the vmlinux case, pgoff is a virtual address which must now be + * mapped to a vmlinux offset. + */ + for (map = maps__first(maps); map; map = map__next(map)) { + struct kmap *kmap = __map__kmap(map); + struct map *dest_map; + + if (!kmap || !is_entry_trampoline(kmap->name)) + continue; + + dest_map = map_groups__find(kmaps, map->pgoff); + if (dest_map != map) + map->pgoff = dest_map->map_ip(dest_map, map->pgoff); + found = true; + } + if (found || machine->trampolines_mapped) + return 0; + + pgoff = find_entry_trampoline(kernel); + if (!pgoff) + return 0; + + nr_cpus_avail = machine__nr_cpus_avail(machine); + + /* Add a 1 page map for each CPU's entry trampoline */ + for (cpu = 0; cpu < nr_cpus_avail; cpu++) { + u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU + + cpu * X86_64_CPU_ENTRY_AREA_SIZE + + X86_64_ENTRY_TRAMPOLINE; + struct extra_kernel_map xm = { + .start = va, + .end = va + page_size, + .pgoff = pgoff, + }; + + strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN); + + if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0) + return -1; + } + + machine->trampolines_mapped = nr_cpus_avail; + + return 0; +} + +int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused, + struct dso *kernel __maybe_unused) +{ + return 0; +} + static int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) { - int type; + struct kmap *kmap; + struct map *map; /* In case of renewal the kernel map, destroy previous one */ machine__destroy_kernel_maps(machine); - for (type = 0; type < MAP__NR_TYPES; ++type) { - struct kmap *kmap; - struct map *map; - - machine->vmlinux_maps[type] = map__new2(0, kernel, type); - if (machine->vmlinux_maps[type] == NULL) - return -1; + machine->vmlinux_map = map__new2(0, kernel); + if (machine->vmlinux_map == NULL) + return -1; - machine->vmlinux_maps[type]->map_ip = - machine->vmlinux_maps[type]->unmap_ip = - identity__map_ip; - map = __machine__kernel_map(machine, type); - kmap = map__kmap(map); - if (!kmap) - return -1; + machine->vmlinux_map->map_ip = machine->vmlinux_map->unmap_ip = identity__map_ip; + map = machine__kernel_map(machine); + kmap = map__kmap(map); + if (!kmap) + return -1; - kmap->kmaps = &machine->kmaps; - map_groups__insert(&machine->kmaps, map); - } + kmap->kmaps = &machine->kmaps; + map_groups__insert(&machine->kmaps, map); return 0; } void machine__destroy_kernel_maps(struct machine *machine) { - int type; - - for (type = 0; type < MAP__NR_TYPES; ++type) { - struct kmap *kmap; - struct map *map = __machine__kernel_map(machine, type); 
- - if (map == NULL) - continue; + struct kmap *kmap; + struct map *map = machine__kernel_map(machine); - kmap = map__kmap(map); - map_groups__remove(&machine->kmaps, map); - if (kmap && kmap->ref_reloc_sym) { - /* - * ref_reloc_sym is shared among all maps, so free just - * on one of them. - */ - if (type == MAP__FUNCTION) { - zfree((char **)&kmap->ref_reloc_sym->name); - zfree(&kmap->ref_reloc_sym); - } else - kmap->ref_reloc_sym = NULL; - } + if (map == NULL) + return; - map__put(machine->vmlinux_maps[type]); - machine->vmlinux_maps[type] = NULL; + kmap = map__kmap(map); + map_groups__remove(&machine->kmaps, map); + if (kmap && kmap->ref_reloc_sym) { + zfree((char **)&kmap->ref_reloc_sym->name); + zfree(&kmap->ref_reloc_sym); } + + map__zput(machine->vmlinux_map); } int machines__create_guest_kernel_maps(struct machines *machines) @@ -989,32 +1092,31 @@ int machines__create_kernel_maps(struct machines *machines, pid_t pid) return machine__create_kernel_maps(machine); } -int machine__load_kallsyms(struct machine *machine, const char *filename, - enum map_type type) +int machine__load_kallsyms(struct machine *machine, const char *filename) { struct map *map = machine__kernel_map(machine); int ret = __dso__load_kallsyms(map->dso, filename, map, true); if (ret > 0) { - dso__set_loaded(map->dso, type); + dso__set_loaded(map->dso); /* * Since /proc/kallsyms will have multiple sessions for the * kernel, with modules between them, fixup the end of all * sections. */ - __map_groups__fixup_end(&machine->kmaps, type); + map_groups__fixup_end(&machine->kmaps); } return ret; } -int machine__load_vmlinux_path(struct machine *machine, enum map_type type) +int machine__load_vmlinux_path(struct machine *machine) { struct map *map = machine__kernel_map(machine); int ret = dso__load_vmlinux_path(map->dso, map); if (ret > 0) - dso__set_loaded(map->dso, type); + dso__set_loaded(map->dso); return ret; } @@ -1055,10 +1157,9 @@ static bool is_kmod_dso(struct dso *dso) static int map_groups__set_module_path(struct map_groups *mg, const char *path, struct kmod_path *m) { - struct map *map; char *long_name; + struct map *map = map_groups__find_by_name(mg, m->name); - map = map_groups__find_by_name(mg, MAP__FUNCTION, m->name); if (map == NULL) return 0; @@ -1207,19 +1308,14 @@ static int machine__create_modules(struct machine *machine) static void machine__set_kernel_mmap(struct machine *machine, u64 start, u64 end) { - int i; - - for (i = 0; i < MAP__NR_TYPES; i++) { - machine->vmlinux_maps[i]->start = start; - machine->vmlinux_maps[i]->end = end; - - /* - * Be a bit paranoid here, some perf.data file came with - * a zero sized synthesized MMAP event for the kernel. - */ - if (start == 0 && end == 0) - machine->vmlinux_maps[i]->end = ~0ULL; - } + machine->vmlinux_map->start = start; + machine->vmlinux_map->end = end; + /* + * Be a bit paranoid here, some perf.data file came with + * a zero sized synthesized MMAP event for the kernel. 
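A minimal sketch of the map-type-free kernel symbol lookup that the machine.c changes above converge on; print_kernel_sym(), the u64 typedef availability and the include paths are illustrative assumptions.

        #include <stdio.h>
        #include <inttypes.h>
        #include "util/machine.h"
        #include "util/map.h"
        #include "util/symbol.h"

        static void print_kernel_sym(struct machine *machine, u64 addr)
        {
                struct map *map = NULL;
                struct symbol *sym;

                /* Loads into the single kernel map; <= 0 means no symbols were loaded. */
                if (machine__load_kallsyms(machine, "/proc/kallsyms") <= 0)
                        return;

                /* One symbol tree per DSO now, so there is no enum map_type to pass. */
                sym = machine__find_kernel_symbol(machine, addr, &map);
                if (sym)
                        printf("%#" PRIx64 ": %s in %s\n", addr, sym->name, map->dso->short_name);
        }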
+ */ + if (start == 0 && end == 0) + machine->vmlinux_map->end = ~0ULL; } int machine__create_kernel_maps(struct machine *machine) @@ -1234,9 +1330,8 @@ int machine__create_kernel_maps(struct machine *machine) return -1; ret = __machine__create_kernel_maps(machine, kernel); - dso__put(kernel); if (ret < 0) - return -1; + goto out_put; if (symbol_conf.use_modules && machine__create_modules(machine) < 0) { if (machine__is_host(machine)) @@ -1249,9 +1344,10 @@ int machine__create_kernel_maps(struct machine *machine) if (!machine__get_running_kernel_start(machine, &name, &addr)) { if (name && - maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, addr)) { + map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, addr)) { machine__destroy_kernel_maps(machine); - return -1; + ret = -1; + goto out_put; } /* we have a real start address now, so re-order the kmaps */ @@ -1267,12 +1363,16 @@ int machine__create_kernel_maps(struct machine *machine) map__put(map); } + if (machine__create_extra_kernel_maps(machine, kernel)) + pr_debug("Problems creating extra kernel maps, continuing anyway...\n"); + /* update end address of the kernel map using adjacent module address */ map = map__next(machine__kernel_map(machine)); if (map) machine__set_kernel_mmap(machine, addr, map->start); - - return 0; +out_put: + dso__put(kernel); + return ret; } static bool machine__uses_kcore(struct machine *machine) @@ -1287,6 +1387,32 @@ static bool machine__uses_kcore(struct machine *machine) return false; } +static bool perf_event__is_extra_kernel_mmap(struct machine *machine, + union perf_event *event) +{ + return machine__is(machine, "x86_64") && + is_entry_trampoline(event->mmap.filename); +} + +static int machine__process_extra_kernel_map(struct machine *machine, + union perf_event *event) +{ + struct map *kernel_map = machine__kernel_map(machine); + struct dso *kernel = kernel_map ? kernel_map->dso : NULL; + struct extra_kernel_map xm = { + .start = event->mmap.start, + .end = event->mmap.start + event->mmap.len, + .pgoff = event->mmap.pgoff, + }; + + if (kernel == NULL) + return -1; + + strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN); + + return machine__create_extra_kernel_map(machine, kernel, &xm); +} + static int machine__process_kernel_mmap_event(struct machine *machine, union perf_event *event) { @@ -1379,9 +1505,9 @@ static int machine__process_kernel_mmap_event(struct machine *machine, * time /proc/sys/kernel/kptr_restrict was non zero. 
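The fallback constants used by machine__map_x86_64_entry_trampolines() above make the per-CPU trampoline addresses easy to reproduce by hand; this standalone example only re-does that arithmetic (one single-page map per CPU) and is not part of the patch.

        #include <stdio.h>
        #include <inttypes.h>

        #define X86_64_CPU_ENTRY_AREA_PER_CPU  0xfffffe0000000000ULL
        #define X86_64_CPU_ENTRY_AREA_SIZE     0x2c000
        #define X86_64_ENTRY_TRAMPOLINE        0x6000

        int main(void)
        {
                for (int cpu = 0; cpu < 2; cpu++) {
                        /* cpu 0 -> 0xfffffe0000006000, cpu 1 -> 0xfffffe0000032000 */
                        uint64_t va = X86_64_CPU_ENTRY_AREA_PER_CPU +
                                      cpu * X86_64_CPU_ENTRY_AREA_SIZE +
                                      X86_64_ENTRY_TRAMPOLINE;

                        printf("cpu %d: entry trampoline mapped at %#" PRIx64 "\n", cpu, va);
                }
                return 0;
        }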
*/ if (event->mmap.pgoff != 0) { - maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, - symbol_name, - event->mmap.pgoff); + map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, + symbol_name, + event->mmap.pgoff); } if (machine__is_default_guest(machine)) { @@ -1390,6 +1516,8 @@ static int machine__process_kernel_mmap_event(struct machine *machine, */ dso__load(kernel, machine__kernel_map(machine)); } + } else if (perf_event__is_extra_kernel_mmap(machine, event)) { + return machine__process_extra_kernel_map(machine, event); } return 0; out_problem: @@ -1402,7 +1530,6 @@ int machine__process_mmap2_event(struct machine *machine, { struct thread *thread; struct map *map; - enum map_type type; int ret = 0; if (dump_trace) @@ -1421,11 +1548,6 @@ int machine__process_mmap2_event(struct machine *machine, if (thread == NULL) goto out_problem; - if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) - type = MAP__VARIABLE; - else - type = MAP__FUNCTION; - map = map__new(machine, event->mmap2.start, event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj, @@ -1433,7 +1555,7 @@ int machine__process_mmap2_event(struct machine *machine, event->mmap2.ino_generation, event->mmap2.prot, event->mmap2.flags, - event->mmap2.filename, type, thread); + event->mmap2.filename, thread); if (map == NULL) goto out_problem_map; @@ -1460,7 +1582,7 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event { struct thread *thread; struct map *map; - enum map_type type; + u32 prot = 0; int ret = 0; if (dump_trace) @@ -1479,16 +1601,14 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event if (thread == NULL) goto out_problem; - if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) - type = MAP__VARIABLE; - else - type = MAP__FUNCTION; + if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA)) + prot = PROT_EXEC; map = map__new(machine, event->mmap.start, event->mmap.len, event->mmap.pgoff, - 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, prot, 0, event->mmap.filename, - type, thread); + thread); if (map == NULL) goto out_problem_map; @@ -1664,7 +1784,7 @@ static void ip__resolve_ams(struct thread *thread, * Thus, we have to try consecutively until we find a match * or else, the symbol is unknown */ - thread__find_cpumode_addr_location(thread, MAP__FUNCTION, ip, &al); + thread__find_cpumode_addr_location(thread, ip, &al); ams->addr = ip; ams->al_addr = al.addr; @@ -1681,15 +1801,7 @@ static void ip__resolve_data(struct thread *thread, memset(&al, 0, sizeof(al)); - thread__find_addr_location(thread, m, MAP__VARIABLE, addr, &al); - if (al.map == NULL) { - /* - * some shared data regions have execute bit set which puts - * their mapping in the MAP__FUNCTION type array. - * Check there as a fallback option before dropping the sample. 
- */ - thread__find_addr_location(thread, m, MAP__FUNCTION, addr, &al); - } + thread__find_symbol(thread, m, addr, &al); ams->addr = addr; ams->al_addr = al.addr; @@ -1758,8 +1870,7 @@ static int add_callchain_ip(struct thread *thread, al.filtered = 0; al.sym = NULL; if (!cpumode) { - thread__find_cpumode_addr_location(thread, MAP__FUNCTION, - ip, &al); + thread__find_cpumode_addr_location(thread, ip, &al); } else { if (ip >= PERF_CONTEXT_MAX) { switch (ip) { @@ -1784,8 +1895,7 @@ static int add_callchain_ip(struct thread *thread, } return 0; } - thread__find_addr_location(thread, *cpumode, MAP__FUNCTION, - ip, &al); + thread__find_symbol(thread, *cpumode, ip, &al); } if (al.sym != NULL) { @@ -1810,7 +1920,7 @@ static int add_callchain_ip(struct thread *thread, } srcline = callchain_srcline(al.map, al.sym, al.addr); - return callchain_cursor_append(cursor, al.addr, al.map, al.sym, + return callchain_cursor_append(cursor, ip, al.map, al.sym, branch, flags, nr_loop_iter, iter_cycles, branch_from, srcline); } @@ -2342,6 +2452,20 @@ int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid, return 0; } +/* + * Compares the raw arch string. N.B. see instead perf_env__arch() if a + * normalized arch is needed. + */ +bool machine__is(struct machine *machine, const char *arch) +{ + return machine && !strcmp(perf_env__raw_arch(machine->env), arch); +} + +int machine__nr_cpus_avail(struct machine *machine) +{ + return machine ? perf_env__nr_cpus_avail(machine->env) : 0; +} + int machine__get_kernel_start(struct machine *machine) { struct map *map = machine__kernel_map(machine); @@ -2358,7 +2482,12 @@ int machine__get_kernel_start(struct machine *machine) machine->kernel_start = 1ULL << 63; if (map) { err = map__load(map); - if (!err) + /* + * On x86_64, PTI entry trampolines are less than the + * start of kernel text, but still above 2^63. So leave + * kernel_start = 1ULL << 63 for x86_64. 
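A small sketch of the raw-arch helpers introduced above, machine__is() and machine__nr_cpus_avail(); the count_trampoline_maps() wrapper is an assumption for illustration.

        #include "util/machine.h"

        /* Illustrative only: how many single-page trampoline maps would be needed. */
        static int count_trampoline_maps(struct machine *machine)
        {
                /* machine__is() compares the raw uname machine string, so this is
                 * "x86_64", not the normalized string perf_env__arch() would return. */
                if (!machine__is(machine, "x86_64"))
                        return 0;

                return machine__nr_cpus_avail(machine);
        }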
+ */ + if (!err && !machine__is(machine, "x86_64")) machine->kernel_start = map->start; } return err; @@ -2373,7 +2502,7 @@ char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, ch { struct machine *machine = vmachine; struct map *map; - struct symbol *sym = map_groups__find_symbol(&machine->kmaps, MAP__FUNCTION, *addrp, &map); + struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map); if (sym == NULL) return NULL; diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h index 66cc200ef86f..1de7660d93e9 100644 --- a/tools/perf/util/machine.h +++ b/tools/perf/util/machine.h @@ -49,13 +49,14 @@ struct machine { struct perf_env *env; struct dsos dsos; struct map_groups kmaps; - struct map *vmlinux_maps[MAP__NR_TYPES]; + struct map *vmlinux_map; u64 kernel_start; pid_t *current_tid; union { /* Tool specific area */ void *priv; u64 db_id; }; + bool trampolines_mapped; }; static inline struct threads *machine__threads(struct machine *machine, pid_t tid) @@ -64,16 +65,22 @@ static inline struct threads *machine__threads(struct machine *machine, pid_t ti return &machine->threads[(unsigned int)tid % THREADS__TABLE_SIZE]; } +/* + * The main kernel (vmlinux) map + */ static inline -struct map *__machine__kernel_map(struct machine *machine, enum map_type type) +struct map *machine__kernel_map(struct machine *machine) { - return machine->vmlinux_maps[type]; + return machine->vmlinux_map; } +/* + * kernel (the one returned by machine__kernel_map()) plus kernel modules maps + */ static inline -struct map *machine__kernel_map(struct machine *machine) +struct maps *machine__kernel_maps(struct machine *machine) { - return __machine__kernel_map(machine, MAP__FUNCTION); + return &machine->kmaps.maps; } int machine__get_kernel_start(struct machine *machine); @@ -182,6 +189,9 @@ static inline bool machine__is_host(struct machine *machine) return machine ? 
machine->pid == HOST_KERNEL_ID : false; } +bool machine__is(struct machine *machine, const char *arch); +int machine__nr_cpus_avail(struct machine *machine); + struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid); struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid); @@ -190,44 +200,27 @@ struct dso *machine__findnew_dso(struct machine *machine, const char *filename); size_t machine__fprintf(struct machine *machine, FILE *fp); static inline -struct symbol *machine__find_kernel_symbol(struct machine *machine, - enum map_type type, u64 addr, +struct symbol *machine__find_kernel_symbol(struct machine *machine, u64 addr, struct map **mapp) { - return map_groups__find_symbol(&machine->kmaps, type, addr, mapp); + return map_groups__find_symbol(&machine->kmaps, addr, mapp); } static inline struct symbol *machine__find_kernel_symbol_by_name(struct machine *machine, - enum map_type type, const char *name, + const char *name, struct map **mapp) { - return map_groups__find_symbol_by_name(&machine->kmaps, type, name, mapp); -} - -static inline -struct symbol *machine__find_kernel_function(struct machine *machine, u64 addr, - struct map **mapp) -{ - return machine__find_kernel_symbol(machine, MAP__FUNCTION, addr, - mapp); -} - -static inline -struct symbol *machine__find_kernel_function_by_name(struct machine *machine, - const char *name, - struct map **mapp) -{ - return map_groups__find_function_by_name(&machine->kmaps, name, mapp); + return map_groups__find_symbol_by_name(&machine->kmaps, name, mapp); } struct map *machine__findnew_module_map(struct machine *machine, u64 start, const char *filename); int arch__fix_module_text_start(u64 *start, const char *name); -int machine__load_kallsyms(struct machine *machine, const char *filename, - enum map_type type); -int machine__load_vmlinux_path(struct machine *machine, enum map_type type); +int machine__load_kallsyms(struct machine *machine, const char *filename); + +int machine__load_vmlinux_path(struct machine *machine); size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp, bool (skip)(struct dso *dso, int parm), int parm); @@ -276,4 +269,25 @@ int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid, */ char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp); +void machine__get_kallsyms_filename(struct machine *machine, char *buf, + size_t bufsz); + +int machine__create_extra_kernel_maps(struct machine *machine, + struct dso *kernel); + +/* Kernel-space maps for symbols that are outside the main kernel map and module maps */ +struct extra_kernel_map { + u64 start; + u64 end; + u64 pgoff; + char name[KMAP_NAME_LEN]; +}; + +int machine__create_extra_kernel_map(struct machine *machine, + struct dso *kernel, + struct extra_kernel_map *xm); + +int machine__map_x86_64_entry_trampolines(struct machine *machine, + struct dso *kernel); + #endif /* __PERF_MACHINE_H */ diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 8fe57031e1a8..6ae97eda370b 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -22,11 +22,6 @@ static void __maps__insert(struct maps *maps, struct map *map); -const char *map_type__name[MAP__NR_TYPES] = { - [MAP__FUNCTION] = "Functions", - [MAP__VARIABLE] = "Variables", -}; - static inline int is_anon_memory(const char *filename, u32 flags) { return flags & MAP_HUGETLB || @@ -129,10 +124,8 @@ static inline bool replace_android_lib(const char *filename, char *newfilename) return false; 
} -void map__init(struct map *map, enum map_type type, - u64 start, u64 end, u64 pgoff, struct dso *dso) +void map__init(struct map *map, u64 start, u64 end, u64 pgoff, struct dso *dso) { - map->type = type; map->start = start; map->end = end; map->pgoff = pgoff; @@ -149,7 +142,7 @@ void map__init(struct map *map, enum map_type type, struct map *map__new(struct machine *machine, u64 start, u64 len, u64 pgoff, u32 d_maj, u32 d_min, u64 ino, u64 ino_gen, u32 prot, u32 flags, char *filename, - enum map_type type, struct thread *thread) + struct thread *thread) { struct map *map = malloc(sizeof(*map)); struct nsinfo *nsi = NULL; @@ -173,7 +166,7 @@ struct map *map__new(struct machine *machine, u64 start, u64 len, map->flags = flags; nsi = nsinfo__get(thread->nsinfo); - if ((anon || no_dso) && nsi && type == MAP__FUNCTION) { + if ((anon || no_dso) && nsi && (prot & PROT_EXEC)) { snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", nsi->pid); filename = newfilename; @@ -203,7 +196,7 @@ struct map *map__new(struct machine *machine, u64 start, u64 len, if (dso == NULL) goto out_delete; - map__init(map, type, start, start + len, pgoff, dso); + map__init(map, start, start + len, pgoff, dso); if (anon || no_dso) { map->map_ip = map->unmap_ip = identity__map_ip; @@ -213,8 +206,8 @@ struct map *map__new(struct machine *machine, u64 start, u64 len, * functions still return NULL, and we avoid the * unnecessary map__load warning. */ - if (type != MAP__FUNCTION) - dso__set_loaded(dso, map->type); + if (!(prot & PROT_EXEC)) + dso__set_loaded(dso); } dso->nsinfo = nsi; dso__put(dso); @@ -231,7 +224,7 @@ out_delete: * they are loaded) and for vmlinux, where only after we load all the * symbols we'll know where it starts and ends. */ -struct map *map__new2(u64 start, struct dso *dso, enum map_type type) +struct map *map__new2(u64 start, struct dso *dso) { struct map *map = calloc(1, (sizeof(*map) + (dso->kernel ? 
sizeof(struct kmap) : 0))); @@ -239,7 +232,7 @@ struct map *map__new2(u64 start, struct dso *dso, enum map_type type) /* * ->end will be filled after we load all the symbols */ - map__init(map, type, start, 0, 0, dso); + map__init(map, start, 0, 0, dso); } return map; @@ -256,7 +249,19 @@ struct map *map__new2(u64 start, struct dso *dso, enum map_type type) */ bool __map__is_kernel(const struct map *map) { - return __machine__kernel_map(map->groups->machine, map->type) == map; + return machine__kernel_map(map->groups->machine) == map; +} + +bool __map__is_extra_kernel_map(const struct map *map) +{ + struct kmap *kmap = __map__kmap((struct map *)map); + + return kmap && kmap->name[0]; +} + +bool map__has_symbols(const struct map *map) +{ + return dso__has_symbols(map->dso); } static void map__exit(struct map *map) @@ -279,7 +284,7 @@ void map__put(struct map *map) void map__fixup_start(struct map *map) { - struct rb_root *symbols = &map->dso->symbols[map->type]; + struct rb_root *symbols = &map->dso->symbols; struct rb_node *nd = rb_first(symbols); if (nd != NULL) { struct symbol *sym = rb_entry(nd, struct symbol, rb_node); @@ -289,7 +294,7 @@ void map__fixup_start(struct map *map) void map__fixup_end(struct map *map) { - struct rb_root *symbols = &map->dso->symbols[map->type]; + struct rb_root *symbols = &map->dso->symbols; struct rb_node *nd = rb_last(symbols); if (nd != NULL) { struct symbol *sym = rb_entry(nd, struct symbol, rb_node); @@ -304,7 +309,7 @@ int map__load(struct map *map) const char *name = map->dso->long_name; int nr; - if (dso__loaded(map->dso, map->type)) + if (dso__loaded(map->dso)) return 0; nr = dso__load(map->dso, map); @@ -348,7 +353,7 @@ struct symbol *map__find_symbol(struct map *map, u64 addr) if (map__load(map) < 0) return NULL; - return dso__find_symbol(map->dso, map->type, addr); + return dso__find_symbol(map->dso, addr); } struct symbol *map__find_symbol_by_name(struct map *map, const char *name) @@ -356,10 +361,10 @@ struct symbol *map__find_symbol_by_name(struct map *map, const char *name) if (map__load(map) < 0) return NULL; - if (!dso__sorted_by_name(map->dso, map->type)) - dso__sort_by_name(map->dso, map->type); + if (!dso__sorted_by_name(map->dso)) + dso__sort_by_name(map->dso); - return dso__find_symbol_by_name(map->dso, map->type, name); + return dso__find_symbol_by_name(map->dso, name); } struct map *map__clone(struct map *from) @@ -494,10 +499,7 @@ static void maps__init(struct maps *maps) void map_groups__init(struct map_groups *mg, struct machine *machine) { - int i; - for (i = 0; i < MAP__NR_TYPES; ++i) { - maps__init(&mg->maps[i]); - } + maps__init(&mg->maps); mg->machine = machine; refcount_set(&mg->refcnt, 1); } @@ -525,22 +527,12 @@ static void maps__exit(struct maps *maps) void map_groups__exit(struct map_groups *mg) { - int i; - - for (i = 0; i < MAP__NR_TYPES; ++i) - maps__exit(&mg->maps[i]); + maps__exit(&mg->maps); } bool map_groups__empty(struct map_groups *mg) { - int i; - - for (i = 0; i < MAP__NR_TYPES; ++i) { - if (maps__first(&mg->maps[i])) - return false; - } - - return true; + return !maps__first(&mg->maps); } struct map_groups *map_groups__new(struct machine *machine) @@ -566,10 +558,9 @@ void map_groups__put(struct map_groups *mg) } struct symbol *map_groups__find_symbol(struct map_groups *mg, - enum map_type type, u64 addr, - struct map **mapp) + u64 addr, struct map **mapp) { - struct map *map = map_groups__find(mg, type, addr); + struct map *map = map_groups__find(mg, addr); /* Ensure map is loaded before using map->map_ip 
*/ if (map != NULL && map__load(map) >= 0) { @@ -608,13 +599,10 @@ out: } struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg, - enum map_type type, const char *name, struct map **mapp) { - struct symbol *sym = maps__find_symbol_by_name(&mg->maps[type], name, mapp); - - return sym; + return maps__find_symbol_by_name(&mg->maps, name, mapp); } int map_groups__find_ams(struct addr_map_symbol *ams) @@ -622,8 +610,7 @@ int map_groups__find_ams(struct addr_map_symbol *ams) if (ams->addr < ams->map->start || ams->addr >= ams->map->end) { if (ams->map->groups == NULL) return -1; - ams->map = map_groups__find(ams->map->groups, ams->map->type, - ams->addr); + ams->map = map_groups__find(ams->map->groups, ams->addr); if (ams->map == NULL) return -1; } @@ -646,7 +633,7 @@ static size_t maps__fprintf(struct maps *maps, FILE *fp) printed += fprintf(fp, "Map:"); printed += map__fprintf(pos, fp); if (verbose > 2) { - printed += dso__fprintf(pos->dso, pos->type, fp); + printed += dso__fprintf(pos->dso, fp); printed += fprintf(fp, "--\n"); } } @@ -656,24 +643,14 @@ static size_t maps__fprintf(struct maps *maps, FILE *fp) return printed; } -size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type, - FILE *fp) -{ - size_t printed = fprintf(fp, "%s:\n", map_type__name[type]); - return printed += maps__fprintf(&mg->maps[type], fp); -} - size_t map_groups__fprintf(struct map_groups *mg, FILE *fp) { - size_t printed = 0, i; - for (i = 0; i < MAP__NR_TYPES; ++i) - printed += __map_groups__fprintf_maps(mg, i, fp); - return printed; + return maps__fprintf(&mg->maps, fp); } static void __map_groups__insert(struct map_groups *mg, struct map *map) { - __maps__insert(&mg->maps[map->type], map); + __maps__insert(&mg->maps, map); map->groups = mg; } @@ -758,19 +735,18 @@ out: int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, FILE *fp) { - return maps__fixup_overlappings(&mg->maps[map->type], map, fp); + return maps__fixup_overlappings(&mg->maps, map, fp); } /* * XXX This should not really _copy_ te maps, but refcount them. 
*/ -int map_groups__clone(struct thread *thread, - struct map_groups *parent, enum map_type type) +int map_groups__clone(struct thread *thread, struct map_groups *parent) { struct map_groups *mg = thread->mg; int err = -ENOMEM; struct map *map; - struct maps *maps = &parent->maps[type]; + struct maps *maps = &parent->maps; down_read(&maps->lock); @@ -877,15 +853,22 @@ struct map *map__next(struct map *map) return NULL; } -struct kmap *map__kmap(struct map *map) +struct kmap *__map__kmap(struct map *map) { - if (!map->dso || !map->dso->kernel) { - pr_err("Internal error: map__kmap with a non-kernel map\n"); + if (!map->dso || !map->dso->kernel) return NULL; - } return (struct kmap *)(map + 1); } +struct kmap *map__kmap(struct map *map) +{ + struct kmap *kmap = __map__kmap(map); + + if (!kmap) + pr_err("Internal error: map__kmap with a non-kernel map\n"); + return kmap; +} + struct map_groups *map__kmaps(struct map *map) { struct kmap *kmap = map__kmap(map); diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h index 0e9bbe01b0ab..97e2a063bd65 100644 --- a/tools/perf/util/map.h +++ b/tools/perf/util/map.h @@ -8,19 +8,11 @@ #include <linux/rbtree.h> #include <pthread.h> #include <stdio.h> +#include <string.h> #include <stdbool.h> #include <linux/types.h> #include "rwsem.h" -enum map_type { - MAP__FUNCTION = 0, - MAP__VARIABLE, -}; - -#define MAP__NR_TYPES (MAP__VARIABLE + 1) - -extern const char *map_type__name[MAP__NR_TYPES]; - struct dso; struct ip_callchain; struct ref_reloc_sym; @@ -35,7 +27,6 @@ struct map { }; u64 start; u64 end; - u8 /* enum map_type */ type; bool erange_warned; u32 priv; u32 prot; @@ -56,9 +47,12 @@ struct map { refcount_t refcnt; }; +#define KMAP_NAME_LEN 256 + struct kmap { struct ref_reloc_sym *ref_reloc_sym; struct map_groups *kmaps; + char name[KMAP_NAME_LEN]; }; struct maps { @@ -67,7 +61,7 @@ struct maps { }; struct map_groups { - struct maps maps[MAP__NR_TYPES]; + struct maps maps; struct machine *machine; refcount_t refcnt; }; @@ -85,6 +79,7 @@ static inline struct map_groups *map_groups__get(struct map_groups *mg) void map_groups__put(struct map_groups *mg); +struct kmap *__map__kmap(struct map *map); struct kmap *map__kmap(struct map *map); struct map_groups *map__kmaps(struct map *map); @@ -125,7 +120,7 @@ struct thread; * Note: caller must ensure map->dso is not NULL (map is loaded). 
*/ #define map__for_each_symbol(map, pos, n) \ - dso__for_each_symbol(map->dso, pos, n, map->type) + dso__for_each_symbol(map->dso, pos, n) /* map__for_each_symbol_with_name - iterate over the symbols in the given map * that have the given name @@ -144,13 +139,13 @@ struct thread; #define map__for_each_symbol_by_name(map, sym_name, pos) \ __map__for_each_symbol_by_name(map, sym_name, (pos)) -void map__init(struct map *map, enum map_type type, +void map__init(struct map *map, u64 start, u64 end, u64 pgoff, struct dso *dso); struct map *map__new(struct machine *machine, u64 start, u64 len, u64 pgoff, u32 d_maj, u32 d_min, u64 ino, u64 ino_gen, u32 prot, u32 flags, - char *filename, enum map_type type, struct thread *thread); -struct map *map__new2(u64 start, struct dso *dso, enum map_type type); + char *filename, struct thread *thread); +struct map *map__new2(u64 start, struct dso *dso); void map__delete(struct map *map); struct map *map__clone(struct map *map); @@ -185,8 +180,6 @@ void map__fixup_end(struct map *map); void map__reloc_vmlinux(struct map *map); -size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type, - FILE *fp); void maps__insert(struct maps *maps, struct map *map); void maps__remove(struct maps *maps, struct map *map); struct map *maps__find(struct maps *maps, u64 addr); @@ -197,34 +190,29 @@ struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, void map_groups__init(struct map_groups *mg, struct machine *machine); void map_groups__exit(struct map_groups *mg); int map_groups__clone(struct thread *thread, - struct map_groups *parent, enum map_type type); + struct map_groups *parent); size_t map_groups__fprintf(struct map_groups *mg, FILE *fp); -int maps__set_kallsyms_ref_reloc_sym(struct map **maps, const char *symbol_name, - u64 addr); +int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, + u64 addr); static inline void map_groups__insert(struct map_groups *mg, struct map *map) { - maps__insert(&mg->maps[map->type], map); + maps__insert(&mg->maps, map); map->groups = mg; } static inline void map_groups__remove(struct map_groups *mg, struct map *map) { - maps__remove(&mg->maps[map->type], map); + maps__remove(&mg->maps, map); } -static inline struct map *map_groups__find(struct map_groups *mg, - enum map_type type, u64 addr) +static inline struct map *map_groups__find(struct map_groups *mg, u64 addr) { - return maps__find(&mg->maps[type], addr); + return maps__find(&mg->maps, addr); } -static inline struct map *map_groups__first(struct map_groups *mg, - enum map_type type) -{ - return maps__first(&mg->maps[type]); -} +struct map *map_groups__first(struct map_groups *mg); static inline struct map *map_groups__next(struct map *map) { @@ -232,11 +220,9 @@ static inline struct map *map_groups__next(struct map *map) } struct symbol *map_groups__find_symbol(struct map_groups *mg, - enum map_type type, u64 addr, - struct map **mapp); + u64 addr, struct map **mapp); struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg, - enum map_type type, const char *name, struct map **mapp); @@ -244,24 +230,26 @@ struct addr_map_symbol; int map_groups__find_ams(struct addr_map_symbol *ams); -static inline -struct symbol *map_groups__find_function_by_name(struct map_groups *mg, - const char *name, struct map **mapp) -{ - return map_groups__find_symbol_by_name(mg, MAP__FUNCTION, name, mapp); -} - int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, FILE *fp); -struct map 
*map_groups__find_by_name(struct map_groups *mg, - enum map_type type, const char *name); +struct map *map_groups__find_by_name(struct map_groups *mg, const char *name); bool __map__is_kernel(const struct map *map); +bool __map__is_extra_kernel_map(const struct map *map); static inline bool __map__is_kmodule(const struct map *map) { - return !__map__is_kernel(map); + return !__map__is_kernel(map) && !__map__is_extra_kernel_map(map); +} + +bool map__has_symbols(const struct map *map); + +#define ENTRY_TRAMPOLINE_NAME "__entry_SYSCALL_64_trampoline" + +static inline bool is_entry_trampoline(const char *name) +{ + return !strcmp(name, ENTRY_TRAMPOLINE_NAME); } #endif /* __PERF_MAP_H */ diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 2fc4ee8b86c1..15eec49e71a1 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -156,13 +156,12 @@ struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = { (strcmp(sys_dirent->d_name, ".")) && \ (strcmp(sys_dirent->d_name, ".."))) -static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir) +static int tp_event_has_id(const char *dir_path, struct dirent *evt_dir) { char evt_path[MAXPATHLEN]; int fd; - snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path, - sys_dir->d_name, evt_dir->d_name); + snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path, evt_dir->d_name); fd = open(evt_path, O_RDONLY); if (fd < 0) return -EINVAL; @@ -171,12 +170,12 @@ static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir) return 0; } -#define for_each_event(sys_dirent, evt_dir, evt_dirent) \ +#define for_each_event(dir_path, evt_dir, evt_dirent) \ while ((evt_dirent = readdir(evt_dir)) != NULL) \ if (evt_dirent->d_type == DT_DIR && \ (strcmp(evt_dirent->d_name, ".")) && \ (strcmp(evt_dirent->d_name, "..")) && \ - (!tp_event_has_id(sys_dirent, evt_dirent))) + (!tp_event_has_id(dir_path, evt_dirent))) #define MAX_EVENT_LENGTH 512 @@ -190,21 +189,21 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config) int fd; u64 id; char evt_path[MAXPATHLEN]; - char dir_path[MAXPATHLEN]; + char *dir_path; - sys_dir = opendir(tracing_events_path); + sys_dir = tracing_events__opendir(); if (!sys_dir) return NULL; for_each_subsystem(sys_dir, sys_dirent) { - - snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, - sys_dirent->d_name); + dir_path = get_events_file(sys_dirent->d_name); + if (!dir_path) + continue; evt_dir = opendir(dir_path); if (!evt_dir) - continue; + goto next; - for_each_event(sys_dirent, evt_dir, evt_dirent) { + for_each_event(dir_path, evt_dir, evt_dirent) { scnprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path, evt_dirent->d_name); @@ -218,6 +217,7 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config) close(fd); id = atoll(id_buf); if (id == config) { + put_events_file(dir_path); closedir(evt_dir); closedir(sys_dir); path = zalloc(sizeof(*path)); @@ -242,6 +242,8 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config) } } closedir(evt_dir); +next: + put_events_file(dir_path); } closedir(sys_dir); @@ -512,14 +514,19 @@ static int add_tracepoint_multi_event(struct list_head *list, int *idx, struct parse_events_error *err, struct list_head *head_config) { - char evt_path[MAXPATHLEN]; + char *evt_path; struct dirent *evt_ent; DIR *evt_dir; int ret = 0, found = 0; - snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name); + evt_path = get_events_file(sys_name); + if (!evt_path) { + tracepoint_error(err, errno, sys_name, evt_name); + return 
-1; + } evt_dir = opendir(evt_path); if (!evt_dir) { + put_events_file(evt_path); tracepoint_error(err, errno, sys_name, evt_name); return -1; } @@ -545,6 +552,7 @@ static int add_tracepoint_multi_event(struct list_head *list, int *idx, ret = -1; } + put_events_file(evt_path); closedir(evt_dir); return ret; } @@ -570,7 +578,7 @@ static int add_tracepoint_multi_sys(struct list_head *list, int *idx, DIR *events_dir; int ret = 0; - events_dir = opendir(tracing_events_path); + events_dir = tracing_events__opendir(); if (!events_dir) { tracepoint_error(err, errno, sys_name, evt_name); return -1; @@ -2092,13 +2100,13 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob, DIR *sys_dir, *evt_dir; struct dirent *sys_dirent, *evt_dirent; char evt_path[MAXPATHLEN]; - char dir_path[MAXPATHLEN]; + char *dir_path; char **evt_list = NULL; unsigned int evt_i = 0, evt_num = 0; bool evt_num_known = false; restart: - sys_dir = opendir(tracing_events_path); + sys_dir = tracing_events__opendir(); if (!sys_dir) return; @@ -2113,13 +2121,14 @@ restart: !strglobmatch(sys_dirent->d_name, subsys_glob)) continue; - snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, - sys_dirent->d_name); + dir_path = get_events_file(sys_dirent->d_name); + if (!dir_path) + continue; evt_dir = opendir(dir_path); if (!evt_dir) - continue; + goto next; - for_each_event(sys_dirent, evt_dir, evt_dirent) { + for_each_event(dir_path, evt_dir, evt_dirent) { if (event_glob != NULL && !strglobmatch(evt_dirent->d_name, event_glob)) continue; @@ -2133,11 +2142,15 @@ restart: sys_dirent->d_name, evt_dirent->d_name); evt_list[evt_i] = strdup(evt_path); - if (evt_list[evt_i] == NULL) + if (evt_list[evt_i] == NULL) { + put_events_file(dir_path); goto out_close_evt_dir; + } evt_i++; } closedir(evt_dir); +next: + put_events_file(dir_path); } closedir(sys_dir); @@ -2185,21 +2198,21 @@ int is_valid_tracepoint(const char *event_string) DIR *sys_dir, *evt_dir; struct dirent *sys_dirent, *evt_dirent; char evt_path[MAXPATHLEN]; - char dir_path[MAXPATHLEN]; + char *dir_path; - sys_dir = opendir(tracing_events_path); + sys_dir = tracing_events__opendir(); if (!sys_dir) return 0; for_each_subsystem(sys_dir, sys_dirent) { - - snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, - sys_dirent->d_name); + dir_path = get_events_file(sys_dirent->d_name); + if (!dir_path) + continue; evt_dir = opendir(dir_path); if (!evt_dir) - continue; + goto next; - for_each_event(sys_dirent, evt_dir, evt_dirent) { + for_each_event(dir_path, evt_dir, evt_dirent) { snprintf(evt_path, MAXPATHLEN, "%s:%s", sys_dirent->d_name, evt_dirent->d_name); if (!strcmp(evt_path, event_string)) { @@ -2209,6 +2222,8 @@ int is_valid_tracepoint(const char *event_string) } } closedir(evt_dir); +next: + put_events_file(dir_path); } closedir(sys_dir); return 0; diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index e1dbc9821617..3094f11e7d81 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -111,17 +111,6 @@ void exit_probe_symbol_maps(void) symbol__exit(); } -static struct symbol *__find_kernel_function_by_name(const char *name, - struct map **mapp) -{ - return machine__find_kernel_function_by_name(host_machine, name, mapp); -} - -static struct symbol *__find_kernel_function(u64 addr, struct map **mapp) -{ - return machine__find_kernel_function(host_machine, addr, mapp); -} - static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void) { /* kmap->ref_reloc_sym should be set if host_machine is 
initialized */ @@ -149,7 +138,7 @@ static int kernel_get_symbol_address_by_name(const char *name, u64 *addr, if (reloc_sym && strcmp(name, reloc_sym->name) == 0) *addr = (reloc) ? reloc_sym->addr : reloc_sym->unrelocated_addr; else { - sym = __find_kernel_function_by_name(name, &map); + sym = machine__find_kernel_symbol_by_name(host_machine, name, &map); if (!sym) return -ENOENT; *addr = map->unmap_ip(map, sym->start) - @@ -161,8 +150,7 @@ static int kernel_get_symbol_address_by_name(const char *name, u64 *addr, static struct map *kernel_get_module_map(const char *module) { - struct map_groups *grp = &host_machine->kmaps; - struct maps *maps = &grp->maps[MAP__FUNCTION]; + struct maps *maps = machine__kernel_maps(host_machine); struct map *pos; /* A file path -- this is an offline module */ @@ -341,7 +329,7 @@ static int kernel_get_module_dso(const char *module, struct dso **pdso) char module_name[128]; snprintf(module_name, sizeof(module_name), "[%s]", module); - map = map_groups__find_by_name(&host_machine->kmaps, MAP__FUNCTION, module_name); + map = map_groups__find_by_name(&host_machine->kmaps, module_name); if (map) { dso = map->dso; goto found; @@ -2098,7 +2086,7 @@ static int find_perf_probe_point_from_map(struct probe_trace_point *tp, } if (addr) { addr += tp->offset; - sym = __find_kernel_function(addr, &map); + sym = machine__find_kernel_symbol(host_machine, addr, &map); } } @@ -3504,19 +3492,18 @@ int show_available_funcs(const char *target, struct nsinfo *nsi, (target) ? : "kernel"); goto end; } - if (!dso__sorted_by_name(map->dso, map->type)) - dso__sort_by_name(map->dso, map->type); + if (!dso__sorted_by_name(map->dso)) + dso__sort_by_name(map->dso); /* Show all (filtered) symbols */ setup_pager(); - for (nd = rb_first(&map->dso->symbol_names[map->type]); nd; nd = rb_next(nd)) { + for (nd = rb_first(&map->dso->symbol_names); nd; nd = rb_next(nd)) { struct symbol_name_rb_node *pos = rb_entry(nd, struct symbol_name_rb_node, rb_node); if (strfilter__compare(_filter, pos->sym.name)) printf("%s\n", pos->sym.name); - } - + } end: map__put(map); exit_probe_symbol_maps(); diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c index 4ae1123c6794..b76088fadf3d 100644 --- a/tools/perf/util/probe-file.c +++ b/tools/perf/util/probe-file.c @@ -84,8 +84,7 @@ int open_trace_file(const char *trace_file, bool readwrite) char buf[PATH_MAX]; int ret; - ret = e_snprintf(buf, PATH_MAX, "%s/%s", - tracing_path, trace_file); + ret = e_snprintf(buf, PATH_MAX, "%s/%s", tracing_path_mount(), trace_file); if (ret >= 0) { pr_debug("Opening %s write=%d\n", buf, readwrite); if (readwrite && !probe_event_dry_run) diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index f4a7a437ee87..b998bb475589 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -1973,12 +1973,11 @@ bool perf_session__has_traces(struct perf_session *session, const char *msg) return false; } -int maps__set_kallsyms_ref_reloc_sym(struct map **maps, - const char *symbol_name, u64 addr) +int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr) { char *bracket; - int i; struct ref_reloc_sym *ref; + struct kmap *kmap; ref = zalloc(sizeof(struct ref_reloc_sym)); if (ref == NULL) @@ -1996,13 +1995,9 @@ int maps__set_kallsyms_ref_reloc_sym(struct map **maps, ref->addr = addr; - for (i = 0; i < MAP__NR_TYPES; ++i) { - struct kmap *kmap = map__kmap(maps[i]); - - if (!kmap) - continue; + kmap = map__kmap(map); + if (kmap) kmap->ref_reloc_sym = ref; - } return 0; } 
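For reference, once enum map_type is gone the kernel-symbol lookups shown in the probe-event.c hunk above collapse into a single address-based call, with the containing map returned through an out parameter. Below is a minimal sketch of such a caller, not part of the patch: it assumes perf's internal headers and an already-initialized struct machine, and the helper name print_kernel_sym plus the printed format are illustrative only.

#include <inttypes.h>
#include <stdio.h>
/* Assumed perf-internal headers providing struct machine, map, symbol. */
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"

/* Resolve a kernel address to a symbol without naming a map type. */
static void print_kernel_sym(struct machine *machine, u64 addr)
{
	struct map *map = NULL;
	struct symbol *sym;

	/* Before this series: machine__find_kernel_function(machine, addr, &map) */
	sym = machine__find_kernel_symbol(machine, addr, &map);
	if (sym && map)
		printf("%#" PRIx64 ": %s (%s)\n",
		       addr, sym->name, map->dso->short_name);
}

The type that used to select between the function and variable trees is instead carried by the symbol itself (sym->type, STT_FUNC/STT_OBJECT), which is what the sort.c hunk below relies on.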
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 26a68dfd8a4f..4058ade352a5 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -2,7 +2,7 @@ #include <errno.h> #include <inttypes.h> #include <regex.h> -#include <sys/mman.h> +#include <linux/mman.h> #include "sort.h" #include "hist.h" #include "comm.h" @@ -282,7 +282,7 @@ static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym, ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level); if (sym && map) { - if (map->type == MAP__VARIABLE) { + if (sym->type == STT_OBJECT) { ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name); ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", ip - map->unmap_ip(map, sym->start)); @@ -1211,7 +1211,7 @@ static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf, /* print [s] for shared data mmaps */ if ((he->cpumode != PERF_RECORD_MISC_KERNEL) && - map && (map->type == MAP__VARIABLE) && + map && !(map->prot & PROT_EXEC) && (map->flags & MAP_SHARED) && (map->maj || map->min || map->ino || map->ino_generation)) @@ -2582,7 +2582,7 @@ int sort_dimension__add(struct perf_hpp_list *list, const char *tok, if (sort__mode != SORT_MODE__MEMORY) return -EINVAL; - if (sd->entry == &sort_mem_dcacheline && cacheline_size == 0) + if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0) return -EINVAL; if (sd->entry == &sort_mem_daddr_sym) @@ -2628,7 +2628,7 @@ static int setup_sort_list(struct perf_hpp_list *list, char *str, if (*tok) { ret = sort_dimension__add(list, tok, evlist, level); if (ret == -EINVAL) { - if (!cacheline_size && !strncasecmp(tok, "dcacheline", strlen(tok))) + if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok))) pr_err("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system"); else pr_err("Invalid --sort key: `%s'", tok); diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index 035b62e2c60b..9e6896293bbd 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -186,13 +186,13 @@ static inline float hist_entry__get_percent_limit(struct hist_entry *he) static inline u64 cl_address(u64 address) { /* return the cacheline of the address */ - return (address & ~(cacheline_size - 1)); + return (address & ~(cacheline_size() - 1)); } static inline u64 cl_offset(u64 address) { /* return the cacheline of the address */ - return (address & (cacheline_size - 1)); + return (address & (cacheline_size() - 1)); } enum sort_mode { diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c index 3c21fd059b64..09d6746e6ec8 100644 --- a/tools/perf/util/srcline.c +++ b/tools/perf/util/srcline.c @@ -103,6 +103,7 @@ static struct symbol *new_inline_sym(struct dso *dso, inline_sym = symbol__new(base_sym ? base_sym->start : 0, base_sym ? base_sym->end : 0, base_sym ? base_sym->binding : 0, + base_sym ? 
base_sym->type : 0, funcname); if (inline_sym) inline_sym->inlined = 1; diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h index 8f56ba4fd258..36efb986f7fc 100644 --- a/tools/perf/util/stat.h +++ b/tools/perf/util/stat.h @@ -7,8 +7,7 @@ #include "xyarray.h" #include "rblist.h" -struct stats -{ +struct stats { double n, mean, M2; u64 max, min; }; diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 2de770511e70..29770ea61768 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -114,16 +114,9 @@ static inline int elf_sym__is_label(const GElf_Sym *sym) sym->st_shndx != SHN_ABS; } -static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type) +static bool elf_sym__filter(GElf_Sym *sym) { - switch (type) { - case MAP__FUNCTION: - return elf_sym__is_function(sym); - case MAP__VARIABLE: - return elf_sym__is_object(sym); - default: - return false; - } + return elf_sym__is_function(sym) || elf_sym__is_object(sym); } static inline const char *elf_sym__name(const GElf_Sym *sym, @@ -150,17 +143,10 @@ static inline bool elf_sec__is_data(const GElf_Shdr *shdr, return strstr(elf_sec__name(shdr, secstrs), "data") != NULL; } -static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs, - enum map_type type) +static bool elf_sec__filter(GElf_Shdr *shdr, Elf_Data *secstrs) { - switch (type) { - case MAP__FUNCTION: - return elf_sec__is_text(shdr, secstrs); - case MAP__VARIABLE: - return elf_sec__is_data(shdr, secstrs); - default: - return false; - } + return elf_sec__is_text(shdr, secstrs) || + elf_sec__is_data(shdr, secstrs); } static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr) @@ -256,7 +242,7 @@ static char *demangle_sym(struct dso *dso, int kmodule, const char *elf_name) * And always look at the original dso, not at debuginfo packages, that * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS). 
*/ -int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map *map) +int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss) { uint32_t nr_rel_entries, idx; GElf_Sym sym; @@ -364,12 +350,12 @@ int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map * free(demangled); f = symbol__new(plt_offset, plt_entry_size, - STB_GLOBAL, sympltname); + STB_GLOBAL, STT_FUNC, sympltname); if (!f) goto out_elf_end; plt_offset += plt_entry_size; - symbols__insert(&dso->symbols[map->type], f); + symbols__insert(&dso->symbols, f); ++nr; } } else if (shdr_rel_plt.sh_type == SHT_REL) { @@ -390,12 +376,12 @@ int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map * free(demangled); f = symbol__new(plt_offset, plt_entry_size, - STB_GLOBAL, sympltname); + STB_GLOBAL, STT_FUNC, sympltname); if (!f) goto out_elf_end; plt_offset += plt_entry_size; - symbols__insert(&dso->symbols[map->type], f); + symbols__insert(&dso->symbols, f); ++nr; } } @@ -811,6 +797,110 @@ static u64 ref_reloc(struct kmap *kmap) void __weak arch__sym_update(struct symbol *s __maybe_unused, GElf_Sym *sym __maybe_unused) { } +static int dso__process_kernel_symbol(struct dso *dso, struct map *map, + GElf_Sym *sym, GElf_Shdr *shdr, + struct map_groups *kmaps, struct kmap *kmap, + struct dso **curr_dsop, struct map **curr_mapp, + const char *section_name, + bool adjust_kernel_syms, bool kmodule, bool *remap_kernel) +{ + struct dso *curr_dso = *curr_dsop; + struct map *curr_map; + char dso_name[PATH_MAX]; + + /* Adjust symbol to map to file offset */ + if (adjust_kernel_syms) + sym->st_value -= shdr->sh_addr - shdr->sh_offset; + + if (strcmp(section_name, (curr_dso->short_name + dso->short_name_len)) == 0) + return 0; + + if (strcmp(section_name, ".text") == 0) { + /* + * The initial kernel mapping is based on + * kallsyms and identity maps. Overwrite it to + * map to the kernel dso. + */ + if (*remap_kernel && dso->kernel) { + *remap_kernel = false; + map->start = shdr->sh_addr + ref_reloc(kmap); + map->end = map->start + shdr->sh_size; + map->pgoff = shdr->sh_offset; + map->map_ip = map__map_ip; + map->unmap_ip = map__unmap_ip; + /* Ensure maps are correctly ordered */ + if (kmaps) { + map__get(map); + map_groups__remove(kmaps, map); + map_groups__insert(kmaps, map); + map__put(map); + } + } + + /* + * The initial module mapping is based on + * /proc/modules mapped to offset zero. + * Overwrite it to map to the module dso. 
+ */ + if (*remap_kernel && kmodule) { + *remap_kernel = false; + map->pgoff = shdr->sh_offset; + } + + *curr_mapp = map; + *curr_dsop = dso; + return 0; + } + + if (!kmap) + return 0; + + snprintf(dso_name, sizeof(dso_name), "%s%s", dso->short_name, section_name); + + curr_map = map_groups__find_by_name(kmaps, dso_name); + if (curr_map == NULL) { + u64 start = sym->st_value; + + if (kmodule) + start += map->start + shdr->sh_offset; + + curr_dso = dso__new(dso_name); + if (curr_dso == NULL) + return -1; + curr_dso->kernel = dso->kernel; + curr_dso->long_name = dso->long_name; + curr_dso->long_name_len = dso->long_name_len; + curr_map = map__new2(start, curr_dso); + dso__put(curr_dso); + if (curr_map == NULL) + return -1; + + if (adjust_kernel_syms) { + curr_map->start = shdr->sh_addr + ref_reloc(kmap); + curr_map->end = curr_map->start + shdr->sh_size; + curr_map->pgoff = shdr->sh_offset; + } else { + curr_map->map_ip = curr_map->unmap_ip = identity__map_ip; + } + curr_dso->symtab_type = dso->symtab_type; + map_groups__insert(kmaps, curr_map); + /* + * Add it before we drop the referece to curr_map, i.e. while + * we still are sure to have a reference to this DSO via + * *curr_map->dso. + */ + dsos__add(&map->groups->machine->dsos, curr_dso); + /* kmaps already got it */ + map__put(curr_map); + dso__set_loaded(curr_dso); + *curr_mapp = curr_map; + *curr_dsop = curr_dso; + } else + *curr_dsop = curr_map->dso; + + return 0; +} + int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, struct symsrc *runtime_ss, int kmodule) { @@ -844,7 +934,7 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, * have the wrong values for the dso maps, so remove them. */ if (kmodule && syms_ss->symtab) - symbols__delete(&dso->symbols[map->type]); + symbols__delete(&dso->symbols); if (!syms_ss->symtab) { /* @@ -921,10 +1011,10 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap); /* - * Initial kernel and module mappings do not map to the dso. For - * function mappings, flag the fixups. + * Initial kernel and module mappings do not map to the dso. + * Flag the fixups. 
*/ - if (map->type == MAP__FUNCTION && (dso->kernel || kmodule)) { + if (dso->kernel || kmodule) { remap_kernel = true; adjust_kernel_syms = dso->adjust_symbols; } @@ -936,7 +1026,7 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, const char *section_name; bool used_opd = false; - if (!is_label && !elf_sym__is_a(&sym, map->type)) + if (!is_label && !elf_sym__filter(&sym)) continue; /* Reject ARM ELF "mapping symbols": these aren't unique and @@ -974,7 +1064,7 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, gelf_getshdr(sec, &shdr); - if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type)) + if (is_label && !elf_sec__filter(&shdr, secstrs)) continue; section_name = elf_sec__name(&shdr, secstrs); @@ -982,134 +1072,37 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, /* On ARM, symbols for thumb functions have 1 added to * the symbol address as a flag - remove it */ if ((ehdr.e_machine == EM_ARM) && - (map->type == MAP__FUNCTION) && + (GELF_ST_TYPE(sym.st_info) == STT_FUNC) && (sym.st_value & 1)) --sym.st_value; if (dso->kernel || kmodule) { - char dso_name[PATH_MAX]; - - /* Adjust symbol to map to file offset */ - if (adjust_kernel_syms) - sym.st_value -= shdr.sh_addr - shdr.sh_offset; - - if (strcmp(section_name, - (curr_dso->short_name + - dso->short_name_len)) == 0) - goto new_symbol; - - if (strcmp(section_name, ".text") == 0) { - /* - * The initial kernel mapping is based on - * kallsyms and identity maps. Overwrite it to - * map to the kernel dso. - */ - if (remap_kernel && dso->kernel) { - remap_kernel = false; - map->start = shdr.sh_addr + - ref_reloc(kmap); - map->end = map->start + shdr.sh_size; - map->pgoff = shdr.sh_offset; - map->map_ip = map__map_ip; - map->unmap_ip = map__unmap_ip; - /* Ensure maps are correctly ordered */ - if (kmaps) { - map__get(map); - map_groups__remove(kmaps, map); - map_groups__insert(kmaps, map); - map__put(map); - } - } - - /* - * The initial module mapping is based on - * /proc/modules mapped to offset zero. - * Overwrite it to map to the module dso. - */ - if (remap_kernel && kmodule) { - remap_kernel = false; - map->pgoff = shdr.sh_offset; - } - - curr_map = map; - curr_dso = dso; - goto new_symbol; - } - - if (!kmap) - goto new_symbol; - - snprintf(dso_name, sizeof(dso_name), - "%s%s", dso->short_name, section_name); - - curr_map = map_groups__find_by_name(kmaps, map->type, dso_name); - if (curr_map == NULL) { - u64 start = sym.st_value; - - if (kmodule) - start += map->start + shdr.sh_offset; - - curr_dso = dso__new(dso_name); - if (curr_dso == NULL) - goto out_elf_end; - curr_dso->kernel = dso->kernel; - curr_dso->long_name = dso->long_name; - curr_dso->long_name_len = dso->long_name_len; - curr_map = map__new2(start, curr_dso, - map->type); - dso__put(curr_dso); - if (curr_map == NULL) { - goto out_elf_end; - } - if (adjust_kernel_syms) { - curr_map->start = shdr.sh_addr + - ref_reloc(kmap); - curr_map->end = curr_map->start + - shdr.sh_size; - curr_map->pgoff = shdr.sh_offset; - } else { - curr_map->map_ip = identity__map_ip; - curr_map->unmap_ip = identity__map_ip; - } - curr_dso->symtab_type = dso->symtab_type; - map_groups__insert(kmaps, curr_map); - /* - * Add it before we drop the referece to curr_map, - * i.e. while we still are sure to have a reference - * to this DSO via curr_map->dso. 
- */ - dsos__add(&map->groups->machine->dsos, curr_dso); - /* kmaps already got it */ - map__put(curr_map); - dso__set_loaded(curr_dso, map->type); - } else - curr_dso = curr_map->dso; - - goto new_symbol; - } - - if ((used_opd && runtime_ss->adjust_symbols) - || (!used_opd && syms_ss->adjust_symbols)) { + if (dso__process_kernel_symbol(dso, map, &sym, &shdr, kmaps, kmap, &curr_dso, &curr_map, + section_name, adjust_kernel_syms, kmodule, &remap_kernel)) + goto out_elf_end; + } else if ((used_opd && runtime_ss->adjust_symbols) || + (!used_opd && syms_ss->adjust_symbols)) { pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " " "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__, (u64)sym.st_value, (u64)shdr.sh_addr, (u64)shdr.sh_offset); sym.st_value -= shdr.sh_addr - shdr.sh_offset; } -new_symbol: + demangled = demangle_sym(dso, kmodule, elf_name); if (demangled != NULL) elf_name = demangled; f = symbol__new(sym.st_value, sym.st_size, - GELF_ST_BIND(sym.st_info), elf_name); + GELF_ST_BIND(sym.st_info), + GELF_ST_TYPE(sym.st_info), elf_name); free(demangled); if (!f) goto out_elf_end; arch__sym_update(f, &sym); - __symbols__insert(&curr_dso->symbols[curr_map->type], f, dso->kernel); + __symbols__insert(&curr_dso->symbols, f, dso->kernel); nr++; } @@ -1117,14 +1110,14 @@ new_symbol: * For misannotated, zeroed, ASM function sizes. */ if (nr > 0) { - symbols__fixup_end(&dso->symbols[map->type]); - symbols__fixup_duplicate(&dso->symbols[map->type]); + symbols__fixup_end(&dso->symbols); + symbols__fixup_duplicate(&dso->symbols); if (kmap) { /* * We need to fixup this here too because we create new * maps here, for things like vsyscall sections. */ - __map_groups__fixup_end(kmaps, map->type); + map_groups__fixup_end(kmaps); } } err = nr; @@ -1393,8 +1386,16 @@ static off_t kcore__write(struct kcore *kcore) struct phdr_data { off_t offset; + off_t rel; u64 addr; u64 len; + struct list_head node; + struct phdr_data *remaps; +}; + +struct sym_data { + u64 addr; + struct list_head node; }; struct kcore_copy_info { @@ -1404,16 +1405,78 @@ struct kcore_copy_info { u64 last_symbol; u64 first_module; u64 last_module_symbol; - struct phdr_data kernel_map; - struct phdr_data modules_map; + size_t phnum; + struct list_head phdrs; + struct list_head syms; }; +#define kcore_copy__for_each_phdr(k, p) \ + list_for_each_entry((p), &(k)->phdrs, node) + +static struct phdr_data *phdr_data__new(u64 addr, u64 len, off_t offset) +{ + struct phdr_data *p = zalloc(sizeof(*p)); + + if (p) { + p->addr = addr; + p->len = len; + p->offset = offset; + } + + return p; +} + +static struct phdr_data *kcore_copy_info__addnew(struct kcore_copy_info *kci, + u64 addr, u64 len, + off_t offset) +{ + struct phdr_data *p = phdr_data__new(addr, len, offset); + + if (p) + list_add_tail(&p->node, &kci->phdrs); + + return p; +} + +static void kcore_copy__free_phdrs(struct kcore_copy_info *kci) +{ + struct phdr_data *p, *tmp; + + list_for_each_entry_safe(p, tmp, &kci->phdrs, node) { + list_del(&p->node); + free(p); + } +} + +static struct sym_data *kcore_copy__new_sym(struct kcore_copy_info *kci, + u64 addr) +{ + struct sym_data *s = zalloc(sizeof(*s)); + + if (s) { + s->addr = addr; + list_add_tail(&s->node, &kci->syms); + } + + return s; +} + +static void kcore_copy__free_syms(struct kcore_copy_info *kci) +{ + struct sym_data *s, *tmp; + + list_for_each_entry_safe(s, tmp, &kci->syms, node) { + list_del(&s->node); + free(s); + } +} + static int kcore_copy__process_kallsyms(void *arg, const char *name, char type, u64 start) { 
struct kcore_copy_info *kci = arg; - if (!symbol_type__is_a(type, MAP__FUNCTION)) + if (!kallsyms__is_function(type)) return 0; if (strchr(name, '[')) { @@ -1438,6 +1501,9 @@ static int kcore_copy__process_kallsyms(void *arg, const char *name, char type, return 0; } + if (is_entry_trampoline(name) && !kcore_copy__new_sym(kci, start)) + return -1; + return 0; } @@ -1487,27 +1553,39 @@ static int kcore_copy__parse_modules(struct kcore_copy_info *kci, return 0; } -static void kcore_copy__map(struct phdr_data *p, u64 start, u64 end, u64 pgoff, - u64 s, u64 e) +static int kcore_copy__map(struct kcore_copy_info *kci, u64 start, u64 end, + u64 pgoff, u64 s, u64 e) { - if (p->addr || s < start || s >= end) - return; + u64 len, offset; + + if (s < start || s >= end) + return 0; - p->addr = s; - p->offset = (s - start) + pgoff; - p->len = e < end ? e - s : end - s; + offset = (s - start) + pgoff; + len = e < end ? e - s : end - s; + + return kcore_copy_info__addnew(kci, s, len, offset) ? 0 : -1; } static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data) { struct kcore_copy_info *kci = data; u64 end = start + len; + struct sym_data *sdat; - kcore_copy__map(&kci->kernel_map, start, end, pgoff, kci->stext, - kci->etext); + if (kcore_copy__map(kci, start, end, pgoff, kci->stext, kci->etext)) + return -1; - kcore_copy__map(&kci->modules_map, start, end, pgoff, kci->first_module, - kci->last_module_symbol); + if (kcore_copy__map(kci, start, end, pgoff, kci->first_module, + kci->last_module_symbol)) + return -1; + + list_for_each_entry(sdat, &kci->syms, node) { + u64 s = round_down(sdat->addr, page_size); + + if (kcore_copy__map(kci, start, end, pgoff, s, s + len)) + return -1; + } return 0; } @@ -1520,6 +1598,64 @@ static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf) return 0; } +static void kcore_copy__find_remaps(struct kcore_copy_info *kci) +{ + struct phdr_data *p, *k = NULL; + u64 kend; + + if (!kci->stext) + return; + + /* Find phdr that corresponds to the kernel map (contains stext) */ + kcore_copy__for_each_phdr(kci, p) { + u64 pend = p->addr + p->len - 1; + + if (p->addr <= kci->stext && pend >= kci->stext) { + k = p; + break; + } + } + + if (!k) + return; + + kend = k->offset + k->len; + + /* Find phdrs that remap the kernel */ + kcore_copy__for_each_phdr(kci, p) { + u64 pend = p->offset + p->len; + + if (p == k) + continue; + + if (p->offset >= k->offset && pend <= kend) + p->remaps = k; + } +} + +static void kcore_copy__layout(struct kcore_copy_info *kci) +{ + struct phdr_data *p; + off_t rel = 0; + + kcore_copy__find_remaps(kci); + + kcore_copy__for_each_phdr(kci, p) { + if (!p->remaps) { + p->rel = rel; + rel += p->len; + } + kci->phnum += 1; + } + + kcore_copy__for_each_phdr(kci, p) { + struct phdr_data *k = p->remaps; + + if (k) + p->rel = p->offset - k->offset + k->rel; + } +} + static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir, Elf *elf) { @@ -1555,7 +1691,12 @@ static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir, if (kci->first_module && !kci->last_module_symbol) return -1; - return kcore_copy__read_maps(kci, elf); + if (kcore_copy__read_maps(kci, elf)) + return -1; + + kcore_copy__layout(kci); + + return 0; } static int kcore_copy__copy_file(const char *from_dir, const char *to_dir, @@ -1678,12 +1819,15 @@ int kcore_copy(const char *from_dir, const char *to_dir) { struct kcore kcore; struct kcore extract; - size_t count = 2; int idx = 0, err = -1; - off_t offset = page_size, sz, modules_offset = 
0; + off_t offset, sz; struct kcore_copy_info kci = { .stext = 0, }; char kcore_filename[PATH_MAX]; char extract_filename[PATH_MAX]; + struct phdr_data *p; + + INIT_LIST_HEAD(&kci.phdrs); + INIT_LIST_HEAD(&kci.syms); if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms")) return -1; @@ -1703,20 +1847,17 @@ int kcore_copy(const char *from_dir, const char *to_dir) if (kcore__init(&extract, extract_filename, kcore.elfclass, false)) goto out_kcore_close; - if (!kci.modules_map.addr) - count -= 1; - - if (kcore__copy_hdr(&kcore, &extract, count)) + if (kcore__copy_hdr(&kcore, &extract, kci.phnum)) goto out_extract_close; - if (kcore__add_phdr(&extract, idx++, offset, kci.kernel_map.addr, - kci.kernel_map.len)) - goto out_extract_close; + offset = gelf_fsize(extract.elf, ELF_T_EHDR, 1, EV_CURRENT) + + gelf_fsize(extract.elf, ELF_T_PHDR, kci.phnum, EV_CURRENT); + offset = round_up(offset, page_size); + + kcore_copy__for_each_phdr(&kci, p) { + off_t offs = p->rel + offset; - if (kci.modules_map.addr) { - modules_offset = offset + kci.kernel_map.len; - if (kcore__add_phdr(&extract, idx, modules_offset, - kci.modules_map.addr, kci.modules_map.len)) + if (kcore__add_phdr(&extract, idx++, offs, p->addr, p->len)) goto out_extract_close; } @@ -1724,14 +1865,14 @@ int kcore_copy(const char *from_dir, const char *to_dir) if (sz < 0 || sz > offset) goto out_extract_close; - if (copy_bytes(kcore.fd, kci.kernel_map.offset, extract.fd, offset, - kci.kernel_map.len)) - goto out_extract_close; + kcore_copy__for_each_phdr(&kci, p) { + off_t offs = p->rel + offset; - if (modules_offset && copy_bytes(kcore.fd, kci.modules_map.offset, - extract.fd, modules_offset, - kci.modules_map.len)) - goto out_extract_close; + if (p->remaps) + continue; + if (copy_bytes(kcore.fd, p->offset, extract.fd, offs, p->len)) + goto out_extract_close; + } if (kcore_copy__compare_file(from_dir, to_dir, "modules")) goto out_extract_close; @@ -1754,6 +1895,9 @@ out_unlink_kallsyms: if (err) kcore_copy__unlink(to_dir, "kallsyms"); + kcore_copy__free_phdrs(&kci); + kcore_copy__free_syms(&kci); + return err; } diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c index ff48d0d49584..7119df77dc0b 100644 --- a/tools/perf/util/symbol-minimal.c +++ b/tools/perf/util/symbol-minimal.c @@ -288,8 +288,7 @@ void symsrc__destroy(struct symsrc *ss) } int dso__synthesize_plt_symbols(struct dso *dso __maybe_unused, - struct symsrc *ss __maybe_unused, - struct map *map __maybe_unused) + struct symsrc *ss __maybe_unused) { return 0; } diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 1466814ebada..8c84437f2a10 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -5,6 +5,7 @@ #include <stdio.h> #include <string.h> #include <linux/kernel.h> +#include <linux/mman.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/param.h> @@ -70,18 +71,10 @@ static enum dso_binary_type binary_type_symtab[] = { #define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab) -bool symbol_type__is_a(char symbol_type, enum map_type map_type) +static bool symbol_type__filter(char symbol_type) { symbol_type = toupper(symbol_type); - - switch (map_type) { - case MAP__FUNCTION: - return symbol_type == 'T' || symbol_type == 'W'; - case MAP__VARIABLE: - return symbol_type == 'D'; - default: - return false; - } + return symbol_type == 'T' || symbol_type == 'W' || symbol_type == 'D'; } static int prefix_underscores_count(const char *str) @@ -228,9 +221,9 @@ void symbols__fixup_end(struct rb_root *symbols) 
curr->end = roundup(curr->start, 4096) + 4096; } -void __map_groups__fixup_end(struct map_groups *mg, enum map_type type) +void map_groups__fixup_end(struct map_groups *mg) { - struct maps *maps = &mg->maps[type]; + struct maps *maps = &mg->maps; struct map *next, *curr; down_write(&maps->lock); @@ -256,7 +249,7 @@ out_unlock: up_write(&maps->lock); } -struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name) +struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name) { size_t namelen = strlen(name) + 1; struct symbol *sym = calloc(1, (symbol_conf.priv_size + @@ -274,6 +267,7 @@ struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name) sym->start = start; sym->end = len ? start + len : start; + sym->type = type; sym->binding = binding; sym->namelen = namelen - 1; @@ -484,45 +478,40 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols, void dso__reset_find_symbol_cache(struct dso *dso) { - enum map_type type; - - for (type = MAP__FUNCTION; type <= MAP__VARIABLE; ++type) { - dso->last_find_result[type].addr = 0; - dso->last_find_result[type].symbol = NULL; - } + dso->last_find_result.addr = 0; + dso->last_find_result.symbol = NULL; } -void dso__insert_symbol(struct dso *dso, enum map_type type, struct symbol *sym) +void dso__insert_symbol(struct dso *dso, struct symbol *sym) { - __symbols__insert(&dso->symbols[type], sym, dso->kernel); + __symbols__insert(&dso->symbols, sym, dso->kernel); /* update the symbol cache if necessary */ - if (dso->last_find_result[type].addr >= sym->start && - (dso->last_find_result[type].addr < sym->end || + if (dso->last_find_result.addr >= sym->start && + (dso->last_find_result.addr < sym->end || sym->start == sym->end)) { - dso->last_find_result[type].symbol = sym; + dso->last_find_result.symbol = sym; } } -struct symbol *dso__find_symbol(struct dso *dso, - enum map_type type, u64 addr) +struct symbol *dso__find_symbol(struct dso *dso, u64 addr) { - if (dso->last_find_result[type].addr != addr || dso->last_find_result[type].symbol == NULL) { - dso->last_find_result[type].addr = addr; - dso->last_find_result[type].symbol = symbols__find(&dso->symbols[type], addr); + if (dso->last_find_result.addr != addr || dso->last_find_result.symbol == NULL) { + dso->last_find_result.addr = addr; + dso->last_find_result.symbol = symbols__find(&dso->symbols, addr); } - return dso->last_find_result[type].symbol; + return dso->last_find_result.symbol; } -struct symbol *dso__first_symbol(struct dso *dso, enum map_type type) +struct symbol *dso__first_symbol(struct dso *dso) { - return symbols__first(&dso->symbols[type]); + return symbols__first(&dso->symbols); } -struct symbol *dso__last_symbol(struct dso *dso, enum map_type type) +struct symbol *dso__last_symbol(struct dso *dso) { - return symbols__last(&dso->symbols[type]); + return symbols__last(&dso->symbols); } struct symbol *dso__next_symbol(struct symbol *sym) @@ -539,24 +528,22 @@ struct symbol *symbol__next_by_name(struct symbol *sym) } /* - * Teturns first symbol that matched with @name. + * Returns first symbol that matched with @name. 
+ * Returns first symbol that matched with @name.
*/ -struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, - const char *name) +struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name) { - struct symbol *s = symbols__find_by_name(&dso->symbol_names[type], name, + struct symbol *s = symbols__find_by_name(&dso->symbol_names, name, SYMBOL_TAG_INCLUDE__NONE); if (!s) - s = symbols__find_by_name(&dso->symbol_names[type], name, + s = symbols__find_by_name(&dso->symbol_names, name, SYMBOL_TAG_INCLUDE__DEFAULT_ONLY); return s; } -void dso__sort_by_name(struct dso *dso, enum map_type type) +void dso__sort_by_name(struct dso *dso) { - dso__set_sorted_by_name(dso, type); - return symbols__sort_by_name(&dso->symbol_names[type], - &dso->symbols[type]); + dso__set_sorted_by_name(dso); + return symbols__sort_by_name(&dso->symbol_names, &dso->symbols); } int modules__parse(const char *filename, void *arg, @@ -621,11 +608,6 @@ out: return err; } -struct process_kallsyms_args { - struct map *map; - struct dso *dso; -}; - /* * These are symbols in the kernel image, so make sure that * sym is from a kernel DSO. @@ -661,10 +643,10 @@ static int map__process_kallsym_symbol(void *arg, const char *name, char type, u64 start) { struct symbol *sym; - struct process_kallsyms_args *a = arg; - struct rb_root *root = &a->dso->symbols[a->map->type]; + struct dso *dso = arg; + struct rb_root *root = &dso->symbols; - if (!symbol_type__is_a(type, a->map->type)) + if (!symbol_type__filter(type)) return 0; /* @@ -672,7 +654,7 @@ static int map__process_kallsym_symbol(void *arg, const char *name, * symbols, setting length to 0, and rely on * symbols__fixup_end() to fix it up. */ - sym = symbol__new(start, 0, kallsyms2elf_binding(type), name); + sym = symbol__new(start, 0, kallsyms2elf_binding(type), kallsyms2elf_type(type), name); if (sym == NULL) return -ENOMEM; /* @@ -689,21 +671,18 @@ static int map__process_kallsym_symbol(void *arg, const char *name, * so that we can in the next step set the symbol ->end address and then * call kernel_maps__split_kallsyms. 
*/ -static int dso__load_all_kallsyms(struct dso *dso, const char *filename, - struct map *map) +static int dso__load_all_kallsyms(struct dso *dso, const char *filename) { - struct process_kallsyms_args args = { .map = map, .dso = dso, }; - return kallsyms__parse(filename, &args, map__process_kallsym_symbol); + return kallsyms__parse(filename, dso, map__process_kallsym_symbol); } -static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map) +static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct dso *dso) { - struct map_groups *kmaps = map__kmaps(map); struct map *curr_map; struct symbol *pos; int count = 0; - struct rb_root old_root = dso->symbols[map->type]; - struct rb_root *root = &dso->symbols[map->type]; + struct rb_root old_root = dso->symbols; + struct rb_root *root = &dso->symbols; struct rb_node *next = rb_first(root); if (!kmaps) @@ -723,7 +702,7 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map) if (module) *module = '\0'; - curr_map = map_groups__find(kmaps, map->type, pos->start); + curr_map = map_groups__find(kmaps, pos->start); if (!curr_map) { symbol__delete(pos); @@ -733,7 +712,7 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map) pos->start -= curr_map->start - curr_map->pgoff; if (pos->end) pos->end -= curr_map->start - curr_map->pgoff; - symbols__insert(&curr_map->dso->symbols[curr_map->type], pos); + symbols__insert(&curr_map->dso->symbols, pos); ++count; } @@ -748,22 +727,25 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map) * kernel range is broken in several maps, named [kernel].N, as we don't have * the original ELF section names vmlinux have. */ -static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta) +static int map_groups__split_kallsyms(struct map_groups *kmaps, struct dso *dso, u64 delta, + struct map *initial_map) { - struct map_groups *kmaps = map__kmaps(map); struct machine *machine; - struct map *curr_map = map; + struct map *curr_map = initial_map; struct symbol *pos; int count = 0, moved = 0; - struct rb_root *root = &dso->symbols[map->type]; + struct rb_root *root = &dso->symbols; struct rb_node *next = rb_first(root); int kernel_range = 0; + bool x86_64; if (!kmaps) return -1; machine = kmaps->machine; + x86_64 = machine__is(machine, "x86_64"); + while (next) { char *module; @@ -778,7 +760,7 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta) *module++ = '\0'; if (strcmp(curr_map->dso->short_name, module)) { - if (curr_map != map && + if (curr_map != initial_map && dso->kernel == DSO_TYPE_GUEST_KERNEL && machine__is_default_guest(machine)) { /* @@ -788,18 +770,16 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta) * symbols are in its kmap. Mark it as * loaded. */ - dso__set_loaded(curr_map->dso, - curr_map->type); + dso__set_loaded(curr_map->dso); } - curr_map = map_groups__find_by_name(kmaps, - map->type, module); + curr_map = map_groups__find_by_name(kmaps, module); if (curr_map == NULL) { pr_debug("%s/proc/{kallsyms,modules} " "inconsistency while looking " "for \"%s\" module!\n", machine->root_dir, module); - curr_map = map; + curr_map = initial_map; goto discard_symbol; } @@ -809,11 +789,21 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta) } /* * So that we look just like we get from .ko files, - * i.e. not prelinked, relative to map->start. + * i.e. not prelinked, relative to initial_map->start. 
*/ pos->start = curr_map->map_ip(curr_map, pos->start); pos->end = curr_map->map_ip(curr_map, pos->end); - } else if (curr_map != map) { + } else if (x86_64 && is_entry_trampoline(pos->name)) { + /* + * These symbols are not needed anymore since the + * trampoline maps refer to the text section and it's + * symbols instead. Avoid having to deal with + * relocations, and the assumption that the first symbol + * is the start of kernel text, by simply removing the + * symbols at this point. + */ + goto discard_symbol; + } else if (curr_map != initial_map) { char dso_name[PATH_MAX]; struct dso *ndso; @@ -824,7 +814,7 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta) } if (count == 0) { - curr_map = map; + curr_map = initial_map; goto add_symbol; } @@ -843,7 +833,7 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta) ndso->kernel = dso->kernel; - curr_map = map__new2(pos->start, ndso, map->type); + curr_map = map__new2(pos->start, ndso); if (curr_map == NULL) { dso__put(ndso); return -1; @@ -858,9 +848,9 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta) pos->end -= delta; } add_symbol: - if (curr_map != map) { + if (curr_map != initial_map) { rb_erase(&pos->rb_node, root); - symbols__insert(&curr_map->dso->symbols[curr_map->type], pos); + symbols__insert(&curr_map->dso->symbols, pos); ++moved; } else ++count; @@ -871,10 +861,10 @@ discard_symbol: symbol__delete(pos); } - if (curr_map != map && + if (curr_map != initial_map && dso->kernel == DSO_TYPE_GUEST_KERNEL && machine__is_default_guest(kmaps->machine)) { - dso__set_loaded(curr_map->dso, curr_map->type); + dso__set_loaded(curr_map->dso); } return count + moved; @@ -1035,7 +1025,12 @@ out_delete_from: return ret; } -static int do_validate_kcore_modules(const char *filename, struct map *map, +struct map *map_groups__first(struct map_groups *mg) +{ + return maps__first(&mg->maps); +} + +static int do_validate_kcore_modules(const char *filename, struct map_groups *kmaps) { struct rb_root modules = RB_ROOT; @@ -1046,13 +1041,12 @@ static int do_validate_kcore_modules(const char *filename, struct map *map, if (err) return err; - old_map = map_groups__first(kmaps, map->type); + old_map = map_groups__first(kmaps); while (old_map) { struct map *next = map_groups__next(old_map); struct module_info *mi; - if (old_map == map || old_map->start == map->start) { - /* The kernel map */ + if (!__map__is_kmodule(old_map)) { old_map = next; continue; } @@ -1109,7 +1103,7 @@ static int validate_kcore_modules(const char *kallsyms_filename, kallsyms_filename)) return -EINVAL; - if (do_validate_kcore_modules(modules_filename, map, kmaps)) + if (do_validate_kcore_modules(modules_filename, kmaps)) return -EINVAL; return 0; @@ -1138,7 +1132,6 @@ static int validate_kcore_addresses(const char *kallsyms_filename, struct kcore_mapfn_data { struct dso *dso; - enum map_type type; struct list_head maps; }; @@ -1147,7 +1140,7 @@ static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data) struct kcore_mapfn_data *md = data; struct map *map; - map = map__new2(start, md->dso, md->type); + map = map__new2(start, md->dso); if (map == NULL) return -ENOMEM; @@ -1163,13 +1156,13 @@ static int dso__load_kcore(struct dso *dso, struct map *map, const char *kallsyms_filename) { struct map_groups *kmaps = map__kmaps(map); - struct machine *machine; struct kcore_mapfn_data md; struct map *old_map, *new_map, *replacement_map = NULL; + struct machine *machine; bool is_64_bit; int err, fd; char 
kcore_filename[PATH_MAX]; - struct symbol *sym; + u64 stext; if (!kmaps) return -EINVAL; @@ -1177,7 +1170,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map, machine = kmaps->machine; /* This function requires that the map is the kernel map */ - if (map != machine->vmlinux_maps[map->type]) + if (!__map__is_kernel(map)) return -EINVAL; if (!filename_from_kallsyms_filename(kcore_filename, "kcore", @@ -1189,7 +1182,6 @@ static int dso__load_kcore(struct dso *dso, struct map *map, return -EINVAL; md.dso = dso; - md.type = map->type; INIT_LIST_HEAD(&md.maps); fd = open(kcore_filename, O_RDONLY); @@ -1200,7 +1192,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map, } /* Read new maps into temporary lists */ - err = file__read_maps(fd, md.type == MAP__FUNCTION, kcore_mapfn, &md, + err = file__read_maps(fd, map->prot & PROT_EXEC, kcore_mapfn, &md, &is_64_bit); if (err) goto out_err; @@ -1212,7 +1204,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map, } /* Remove old maps */ - old_map = map_groups__first(kmaps, map->type); + old_map = map_groups__first(kmaps); while (old_map) { struct map *next = map_groups__next(old_map); @@ -1220,14 +1212,15 @@ static int dso__load_kcore(struct dso *dso, struct map *map, map_groups__remove(kmaps, old_map); old_map = next; } + machine->trampolines_mapped = false; - /* Find the kernel map using the first symbol */ - sym = dso__first_symbol(dso, map->type); - list_for_each_entry(new_map, &md.maps, node) { - if (sym && sym->start >= new_map->start && - sym->start < new_map->end) { - replacement_map = new_map; - break; + /* Find the kernel map using the '_stext' symbol */ + if (!kallsyms__get_function_start(kallsyms_filename, "_stext", &stext)) { + list_for_each_entry(new_map, &md.maps, node) { + if (stext >= new_map->start && stext < new_map->end) { + replacement_map = new_map; + break; + } } } @@ -1256,6 +1249,19 @@ static int dso__load_kcore(struct dso *dso, struct map *map, map__put(new_map); } + if (machine__is(machine, "x86_64")) { + u64 addr; + + /* + * If one of the corresponding symbols is there, assume the + * entry trampoline maps are too. + */ + if (!kallsyms__get_function_start(kallsyms_filename, + ENTRY_TRAMPOLINE_NAME, + &addr)) + machine->trampolines_mapped = true; + } + /* * Set the data type and long name so that kcore can be read via * dso__data_read_addr(). @@ -1268,7 +1274,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map, close(fd); - if (map->type == MAP__FUNCTION) + if (map->prot & PROT_EXEC) pr_debug("Using %s for kernel object code\n", kcore_filename); else pr_debug("Using %s for kernel data\n", kcore_filename); @@ -1289,14 +1295,10 @@ out_err: * If the kernel is relocated at boot time, kallsyms won't match. Compute the * delta based on the relocation reference symbol. 
*/ -static int kallsyms__delta(struct map *map, const char *filename, u64 *delta) +static int kallsyms__delta(struct kmap *kmap, const char *filename, u64 *delta) { - struct kmap *kmap = map__kmap(map); u64 addr; - if (!kmap) - return -1; - if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name) return 0; @@ -1310,19 +1312,23 @@ static int kallsyms__delta(struct map *map, const char *filename, u64 *delta) int __dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map, bool no_kcore) { + struct kmap *kmap = map__kmap(map); u64 delta = 0; if (symbol__restricted_filename(filename, "/proc/kallsyms")) return -1; - if (dso__load_all_kallsyms(dso, filename, map) < 0) + if (!kmap || !kmap->kmaps) return -1; - if (kallsyms__delta(map, filename, &delta)) + if (dso__load_all_kallsyms(dso, filename) < 0) return -1; - symbols__fixup_end(&dso->symbols[map->type]); - symbols__fixup_duplicate(&dso->symbols[map->type]); + if (kallsyms__delta(kmap, filename, &delta)) + return -1; + + symbols__fixup_end(&dso->symbols); + symbols__fixup_duplicate(&dso->symbols); if (dso->kernel == DSO_TYPE_GUEST_KERNEL) dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS; @@ -1330,9 +1336,9 @@ int __dso__load_kallsyms(struct dso *dso, const char *filename, dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS; if (!no_kcore && !dso__load_kcore(dso, map, filename)) - return dso__split_kallsyms_for_kcore(dso, map); + return map_groups__split_kallsyms_for_kcore(kmap->kmaps, dso); else - return dso__split_kallsyms(dso, map, delta); + return map_groups__split_kallsyms(kmap->kmaps, dso, delta, map); } int dso__load_kallsyms(struct dso *dso, const char *filename, @@ -1341,8 +1347,7 @@ int dso__load_kallsyms(struct dso *dso, const char *filename, return __dso__load_kallsyms(dso, filename, map, false); } -static int dso__load_perf_map(const char *map_path, struct dso *dso, - struct map *map) +static int dso__load_perf_map(const char *map_path, struct dso *dso) { char *line = NULL; size_t n; @@ -1379,12 +1384,12 @@ static int dso__load_perf_map(const char *map_path, struct dso *dso, if (len + 2 >= line_len) continue; - sym = symbol__new(start, size, STB_GLOBAL, line + len); + sym = symbol__new(start, size, STB_GLOBAL, STT_FUNC, line + len); if (sym == NULL) goto out_delete_line; - symbols__insert(&dso->symbols[map->type], sym); + symbols__insert(&dso->symbols, sym); nr_syms++; } @@ -1509,25 +1514,27 @@ int dso__load(struct dso *dso, struct map *map) pthread_mutex_lock(&dso->lock); /* check again under the dso->lock */ - if (dso__loaded(dso, map->type)) { + if (dso__loaded(dso)) { ret = 1; goto out; } + if (map->groups && map->groups->machine) + machine = map->groups->machine; + else + machine = NULL; + if (dso->kernel) { if (dso->kernel == DSO_TYPE_KERNEL) ret = dso__load_kernel_sym(dso, map); else if (dso->kernel == DSO_TYPE_GUEST_KERNEL) ret = dso__load_guest_kernel_sym(dso, map); + if (machine__is(machine, "x86_64")) + machine__map_x86_64_entry_trampolines(machine, dso); goto out; } - if (map->groups && map->groups->machine) - machine = map->groups->machine; - else - machine = NULL; - dso->adjust_symbols = 0; if (perfmap) { @@ -1542,7 +1549,7 @@ int dso__load(struct dso *dso, struct map *map) goto out; } - ret = dso__load_perf_map(map_path, dso, map); + ret = dso__load_perf_map(map_path, dso); dso->symtab_type = ret > 0 ? 
DSO_BINARY_TYPE__JAVA_JIT : DSO_BINARY_TYPE__NOT_FOUND; goto out; @@ -1651,7 +1658,7 @@ int dso__load(struct dso *dso, struct map *map) if (ret > 0) { int nr_plt; - nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss, map); + nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss); if (nr_plt > 0) ret += nr_plt; } @@ -1663,17 +1670,16 @@ out_free: if (ret < 0 && strstr(dso->name, " (deleted)") != NULL) ret = 0; out: - dso__set_loaded(dso, map->type); + dso__set_loaded(dso); pthread_mutex_unlock(&dso->lock); nsinfo__mountns_exit(&nsc); return ret; } -struct map *map_groups__find_by_name(struct map_groups *mg, - enum map_type type, const char *name) +struct map *map_groups__find_by_name(struct map_groups *mg, const char *name) { - struct maps *maps = &mg->maps[type]; + struct maps *maps = &mg->maps; struct map *map; down_read(&maps->lock); @@ -1720,7 +1726,7 @@ int dso__load_vmlinux(struct dso *dso, struct map *map, else dso->binary_type = DSO_BINARY_TYPE__VMLINUX; dso__set_long_name(dso, vmlinux, vmlinux_allocated); - dso__set_loaded(dso, map->type); + dso__set_loaded(dso); pr_debug("Using %s for symbols\n", symfs_vmlinux); } diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 70c16741f50a..1a16438eb3ce 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -57,7 +57,8 @@ struct symbol { u64 start; u64 end; u16 namelen; - u8 binding; + u8 type:4; + u8 binding:4; u8 idle:1; u8 ignore:1; u8 inlined:1; @@ -259,17 +260,16 @@ int __dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map, bool no_kcore); int dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map); -void dso__insert_symbol(struct dso *dso, enum map_type type, +void dso__insert_symbol(struct dso *dso, struct symbol *sym); -struct symbol *dso__find_symbol(struct dso *dso, enum map_type type, - u64 addr); -struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, - const char *name); +struct symbol *dso__find_symbol(struct dso *dso, u64 addr); +struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name); + struct symbol *symbol__next_by_name(struct symbol *sym); -struct symbol *dso__first_symbol(struct dso *dso, enum map_type type); -struct symbol *dso__last_symbol(struct dso *dso, enum map_type type); +struct symbol *dso__first_symbol(struct dso *dso); +struct symbol *dso__last_symbol(struct dso *dso); struct symbol *dso__next_symbol(struct symbol *sym); enum dso_type dso__type_fd(int fd); @@ -288,7 +288,7 @@ void symbol__exit(void); void symbol__elf_init(void); int symbol__annotation_init(void); -struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name); +struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name); size_t __symbol__fprintf_symname_offs(const struct symbol *sym, const struct addr_location *al, bool unknown_as_addr, @@ -300,7 +300,6 @@ size_t __symbol__fprintf_symname(const struct symbol *sym, bool unknown_as_addr, FILE *fp); size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp); size_t symbol__fprintf(struct symbol *sym, FILE *fp); -bool symbol_type__is_a(char symbol_type, enum map_type map_type); bool symbol__restricted_filename(const char *filename, const char *restricted_filename); int symbol__config_symfs(const struct option *opt __maybe_unused, @@ -308,8 +307,7 @@ int symbol__config_symfs(const struct option *opt __maybe_unused, int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, struct symsrc *runtime_ss, int kmodule); -int 
dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, - struct map *map); +int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss); char *dso__demangle_sym(struct dso *dso, int kmodule, const char *elf_name); @@ -317,7 +315,7 @@ void __symbols__insert(struct rb_root *symbols, struct symbol *sym, bool kernel) void symbols__insert(struct rb_root *symbols, struct symbol *sym); void symbols__fixup_duplicate(struct rb_root *symbols); void symbols__fixup_end(struct rb_root *symbols); -void __map_groups__fixup_end(struct map_groups *mg, enum map_type type); +void map_groups__fixup_end(struct map_groups *mg); typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data); int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data, diff --git a/tools/perf/util/symbol_fprintf.c b/tools/perf/util/symbol_fprintf.c index 6dd2cb88ccbe..ed0205cc7942 100644 --- a/tools/perf/util/symbol_fprintf.c +++ b/tools/perf/util/symbol_fprintf.c @@ -58,13 +58,13 @@ size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp) } size_t dso__fprintf_symbols_by_name(struct dso *dso, - enum map_type type, FILE *fp) + FILE *fp) { size_t ret = 0; struct rb_node *nd; struct symbol_name_rb_node *pos; - for (nd = rb_first(&dso->symbol_names[type]); nd; nd = rb_next(nd)) { + for (nd = rb_first(&dso->symbol_names); nd; nd = rb_next(nd)) { pos = rb_entry(nd, struct symbol_name_rb_node, rb_node); fprintf(fp, "%s\n", pos->sym.name); } diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 68b65b10579b..2048d393ece6 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -302,23 +302,20 @@ int thread__insert_map(struct thread *thread, struct map *map) static int __thread__prepare_access(struct thread *thread) { bool initialized = false; - int i, err = 0; - - for (i = 0; i < MAP__NR_TYPES; ++i) { - struct maps *maps = &thread->mg->maps[i]; - struct map *map; + int err = 0; + struct maps *maps = &thread->mg->maps; + struct map *map; - down_read(&maps->lock); + down_read(&maps->lock); - for (map = maps__first(maps); map; map = map__next(map)) { - err = unwind__prepare_access(thread, map, &initialized); - if (err || initialized) - break; - } - - up_read(&maps->lock); + for (map = maps__first(maps); map; map = map__next(map)) { + err = unwind__prepare_access(thread, map, &initialized); + if (err || initialized) + break; } + up_read(&maps->lock); + return err; } @@ -335,8 +332,6 @@ static int thread__prepare_access(struct thread *thread) static int thread__clone_map_groups(struct thread *thread, struct thread *parent) { - int i; - /* This is new thread, we share map groups for process. */ if (thread->pid_ == parent->pid_) return thread__prepare_access(thread); @@ -348,9 +343,8 @@ static int thread__clone_map_groups(struct thread *thread, } /* But this one is new process, copy maps. 
*/ - for (i = 0; i < MAP__NR_TYPES; ++i) - if (map_groups__clone(thread, parent->mg, i) < 0) - return -ENOMEM; + if (map_groups__clone(thread, parent->mg) < 0) + return -ENOMEM; return 0; } @@ -371,8 +365,7 @@ int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp) return thread__clone_map_groups(thread, parent); } -void thread__find_cpumode_addr_location(struct thread *thread, - enum map_type type, u64 addr, +void thread__find_cpumode_addr_location(struct thread *thread, u64 addr, struct addr_location *al) { size_t i; @@ -384,7 +377,7 @@ void thread__find_cpumode_addr_location(struct thread *thread, }; for (i = 0; i < ARRAY_SIZE(cpumodes); i++) { - thread__find_addr_location(thread, cpumodes[i], type, addr, al); + thread__find_symbol(thread, cpumodes[i], addr, al); if (al->map) break; } diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 14d44c3235b8..07606aa6998d 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -92,16 +92,13 @@ size_t thread__fprintf(struct thread *thread, FILE *fp); struct thread *thread__main_thread(struct machine *machine, struct thread *thread); -void thread__find_addr_map(struct thread *thread, - u8 cpumode, enum map_type type, u64 addr, - struct addr_location *al); +struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr, + struct addr_location *al); -void thread__find_addr_location(struct thread *thread, - u8 cpumode, enum map_type type, u64 addr, - struct addr_location *al); +struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode, + u64 addr, struct addr_location *al); -void thread__find_cpumode_addr_location(struct thread *thread, - enum map_type type, u64 addr, +void thread__find_cpumode_addr_location(struct thread *thread, u64 addr, struct addr_location *al); static inline void *thread__priv(struct thread *thread) diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c index d7f2113462fb..c85d0d1a65ed 100644 --- a/tools/perf/util/trace-event-info.c +++ b/tools/perf/util/trace-event-info.c @@ -103,11 +103,10 @@ out: static int record_header_files(void) { - char *path; + char *path = get_events_file("header_page"); struct stat st; int err = -EIO; - path = get_tracing_file("events/header_page"); if (!path) { pr_debug("can't get tracing/events/header_page"); return -ENOMEM; @@ -128,9 +127,9 @@ static int record_header_files(void) goto out; } - put_tracing_file(path); + put_events_file(path); - path = get_tracing_file("events/header_event"); + path = get_events_file("header_event"); if (!path) { pr_debug("can't get tracing/events/header_event"); err = -ENOMEM; @@ -154,7 +153,7 @@ static int record_header_files(void) err = 0; out: - put_tracing_file(path); + put_events_file(path); return err; } @@ -243,7 +242,7 @@ static int record_ftrace_files(struct tracepoint_path *tps) char *path; int ret; - path = get_tracing_file("events/ftrace"); + path = get_events_file("ftrace"); if (!path) { pr_debug("can't get tracing/events/ftrace"); return -ENOMEM; diff --git a/tools/perf/util/trace-event.c b/tools/perf/util/trace-event.c index 16a776371d03..1aa368603268 100644 --- a/tools/perf/util/trace-event.c +++ b/tools/perf/util/trace-event.c @@ -75,6 +75,7 @@ void trace_event__cleanup(struct trace_event *t) static struct event_format* tp_format(const char *sys, const char *name) { + char *tp_dir = get_events_file(sys); struct pevent *pevent = tevent.pevent; struct event_format *event = NULL; char path[PATH_MAX]; @@ -82,8 +83,11 @@ tp_format(const char *sys, 
const char *name) char *data; int err; - scnprintf(path, PATH_MAX, "%s/%s/%s/format", - tracing_events_path, sys, name); + if (!tp_dir) + return ERR_PTR(-errno); + + scnprintf(path, PATH_MAX, "%s/%s/format", tp_dir, name); + put_events_file(tp_dir); err = filename__read_str(path, &data, &size); if (err) diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c index 7bdd239c795c..538db4e5d1e6 100644 --- a/tools/perf/util/unwind-libdw.c +++ b/tools/perf/util/unwind-libdw.c @@ -28,10 +28,11 @@ static int __report_module(struct addr_location *al, u64 ip, { Dwfl_Module *mod; struct dso *dso = NULL; - - thread__find_addr_location(ui->thread, - PERF_RECORD_MISC_USER, - MAP__FUNCTION, ip, al); + /* + * Some callers will use al->sym, so we can't just use the + * cheaper thread__find_map() here. + */ + thread__find_symbol(ui->thread, PERF_RECORD_MISC_USER, ip, al); if (al->map) dso = al->map->dso; @@ -103,19 +104,7 @@ static int access_dso_mem(struct unwind_info *ui, Dwarf_Addr addr, struct addr_location al; ssize_t size; - thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER, - MAP__FUNCTION, addr, &al); - if (!al.map) { - /* - * We've seen cases (softice) where DWARF unwinder went - * through non executable mmaps, which we need to lookup - * in MAP__VARIABLE tree. - */ - thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER, - MAP__VARIABLE, addr, &al); - } - - if (!al.map) { + if (!thread__find_map(ui->thread, PERF_RECORD_MISC_USER, addr, &al)) { pr_debug("unwind: no map for %lx\n", (unsigned long)addr); return -1; } diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c index af873044d33a..6a11bc7e6b27 100644 --- a/tools/perf/util/unwind-libunwind-local.c +++ b/tools/perf/util/unwind-libunwind-local.c @@ -366,19 +366,7 @@ static int read_unwind_spec_debug_frame(struct dso *dso, static struct map *find_map(unw_word_t ip, struct unwind_info *ui) { struct addr_location al; - - thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER, - MAP__FUNCTION, ip, &al); - if (!al.map) { - /* - * We've seen cases (softice) where DWARF unwinder went - * through non executable mmaps, which we need to lookup - * in MAP__VARIABLE tree. - */ - thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER, - MAP__VARIABLE, ip, &al); - } - return al.map; + return thread__find_map(ui->thread, PERF_RECORD_MISC_USER, ip, &al); } static int @@ -586,12 +574,9 @@ static int entry(u64 ip, struct thread *thread, struct unwind_entry e; struct addr_location al; - thread__find_addr_location(thread, PERF_RECORD_MISC_USER, - MAP__FUNCTION, ip, &al); - + e.sym = thread__find_symbol(thread, PERF_RECORD_MISC_USER, ip, &al); e.ip = al.addr; e.map = al.map; - e.sym = al.sym; pr_debug("unwind: %s:ip = 0x%" PRIx64 " (0x%" PRIx64 ")\n", al.sym ? 
al.sym->name : "''", diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c index 1019bbc5dbd8..eac5b858a371 100644 --- a/tools/perf/util/util.c +++ b/tools/perf/util/util.c @@ -38,11 +38,43 @@ void perf_set_multithreaded(void) } unsigned int page_size; -int cacheline_size; + +#ifdef _SC_LEVEL1_DCACHE_LINESIZE +#define cache_line_size(cacheline_sizep) *cacheline_sizep = sysconf(_SC_LEVEL1_DCACHE_LINESIZE) +#else +static void cache_line_size(int *cacheline_sizep) +{ + if (sysfs__read_int("devices/system/cpu/cpu0/cache/index0/coherency_line_size", cacheline_sizep)) + pr_debug("cannot determine cache line size"); +} +#endif + +int cacheline_size(void) +{ + static int size; + + if (!size) + cache_line_size(&size); + + return size; +} int sysctl_perf_event_max_stack = PERF_MAX_STACK_DEPTH; int sysctl_perf_event_max_contexts_per_stack = PERF_MAX_CONTEXTS_PER_STACK; +int sysctl__max_stack(void) +{ + int value; + + if (sysctl__read_int("kernel/perf_event_max_stack", &value) == 0) + sysctl_perf_event_max_stack = value; + + if (sysctl__read_int("kernel/perf_event_max_contexts_per_stack", &value) == 0) + sysctl_perf_event_max_contexts_per_stack = value; + + return sysctl_perf_event_max_stack; +} + bool test_attr__enabled; bool perf_host = true; diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h index c9626c206208..dc58254a2b69 100644 --- a/tools/perf/util/util.h +++ b/tools/perf/util/util.h @@ -43,7 +43,9 @@ size_t hex_width(u64 v); int hex2u64(const char *ptr, u64 *val); extern unsigned int page_size; -extern int cacheline_size; +int __pure cacheline_size(void); + +int sysctl__max_stack(void); int fetch_kernel_version(unsigned int *puint, char *str, size_t str_sz); diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c index 0acb1ec0e2f0..741af209b19d 100644 --- a/tools/perf/util/vdso.c +++ b/tools/perf/util/vdso.c @@ -139,12 +139,10 @@ static enum dso_type machine__thread_dso_type(struct machine *machine, struct thread *thread) { enum dso_type dso_type = DSO__TYPE_UNKNOWN; - struct map *map; - struct dso *dso; + struct map *map = map_groups__first(thread->mg); - map = map_groups__first(thread->mg, MAP__FUNCTION); for (; map ; map = map_groups__next(map)) { - dso = map->dso; + struct dso *dso = map->dso; if (!dso || dso->long_name[0] != '/') continue; dso_type = dso__type(dso, machine); |