Diffstat (limited to 'tools')
-rw-r--r-- tools/arch/alpha/include/uapi/asm/errno.h | 128
-rw-r--r-- tools/arch/arm/include/uapi/asm/kvm.h | 7
-rw-r--r-- tools/arch/arm64/include/uapi/asm/bpf_perf_event.h | 9
-rw-r--r-- tools/arch/arm64/include/uapi/asm/kvm.h | 7
-rw-r--r-- tools/arch/mips/include/asm/errno.h | 17
-rw-r--r-- tools/arch/mips/include/uapi/asm/errno.h | 130
-rw-r--r-- tools/arch/parisc/include/uapi/asm/errno.h | 128
-rw-r--r-- tools/arch/powerpc/include/uapi/asm/errno.h | 10
-rw-r--r-- tools/arch/powerpc/include/uapi/asm/kvm.h | 27
-rw-r--r-- tools/arch/s390/include/uapi/asm/bpf_perf_event.h | 9
-rw-r--r-- tools/arch/s390/include/uapi/asm/kvm.h | 9
-rw-r--r-- tools/arch/s390/include/uapi/asm/kvm_perf.h | 4
-rw-r--r-- tools/arch/s390/include/uapi/asm/perf_regs.h | 44
-rw-r--r-- tools/arch/s390/include/uapi/asm/ptrace.h | 457
-rw-r--r-- tools/arch/sparc/include/uapi/asm/errno.h | 118
-rw-r--r-- tools/arch/x86/include/asm/cpufeatures.h | 585
-rw-r--r-- tools/arch/x86/include/asm/disabled-features.h | 19
-rw-r--r-- tools/arch/x86/include/asm/required-features.h | 3
-rw-r--r-- tools/arch/x86/include/uapi/asm/errno.h | 1
-rw-r--r-- tools/bpf/Makefile | 29
-rw-r--r-- tools/bpf/bpf_jit_disasm.c | 14
-rw-r--r-- tools/bpf/bpftool/Documentation/Makefile | 35
-rw-r--r-- tools/bpf/bpftool/Documentation/bpftool-cgroup.rst | 118
-rw-r--r-- tools/bpf/bpftool/Documentation/bpftool-map.rst | 11
-rw-r--r-- tools/bpf/bpftool/Documentation/bpftool-prog.rst | 16
-rw-r--r-- tools/bpf/bpftool/Documentation/bpftool.rst | 12
-rw-r--r-- tools/bpf/bpftool/Makefile | 91
-rw-r--r-- tools/bpf/bpftool/bash-completion/bpftool | 78
-rw-r--r-- tools/bpf/bpftool/cgroup.c | 308
-rw-r--r-- tools/bpf/bpftool/common.c | 195
-rw-r--r-- tools/bpf/bpftool/jit_disasm.c | 23
-rw-r--r-- tools/bpf/bpftool/main.c | 52
-rw-r--r-- tools/bpf/bpftool/main.h | 14
-rw-r--r-- tools/bpf/bpftool/map.c | 19
-rw-r--r-- tools/bpf/bpftool/prog.c | 231
-rw-r--r-- tools/build/Makefile.feature | 4
-rw-r--r-- tools/build/feature/Makefile | 15
-rw-r--r-- tools/build/feature/test-all.c | 10
-rw-r--r-- tools/build/feature/test-disassembler-four-args.c | 15
-rw-r--r-- tools/build/feature/test-libopencsd.c | 8
-rw-r--r-- tools/build/feature/test-pthread-barrier.c | 12
-rw-r--r-- tools/gpio/gpio-event-mon.c | 10
-rw-r--r-- tools/hv/Makefile | 23
-rw-r--r-- tools/hv/hv_kvp_daemon.c | 70
-rw-r--r-- tools/include/asm-generic/bitops/find.h | 16
-rw-r--r-- tools/include/linux/compiler.h | 21
-rw-r--r-- tools/include/linux/kmemcheck.h | 1
-rw-r--r-- tools/include/linux/lockdep.h | 1
-rw-r--r-- tools/include/uapi/asm-generic/bpf_perf_event.h | 9
-rw-r--r-- tools/include/uapi/asm-generic/errno-base.h | 40
-rw-r--r-- tools/include/uapi/asm-generic/errno.h | 123
-rw-r--r-- tools/include/uapi/asm-generic/mman.h | 1
-rw-r--r-- tools/include/uapi/asm/bpf_perf_event.h | 7
-rw-r--r-- tools/include/uapi/drm/drm.h | 41
-rw-r--r-- tools/include/uapi/drm/i915_drm.h | 110
-rw-r--r-- tools/include/uapi/linux/bpf.h | 116
-rw-r--r-- tools/include/uapi/linux/bpf_common.h | 7
-rw-r--r-- tools/include/uapi/linux/bpf_perf_event.h | 6
-rw-r--r-- tools/include/uapi/linux/if_link.h | 944
-rw-r--r-- tools/include/uapi/linux/kcmp.h | 1
-rw-r--r-- tools/include/uapi/linux/kvm.h | 99
-rw-r--r-- tools/include/uapi/linux/netlink.h | 251
-rw-r--r-- tools/include/uapi/linux/perf_event.h | 33
-rw-r--r-- tools/include/uapi/linux/prctl.h | 10
-rw-r--r-- tools/include/uapi/linux/sched.h | 5
-rw-r--r-- tools/include/uapi/sound/asound.h | 9
-rwxr-xr-x tools/kvm/kvm_stat/kvm_stat | 74
-rw-r--r-- tools/kvm/kvm_stat/kvm_stat.txt | 4
-rw-r--r-- tools/lib/bpf/Build | 2
-rw-r--r-- tools/lib/bpf/Makefile | 30
-rw-r--r-- tools/lib/bpf/bpf.c | 135
-rw-r--r-- tools/lib/bpf/bpf.h | 4
-rw-r--r-- tools/lib/bpf/libbpf.c | 260
-rw-r--r-- tools/lib/bpf/libbpf.h | 6
-rw-r--r-- tools/lib/bpf/nlattr.c | 187
-rw-r--r-- tools/lib/bpf/nlattr.h | 72
-rw-r--r-- tools/lib/find_bit.c | 39
-rw-r--r-- tools/lib/subcmd/pager.c | 5
-rw-r--r-- tools/lib/traceevent/event-parse.c | 62
-rw-r--r-- tools/lib/traceevent/event-plugin.c | 24
-rw-r--r-- tools/lib/traceevent/kbuffer-parse.c | 4
-rw-r--r-- tools/lib/traceevent/parse-filter.c | 22
-rw-r--r-- tools/objtool/Makefile | 10
-rw-r--r-- tools/objtool/arch/x86/decode.c | 2
-rw-r--r-- tools/objtool/arch/x86/lib/x86-opcode-map.txt | 15
-rw-r--r-- tools/objtool/builtin-orc.c | 4
-rw-r--r-- tools/objtool/check.c | 190
-rw-r--r-- tools/objtool/check.h | 3
-rw-r--r-- tools/objtool/elf.c | 4
-rw-r--r-- tools/objtool/orc_dump.c | 7
-rw-r--r-- tools/objtool/orc_gen.c | 7
-rw-r--r-- tools/perf/.gitignore | 1
-rw-r--r-- tools/perf/Build | 4
-rw-r--r-- tools/perf/Documentation/perf-buildid-cache.txt | 3
-rw-r--r-- tools/perf/Documentation/perf-data.txt | 4
-rw-r--r-- tools/perf/Documentation/perf-evlist.txt | 4
-rw-r--r-- tools/perf/Documentation/perf-inject.txt | 4
-rw-r--r-- tools/perf/Documentation/perf-lock.txt | 4
-rw-r--r-- tools/perf/Documentation/perf-probe.txt | 18
-rw-r--r-- tools/perf/Documentation/perf-record.txt | 3
-rw-r--r-- tools/perf/Documentation/perf-report.txt | 37
-rw-r--r-- tools/perf/Documentation/perf-sched.txt | 4
-rw-r--r-- tools/perf/Documentation/perf-script.txt | 47
-rw-r--r-- tools/perf/Documentation/perf-timechart.txt | 4
-rw-r--r-- tools/perf/Documentation/perf-top.txt | 6
-rw-r--r-- tools/perf/Documentation/perf-trace.txt | 20
-rw-r--r-- tools/perf/Documentation/perf.data-file-format.txt | 27
-rw-r--r-- tools/perf/Documentation/tips.txt | 2
-rw-r--r-- tools/perf/Makefile.config | 74
-rw-r--r-- tools/perf/Makefile.perf | 17
-rw-r--r-- tools/perf/arch/arm/util/auxtrace.c | 77
-rw-r--r-- tools/perf/arch/arm/util/pmu.c | 6
-rw-r--r-- tools/perf/arch/arm64/util/Build | 5
-rw-r--r-- tools/perf/arch/arm64/util/arm-spe.c | 225
-rw-r--r-- tools/perf/arch/arm64/util/header.c | 65
-rw-r--r-- tools/perf/arch/arm64/util/sym-handling.c | 22
-rw-r--r-- tools/perf/arch/common.c | 44
-rw-r--r-- tools/perf/arch/common.h | 1
-rw-r--r-- tools/perf/arch/powerpc/util/header.c | 2
-rw-r--r-- tools/perf/arch/powerpc/util/sym-handling.c | 8
-rw-r--r-- tools/perf/arch/s390/Makefile | 26
-rw-r--r-- tools/perf/arch/s390/annotate/instructions.c | 3
-rwxr-xr-x tools/perf/arch/s390/entry/syscalls/mksyscalltbl | 32
-rw-r--r-- tools/perf/arch/s390/entry/syscalls/syscall.tbl | 390
-rw-r--r-- tools/perf/arch/s390/include/perf_regs.h | 2
-rw-r--r-- tools/perf/arch/s390/util/dwarf-regs.c | 32
-rw-r--r-- tools/perf/arch/x86/tests/perf-time-to-tsc.c | 2
-rw-r--r-- tools/perf/arch/x86/util/header.c | 4
-rw-r--r-- tools/perf/arch/x86/util/unwind-libunwind.c | 2
-rw-r--r-- tools/perf/bench/futex-hash.c | 20
-rw-r--r-- tools/perf/bench/futex-lock-pi.c | 23
-rw-r--r-- tools/perf/bench/futex-requeue.c | 22
-rw-r--r-- tools/perf/bench/futex-wake-parallel.c | 46
-rw-r--r-- tools/perf/bench/futex-wake.c | 18
-rw-r--r-- tools/perf/bench/numa.c | 56
-rw-r--r-- tools/perf/builtin-buildid-cache.c | 4
-rw-r--r-- tools/perf/builtin-c2c.c | 20
-rw-r--r-- tools/perf/builtin-help.c | 6
-rw-r--r-- tools/perf/builtin-inject.c | 3
-rw-r--r-- tools/perf/builtin-kvm.c | 13
-rw-r--r-- tools/perf/builtin-record.c | 108
-rw-r--r-- tools/perf/builtin-report.c | 289
-rw-r--r-- tools/perf/builtin-script.c | 297
-rw-r--r-- tools/perf/builtin-stat.c | 230
-rw-r--r-- tools/perf/builtin-top.c | 196
-rw-r--r-- tools/perf/builtin-trace.c | 131
-rwxr-xr-x tools/perf/check-headers.sh | 12
-rw-r--r-- tools/perf/jvmti/jvmti_agent.c | 16
-rw-r--r-- tools/perf/jvmti/jvmti_agent.h | 7
-rw-r--r-- tools/perf/jvmti/libjvmti.c | 147
-rw-r--r-- tools/perf/perf-completion.sh | 47
-rw-r--r-- tools/perf/perf.c | 4
-rw-r--r-- tools/perf/perf.h | 1
-rw-r--r-- tools/perf/pmu-events/arch/arm64/cavium/thunderx2-imp-def.json | 62
-rw-r--r-- tools/perf/pmu-events/arch/arm64/cortex-a53/branch.json | 27
-rw-r--r-- tools/perf/pmu-events/arch/arm64/cortex-a53/bus.json | 22
-rw-r--r-- tools/perf/pmu-events/arch/arm64/cortex-a53/cache.json | 27
-rw-r--r-- tools/perf/pmu-events/arch/arm64/cortex-a53/memory.json | 22
-rw-r--r-- tools/perf/pmu-events/arch/arm64/cortex-a53/other.json | 32
-rw-r--r-- tools/perf/pmu-events/arch/arm64/cortex-a53/pipeline.json | 52
-rw-r--r-- tools/perf/pmu-events/arch/arm64/mapfile.csv | 16
-rw-r--r-- tools/perf/pmu-events/arch/powerpc/mapfile.csv | 12
-rw-r--r-- tools/perf/pmu-events/arch/powerpc/power9/cache.json | 5
-rw-r--r-- tools/perf/pmu-events/arch/powerpc/power9/frontend.json | 7
-rw-r--r-- tools/perf/pmu-events/arch/powerpc/power9/marked.json | 27
-rw-r--r-- tools/perf/pmu-events/arch/powerpc/power9/other.json | 276
-rw-r--r-- tools/perf/pmu-events/arch/powerpc/power9/pipeline.json | 14
-rw-r--r-- tools/perf/pmu-events/arch/powerpc/power9/pmc.json | 2
-rw-r--r-- tools/perf/pmu-events/arch/powerpc/power9/translation.json | 5
-rw-r--r-- tools/perf/pmu-events/arch/x86/broadwell/cache.json | 555
-rw-r--r-- tools/perf/pmu-events/arch/x86/broadwell/floating-point.json | 108
-rw-r--r-- tools/perf/pmu-events/arch/x86/broadwell/frontend.json | 138
-rw-r--r-- tools/perf/pmu-events/arch/x86/broadwell/memory.json | 210
-rw-r--r-- tools/perf/pmu-events/arch/x86/broadwell/other.json | 20
-rw-r--r-- tools/perf/pmu-events/arch/x86/broadwell/pipeline.json | 1156
-rw-r--r-- tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json | 150
-rw-r--r-- tools/perf/pmu-events/arch/x86/broadwellde/cache.json | 389
-rw-r--r-- tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json | 108
-rw-r--r-- tools/perf/pmu-events/arch/x86/broadwellde/frontend.json | 138
-rw-r--r-- tools/perf/pmu-events/arch/x86/broadwellde/memory.json | 9
-rw-r--r-- tools/perf/pmu-events/arch/x86/broadwellde/other.json | 20
-rw-r--r-- tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json | 1156
-rw-r--r-- tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json | 150
-rw-r--r-- tools/perf/pmu-events/arch/x86/broadwellx/cache.json | 383
-rw-r--r-- tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json | 108
-rw-r--r-- tools/perf/pmu-events/arch/x86/broadwellx/frontend.json | 138
-rw-r--r-- tools/perf/pmu-events/arch/x86/broadwellx/memory.json | 40
-rw-r--r-- tools/perf/pmu-events/arch/x86/broadwellx/other.json | 20
-rw-r--r-- tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json | 1156
-rw-r--r-- tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json | 150
-rw-r--r-- tools/perf/pmu-events/arch/x86/goldmont/cache.json | 1050
-rw-r--r-- tools/perf/pmu-events/arch/x86/goldmont/memory.json | 280
-rw-r--r-- tools/perf/pmu-events/arch/x86/goldmont/other.json | 54
-rw-r--r-- tools/perf/pmu-events/arch/x86/goldmont/pipeline.json | 506
-rw-r--r-- tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json | 60
-rw-r--r-- tools/perf/pmu-events/arch/x86/haswell/cache.json | 365
-rw-r--r-- tools/perf/pmu-events/arch/x86/haswell/floating-point.json | 20
-rw-r--r-- tools/perf/pmu-events/arch/x86/haswell/frontend.json | 132
-rw-r--r-- tools/perf/pmu-events/arch/x86/haswell/memory.json | 21
-rw-r--r-- tools/perf/pmu-events/arch/x86/haswell/other.json | 20
-rw-r--r-- tools/perf/pmu-events/arch/x86/haswell/pipeline.json | 981
-rw-r--r-- tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json | 212
-rw-r--r-- tools/perf/pmu-events/arch/x86/haswellx/cache.json | 377
-rw-r--r-- tools/perf/pmu-events/arch/x86/haswellx/floating-point.json | 20
-rw-r--r-- tools/perf/pmu-events/arch/x86/haswellx/frontend.json | 132
-rw-r--r-- tools/perf/pmu-events/arch/x86/haswellx/memory.json | 28
-rw-r--r-- tools/perf/pmu-events/arch/x86/haswellx/other.json | 20
-rw-r--r-- tools/perf/pmu-events/arch/x86/haswellx/pipeline.json | 981
-rw-r--r-- tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json | 212
-rw-r--r-- tools/perf/pmu-events/arch/x86/ivybridge/cache.json | 243
-rw-r--r-- tools/perf/pmu-events/arch/x86/ivybridge/frontend.json | 122
-rw-r--r-- tools/perf/pmu-events/arch/x86/ivybridge/memory.json | 24
-rw-r--r-- tools/perf/pmu-events/arch/x86/ivybridge/other.json | 20
-rw-r--r-- tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json | 812
-rw-r--r-- tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json | 60
-rw-r--r-- tools/perf/pmu-events/arch/x86/ivytown/cache.json | 236
-rw-r--r-- tools/perf/pmu-events/arch/x86/ivytown/frontend.json | 122
-rw-r--r-- tools/perf/pmu-events/arch/x86/ivytown/memory.json | 24
-rw-r--r-- tools/perf/pmu-events/arch/x86/ivytown/other.json | 20
-rw-r--r-- tools/perf/pmu-events/arch/x86/ivytown/pipeline.json | 812
-rw-r--r-- tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json | 60
-rw-r--r-- tools/perf/pmu-events/arch/x86/mapfile.csv | 5
-rw-r--r-- tools/perf/pmu-events/arch/x86/silvermont/cache.json | 3
-rw-r--r-- tools/perf/pmu-events/arch/x86/skylake/cache.json | 4180
-rw-r--r-- tools/perf/pmu-events/arch/x86/skylake/floating-point.json | 5
-rw-r--r-- tools/perf/pmu-events/arch/x86/skylake/frontend.json | 232
-rw-r--r-- tools/perf/pmu-events/arch/x86/skylake/memory.json | 2008
-rw-r--r-- tools/perf/pmu-events/arch/x86/skylake/other.json | 40
-rw-r--r-- tools/perf/pmu-events/arch/x86/skylake/pipeline.json | 973
-rw-r--r-- tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json | 262
-rw-r--r-- tools/perf/pmu-events/arch/x86/skylakex/cache.json | 257
-rw-r--r-- tools/perf/pmu-events/arch/x86/skylakex/floating-point.json | 3
-rw-r--r-- tools/perf/pmu-events/arch/x86/skylakex/frontend.json | 48
-rw-r--r-- tools/perf/pmu-events/arch/x86/skylakex/memory.json | 231
-rw-r--r-- tools/perf/pmu-events/arch/x86/skylakex/other.json | 94
-rw-r--r-- tools/perf/pmu-events/arch/x86/skylakex/pipeline.json | 44
-rw-r--r-- tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json | 42
-rw-r--r-- tools/perf/pmu-events/jevents.c | 39
-rw-r--r-- tools/perf/scripts/python/bin/mem-phys-addr-record | 19
-rw-r--r-- tools/perf/scripts/python/bin/mem-phys-addr-report | 3
-rw-r--r-- tools/perf/scripts/python/mem-phys-addr.py | 95
-rw-r--r-- tools/perf/tests/attr.c | 6
-rw-r--r-- tools/perf/tests/backward-ring-buffer.c | 9
-rw-r--r-- tools/perf/tests/bp_signal.c | 2
-rw-r--r-- tools/perf/tests/bpf-script-example.c | 4
-rw-r--r-- tools/perf/tests/bpf.c | 68
-rw-r--r-- tools/perf/tests/builtin-test.c | 10
-rw-r--r-- tools/perf/tests/code-reading.c | 2
-rw-r--r-- tools/perf/tests/dwarf-unwind.c | 1
-rw-r--r-- tools/perf/tests/keep-tracking.c | 2
-rw-r--r-- tools/perf/tests/mmap-basic.c | 2
-rw-r--r-- tools/perf/tests/openat-syscall-tp-fields.c | 5
-rw-r--r-- tools/perf/tests/parse-events.c | 1
-rw-r--r-- tools/perf/tests/perf-record.c | 2
-rw-r--r-- tools/perf/tests/sample-parsing.c | 2
-rwxr-xr-x tools/perf/tests/shell/trace+probe_libc_inet_pton.sh | 28
-rwxr-xr-x tools/perf/tests/shell/trace+probe_vfs_getname.sh | 5
-rw-r--r-- tools/perf/tests/sw-clock.c | 2
-rw-r--r-- tools/perf/tests/switch-tracking.c | 2
-rw-r--r-- tools/perf/tests/task-exit.c | 6
-rw-r--r-- tools/perf/tests/thread-map.c | 2
-rw-r--r-- tools/perf/trace/beauty/Build | 1
-rw-r--r-- tools/perf/trace/beauty/arch_errno_names.c | 1
-rwxr-xr-x tools/perf/trace/beauty/arch_errno_names.sh | 100
-rw-r--r-- tools/perf/trace/beauty/beauty.h | 5
-rw-r--r-- tools/perf/trace/beauty/flock.c | 10
-rw-r--r-- tools/perf/trace/beauty/futex_val3.c | 18
-rw-r--r-- tools/perf/trace/beauty/mmap.c | 3
-rw-r--r-- tools/perf/ui/browsers/annotate.c | 399
-rw-r--r-- tools/perf/ui/browsers/hists.c | 38
-rw-r--r-- tools/perf/ui/browsers/hists.h | 3
-rw-r--r-- tools/perf/ui/gtk/annotate.c | 25
-rw-r--r-- tools/perf/util/Build | 10
-rw-r--r-- tools/perf/util/annotate.c | 670
-rw-r--r-- tools/perf/util/annotate.h | 76
-rw-r--r-- tools/perf/util/arm-spe-pkt-decoder.c | 462
-rw-r--r-- tools/perf/util/arm-spe-pkt-decoder.h | 43
-rw-r--r-- tools/perf/util/arm-spe.c | 231
-rw-r--r-- tools/perf/util/arm-spe.h | 31
-rw-r--r-- tools/perf/util/auxtrace.c | 8
-rw-r--r-- tools/perf/util/auxtrace.h | 1
-rw-r--r-- tools/perf/util/bpf-loader.c | 4
-rw-r--r-- tools/perf/util/callchain.c | 10
-rw-r--r-- tools/perf/util/callchain.h | 2
-rw-r--r-- tools/perf/util/cgroup.c | 3
-rw-r--r-- tools/perf/util/cs-etm-decoder/Build | 1
-rw-r--r-- tools/perf/util/cs-etm-decoder/cs-etm-decoder.c | 513
-rw-r--r-- tools/perf/util/cs-etm-decoder/cs-etm-decoder.h | 105
-rw-r--r-- tools/perf/util/cs-etm.c | 1023
-rw-r--r-- tools/perf/util/cs-etm.h | 18
-rw-r--r-- tools/perf/util/data.c | 10
-rw-r--r-- tools/perf/util/dso.c | 2
-rw-r--r-- tools/perf/util/env.c | 47
-rw-r--r-- tools/perf/util/env.h | 2
-rw-r--r-- tools/perf/util/event.c | 8
-rw-r--r-- tools/perf/util/event.h | 4
-rw-r--r-- tools/perf/util/evlist.c | 101
-rw-r--r-- tools/perf/util/evlist.h | 21
-rw-r--r-- tools/perf/util/evsel.c | 266
-rw-r--r-- tools/perf/util/evsel.h | 27
-rwxr-xr-x tools/perf/util/generate-cmdlist.sh | 2
-rw-r--r-- tools/perf/util/header.c | 130
-rw-r--r-- tools/perf/util/header.h | 9
-rw-r--r-- tools/perf/util/hist.h | 6
-rw-r--r-- tools/perf/util/intel-bts.c | 6
-rw-r--r-- tools/perf/util/intel-pt-decoder/Build | 24
-rw-r--r-- tools/perf/util/intel-pt-decoder/inat.h | 10
-rw-r--r-- tools/perf/util/intel-pt-decoder/x86-opcode-map.txt | 15
-rw-r--r-- tools/perf/util/intel-pt.c | 11
-rw-r--r-- tools/perf/util/machine.c | 7
-rw-r--r-- tools/perf/util/map.c | 2
-rw-r--r-- tools/perf/util/metricgroup.c | 10
-rw-r--r-- tools/perf/util/mmap.c | 190
-rw-r--r-- tools/perf/util/mmap.h | 16
-rw-r--r-- tools/perf/util/ordered-events.c | 3
-rw-r--r-- tools/perf/util/ordered-events.h | 2
-rw-r--r-- tools/perf/util/parse-events.c | 5
-rw-r--r-- tools/perf/util/parse-events.h | 3
-rw-r--r-- tools/perf/util/path.c | 14
-rw-r--r-- tools/perf/util/path.h | 3
-rw-r--r-- tools/perf/util/pmu.c | 92
-rw-r--r-- tools/perf/util/pmu.h | 2
-rw-r--r-- tools/perf/util/probe-event.c | 85
-rw-r--r-- tools/perf/util/python-ext-sources | 1
-rw-r--r-- tools/perf/util/python.c | 2
-rw-r--r-- tools/perf/util/rblist.c | 19
-rw-r--r-- tools/perf/util/rblist.h | 1
-rw-r--r-- tools/perf/util/scripting-engines/trace-event-python.c | 3
-rw-r--r-- tools/perf/util/session.c | 54
-rw-r--r-- tools/perf/util/session.h | 2
-rw-r--r-- tools/perf/util/sort.c | 20
-rw-r--r-- tools/perf/util/srcline.c | 9
-rw-r--r-- tools/perf/util/srcline.h | 5
-rw-r--r-- tools/perf/util/stat-shadow.c | 426
-rw-r--r-- tools/perf/util/stat.c | 15
-rw-r--r-- tools/perf/util/stat.h | 63
-rw-r--r-- tools/perf/util/string.c | 46
-rw-r--r-- tools/perf/util/string2.h | 2
-rw-r--r-- tools/perf/util/symbol.c | 5
-rw-r--r-- tools/perf/util/symbol.h | 1
-rw-r--r-- tools/perf/util/syscalltbl.c | 4
-rw-r--r-- tools/perf/util/target.h | 7
-rw-r--r-- tools/perf/util/thread_map.c | 27
-rw-r--r-- tools/perf/util/thread_map.h | 3
-rw-r--r-- tools/perf/util/time-utils.c | 301
-rw-r--r-- tools/perf/util/time-utils.h | 8
-rw-r--r-- tools/perf/util/tool.h | 1
-rw-r--r-- tools/perf/util/unwind-libunwind-local.c | 9
-rw-r--r-- tools/perf/util/unwind-libunwind.c | 4
-rw-r--r-- tools/perf/util/util.c | 26
-rw-r--r-- tools/perf/util/util.h | 10
-rw-r--r-- tools/power/acpi/common/cmfsize.c | 2
-rw-r--r-- tools/power/acpi/common/getopt.c | 2
-rw-r--r-- tools/power/acpi/os_specific/service_layers/oslinuxtbl.c | 2
-rw-r--r-- tools/power/acpi/os_specific/service_layers/osunixdir.c | 2
-rw-r--r-- tools/power/acpi/os_specific/service_layers/osunixmap.c | 2
-rw-r--r-- tools/power/acpi/os_specific/service_layers/osunixxf.c | 2
-rw-r--r-- tools/power/acpi/tools/acpidump/acpidump.h | 2
-rw-r--r-- tools/power/acpi/tools/acpidump/apdump.c | 2
-rw-r--r-- tools/power/acpi/tools/acpidump/apfiles.c | 2
-rw-r--r-- tools/power/acpi/tools/acpidump/apmain.c | 30
-rw-r--r-- tools/power/cpupower/bench/system.c | 2
-rw-r--r-- tools/power/cpupower/lib/cpufreq.h | 4
-rw-r--r-- tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c | 9
-rwxr-xr-x tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py | 15
-rw-r--r-- tools/scripts/Makefile.include | 1
-rw-r--r-- tools/testing/nvdimm/Kbuild | 4
-rw-r--r-- tools/testing/nvdimm/acpi_nfit_test.c | 8
-rw-r--r-- tools/testing/nvdimm/device_dax_test.c | 8
-rw-r--r-- tools/testing/nvdimm/libnvdimm_test.c | 8
-rw-r--r-- tools/testing/nvdimm/pmem_test.c | 8
-rw-r--r-- tools/testing/nvdimm/test/iomap.c | 7
-rw-r--r-- tools/testing/nvdimm/test/nfit.c | 498
-rw-r--r-- tools/testing/nvdimm/test/nfit_test.h | 134
-rw-r--r-- tools/testing/nvdimm/watermark.h | 21
-rw-r--r-- tools/testing/radix-tree/idr-test.c | 29
-rw-r--r-- tools/testing/radix-tree/linux/kernel.h | 2
-rw-r--r-- tools/testing/selftests/Makefile | 9
-rw-r--r-- tools/testing/selftests/bpf/.gitignore | 8
-rw-r--r-- tools/testing/selftests/bpf/Makefile | 37
-rw-r--r-- tools/testing/selftests/bpf/bpf_helpers.h | 5
-rw-r--r-- tools/testing/selftests/bpf/config | 2
-rw-r--r-- tools/testing/selftests/bpf/sample_map_ret0.c | 34
-rw-r--r-- tools/testing/selftests/bpf/sample_ret0.c | 7
-rwxr-xr-x tools/testing/selftests/bpf/tcp_client.py | 51
-rwxr-xr-x tools/testing/selftests/bpf/tcp_server.py | 83
-rw-r--r-- tools/testing/selftests/bpf/test_align.c | 174
-rw-r--r-- tools/testing/selftests/bpf/test_dev_cgroup.c | 11
-rwxr-xr-x tools/testing/selftests/bpf/test_kmod.sh | 18
-rw-r--r-- tools/testing/selftests/bpf/test_l4lb_noinline.c | 473
-rwxr-xr-x tools/testing/selftests/bpf/test_libbpf.sh | 49
-rw-r--r-- tools/testing/selftests/bpf/test_libbpf_open.c | 150
-rw-r--r-- tools/testing/selftests/bpf/test_lpm_map.c | 217
-rw-r--r-- tools/testing/selftests/bpf/test_maps.c | 32
-rwxr-xr-x tools/testing/selftests/bpf/test_offload.py | 1085
-rw-r--r-- tools/testing/selftests/bpf/test_progs.c | 363
-rw-r--r-- tools/testing/selftests/bpf/test_stacktrace_map.c | 62
-rw-r--r-- tools/testing/selftests/bpf/test_tcpbpf.h | 16
-rw-r--r-- tools/testing/selftests/bpf/test_tcpbpf_kern.c | 115
-rw-r--r-- tools/testing/selftests/bpf/test_tcpbpf_user.c | 126
-rw-r--r-- tools/testing/selftests/bpf/test_tracepoint.c | 26
-rw-r--r-- tools/testing/selftests/bpf/test_verifier.c | 3163
-rw-r--r-- tools/testing/selftests/bpf/test_verifier_log.c | 7
-rwxr-xr-x tools/testing/selftests/bpf/test_xdp_meta.sh | 1
-rw-r--r-- tools/testing/selftests/bpf/test_xdp_noinline.c | 833
-rwxr-xr-x tools/testing/selftests/bpf/test_xdp_redirect.sh | 2
-rwxr-xr-x tools/testing/selftests/firmware/fw_fallback.sh | 172
-rwxr-xr-x tools/testing/selftests/firmware/fw_filesystem.sh | 5
-rw-r--r-- tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc | 6
-rw-r--r-- tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc | 37
-rw-r--r-- tools/testing/selftests/ftrace/test.d/functions | 10
-rw-r--r-- tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc | 21
-rwxr-xr-x tools/testing/selftests/gen_kselftest_tar.sh | 4
-rw-r--r-- tools/testing/selftests/kselftest.h | 2
-rwxr-xr-x tools/testing/selftests/kselftest_install.sh | 4
-rw-r--r-- tools/testing/selftests/lib.mk | 2
-rw-r--r-- tools/testing/selftests/media_tests/Makefile | 7
-rw-r--r-- tools/testing/selftests/media_tests/media_device_open.c | 3
-rw-r--r-- tools/testing/selftests/media_tests/media_device_test.c | 3
-rw-r--r-- tools/testing/selftests/media_tests/video_device_test.c | 3
-rw-r--r-- tools/testing/selftests/membarrier/membarrier_test.c | 237
-rw-r--r-- tools/testing/selftests/memfd/Makefile | 5
-rw-r--r-- tools/testing/selftests/memfd/common.c | 46
-rw-r--r-- tools/testing/selftests/memfd/common.h | 9
-rw-r--r-- tools/testing/selftests/memfd/fuse_test.c | 44
-rw-r--r-- tools/testing/selftests/memfd/memfd_test.c | 212
-rwxr-xr-x tools/testing/selftests/memfd/run_fuse_test.sh | 2
-rwxr-xr-x tools/testing/selftests/memfd/run_tests.sh | 1
-rw-r--r-- tools/testing/selftests/net/Makefile | 1
-rw-r--r-- tools/testing/selftests/net/config | 1
-rwxr-xr-x tools/testing/selftests/net/fib_tests.sh | 429
-rw-r--r-- tools/testing/selftests/net/msg_zerocopy.c | 21
-rw-r--r-- tools/testing/selftests/net/reuseport_bpf.c | 21
-rwxr-xr-x tools/testing/selftests/net/rtnetlink.sh | 229
-rw-r--r-- tools/testing/selftests/nsfs/pidns.c | 2
-rwxr-xr-x tools/testing/selftests/ntb/ntb_test.sh | 307
-rw-r--r-- tools/testing/selftests/powerpc/alignment/Makefile | 3
-rw-r--r-- tools/testing/selftests/powerpc/alignment/alignment_handler.c | 491
-rw-r--r-- tools/testing/selftests/powerpc/benchmarks/mmap_bench.c | 53
-rw-r--r-- tools/testing/selftests/powerpc/mm/.gitignore | 3
-rw-r--r-- tools/testing/selftests/powerpc/mm/Makefile | 2
-rw-r--r-- tools/testing/selftests/powerpc/mm/segv_errors.c | 78
-rw-r--r-- tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c | 4
-rw-r--r-- tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c | 3
-rw-r--r-- tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c | 2
-rw-r--r-- tools/testing/selftests/powerpc/tm/.gitignore | 1
-rw-r--r-- tools/testing/selftests/powerpc/tm/Makefile | 3
-rw-r--r-- tools/testing/selftests/powerpc/tm/tm-trap.c | 329
-rw-r--r-- tools/testing/selftests/powerpc/tm/tm-unavailable.c | 43
-rw-r--r-- tools/testing/selftests/ptp/testptp.c | 4
-rwxr-xr-x tools/testing/selftests/rcutorture/bin/config2frag.sh | 25
-rwxr-xr-x tools/testing/selftests/rcutorture/bin/configinit.sh | 2
-rwxr-xr-x tools/testing/selftests/rcutorture/bin/kvm-build.sh | 5
-rwxr-xr-x tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh | 2
-rwxr-xr-x tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh | 4
-rwxr-xr-x tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh | 2
-rwxr-xr-x tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh | 4
-rwxr-xr-x tools/testing/selftests/rcutorture/bin/kvm-recheck.sh | 2
-rwxr-xr-x tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh | 6
-rwxr-xr-x tools/testing/selftests/rcutorture/bin/kvm.sh | 42
-rwxr-xr-x tools/testing/selftests/rcutorture/bin/parse-torture.sh | 2
-rw-r--r-- tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh | 1
-rw-r--r-- tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh | 1
-rw-r--r-- tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh | 1
-rw-r--r-- tools/testing/selftests/seccomp/seccomp_bpf.c | 4
-rw-r--r-- tools/testing/selftests/vm/.gitignore | 1
-rw-r--r-- tools/testing/selftests/vm/Makefile | 5
-rw-r--r-- tools/testing/selftests/vm/compaction_test.c | 2
-rwxr-xr-x tools/testing/selftests/vm/run_vmtests | 11
-rw-r--r-- tools/testing/selftests/vm/va_128TBswitch.c | 297
-rw-r--r-- tools/testing/selftests/x86/5lvl.c | 177
-rw-r--r-- tools/testing/selftests/x86/Makefile | 36
-rw-r--r-- tools/testing/selftests/x86/ldt_gdt.c | 12
-rw-r--r-- tools/testing/selftests/x86/mpx-mini-test.c | 32
-rw-r--r-- tools/testing/selftests/x86/protection_keys.c | 28
-rw-r--r-- tools/testing/selftests/x86/single_step_syscall.c | 5
-rw-r--r-- tools/testing/selftests/x86/test_mremap_vdso.c | 4
-rw-r--r-- tools/testing/selftests/x86/test_vdso.c | 55
-rw-r--r-- tools/testing/selftests/x86/test_vsyscall.c | 505
-rw-r--r-- tools/usb/usbip/libsrc/usbip_device_driver.c | 7
-rw-r--r-- tools/usb/usbip/libsrc/vhci_driver.c | 24
-rw-r--r-- tools/usb/usbip/src/usbip_bind.c | 9
-rw-r--r-- tools/usb/usbip/src/usbip_list.c | 9
-rw-r--r-- tools/usb/usbip/src/usbipd.c | 2
-rw-r--r-- tools/usb/usbip/src/utils.c | 9
-rw-r--r-- tools/virtio/linux/kernel.h | 2
-rw-r--r-- tools/virtio/linux/thread_info.h | 1
-rw-r--r-- tools/virtio/ringtest/main.h | 59
-rw-r--r-- tools/virtio/ringtest/ptr_ring.c | 31
-rw-r--r-- tools/virtio/ringtest/ring.c | 30
-rw-r--r-- tools/virtio/ringtest/virtio_ring_0_9.c | 24
-rw-r--r-- tools/vm/page-types.c | 28
-rwxr-xr-x tools/vm/slabinfo-gnuplot.sh | 2
493 files changed, 34544 insertions, 16594 deletions
diff --git a/tools/arch/alpha/include/uapi/asm/errno.h b/tools/arch/alpha/include/uapi/asm/errno.h
new file mode 100644
index 000000000000..3d265f6babaf
--- /dev/null
+++ b/tools/arch/alpha/include/uapi/asm/errno.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ALPHA_ERRNO_H
+#define _ALPHA_ERRNO_H
+
+#include <asm-generic/errno-base.h>
+
+#undef EAGAIN /* 11 in errno-base.h */
+
+#define EDEADLK 11 /* Resource deadlock would occur */
+
+#define EAGAIN 35 /* Try again */
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define EINPROGRESS 36 /* Operation now in progress */
+#define EALREADY 37 /* Operation already in progress */
+#define ENOTSOCK 38 /* Socket operation on non-socket */
+#define EDESTADDRREQ 39 /* Destination address required */
+#define EMSGSIZE 40 /* Message too long */
+#define EPROTOTYPE 41 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 42 /* Protocol not available */
+#define EPROTONOSUPPORT 43 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 44 /* Socket type not supported */
+#define EOPNOTSUPP 45 /* Operation not supported on transport endpoint */
+#define EPFNOSUPPORT 46 /* Protocol family not supported */
+#define EAFNOSUPPORT 47 /* Address family not supported by protocol */
+#define EADDRINUSE 48 /* Address already in use */
+#define EADDRNOTAVAIL 49 /* Cannot assign requested address */
+#define ENETDOWN 50 /* Network is down */
+#define ENETUNREACH 51 /* Network is unreachable */
+#define ENETRESET 52 /* Network dropped connection because of reset */
+#define ECONNABORTED 53 /* Software caused connection abort */
+#define ECONNRESET 54 /* Connection reset by peer */
+#define ENOBUFS 55 /* No buffer space available */
+#define EISCONN 56 /* Transport endpoint is already connected */
+#define ENOTCONN 57 /* Transport endpoint is not connected */
+#define ESHUTDOWN 58 /* Cannot send after transport endpoint shutdown */
+#define ETOOMANYREFS 59 /* Too many references: cannot splice */
+#define ETIMEDOUT 60 /* Connection timed out */
+#define ECONNREFUSED 61 /* Connection refused */
+#define ELOOP 62 /* Too many symbolic links encountered */
+#define ENAMETOOLONG 63 /* File name too long */
+#define EHOSTDOWN 64 /* Host is down */
+#define EHOSTUNREACH 65 /* No route to host */
+#define ENOTEMPTY 66 /* Directory not empty */
+
+#define EUSERS 68 /* Too many users */
+#define EDQUOT 69 /* Quota exceeded */
+#define ESTALE 70 /* Stale file handle */
+#define EREMOTE 71 /* Object is remote */
+
+#define ENOLCK 77 /* No record locks available */
+#define ENOSYS 78 /* Function not implemented */
+
+#define ENOMSG 80 /* No message of desired type */
+#define EIDRM 81 /* Identifier removed */
+#define ENOSR 82 /* Out of streams resources */
+#define ETIME 83 /* Timer expired */
+#define EBADMSG 84 /* Not a data message */
+#define EPROTO 85 /* Protocol error */
+#define ENODATA 86 /* No data available */
+#define ENOSTR 87 /* Device not a stream */
+
+#define ENOPKG 92 /* Package not installed */
+
+#define EILSEQ 116 /* Illegal byte sequence */
+
+/* The following are just random noise.. */
+#define ECHRNG 88 /* Channel number out of range */
+#define EL2NSYNC 89 /* Level 2 not synchronized */
+#define EL3HLT 90 /* Level 3 halted */
+#define EL3RST 91 /* Level 3 reset */
+
+#define ELNRNG 93 /* Link number out of range */
+#define EUNATCH 94 /* Protocol driver not attached */
+#define ENOCSI 95 /* No CSI structure available */
+#define EL2HLT 96 /* Level 2 halted */
+#define EBADE 97 /* Invalid exchange */
+#define EBADR 98 /* Invalid request descriptor */
+#define EXFULL 99 /* Exchange full */
+#define ENOANO 100 /* No anode */
+#define EBADRQC 101 /* Invalid request code */
+#define EBADSLT 102 /* Invalid slot */
+
+#define EDEADLOCK EDEADLK
+
+#define EBFONT 104 /* Bad font file format */
+#define ENONET 105 /* Machine is not on the network */
+#define ENOLINK 106 /* Link has been severed */
+#define EADV 107 /* Advertise error */
+#define ESRMNT 108 /* Srmount error */
+#define ECOMM 109 /* Communication error on send */
+#define EMULTIHOP 110 /* Multihop attempted */
+#define EDOTDOT 111 /* RFS specific error */
+#define EOVERFLOW 112 /* Value too large for defined data type */
+#define ENOTUNIQ 113 /* Name not unique on network */
+#define EBADFD 114 /* File descriptor in bad state */
+#define EREMCHG 115 /* Remote address changed */
+
+#define EUCLEAN 117 /* Structure needs cleaning */
+#define ENOTNAM 118 /* Not a XENIX named type file */
+#define ENAVAIL 119 /* No XENIX semaphores available */
+#define EISNAM 120 /* Is a named type file */
+#define EREMOTEIO 121 /* Remote I/O error */
+
+#define ELIBACC 122 /* Can not access a needed shared library */
+#define ELIBBAD 123 /* Accessing a corrupted shared library */
+#define ELIBSCN 124 /* .lib section in a.out corrupted */
+#define ELIBMAX 125 /* Attempting to link in too many shared libraries */
+#define ELIBEXEC 126 /* Cannot exec a shared library directly */
+#define ERESTART 127 /* Interrupted system call should be restarted */
+#define ESTRPIPE 128 /* Streams pipe error */
+
+#define ENOMEDIUM 129 /* No medium found */
+#define EMEDIUMTYPE 130 /* Wrong medium type */
+#define ECANCELED 131 /* Operation Cancelled */
+#define ENOKEY 132 /* Required key not available */
+#define EKEYEXPIRED 133 /* Key has expired */
+#define EKEYREVOKED 134 /* Key has been revoked */
+#define EKEYREJECTED 135 /* Key was rejected by service */
+
+/* for robust mutexes */
+#define EOWNERDEAD 136 /* Owner died */
+#define ENOTRECOVERABLE 137 /* State not recoverable */
+
+#define ERFKILL 138 /* Operation not possible due to RF-kill */
+
+#define EHWPOISON 139 /* Memory page has hardware error */
+
+#endif
diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h
index 1f57bbe82b6f..6edd177bb1c7 100644
--- a/tools/arch/arm/include/uapi/asm/kvm.h
+++ b/tools/arch/arm/include/uapi/asm/kvm.h
@@ -152,6 +152,12 @@ struct kvm_arch_memory_slot {
(__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64)
#define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__)
+/* PL1 Physical Timer Registers */
+#define KVM_REG_ARM_PTIMER_CTL ARM_CP15_REG32(0, 14, 2, 1)
+#define KVM_REG_ARM_PTIMER_CNT ARM_CP15_REG64(0, 14)
+#define KVM_REG_ARM_PTIMER_CVAL ARM_CP15_REG64(2, 14)
+
+/* Virtual Timer Registers */
#define KVM_REG_ARM_TIMER_CTL ARM_CP15_REG32(0, 14, 3, 1)
#define KVM_REG_ARM_TIMER_CNT ARM_CP15_REG64(1, 14)
#define KVM_REG_ARM_TIMER_CVAL ARM_CP15_REG64(3, 14)
@@ -216,6 +222,7 @@ struct kvm_arch_memory_slot {
#define KVM_DEV_ARM_ITS_SAVE_TABLES 1
#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2
#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
+#define KVM_DEV_ARM_ITS_CTRL_RESET 4
/* KVM_IRQ_LINE irq field index values */
#define KVM_ARM_IRQ_TYPE_SHIFT 24
diff --git a/tools/arch/arm64/include/uapi/asm/bpf_perf_event.h b/tools/arch/arm64/include/uapi/asm/bpf_perf_event.h
new file mode 100644
index 000000000000..b551b741653d
--- /dev/null
+++ b/tools/arch/arm64/include/uapi/asm/bpf_perf_event.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_BPF_PERF_EVENT_H__
+
+#include <asm/ptrace.h>
+
+typedef struct user_pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
index 51149ec75fe4..9abbf3044654 100644
--- a/tools/arch/arm64/include/uapi/asm/kvm.h
+++ b/tools/arch/arm64/include/uapi/asm/kvm.h
@@ -196,6 +196,12 @@ struct kvm_arch_memory_slot {
#define ARM64_SYS_REG(...) (__ARM64_SYS_REG(__VA_ARGS__) | KVM_REG_SIZE_U64)
+/* Physical Timer EL0 Registers */
+#define KVM_REG_ARM_PTIMER_CTL ARM64_SYS_REG(3, 3, 14, 2, 1)
+#define KVM_REG_ARM_PTIMER_CVAL ARM64_SYS_REG(3, 3, 14, 2, 2)
+#define KVM_REG_ARM_PTIMER_CNT ARM64_SYS_REG(3, 3, 14, 0, 1)
+
+/* EL0 Virtual Timer Registers */
#define KVM_REG_ARM_TIMER_CTL ARM64_SYS_REG(3, 3, 14, 3, 1)
#define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2)
#define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2)
@@ -228,6 +234,7 @@ struct kvm_arch_memory_slot {
#define KVM_DEV_ARM_ITS_SAVE_TABLES 1
#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2
#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
+#define KVM_DEV_ARM_ITS_CTRL_RESET 4
/* Device Control API on vcpu fd */
#define KVM_ARM_VCPU_PMU_V3_CTRL 0
diff --git a/tools/arch/mips/include/asm/errno.h b/tools/arch/mips/include/asm/errno.h
new file mode 100644
index 000000000000..21d91cdfe3c9
--- /dev/null
+++ b/tools/arch/mips/include/asm/errno.h
@@ -0,0 +1,17 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 1999, 2001, 2002 by Ralf Baechle
+ */
+#ifndef _ASM_ERRNO_H
+#define _ASM_ERRNO_H
+
+#include <uapi/asm/errno.h>
+
+
+/* The biggest error number defined here or in <linux/errno.h>. */
+#define EMAXERRNO 1133
+
+#endif /* _ASM_ERRNO_H */
diff --git a/tools/arch/mips/include/uapi/asm/errno.h b/tools/arch/mips/include/uapi/asm/errno.h
new file mode 100644
index 000000000000..2fb714e2d6d8
--- /dev/null
+++ b/tools/arch/mips/include/uapi/asm/errno.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 1999, 2001, 2002 by Ralf Baechle
+ */
+#ifndef _UAPI_ASM_ERRNO_H
+#define _UAPI_ASM_ERRNO_H
+
+/*
+ * These error numbers are intended to be MIPS ABI compatible
+ */
+
+#include <asm-generic/errno-base.h>
+
+#define ENOMSG 35 /* No message of desired type */
+#define EIDRM 36 /* Identifier removed */
+#define ECHRNG 37 /* Channel number out of range */
+#define EL2NSYNC 38 /* Level 2 not synchronized */
+#define EL3HLT 39 /* Level 3 halted */
+#define EL3RST 40 /* Level 3 reset */
+#define ELNRNG 41 /* Link number out of range */
+#define EUNATCH 42 /* Protocol driver not attached */
+#define ENOCSI 43 /* No CSI structure available */
+#define EL2HLT 44 /* Level 2 halted */
+#define EDEADLK 45 /* Resource deadlock would occur */
+#define ENOLCK 46 /* No record locks available */
+#define EBADE 50 /* Invalid exchange */
+#define EBADR 51 /* Invalid request descriptor */
+#define EXFULL 52 /* Exchange full */
+#define ENOANO 53 /* No anode */
+#define EBADRQC 54 /* Invalid request code */
+#define EBADSLT 55 /* Invalid slot */
+#define EDEADLOCK 56 /* File locking deadlock error */
+#define EBFONT 59 /* Bad font file format */
+#define ENOSTR 60 /* Device not a stream */
+#define ENODATA 61 /* No data available */
+#define ETIME 62 /* Timer expired */
+#define ENOSR 63 /* Out of streams resources */
+#define ENONET 64 /* Machine is not on the network */
+#define ENOPKG 65 /* Package not installed */
+#define EREMOTE 66 /* Object is remote */
+#define ENOLINK 67 /* Link has been severed */
+#define EADV 68 /* Advertise error */
+#define ESRMNT 69 /* Srmount error */
+#define ECOMM 70 /* Communication error on send */
+#define EPROTO 71 /* Protocol error */
+#define EDOTDOT 73 /* RFS specific error */
+#define EMULTIHOP 74 /* Multihop attempted */
+#define EBADMSG 77 /* Not a data message */
+#define ENAMETOOLONG 78 /* File name too long */
+#define EOVERFLOW 79 /* Value too large for defined data type */
+#define ENOTUNIQ 80 /* Name not unique on network */
+#define EBADFD 81 /* File descriptor in bad state */
+#define EREMCHG 82 /* Remote address changed */
+#define ELIBACC 83 /* Can not access a needed shared library */
+#define ELIBBAD 84 /* Accessing a corrupted shared library */
+#define ELIBSCN 85 /* .lib section in a.out corrupted */
+#define ELIBMAX 86 /* Attempting to link in too many shared libraries */
+#define ELIBEXEC 87 /* Cannot exec a shared library directly */
+#define EILSEQ 88 /* Illegal byte sequence */
+#define ENOSYS 89 /* Function not implemented */
+#define ELOOP 90 /* Too many symbolic links encountered */
+#define ERESTART 91 /* Interrupted system call should be restarted */
+#define ESTRPIPE 92 /* Streams pipe error */
+#define ENOTEMPTY 93 /* Directory not empty */
+#define EUSERS 94 /* Too many users */
+#define ENOTSOCK 95 /* Socket operation on non-socket */
+#define EDESTADDRREQ 96 /* Destination address required */
+#define EMSGSIZE 97 /* Message too long */
+#define EPROTOTYPE 98 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 99 /* Protocol not available */
+#define EPROTONOSUPPORT 120 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 121 /* Socket type not supported */
+#define EOPNOTSUPP 122 /* Operation not supported on transport endpoint */
+#define EPFNOSUPPORT 123 /* Protocol family not supported */
+#define EAFNOSUPPORT 124 /* Address family not supported by protocol */
+#define EADDRINUSE 125 /* Address already in use */
+#define EADDRNOTAVAIL 126 /* Cannot assign requested address */
+#define ENETDOWN 127 /* Network is down */
+#define ENETUNREACH 128 /* Network is unreachable */
+#define ENETRESET 129 /* Network dropped connection because of reset */
+#define ECONNABORTED 130 /* Software caused connection abort */
+#define ECONNRESET 131 /* Connection reset by peer */
+#define ENOBUFS 132 /* No buffer space available */
+#define EISCONN 133 /* Transport endpoint is already connected */
+#define ENOTCONN 134 /* Transport endpoint is not connected */
+#define EUCLEAN 135 /* Structure needs cleaning */
+#define ENOTNAM 137 /* Not a XENIX named type file */
+#define ENAVAIL 138 /* No XENIX semaphores available */
+#define EISNAM 139 /* Is a named type file */
+#define EREMOTEIO 140 /* Remote I/O error */
+#define EINIT 141 /* Reserved */
+#define EREMDEV 142 /* Error 142 */
+#define ESHUTDOWN 143 /* Cannot send after transport endpoint shutdown */
+#define ETOOMANYREFS 144 /* Too many references: cannot splice */
+#define ETIMEDOUT 145 /* Connection timed out */
+#define ECONNREFUSED 146 /* Connection refused */
+#define EHOSTDOWN 147 /* Host is down */
+#define EHOSTUNREACH 148 /* No route to host */
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define EALREADY 149 /* Operation already in progress */
+#define EINPROGRESS 150 /* Operation now in progress */
+#define ESTALE 151 /* Stale file handle */
+#define ECANCELED 158 /* AIO operation canceled */
+
+/*
+ * These error are Linux extensions.
+ */
+#define ENOMEDIUM 159 /* No medium found */
+#define EMEDIUMTYPE 160 /* Wrong medium type */
+#define ENOKEY 161 /* Required key not available */
+#define EKEYEXPIRED 162 /* Key has expired */
+#define EKEYREVOKED 163 /* Key has been revoked */
+#define EKEYREJECTED 164 /* Key was rejected by service */
+
+/* for robust mutexes */
+#define EOWNERDEAD 165 /* Owner died */
+#define ENOTRECOVERABLE 166 /* State not recoverable */
+
+#define ERFKILL 167 /* Operation not possible due to RF-kill */
+
+#define EHWPOISON 168 /* Memory page has hardware error */
+
+#define EDQUOT 1133 /* Quota exceeded */
+
+
+#endif /* _UAPI_ASM_ERRNO_H */
diff --git a/tools/arch/parisc/include/uapi/asm/errno.h b/tools/arch/parisc/include/uapi/asm/errno.h
new file mode 100644
index 000000000000..fc0df353ff0d
--- /dev/null
+++ b/tools/arch/parisc/include/uapi/asm/errno.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _PARISC_ERRNO_H
+#define _PARISC_ERRNO_H
+
+#include <asm-generic/errno-base.h>
+
+#define ENOMSG 35 /* No message of desired type */
+#define EIDRM 36 /* Identifier removed */
+#define ECHRNG 37 /* Channel number out of range */
+#define EL2NSYNC 38 /* Level 2 not synchronized */
+#define EL3HLT 39 /* Level 3 halted */
+#define EL3RST 40 /* Level 3 reset */
+#define ELNRNG 41 /* Link number out of range */
+#define EUNATCH 42 /* Protocol driver not attached */
+#define ENOCSI 43 /* No CSI structure available */
+#define EL2HLT 44 /* Level 2 halted */
+#define EDEADLK 45 /* Resource deadlock would occur */
+#define EDEADLOCK EDEADLK
+#define ENOLCK 46 /* No record locks available */
+#define EILSEQ 47 /* Illegal byte sequence */
+
+#define ENONET 50 /* Machine is not on the network */
+#define ENODATA 51 /* No data available */
+#define ETIME 52 /* Timer expired */
+#define ENOSR 53 /* Out of streams resources */
+#define ENOSTR 54 /* Device not a stream */
+#define ENOPKG 55 /* Package not installed */
+
+#define ENOLINK 57 /* Link has been severed */
+#define EADV 58 /* Advertise error */
+#define ESRMNT 59 /* Srmount error */
+#define ECOMM 60 /* Communication error on send */
+#define EPROTO 61 /* Protocol error */
+
+#define EMULTIHOP 64 /* Multihop attempted */
+
+#define EDOTDOT 66 /* RFS specific error */
+#define EBADMSG 67 /* Not a data message */
+#define EUSERS 68 /* Too many users */
+#define EDQUOT 69 /* Quota exceeded */
+#define ESTALE 70 /* Stale file handle */
+#define EREMOTE 71 /* Object is remote */
+#define EOVERFLOW 72 /* Value too large for defined data type */
+
+/* these errnos are defined by Linux but not HPUX. */
+
+#define EBADE 160 /* Invalid exchange */
+#define EBADR 161 /* Invalid request descriptor */
+#define EXFULL 162 /* Exchange full */
+#define ENOANO 163 /* No anode */
+#define EBADRQC 164 /* Invalid request code */
+#define EBADSLT 165 /* Invalid slot */
+#define EBFONT 166 /* Bad font file format */
+#define ENOTUNIQ 167 /* Name not unique on network */
+#define EBADFD 168 /* File descriptor in bad state */
+#define EREMCHG 169 /* Remote address changed */
+#define ELIBACC 170 /* Can not access a needed shared library */
+#define ELIBBAD 171 /* Accessing a corrupted shared library */
+#define ELIBSCN 172 /* .lib section in a.out corrupted */
+#define ELIBMAX 173 /* Attempting to link in too many shared libraries */
+#define ELIBEXEC 174 /* Cannot exec a shared library directly */
+#define ERESTART 175 /* Interrupted system call should be restarted */
+#define ESTRPIPE 176 /* Streams pipe error */
+#define EUCLEAN 177 /* Structure needs cleaning */
+#define ENOTNAM 178 /* Not a XENIX named type file */
+#define ENAVAIL 179 /* No XENIX semaphores available */
+#define EISNAM 180 /* Is a named type file */
+#define EREMOTEIO 181 /* Remote I/O error */
+#define ENOMEDIUM 182 /* No medium found */
+#define EMEDIUMTYPE 183 /* Wrong medium type */
+#define ENOKEY 184 /* Required key not available */
+#define EKEYEXPIRED 185 /* Key has expired */
+#define EKEYREVOKED 186 /* Key has been revoked */
+#define EKEYREJECTED 187 /* Key was rejected by service */
+
+/* We now return you to your regularly scheduled HPUX. */
+
+#define ENOSYM 215 /* symbol does not exist in executable */
+#define ENOTSOCK 216 /* Socket operation on non-socket */
+#define EDESTADDRREQ 217 /* Destination address required */
+#define EMSGSIZE 218 /* Message too long */
+#define EPROTOTYPE 219 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 220 /* Protocol not available */
+#define EPROTONOSUPPORT 221 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 222 /* Socket type not supported */
+#define EOPNOTSUPP 223 /* Operation not supported on transport endpoint */
+#define EPFNOSUPPORT 224 /* Protocol family not supported */
+#define EAFNOSUPPORT 225 /* Address family not supported by protocol */
+#define EADDRINUSE 226 /* Address already in use */
+#define EADDRNOTAVAIL 227 /* Cannot assign requested address */
+#define ENETDOWN 228 /* Network is down */
+#define ENETUNREACH 229 /* Network is unreachable */
+#define ENETRESET 230 /* Network dropped connection because of reset */
+#define ECONNABORTED 231 /* Software caused connection abort */
+#define ECONNRESET 232 /* Connection reset by peer */
+#define ENOBUFS 233 /* No buffer space available */
+#define EISCONN 234 /* Transport endpoint is already connected */
+#define ENOTCONN 235 /* Transport endpoint is not connected */
+#define ESHUTDOWN 236 /* Cannot send after transport endpoint shutdown */
+#define ETOOMANYREFS 237 /* Too many references: cannot splice */
+#define ETIMEDOUT 238 /* Connection timed out */
+#define ECONNREFUSED 239 /* Connection refused */
+#define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
+#define EREMOTERELEASE 240 /* Remote peer released connection */
+#define EHOSTDOWN 241 /* Host is down */
+#define EHOSTUNREACH 242 /* No route to host */
+
+#define EALREADY 244 /* Operation already in progress */
+#define EINPROGRESS 245 /* Operation now in progress */
+#define EWOULDBLOCK EAGAIN /* Operation would block (Not HPUX compliant) */
+#define ENOTEMPTY 247 /* Directory not empty */
+#define ENAMETOOLONG 248 /* File name too long */
+#define ELOOP 249 /* Too many symbolic links encountered */
+#define ENOSYS 251 /* Function not implemented */
+
+#define ENOTSUP 252 /* Function not implemented (POSIX.4 / HPUX) */
+#define ECANCELLED 253 /* aio request was canceled before complete (POSIX.4 / HPUX) */
+#define ECANCELED ECANCELLED /* SuSv3 and Solaris wants one 'L' */
+
+/* for robust mutexes */
+#define EOWNERDEAD 254 /* Owner died */
+#define ENOTRECOVERABLE 255 /* State not recoverable */
+
+#define ERFKILL 256 /* Operation not possible due to RF-kill */
+
+#define EHWPOISON 257 /* Memory page has hardware error */
+
+#endif
diff --git a/tools/arch/powerpc/include/uapi/asm/errno.h b/tools/arch/powerpc/include/uapi/asm/errno.h
new file mode 100644
index 000000000000..cc79856896a1
--- /dev/null
+++ b/tools/arch/powerpc/include/uapi/asm/errno.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_POWERPC_ERRNO_H
+#define _ASM_POWERPC_ERRNO_H
+
+#include <asm-generic/errno.h>
+
+#undef EDEADLOCK
+#define EDEADLOCK 58 /* File locking deadlock error */
+
+#endif /* _ASM_POWERPC_ERRNO_H */
diff --git a/tools/arch/powerpc/include/uapi/asm/kvm.h b/tools/arch/powerpc/include/uapi/asm/kvm.h
index 61d6049f4c1e..833ed9a16adf 100644
--- a/tools/arch/powerpc/include/uapi/asm/kvm.h
+++ b/tools/arch/powerpc/include/uapi/asm/kvm.h
@@ -443,6 +443,31 @@ struct kvm_ppc_rmmu_info {
__u32 ap_encodings[8];
};
+/* For KVM_PPC_GET_CPU_CHAR */
+struct kvm_ppc_cpu_char {
+ __u64 character; /* characteristics of the CPU */
+ __u64 behaviour; /* recommended software behaviour */
+ __u64 character_mask; /* valid bits in character */
+ __u64 behaviour_mask; /* valid bits in behaviour */
+};
+
+/*
+ * Values for character and character_mask.
+ * These are identical to the values used by H_GET_CPU_CHARACTERISTICS.
+ */
+#define KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 (1ULL << 63)
+#define KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED (1ULL << 62)
+#define KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 (1ULL << 61)
+#define KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 (1ULL << 60)
+#define KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV (1ULL << 59)
+#define KVM_PPC_CPU_CHAR_BR_HINT_HONOURED (1ULL << 58)
+#define KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF (1ULL << 57)
+#define KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS (1ULL << 56)
+
+#define KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY (1ULL << 63)
+#define KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR (1ULL << 62)
+#define KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ULL << 61)
+
/* Per-vcpu XICS interrupt controller state */
#define KVM_REG_PPC_ICP_STATE (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8c)
@@ -607,6 +632,8 @@ struct kvm_ppc_rmmu_info {
#define KVM_REG_PPC_TIDR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbc)
#define KVM_REG_PPC_PSSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd)
+#define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe)
+
/* Transactional Memory checkpointed state:
* This is all GPRs, all VSX regs and a subset of SPRs
*/
diff --git a/tools/arch/s390/include/uapi/asm/bpf_perf_event.h b/tools/arch/s390/include/uapi/asm/bpf_perf_event.h
new file mode 100644
index 000000000000..0a8e37a519f2
--- /dev/null
+++ b/tools/arch/s390/include/uapi/asm/bpf_perf_event.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_BPF_PERF_EVENT_H__
+
+#include "ptrace.h"
+
+typedef user_pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h
index 9ad172dcd912..4cdaa55fabfe 100644
--- a/tools/arch/s390/include/uapi/asm/kvm.h
+++ b/tools/arch/s390/include/uapi/asm/kvm.h
@@ -6,10 +6,6 @@
*
* Copyright IBM Corp. 2008
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
*/
@@ -228,6 +224,7 @@ struct kvm_guest_debug_arch {
#define KVM_SYNC_RICCB (1UL << 7)
#define KVM_SYNC_FPRS (1UL << 8)
#define KVM_SYNC_GSCB (1UL << 9)
+#define KVM_SYNC_BPBC (1UL << 10)
/* length and alignment of the sdnx as a power of two */
#define SDNXC 8
#define SDNXL (1UL << SDNXC)
@@ -251,7 +248,9 @@ struct kvm_sync_regs {
};
__u8 reserved[512]; /* for future vector expansion */
__u32 fpc; /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
- __u8 padding1[52]; /* riccb needs to be 64byte aligned */
+ __u8 bpbc : 1; /* bp mode */
+ __u8 reserved2 : 7;
+ __u8 padding1[51]; /* riccb needs to be 64byte aligned */
__u8 riccb[64]; /* runtime instrumentation controls block */
__u8 padding2[192]; /* sdnx needs to be 256byte aligned */
union {
diff --git a/tools/arch/s390/include/uapi/asm/kvm_perf.h b/tools/arch/s390/include/uapi/asm/kvm_perf.h
index c36c97ffdc6f..84606b8cc49e 100644
--- a/tools/arch/s390/include/uapi/asm/kvm_perf.h
+++ b/tools/arch/s390/include/uapi/asm/kvm_perf.h
@@ -4,10 +4,6 @@
*
* Copyright 2014 IBM Corp.
* Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
*/
#ifndef __LINUX_KVM_PERF_S390_H
diff --git a/tools/arch/s390/include/uapi/asm/perf_regs.h b/tools/arch/s390/include/uapi/asm/perf_regs.h
new file mode 100644
index 000000000000..d17dd9e5d516
--- /dev/null
+++ b/tools/arch/s390/include/uapi/asm/perf_regs.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_S390_PERF_REGS_H
+#define _ASM_S390_PERF_REGS_H
+
+enum perf_event_s390_regs {
+ PERF_REG_S390_R0,
+ PERF_REG_S390_R1,
+ PERF_REG_S390_R2,
+ PERF_REG_S390_R3,
+ PERF_REG_S390_R4,
+ PERF_REG_S390_R5,
+ PERF_REG_S390_R6,
+ PERF_REG_S390_R7,
+ PERF_REG_S390_R8,
+ PERF_REG_S390_R9,
+ PERF_REG_S390_R10,
+ PERF_REG_S390_R11,
+ PERF_REG_S390_R12,
+ PERF_REG_S390_R13,
+ PERF_REG_S390_R14,
+ PERF_REG_S390_R15,
+ PERF_REG_S390_FP0,
+ PERF_REG_S390_FP1,
+ PERF_REG_S390_FP2,
+ PERF_REG_S390_FP3,
+ PERF_REG_S390_FP4,
+ PERF_REG_S390_FP5,
+ PERF_REG_S390_FP6,
+ PERF_REG_S390_FP7,
+ PERF_REG_S390_FP8,
+ PERF_REG_S390_FP9,
+ PERF_REG_S390_FP10,
+ PERF_REG_S390_FP11,
+ PERF_REG_S390_FP12,
+ PERF_REG_S390_FP13,
+ PERF_REG_S390_FP14,
+ PERF_REG_S390_FP15,
+ PERF_REG_S390_MASK,
+ PERF_REG_S390_PC,
+
+ PERF_REG_S390_MAX
+};
+
+#endif /* _ASM_S390_PERF_REGS_H */
diff --git a/tools/arch/s390/include/uapi/asm/ptrace.h b/tools/arch/s390/include/uapi/asm/ptrace.h
new file mode 100644
index 000000000000..543dd70e12c8
--- /dev/null
+++ b/tools/arch/s390/include/uapi/asm/ptrace.h
@@ -0,0 +1,457 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * S390 version
+ * Copyright IBM Corp. 1999, 2000
+ * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ */
+
+#ifndef _UAPI_S390_PTRACE_H
+#define _UAPI_S390_PTRACE_H
+
+/*
+ * Offsets in the user_regs_struct. They are used for the ptrace
+ * system call and in entry.S
+ */
+#ifndef __s390x__
+
+#define PT_PSWMASK 0x00
+#define PT_PSWADDR 0x04
+#define PT_GPR0 0x08
+#define PT_GPR1 0x0C
+#define PT_GPR2 0x10
+#define PT_GPR3 0x14
+#define PT_GPR4 0x18
+#define PT_GPR5 0x1C
+#define PT_GPR6 0x20
+#define PT_GPR7 0x24
+#define PT_GPR8 0x28
+#define PT_GPR9 0x2C
+#define PT_GPR10 0x30
+#define PT_GPR11 0x34
+#define PT_GPR12 0x38
+#define PT_GPR13 0x3C
+#define PT_GPR14 0x40
+#define PT_GPR15 0x44
+#define PT_ACR0 0x48
+#define PT_ACR1 0x4C
+#define PT_ACR2 0x50
+#define PT_ACR3 0x54
+#define PT_ACR4 0x58
+#define PT_ACR5 0x5C
+#define PT_ACR6 0x60
+#define PT_ACR7 0x64
+#define PT_ACR8 0x68
+#define PT_ACR9 0x6C
+#define PT_ACR10 0x70
+#define PT_ACR11 0x74
+#define PT_ACR12 0x78
+#define PT_ACR13 0x7C
+#define PT_ACR14 0x80
+#define PT_ACR15 0x84
+#define PT_ORIGGPR2 0x88
+#define PT_FPC 0x90
+/*
+ * A nasty fact of life that the ptrace api
+ * only supports passing of longs.
+ */
+#define PT_FPR0_HI 0x98
+#define PT_FPR0_LO 0x9C
+#define PT_FPR1_HI 0xA0
+#define PT_FPR1_LO 0xA4
+#define PT_FPR2_HI 0xA8
+#define PT_FPR2_LO 0xAC
+#define PT_FPR3_HI 0xB0
+#define PT_FPR3_LO 0xB4
+#define PT_FPR4_HI 0xB8
+#define PT_FPR4_LO 0xBC
+#define PT_FPR5_HI 0xC0
+#define PT_FPR5_LO 0xC4
+#define PT_FPR6_HI 0xC8
+#define PT_FPR6_LO 0xCC
+#define PT_FPR7_HI 0xD0
+#define PT_FPR7_LO 0xD4
+#define PT_FPR8_HI 0xD8
+#define PT_FPR8_LO 0XDC
+#define PT_FPR9_HI 0xE0
+#define PT_FPR9_LO 0xE4
+#define PT_FPR10_HI 0xE8
+#define PT_FPR10_LO 0xEC
+#define PT_FPR11_HI 0xF0
+#define PT_FPR11_LO 0xF4
+#define PT_FPR12_HI 0xF8
+#define PT_FPR12_LO 0xFC
+#define PT_FPR13_HI 0x100
+#define PT_FPR13_LO 0x104
+#define PT_FPR14_HI 0x108
+#define PT_FPR14_LO 0x10C
+#define PT_FPR15_HI 0x110
+#define PT_FPR15_LO 0x114
+#define PT_CR_9 0x118
+#define PT_CR_10 0x11C
+#define PT_CR_11 0x120
+#define PT_IEEE_IP 0x13C
+#define PT_LASTOFF PT_IEEE_IP
+#define PT_ENDREGS 0x140-1
+
+#define GPR_SIZE 4
+#define CR_SIZE 4
+
+#define STACK_FRAME_OVERHEAD 96 /* size of minimum stack frame */
+
+#else /* __s390x__ */
+
+#define PT_PSWMASK 0x00
+#define PT_PSWADDR 0x08
+#define PT_GPR0 0x10
+#define PT_GPR1 0x18
+#define PT_GPR2 0x20
+#define PT_GPR3 0x28
+#define PT_GPR4 0x30
+#define PT_GPR5 0x38
+#define PT_GPR6 0x40
+#define PT_GPR7 0x48
+#define PT_GPR8 0x50
+#define PT_GPR9 0x58
+#define PT_GPR10 0x60
+#define PT_GPR11 0x68
+#define PT_GPR12 0x70
+#define PT_GPR13 0x78
+#define PT_GPR14 0x80
+#define PT_GPR15 0x88
+#define PT_ACR0 0x90
+#define PT_ACR1 0x94
+#define PT_ACR2 0x98
+#define PT_ACR3 0x9C
+#define PT_ACR4 0xA0
+#define PT_ACR5 0xA4
+#define PT_ACR6 0xA8
+#define PT_ACR7 0xAC
+#define PT_ACR8 0xB0
+#define PT_ACR9 0xB4
+#define PT_ACR10 0xB8
+#define PT_ACR11 0xBC
+#define PT_ACR12 0xC0
+#define PT_ACR13 0xC4
+#define PT_ACR14 0xC8
+#define PT_ACR15 0xCC
+#define PT_ORIGGPR2 0xD0
+#define PT_FPC 0xD8
+#define PT_FPR0 0xE0
+#define PT_FPR1 0xE8
+#define PT_FPR2 0xF0
+#define PT_FPR3 0xF8
+#define PT_FPR4 0x100
+#define PT_FPR5 0x108
+#define PT_FPR6 0x110
+#define PT_FPR7 0x118
+#define PT_FPR8 0x120
+#define PT_FPR9 0x128
+#define PT_FPR10 0x130
+#define PT_FPR11 0x138
+#define PT_FPR12 0x140
+#define PT_FPR13 0x148
+#define PT_FPR14 0x150
+#define PT_FPR15 0x158
+#define PT_CR_9 0x160
+#define PT_CR_10 0x168
+#define PT_CR_11 0x170
+#define PT_IEEE_IP 0x1A8
+#define PT_LASTOFF PT_IEEE_IP
+#define PT_ENDREGS 0x1B0-1
+
+#define GPR_SIZE 8
+#define CR_SIZE 8
+
+#define STACK_FRAME_OVERHEAD 160 /* size of minimum stack frame */
+
+#endif /* __s390x__ */
+
+#define NUM_GPRS 16
+#define NUM_FPRS 16
+#define NUM_CRS 16
+#define NUM_ACRS 16
+
+#define NUM_CR_WORDS 3
+
+#define FPR_SIZE 8
+#define FPC_SIZE 4
+#define FPC_PAD_SIZE 4 /* gcc insists on aligning the fpregs */
+#define ACR_SIZE 4
+
+
+#define PTRACE_OLDSETOPTIONS 21
+
+#ifndef __ASSEMBLY__
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+typedef union {
+ float f;
+ double d;
+ __u64 ui;
+ struct
+ {
+ __u32 hi;
+ __u32 lo;
+ } fp;
+} freg_t;
+
+typedef struct {
+ __u32 fpc;
+ __u32 pad;
+ freg_t fprs[NUM_FPRS];
+} s390_fp_regs;
+
+#define FPC_EXCEPTION_MASK 0xF8000000
+#define FPC_FLAGS_MASK 0x00F80000
+#define FPC_DXC_MASK 0x0000FF00
+#define FPC_RM_MASK 0x00000003
+
+/* this typedef defines what a Program Status Word looks like */
+typedef struct {
+ unsigned long mask;
+ unsigned long addr;
+} __attribute__ ((aligned(8))) psw_t;
+
+#ifndef __s390x__
+
+#define PSW_MASK_PER 0x40000000UL
+#define PSW_MASK_DAT 0x04000000UL
+#define PSW_MASK_IO 0x02000000UL
+#define PSW_MASK_EXT 0x01000000UL
+#define PSW_MASK_KEY 0x00F00000UL
+#define PSW_MASK_BASE 0x00080000UL /* always one */
+#define PSW_MASK_MCHECK 0x00040000UL
+#define PSW_MASK_WAIT 0x00020000UL
+#define PSW_MASK_PSTATE 0x00010000UL
+#define PSW_MASK_ASC 0x0000C000UL
+#define PSW_MASK_CC 0x00003000UL
+#define PSW_MASK_PM 0x00000F00UL
+#define PSW_MASK_RI 0x00000000UL
+#define PSW_MASK_EA 0x00000000UL
+#define PSW_MASK_BA 0x00000000UL
+
+#define PSW_MASK_USER 0x0000FF00UL
+
+#define PSW_ADDR_AMODE 0x80000000UL
+#define PSW_ADDR_INSN 0x7FFFFFFFUL
+
+#define PSW_DEFAULT_KEY (((unsigned long) PAGE_DEFAULT_ACC) << 20)
+
+#define PSW_ASC_PRIMARY 0x00000000UL
+#define PSW_ASC_ACCREG 0x00004000UL
+#define PSW_ASC_SECONDARY 0x00008000UL
+#define PSW_ASC_HOME 0x0000C000UL
+
+#else /* __s390x__ */
+
+#define PSW_MASK_PER 0x4000000000000000UL
+#define PSW_MASK_DAT 0x0400000000000000UL
+#define PSW_MASK_IO 0x0200000000000000UL
+#define PSW_MASK_EXT 0x0100000000000000UL
+#define PSW_MASK_BASE 0x0000000000000000UL
+#define PSW_MASK_KEY 0x00F0000000000000UL
+#define PSW_MASK_MCHECK 0x0004000000000000UL
+#define PSW_MASK_WAIT 0x0002000000000000UL
+#define PSW_MASK_PSTATE 0x0001000000000000UL
+#define PSW_MASK_ASC 0x0000C00000000000UL
+#define PSW_MASK_CC 0x0000300000000000UL
+#define PSW_MASK_PM 0x00000F0000000000UL
+#define PSW_MASK_RI 0x0000008000000000UL
+#define PSW_MASK_EA 0x0000000100000000UL
+#define PSW_MASK_BA 0x0000000080000000UL
+
+#define PSW_MASK_USER 0x0000FF0180000000UL
+
+#define PSW_ADDR_AMODE 0x0000000000000000UL
+#define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL
+
+#define PSW_DEFAULT_KEY (((unsigned long) PAGE_DEFAULT_ACC) << 52)
+
+#define PSW_ASC_PRIMARY 0x0000000000000000UL
+#define PSW_ASC_ACCREG 0x0000400000000000UL
+#define PSW_ASC_SECONDARY 0x0000800000000000UL
+#define PSW_ASC_HOME 0x0000C00000000000UL
+
+#endif /* __s390x__ */
+
+
+/*
+ * The s390_regs structure is used to define the elf_gregset_t.
+ */
+typedef struct {
+ psw_t psw;
+ unsigned long gprs[NUM_GPRS];
+ unsigned int acrs[NUM_ACRS];
+ unsigned long orig_gpr2;
+} s390_regs;
+
+/*
+ * The user_pt_regs structure exports the beginning of
+ * the in-kernel pt_regs structure to user space.
+ */
+typedef struct {
+ unsigned long args[1];
+ psw_t psw;
+ unsigned long gprs[NUM_GPRS];
+} user_pt_regs;
+
+/*
+ * Now for the user space program event recording (trace) definitions.
+ * The following structures are used only for the ptrace interface; don't
+ * touch or even look at them unless you want to modify the user-space
+ * ptrace interface. In particular, stay away from them for in-kernel PER.
+ */
+typedef struct {
+ unsigned long cr[NUM_CR_WORDS];
+} per_cr_words;
+
+#define PER_EM_MASK 0xE8000000UL
+
+typedef struct {
+#ifdef __s390x__
+ unsigned : 32;
+#endif /* __s390x__ */
+ unsigned em_branching : 1;
+ unsigned em_instruction_fetch : 1;
+	/*
+	 * Switching on storage alteration automatically fixes
+	 * the storage alteration event bit in the user's std.
+	 */
+ unsigned em_storage_alteration : 1;
+ unsigned em_gpr_alt_unused : 1;
+ unsigned em_store_real_address : 1;
+ unsigned : 3;
+ unsigned branch_addr_ctl : 1;
+ unsigned : 1;
+ unsigned storage_alt_space_ctl : 1;
+ unsigned : 21;
+ unsigned long starting_addr;
+ unsigned long ending_addr;
+} per_cr_bits;
+
+typedef struct {
+ unsigned short perc_atmid;
+ unsigned long address;
+ unsigned char access_id;
+} per_lowcore_words;
+
+typedef struct {
+ unsigned perc_branching : 1;
+ unsigned perc_instruction_fetch : 1;
+ unsigned perc_storage_alteration : 1;
+ unsigned perc_gpr_alt_unused : 1;
+ unsigned perc_store_real_address : 1;
+ unsigned : 3;
+ unsigned atmid_psw_bit_31 : 1;
+ unsigned atmid_validity_bit : 1;
+ unsigned atmid_psw_bit_32 : 1;
+ unsigned atmid_psw_bit_5 : 1;
+ unsigned atmid_psw_bit_16 : 1;
+ unsigned atmid_psw_bit_17 : 1;
+ unsigned si : 2;
+ unsigned long address;
+ unsigned : 4;
+ unsigned access_id : 4;
+} per_lowcore_bits;
+
+typedef struct {
+ union {
+ per_cr_words words;
+ per_cr_bits bits;
+ } control_regs;
+ /*
+ * The single_step and instruction_fetch bits are obsolete,
+ * the kernel always sets them to zero. To enable single
+ * stepping use ptrace(PTRACE_SINGLESTEP) instead.
+ */
+ unsigned single_step : 1;
+ unsigned instruction_fetch : 1;
+ unsigned : 30;
+ /*
+ * These addresses are copied into cr10 & cr11 if single
+ * stepping is switched off
+ */
+ unsigned long starting_addr;
+ unsigned long ending_addr;
+ union {
+ per_lowcore_words words;
+ per_lowcore_bits bits;
+ } lowcore;
+} per_struct;
+
+typedef struct {
+ unsigned int len;
+ unsigned long kernel_addr;
+ unsigned long process_addr;
+} ptrace_area;
+
+/*
+ * S/390-specific non-POSIX ptrace requests. I chose unusual values so
+ * they are unlikely to clash with future ptrace definitions.
+ */
+#define PTRACE_PEEKUSR_AREA 0x5000
+#define PTRACE_POKEUSR_AREA 0x5001
+#define PTRACE_PEEKTEXT_AREA 0x5002
+#define PTRACE_PEEKDATA_AREA 0x5003
+#define PTRACE_POKETEXT_AREA 0x5004
+#define PTRACE_POKEDATA_AREA 0x5005
+#define PTRACE_GET_LAST_BREAK 0x5006
+#define PTRACE_PEEK_SYSTEM_CALL 0x5007
+#define PTRACE_POKE_SYSTEM_CALL 0x5008
+#define PTRACE_ENABLE_TE 0x5009
+#define PTRACE_DISABLE_TE 0x5010
+#define PTRACE_TE_ABORT_RAND 0x5011
+
+/*
+ * The numbers chosen here are somewhat arbitrary but absolutely MUST
+ * not overlap with any of the numbers assigned in <linux/ptrace.h>.
+ */
+#define PTRACE_SINGLEBLOCK 12 /* resume execution until next branch */
+
+/*
+ * The PT_PROT definition is loosely based on the hppa BSD definition
+ * in gdb/hppab-nat.c.
+ */
+#define PTRACE_PROT 21
+
+typedef enum {
+ ptprot_set_access_watchpoint,
+ ptprot_set_write_watchpoint,
+ ptprot_disable_watchpoint
+} ptprot_flags;
+
+typedef struct {
+ unsigned long lowaddr;
+ unsigned long hiaddr;
+ ptprot_flags prot;
+} ptprot_area;
+
+/* Sequence of bytes for breakpoint illegal instruction. */
+#define S390_BREAKPOINT {0x0,0x1}
+#define S390_BREAKPOINT_U16 ((__u16)0x0001)
+#define S390_SYSCALL_OPCODE ((__u16)0x0a00)
+#define S390_SYSCALL_SIZE 2
+
+/*
+ * The user_regs_struct defines the way the user registers are
+ * stored on the stack for signal handling.
+ */
+struct user_regs_struct {
+ psw_t psw;
+ unsigned long gprs[NUM_GPRS];
+ unsigned int acrs[NUM_ACRS];
+ unsigned long orig_gpr2;
+ s390_fp_regs fp_regs;
+	/*
+	 * These PER registers are in here so that gdb can modify them
+	 * itself as there is no "official" ptrace interface for hardware
+	 * watchpoints. This is the way Intel does it.
+	 */
+ per_struct per_info;
+ unsigned long ieee_instruction_pointer; /* obsolete, always 0 */
+};
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _UAPI_S390_PTRACE_H */
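
The comment before the PT_FPR*_HI/_LO offsets notes that the 31-bit ptrace ABI only passes longs, which is why each 64-bit FPR is split into two words; the PTRACE_PEEKUSR_AREA request and the ptrace_area descriptor defined in this header let a tracer move a whole block of the user area in one call instead. A rough sketch only (not part of this patch; it assumes an s390 build where this file is installed as <asm/ptrace.h>, that process_addr names a buffer in the tracing process, and it omits error handling):

    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <asm/ptrace.h>   /* ptrace_area, PTRACE_PEEKUSR_AREA, PT_FPC, s390_fp_regs */

    /* Read the tracee's fpc + fprs in one call rather than peeking the
     * PT_FPR*_HI/_LO offsets one long at a time. */
    static long peek_fp_regs(pid_t pid, s390_fp_regs *fpregs)
    {
            ptrace_area area = {
                    .len          = sizeof(*fpregs),
                    .kernel_addr  = PT_FPC,                  /* offset into the user area */
                    .process_addr = (unsigned long) fpregs,  /* tracer-side buffer */
            };

            return ptrace(PTRACE_PEEKUSR_AREA, pid, &area, 0L);
    }
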
diff --git a/tools/arch/sparc/include/uapi/asm/errno.h b/tools/arch/sparc/include/uapi/asm/errno.h
new file mode 100644
index 000000000000..81a732b902ee
--- /dev/null
+++ b/tools/arch/sparc/include/uapi/asm/errno.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _SPARC_ERRNO_H
+#define _SPARC_ERRNO_H
+
+/* These match the SunOS error numbering scheme. */
+
+#include <asm-generic/errno-base.h>
+
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define EINPROGRESS 36 /* Operation now in progress */
+#define EALREADY 37 /* Operation already in progress */
+#define ENOTSOCK 38 /* Socket operation on non-socket */
+#define EDESTADDRREQ 39 /* Destination address required */
+#define EMSGSIZE 40 /* Message too long */
+#define EPROTOTYPE 41 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 42 /* Protocol not available */
+#define EPROTONOSUPPORT 43 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 44 /* Socket type not supported */
+#define EOPNOTSUPP 45 /* Op not supported on transport endpoint */
+#define EPFNOSUPPORT 46 /* Protocol family not supported */
+#define EAFNOSUPPORT 47 /* Address family not supported by protocol */
+#define EADDRINUSE 48 /* Address already in use */
+#define EADDRNOTAVAIL 49 /* Cannot assign requested address */
+#define ENETDOWN 50 /* Network is down */
+#define ENETUNREACH 51 /* Network is unreachable */
+#define ENETRESET 52 /* Net dropped connection because of reset */
+#define ECONNABORTED 53 /* Software caused connection abort */
+#define ECONNRESET 54 /* Connection reset by peer */
+#define ENOBUFS 55 /* No buffer space available */
+#define EISCONN 56 /* Transport endpoint is already connected */
+#define ENOTCONN 57 /* Transport endpoint is not connected */
+#define ESHUTDOWN 58 /* No send after transport endpoint shutdown */
+#define ETOOMANYREFS 59 /* Too many references: cannot splice */
+#define ETIMEDOUT 60 /* Connection timed out */
+#define ECONNREFUSED 61 /* Connection refused */
+#define ELOOP 62 /* Too many symbolic links encountered */
+#define ENAMETOOLONG 63 /* File name too long */
+#define EHOSTDOWN 64 /* Host is down */
+#define EHOSTUNREACH 65 /* No route to host */
+#define ENOTEMPTY 66 /* Directory not empty */
+#define EPROCLIM 67 /* SUNOS: Too many processes */
+#define EUSERS 68 /* Too many users */
+#define EDQUOT 69 /* Quota exceeded */
+#define ESTALE 70 /* Stale file handle */
+#define EREMOTE 71 /* Object is remote */
+#define ENOSTR 72 /* Device not a stream */
+#define ETIME 73 /* Timer expired */
+#define ENOSR 74 /* Out of streams resources */
+#define ENOMSG 75 /* No message of desired type */
+#define EBADMSG 76 /* Not a data message */
+#define EIDRM 77 /* Identifier removed */
+#define EDEADLK 78 /* Resource deadlock would occur */
+#define ENOLCK 79 /* No record locks available */
+#define ENONET 80 /* Machine is not on the network */
+#define ERREMOTE 81 /* SunOS: Too many lvls of remote in path */
+#define ENOLINK 82 /* Link has been severed */
+#define EADV 83 /* Advertise error */
+#define ESRMNT 84 /* Srmount error */
+#define ECOMM 85 /* Communication error on send */
+#define EPROTO 86 /* Protocol error */
+#define EMULTIHOP 87 /* Multihop attempted */
+#define EDOTDOT 88 /* RFS specific error */
+#define EREMCHG 89 /* Remote address changed */
+#define ENOSYS 90 /* Function not implemented */
+
+/* The rest have no SunOS equivalent. */
+#define ESTRPIPE 91 /* Streams pipe error */
+#define EOVERFLOW 92 /* Value too large for defined data type */
+#define EBADFD 93 /* File descriptor in bad state */
+#define ECHRNG 94 /* Channel number out of range */
+#define EL2NSYNC 95 /* Level 2 not synchronized */
+#define EL3HLT 96 /* Level 3 halted */
+#define EL3RST 97 /* Level 3 reset */
+#define ELNRNG 98 /* Link number out of range */
+#define EUNATCH 99 /* Protocol driver not attached */
+#define ENOCSI 100 /* No CSI structure available */
+#define EL2HLT 101 /* Level 2 halted */
+#define EBADE 102 /* Invalid exchange */
+#define EBADR 103 /* Invalid request descriptor */
+#define EXFULL 104 /* Exchange full */
+#define ENOANO 105 /* No anode */
+#define EBADRQC 106 /* Invalid request code */
+#define EBADSLT 107 /* Invalid slot */
+#define EDEADLOCK 108 /* File locking deadlock error */
+#define EBFONT 109 /* Bad font file format */
+#define ELIBEXEC 110 /* Cannot exec a shared library directly */
+#define ENODATA 111 /* No data available */
+#define ELIBBAD 112 /* Accessing a corrupted shared library */
+#define ENOPKG 113 /* Package not installed */
+#define ELIBACC 114 /* Can not access a needed shared library */
+#define ENOTUNIQ 115 /* Name not unique on network */
+#define ERESTART 116 /* Interrupted syscall should be restarted */
+#define EUCLEAN 117 /* Structure needs cleaning */
+#define ENOTNAM 118 /* Not a XENIX named type file */
+#define ENAVAIL 119 /* No XENIX semaphores available */
+#define EISNAM 120 /* Is a named type file */
+#define EREMOTEIO 121 /* Remote I/O error */
+#define EILSEQ 122 /* Illegal byte sequence */
+#define ELIBMAX 123 /* Atmpt to link in too many shared libs */
+#define ELIBSCN 124 /* .lib section in a.out corrupted */
+
+#define ENOMEDIUM 125 /* No medium found */
+#define EMEDIUMTYPE 126 /* Wrong medium type */
+#define ECANCELED 127 /* Operation Cancelled */
+#define ENOKEY 128 /* Required key not available */
+#define EKEYEXPIRED 129 /* Key has expired */
+#define EKEYREVOKED 130 /* Key has been revoked */
+#define EKEYREJECTED 131 /* Key was rejected by service */
+
+/* for robust mutexes */
+#define EOWNERDEAD 132 /* Owner died */
+#define ENOTRECOVERABLE 133 /* State not recoverable */
+
+#define ERFKILL 134 /* Operation not possible due to RF-kill */
+
+#define EHWPOISON 135 /* Memory page has hardware error */
+
+#endif
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 793690fbda36..0dfe4d3f74e2 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -13,173 +13,176 @@
/*
* Defines x86 CPU feature bits
*/
-#define NCAPINTS 18 /* N 32-bit words worth of info */
-#define NBUGINTS 1 /* N 32-bit bug flags */
+#define NCAPINTS 19 /* N 32-bit words worth of info */
+#define NBUGINTS 1 /* N 32-bit bug flags */
/*
* Note: If the comment begins with a quoted string, that string is used
* in /proc/cpuinfo instead of the macro name. If the string is "",
* this feature bit is not displayed in /proc/cpuinfo at all.
+ *
+ * When adding new features here that depend on other features,
+ * please update the table in kernel/cpu/cpuid-deps.c as well.
*/
-/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
-#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
-#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
-#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
-#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
-#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
-#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
-#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
-#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
-#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
-#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
-#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
-#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
-#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
-#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
-#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */
- /* (plus FCMOVcc, FCOMI with FPU) */
-#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
-#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
-#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
-#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
-#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
-#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
-#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
-#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
-#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
-#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
-#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
-#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
-#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
-#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
-#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
+/* Intel-defined CPU features, CPUID level 0x00000001 (EDX), word 0 */
+#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
+#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
+#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
+#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
+#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
+#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
+#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
+#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
+#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
+#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
+#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
+#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
+#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
+#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
+#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions (plus FCMOVcc, FCOMI with FPU) */
+#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
+#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
+#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
+#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
+#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
+#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
+#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
+#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
+#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
+#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
+#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
+#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
+#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
+#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
+#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
/* Don't duplicate feature flags which are redundant with Intel! */
-#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
-#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */
-#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
-#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
-#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
-#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
-#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
-#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */
-#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */
-#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! */
+#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
+#define X86_FEATURE_MP ( 1*32+19) /* MP Capable */
+#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
+#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
+#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
+#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
+#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
+#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64, 64-bit support) */
+#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow extensions */
+#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow */
/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
-#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
-#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
-#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */
+#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
+#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
+#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */
/* Other features, Linux-defined mapping, word 3 */
/* This range is used for feature bits which conflict or are synthesized */
-#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
-#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
-#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
-#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
-/* cpu types for specific tunings: */
-#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
-#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */
-#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
-#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
-#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
-#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */
-#define X86_FEATURE_ART ( 3*32+10) /* Platform has always running timer (ART) */
-#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
-#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
-#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
-#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */
-#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */
-#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */
-#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */
-#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */
-#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
-#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
-#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
-#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */
-#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
-#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
-#define X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */
-#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
-#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
-#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
-#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
-#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */
-
-/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
-#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
-#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
-#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
-#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */
-#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
-#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
-#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */
-#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
-#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
-#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
-#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
-#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
-#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
-#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */
-#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
-#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */
-#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
-#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
-#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
-#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
-#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */
-#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
-#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
-#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */
-#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
-#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
-#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */
-#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
-#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */
-#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */
-#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
+#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
+#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
+#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
+#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
+
+/* CPU types for specific tunings: */
+#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
+#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */
+#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
+#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
+#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
+#define X86_FEATURE_UP ( 3*32+ 9) /* SMP kernel running on UP */
+#define X86_FEATURE_ART ( 3*32+10) /* Always running timer (ART) */
+#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
+#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
+#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
+#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */
+#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */
+#define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */
+#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" MFENCE synchronizes RDTSC */
+#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" LFENCE synchronizes RDTSC */
+#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
+#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
+#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
+#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* CPU topology enum extensions */
+#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
+#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
+#define X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */
+#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* Extended APICID (8 bits) */
+#define X86_FEATURE_AMD_DCM ( 3*32+27) /* AMD multi-node processor */
+#define X86_FEATURE_APERFMPERF ( 3*32+28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */
+#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
+#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */
+
+/* Intel-defined CPU features, CPUID level 0x00000001 (ECX), word 4 */
+#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
+#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
+#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
+#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" MONITOR/MWAIT support */
+#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL-qualified (filtered) Debug Store */
+#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
+#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer Mode eXtensions */
+#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
+#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
+#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
+#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
+#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
+#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
+#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B instruction */
+#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
+#define X86_FEATURE_PDCM ( 4*32+15) /* Perf/Debug Capabilities MSR */
+#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
+#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
+#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
+#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
+#define X86_FEATURE_X2APIC ( 4*32+21) /* X2APIC */
+#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
+#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
+#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* TSC deadline timer */
+#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
+#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV instructions */
+#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE instruction enabled in the OS */
+#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
+#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit FP conversions */
+#define X86_FEATURE_RDRAND ( 4*32+30) /* RDRAND instruction */
+#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
-#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
-#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
-#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
-#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
-#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
-#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
-#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
-#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
-#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
-#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
-
-/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
-#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
-#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
-#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */
-#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
-#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
-#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
-#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
-#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
-#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
-#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
-#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
-#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
-#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
-#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
-#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
-#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
-#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */
-#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
-#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */
-#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */
-#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
-#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
-#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */
-#define X86_FEATURE_PTSC ( 6*32+27) /* performance time-stamp counter */
-#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */
-#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
+#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
+#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
+#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
+#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
+#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
+#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
+#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
+#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
+#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
+#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
+
+/* More extended AMD flags: CPUID level 0x80000001, ECX, word 6 */
+#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
+#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
+#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure Virtual Machine */
+#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
+#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
+#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
+#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
+#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
+#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
+#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
+#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
+#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
+#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
+#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
+#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
+#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
+#define X86_FEATURE_TCE ( 6*32+17) /* Translation Cache Extension */
+#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
+#define X86_FEATURE_TBM ( 6*32+21) /* Trailing Bit Manipulations */
+#define X86_FEATURE_TOPOEXT ( 6*32+22) /* Topology extensions CPUID leafs */
+#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* Core performance counter extensions */
+#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
+#define X86_FEATURE_BPEXT ( 6*32+26) /* Data breakpoint extension */
+#define X86_FEATURE_PTSC ( 6*32+27) /* Performance time-stamp counter */
+#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */
+#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX instructions) */
/*
* Auxiliary flags: Linux defined - For features scattered in various
@@ -187,146 +190,174 @@
*
* Reuse free bits when adding new feature flags!
*/
-#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT */
-#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */
-#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
-#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
-#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */
-#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
-#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
+#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT instructions */
+#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */
+#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
+#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
+#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */
+#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
+#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
+#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
-#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
-#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
-#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
+#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
+#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
+#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
+#define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
+#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
+#define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */
-#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
-#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
-#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
-#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
+#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
+#define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */
-#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
+#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
/* Virtualization flags: Linux defined, word 8 */
-#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
-#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
-#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
-#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
-#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
-
-#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
-#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
-
-
-/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
-#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
-#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */
-#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
-#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
-#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
-#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
-#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
-#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
-#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
-#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
-#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
-#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
-#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */
-#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
-#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */
-#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
-#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
-#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
-#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
-#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
-#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
-#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
-#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
-#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
-#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
-#define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */
-#define X86_FEATURE_AVX512VL ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */
-
-/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
-#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */
-#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */
-#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */
-#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */
-
-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
-#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
-
-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
-#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
-#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */
-#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */
-
-/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
-#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */
-#define X86_FEATURE_IRPERF (13*32+1) /* Instructions Retired Count */
-
-/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
-#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
-#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */
-#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */
-#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */
-#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */
-#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */
-#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */
-#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
-#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */
-#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
-
-/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */
-#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */
-#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */
-#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */
-#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */
-#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */
-#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
-#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
-#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
-#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
-#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
-#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
-#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */
-#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */
-
-/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
-#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
-#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
-#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
-#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
-#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */
-#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */
-
-/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */
-#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */
-#define X86_FEATURE_SUCCOR (17*32+1) /* Uncorrectable error containment and recovery */
-#define X86_FEATURE_SMCA (17*32+3) /* Scalable MCA */
+#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
+#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
+#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
+#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
+#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
+
+#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer VMMCALL to VMCALL */
+#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
+
+
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
+#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
+#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3B */
+#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
+#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
+#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
+#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
+#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
+#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB instructions */
+#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
+#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
+#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
+#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
+#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */
+#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
+#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */
+#define X86_FEATURE_RDSEED ( 9*32+18) /* RDSEED instruction */
+#define X86_FEATURE_ADX ( 9*32+19) /* ADCX and ADOX instructions */
+#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
+#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
+#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
+#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
+#define X86_FEATURE_INTEL_PT ( 9*32+25) /* Intel Processor Trace */
+#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
+#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
+#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
+#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
+#define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */
+#define X86_FEATURE_AVX512VL ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */
+
+/* Extended state features, CPUID level 0x0000000d:1 (EAX), word 10 */
+#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT instruction */
+#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC instruction */
+#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 instruction */
+#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS instructions */
+
+/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (EDX), word 11 */
+#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
+
+/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (EDX), word 12 */
+#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring */
+#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */
+#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */
+
+/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
+#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
+#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
+#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */
+#define X86_FEATURE_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */
+#define X86_FEATURE_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */
+#define X86_FEATURE_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */
+
+/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
+#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
+#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */
+#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */
+#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */
+#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */
+#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */
+#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */
+#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
+#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */
+#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
+
+/* AMD SVM Feature Identification, CPUID level 0x8000000a (EDX), word 15 */
+#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */
+#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */
+#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */
+#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */
+#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */
+#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
+#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
+#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
+#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
+#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
+#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
+#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */
+#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */
+
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */
+#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
+#define X86_FEATURE_UMIP (16*32+ 2) /* User Mode Instruction Protection */
+#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
+#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
+#define X86_FEATURE_AVX512_VBMI2 (16*32+ 6) /* Additional AVX512 Vector Bit Manipulation Instructions */
+#define X86_FEATURE_GFNI (16*32+ 8) /* Galois Field New Instructions */
+#define X86_FEATURE_VAES (16*32+ 9) /* Vector AES */
+#define X86_FEATURE_VPCLMULQDQ (16*32+10) /* Carry-Less Multiplication Double Quadword */
+#define X86_FEATURE_AVX512_VNNI (16*32+11) /* Vector Neural Network Instructions */
+#define X86_FEATURE_AVX512_BITALG (16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */
+#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
+#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */
+#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */
+
+/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
+#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */
+#define X86_FEATURE_SUCCOR (17*32+ 1) /* Uncorrectable error containment and recovery */
+#define X86_FEATURE_SMCA (17*32+ 3) /* Scalable MCA */
+
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
+#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
+#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
/*
* BUG word(s)
*/
-#define X86_BUG(x) (NCAPINTS*32 + (x))
-
-#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
-#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
-#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
-#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
-#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
-#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
-#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
-#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
-#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
+#define X86_BUG(x) (NCAPINTS*32 + (x))
+
+#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
+#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
+#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
+#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
+#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
+#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
+#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
+#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
+#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
#ifdef CONFIG_X86_32
/*
* 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional
* to avoid confusion.
*/
-#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
+#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
#endif
-#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */
-#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
-#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
-#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */
+#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */
+#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
+#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
+#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */
+#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
+#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
+#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
+
#endif /* _ASM_X86_CPUFEATURES_H */
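
Each X86_FEATURE_* value above encodes (word * 32 + bit), where the NCAPINTS (now 19) 32-bit capability words are filled from the CPUID leaves named in the comments; a quoted string at the start of a comment overrides the name shown in /proc/cpuinfo. A small, self-contained sketch of that encoding (illustrative only; the kernel tests these bits through helpers such as boot_cpu_has(), not code like this):

    #include <stdint.h>
    #include <stdio.h>

    /* Word 18, bit 2, exactly as encoded in the header above. */
    #define X86_FEATURE_AVX512_4VNNIW (18*32 + 2)

    /* One 32-bit value per capability word (NCAPINTS == 19 above); in real
     * use these would be filled from the CPUID leaves listed in the comments. */
    static uint32_t x86_cap_words[19];

    static unsigned int feature_word(unsigned int f) { return f / 32; }
    static unsigned int feature_bit(unsigned int f)  { return f % 32; }

    static int has_feature(unsigned int f)
    {
            return (x86_cap_words[feature_word(f)] >> feature_bit(f)) & 1;
    }

    int main(void)
    {
            printf("AVX512_4VNNIW: word %u, bit %u, set=%d\n",
                   feature_word(X86_FEATURE_AVX512_4VNNIW),
                   feature_bit(X86_FEATURE_AVX512_4VNNIW),
                   has_feature(X86_FEATURE_AVX512_4VNNIW));
            return 0;
    }
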
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
index c10c9128f54e..33833d1909af 100644
--- a/tools/arch/x86/include/asm/disabled-features.h
+++ b/tools/arch/x86/include/asm/disabled-features.h
@@ -16,6 +16,12 @@
# define DISABLE_MPX (1<<(X86_FEATURE_MPX & 31))
#endif
+#ifdef CONFIG_X86_INTEL_UMIP
+# define DISABLE_UMIP 0
+#else
+# define DISABLE_UMIP (1<<(X86_FEATURE_UMIP & 31))
+#endif
+
#ifdef CONFIG_X86_64
# define DISABLE_VME (1<<(X86_FEATURE_VME & 31))
# define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31))
@@ -44,6 +50,12 @@
# define DISABLE_LA57 (1<<(X86_FEATURE_LA57 & 31))
#endif
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+# define DISABLE_PTI 0
+#else
+# define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31))
+#endif
+
/*
* Make sure to add features to the correct mask
*/
@@ -54,7 +66,7 @@
#define DISABLED_MASK4 (DISABLE_PCID)
#define DISABLED_MASK5 0
#define DISABLED_MASK6 0
-#define DISABLED_MASK7 0
+#define DISABLED_MASK7 (DISABLE_PTI)
#define DISABLED_MASK8 0
#define DISABLED_MASK9 (DISABLE_MPX)
#define DISABLED_MASK10 0
@@ -63,8 +75,9 @@
#define DISABLED_MASK13 0
#define DISABLED_MASK14 0
#define DISABLED_MASK15 0
-#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57)
+#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP)
#define DISABLED_MASK17 0
-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
+#define DISABLED_MASK18 0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
#endif /* _ASM_X86_DISABLED_FEATURES_H */
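
The DISABLE_* constants above collapse into one 32-bit mask per capability word (DISABLED_MASK0..DISABLED_MASK18), so a feature whose Kconfig option is off is masked out at build time; for example, with CONFIG_PAGE_TABLE_ISOLATION unset, X86_FEATURE_PTI (word 7, bit 11) ends up set in DISABLED_MASK7. A hedged sketch of just the arithmetic (the kernel folds these masks into its own feature-test helpers; this is not the kernel's code):

    #include <stdio.h>

    #define FEATURE_PTI (7*32 + 11)          /* X86_FEATURE_PTI from cpufeatures.h */

    /* One mask per capability word, as if CONFIG_PAGE_TABLE_ISOLATION=n. */
    static const unsigned int disabled_masks[19] = {
            [7] = 1u << (FEATURE_PTI & 31),
    };

    static int feature_disabled(unsigned int f)
    {
            return (disabled_masks[f / 32] >> (f & 31)) & 1;
    }

    int main(void)
    {
            printf("PTI compiled out: %d\n", feature_disabled(FEATURE_PTI));
            return 0;
    }
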
diff --git a/tools/arch/x86/include/asm/required-features.h b/tools/arch/x86/include/asm/required-features.h
index d91ba04dd007..fb3a6de7440b 100644
--- a/tools/arch/x86/include/asm/required-features.h
+++ b/tools/arch/x86/include/asm/required-features.h
@@ -106,6 +106,7 @@
#define REQUIRED_MASK15 0
#define REQUIRED_MASK16 (NEED_LA57)
#define REQUIRED_MASK17 0
-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
+#define REQUIRED_MASK18 0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
#endif /* _ASM_X86_REQUIRED_FEATURES_H */
diff --git a/tools/arch/x86/include/uapi/asm/errno.h b/tools/arch/x86/include/uapi/asm/errno.h
new file mode 100644
index 000000000000..4c82b503d92f
--- /dev/null
+++ b/tools/arch/x86/include/uapi/asm/errno.h
@@ -0,0 +1 @@
+#include <asm-generic/errno.h>
diff --git a/tools/bpf/Makefile b/tools/bpf/Makefile
index 07a6697466ef..c8ec0ae16bf0 100644
--- a/tools/bpf/Makefile
+++ b/tools/bpf/Makefile
@@ -9,6 +9,35 @@ MAKE = make
CFLAGS += -Wall -O2
CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+endif
+
+FEATURE_USER = .bpf
+FEATURE_TESTS = libbfd disassembler-four-args
+FEATURE_DISPLAY = libbfd disassembler-four-args
+
+check_feat := 1
+NON_CHECK_FEAT_TARGETS := clean bpftool_clean
+ifdef MAKECMDGOALS
+ifeq ($(filter-out $(NON_CHECK_FEAT_TARGETS),$(MAKECMDGOALS)),)
+ check_feat := 0
+endif
+endif
+
+ifeq ($(check_feat),1)
+ifeq ($(FEATURES_DUMP),)
+include $(srctree)/tools/build/Makefile.feature
+else
+include $(FEATURES_DUMP)
+endif
+endif
+
+ifeq ($(feature-disassembler-four-args), 1)
+CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
+endif
+
%.yacc.c: %.y
$(YACC) -o $@ -d $<
diff --git a/tools/bpf/bpf_jit_disasm.c b/tools/bpf/bpf_jit_disasm.c
index 75bf526a0168..58c2bab4ef6e 100644
--- a/tools/bpf/bpf_jit_disasm.c
+++ b/tools/bpf/bpf_jit_disasm.c
@@ -72,7 +72,14 @@ static void get_asm_insns(uint8_t *image, size_t len, int opcodes)
disassemble_init_for_target(&info);
+#ifdef DISASM_FOUR_ARGS_SIGNATURE
+ disassemble = disassembler(info.arch,
+ bfd_big_endian(bfdf),
+ info.mach,
+ bfdf);
+#else
disassemble = disassembler(bfdf);
+#endif
assert(disassemble);
do {
@@ -165,7 +172,8 @@ static uint8_t *get_last_jit_image(char *haystack, size_t hlen,
{
char *ptr, *pptr, *tmp;
off_t off = 0;
- int ret, flen, proglen, pass, ulen = 0;
+ unsigned int proglen;
+ int ret, flen, pass, ulen = 0;
regmatch_t pmatch[1];
unsigned long base;
regex_t regex;
@@ -192,7 +200,7 @@ static uint8_t *get_last_jit_image(char *haystack, size_t hlen,
}
ptr = haystack + off - (pmatch[0].rm_eo - pmatch[0].rm_so);
- ret = sscanf(ptr, "flen=%d proglen=%d pass=%d image=%lx",
+ ret = sscanf(ptr, "flen=%d proglen=%u pass=%d image=%lx",
&flen, &proglen, &pass, &base);
if (ret != 4) {
regfree(&regex);
@@ -232,7 +240,7 @@ static uint8_t *get_last_jit_image(char *haystack, size_t hlen,
}
assert(ulen == proglen);
- printf("%d bytes emitted from JIT compiler (pass:%d, flen:%d)\n",
+ printf("%u bytes emitted from JIT compiler (pass:%d, flen:%d)\n",
proglen, pass, flen);
printf("%lx + <x>:\n", base);
diff --git a/tools/bpf/bpftool/Documentation/Makefile b/tools/bpf/bpftool/Documentation/Makefile
index bde77d7c4390..a9d47c1558bb 100644
--- a/tools/bpf/bpftool/Documentation/Makefile
+++ b/tools/bpf/bpftool/Documentation/Makefile
@@ -3,12 +3,16 @@ include ../../../scripts/utilities.mak
INSTALL ?= install
RM ?= rm -f
+RMDIR ?= rmdir --ignore-fail-on-non-empty
-# Make the path relative to DESTDIR, not prefix
-ifndef DESTDIR
-prefix?=$(HOME)
+ifeq ($(V),1)
+ Q =
+else
+ Q = @
endif
-mandir ?= $(prefix)/share/man
+
+prefix ?= /usr/local
+mandir ?= $(prefix)/man
man8dir = $(mandir)/man8
MAN8_RST = $(wildcard *.rst)
@@ -19,16 +23,27 @@ DOC_MAN8 = $(addprefix $(OUTPUT),$(_DOC_MAN8))
man: man8
man8: $(DOC_MAN8)
+RST2MAN_DEP := $(shell command -v rst2man 2>/dev/null)
+
$(OUTPUT)%.8: %.rst
- rst2man $< > $@
+ifndef RST2MAN_DEP
+ $(error "rst2man not found, but required to generate man pages")
+endif
+ $(QUIET_GEN)rst2man $< > $@
clean:
- $(call QUIET_CLEAN, Documentation) $(RM) $(DOC_MAN8)
+ $(call QUIET_CLEAN, Documentation)
+ $(Q)$(RM) $(DOC_MAN8)
install: man
- $(call QUIET_INSTALL, Documentation-man) \
- $(INSTALL) -d -m 755 $(DESTDIR)$(man8dir); \
- $(INSTALL) -m 644 $(DOC_MAN8) $(DESTDIR)$(man8dir);
+ $(call QUIET_INSTALL, Documentation-man)
+ $(Q)$(INSTALL) -d -m 755 $(DESTDIR)$(man8dir)
+ $(Q)$(INSTALL) -m 644 $(DOC_MAN8) $(DESTDIR)$(man8dir)
+
+uninstall:
+ $(call QUIET_UNINST, Documentation-man)
+ $(Q)$(RM) $(addprefix $(DESTDIR)$(man8dir)/,$(_DOC_MAN8))
+ $(Q)$(RMDIR) $(DESTDIR)$(man8dir)
-.PHONY: man man8 clean install
+.PHONY: man man8 clean install uninstall
.DEFAULT_GOAL := man
diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
new file mode 100644
index 000000000000..0e4e923235b6
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
@@ -0,0 +1,118 @@
+================
+bpftool-cgroup
+================
+-------------------------------------------------------------------------------
+tool for inspection and simple manipulation of eBPF progs
+-------------------------------------------------------------------------------
+
+:Manual section: 8
+
+SYNOPSIS
+========
+
+ **bpftool** [*OPTIONS*] **cgroup** *COMMAND*
+
+ *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-f** | **--bpffs** } }
+
+ *COMMANDS* :=
+ { **show** | **list** | **attach** | **detach** | **help** }
+
+CGROUP COMMANDS
+===============
+
+| **bpftool** **cgroup { show | list }** *CGROUP*
+| **bpftool** **cgroup attach** *CGROUP* *ATTACH_TYPE* *PROG* [*ATTACH_FLAGS*]
+| **bpftool** **cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG*
+| **bpftool** **cgroup help**
+|
+| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
+| *ATTACH_TYPE* := { **ingress** | **egress** | **sock_create** | **sock_ops** | **device** }
+| *ATTACH_FLAGS* := { **multi** | **override** }
+
+DESCRIPTION
+===========
+ **bpftool cgroup { show | list }** *CGROUP*
+ List all programs attached to the cgroup *CGROUP*.
+
+ Output will start with program ID followed by attach type,
+ attach flags and program name.
+
+ **bpftool cgroup attach** *CGROUP* *ATTACH_TYPE* *PROG* [*ATTACH_FLAGS*]
+ Attach program *PROG* to the cgroup *CGROUP* with attach type
+ *ATTACH_TYPE* and optional *ATTACH_FLAGS*.
+
+ *ATTACH_FLAGS* can be one of: **override**, meaning that if a
+ sub-cgroup installs some bpf program, the program in this cgroup
+ yields to the sub-cgroup program; or **multi**, meaning that if a
+ sub-cgroup installs some bpf program, that cgroup program gets run
+ in addition to the program in this cgroup.
+
+ Only one program is allowed to be attached to a cgroup with
+ no attach flags or the **override** flag. Attaching another
+ program will release the old program and attach the new one.
+
+ Multiple programs are allowed to be attached to a cgroup with
+ **multi**. They are executed in FIFO order (those that were
+ attached first, run first).
+
+ Non-default *ATTACH_FLAGS* are supported by kernel version 4.14
+ and later.
+
+ *ATTACH_TYPE* can be one of:
+ **ingress** ingress path of the inet socket (since 4.10);
+ **egress** egress path of the inet socket (since 4.10);
+ **sock_create** opening of an inet socket (since 4.10);
+ **sock_ops** various socket operations (since 4.12);
+ **device** device access (since 4.15).
+
+ **bpftool cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG*
+ Detach *PROG* from the cgroup *CGROUP* and attach type
+ *ATTACH_TYPE*.
+
+ **bpftool cgroup help**
+ Print short help message.
+
+OPTIONS
+=======
+ -h, --help
+ Print short generic help message (similar to **bpftool help**).
+
+ -v, --version
+ Print version number (similar to **bpftool version**).
+
+ -j, --json
+ Generate JSON output. For commands that cannot produce JSON, this
+ option has no effect.
+
+ -p, --pretty
+ Generate human-readable JSON output. Implies **-j**.
+
+ -f, --bpffs
+ Show file names of pinned programs.
+
+EXAMPLES
+========
+|
+| **# mount -t bpf none /sys/fs/bpf/**
+| **# mkdir /sys/fs/cgroup/test.slice**
+| **# bpftool prog load ./device_cgroup.o /sys/fs/bpf/prog**
+| **# bpftool cgroup attach /sys/fs/cgroup/test.slice/ device id 1 allow_multi**
+
+**# bpftool cgroup list /sys/fs/cgroup/test.slice/**
+
+::
+
+ ID AttachType AttachFlags Name
+ 1 device allow_multi bpf_prog1
+
+|
+| **# bpftool cgroup detach /sys/fs/cgroup/test.slice/ device id 1**
+| **# bpftool cgroup list /sys/fs/cgroup/test.slice/**
+
+::
+
+ ID AttachType AttachFlags Name
+
+SEE ALSO
+========
+ **bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-map**\ (8)
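For readers who want the programmatic equivalent of the attach example above, a hedged sketch of what the command roughly boils down to in terms of libbpf calls (the cgroup path and program ID are illustrative):

    #include <fcntl.h>
    #include <unistd.h>
    #include <linux/bpf.h>
    #include <bpf.h>   /* tools/lib/bpf */

    /* Roughly: bpftool cgroup attach /sys/fs/cgroup/test.slice/ device id 1 multi */
    static int attach_device_prog(const char *cgroup_path, __u32 prog_id)
    {
            int cgroup_fd, prog_fd, err;

            cgroup_fd = open(cgroup_path, O_RDONLY);
            if (cgroup_fd < 0)
                    return -1;
            prog_fd = bpf_prog_get_fd_by_id(prog_id);
            if (prog_fd < 0) {
                    close(cgroup_fd);
                    return -1;
            }
            err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_DEVICE,
                                  BPF_F_ALLOW_MULTI);
            close(prog_fd);
            close(cgroup_fd);
            return err;
    }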
diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst
index 9f51a268eb06..457e868bd32f 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-map.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst
@@ -15,13 +15,13 @@ SYNOPSIS
*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-f** | **--bpffs** } }
*COMMANDS* :=
- { **show** | **dump** | **update** | **lookup** | **getnext** | **delete**
+ { **show** | **list** | **dump** | **update** | **lookup** | **getnext** | **delete**
| **pin** | **help** }
MAP COMMANDS
=============
-| **bpftool** **map show** [*MAP*]
+| **bpftool** **map { show | list }** [*MAP*]
| **bpftool** **map dump** *MAP*
| **bpftool** **map update** *MAP* **key** *BYTES* **value** *VALUE* [*UPDATE_FLAGS*]
| **bpftool** **map lookup** *MAP* **key** *BYTES*
@@ -31,12 +31,13 @@ MAP COMMANDS
| **bpftool** **map help**
|
| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* }
-| *VALUE* := { *BYTES* | *MAP* | *PROGRAM* }
+| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
+| *VALUE* := { *BYTES* | *MAP* | *PROG* }
| *UPDATE_FLAGS* := { **any** | **exist** | **noexist** }
DESCRIPTION
===========
- **bpftool map show** [*MAP*]
+ **bpftool map { show | list }** [*MAP*]
Show information about loaded maps. If *MAP* is specified
show information only about given map, otherwise list all
maps currently loaded on the system.
@@ -128,4 +129,4 @@ EXAMPLES
SEE ALSO
========
- **bpftool**\ (8), **bpftool-prog**\ (8)
+ **bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-cgroup**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index 36e8d1c3c40d..e4ceee7f2dff 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -15,22 +15,23 @@ SYNOPSIS
*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-f** | **--bpffs** } }
*COMMANDS* :=
- { **show** | **dump xlated** | **dump jited** | **pin** | **help** }
+ { **show** | **list** | **dump xlated** | **dump jited** | **pin** | **load** | **help** }
MAP COMMANDS
=============
-| **bpftool** **prog show** [*PROG*]
+| **bpftool** **prog { show | list }** [*PROG*]
| **bpftool** **prog dump xlated** *PROG* [{**file** *FILE* | **opcodes**}]
| **bpftool** **prog dump jited** *PROG* [{**file** *FILE* | **opcodes**}]
| **bpftool** **prog pin** *PROG* *FILE*
+| **bpftool** **prog load** *OBJ* *FILE*
| **bpftool** **prog help**
|
| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
DESCRIPTION
===========
- **bpftool prog show** [*PROG*]
+ **bpftool prog { show | list }** [*PROG*]
Show information about loaded programs. If *PROG* is
specified show information only about given program, otherwise
list all programs currently loaded on the system.
@@ -57,6 +58,11 @@ DESCRIPTION
Note: *FILE* must be located in *bpffs* mount.
+ **bpftool prog load** *OBJ* *FILE*
+ Load bpf program from object file *OBJ* and pin it as *FILE*.
+
+ Note: *FILE* must be located in *bpffs* mount.
+
**bpftool prog help**
Print short help message.
@@ -126,8 +132,10 @@ EXAMPLES
|
| **# mount -t bpf none /sys/fs/bpf/**
| **# bpftool prog pin id 10 /sys/fs/bpf/prog**
+| **# bpftool prog load ./my_prog.o /sys/fs/bpf/prog2**
| **# ls -l /sys/fs/bpf/**
| -rw------- 1 root root 0 Jul 22 01:43 prog
+| -rw------- 1 root root 0 Jul 22 01:44 prog2
**# bpftool prog dump jited pinned /sys/fs/bpf/prog opcodes**
@@ -147,4 +155,4 @@ EXAMPLES
SEE ALSO
========
- **bpftool**\ (8), **bpftool-map**\ (8)
+ **bpftool**\ (8), **bpftool-map**\ (8), **bpftool-cgroup**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst
index 926c03d5a8da..20689a321ffe 100644
--- a/tools/bpf/bpftool/Documentation/bpftool.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool.rst
@@ -16,17 +16,19 @@ SYNOPSIS
**bpftool** **version**
- *OBJECT* := { **map** | **program** }
+ *OBJECT* := { **map** | **program** | **cgroup** }
*OPTIONS* := { { **-V** | **--version** } | { **-h** | **--help** }
| { **-j** | **--json** } [{ **-p** | **--pretty** }] }
*MAP-COMMANDS* :=
- { **show** | **dump** | **update** | **lookup** | **getnext** | **delete**
+ { **show** | **list** | **dump** | **update** | **lookup** | **getnext** | **delete**
| **pin** | **help** }
- *PROG-COMMANDS* := { **show** | **dump jited** | **dump xlated** | **pin**
- | **help** }
+ *PROG-COMMANDS* := { **show** | **list** | **dump jited** | **dump xlated** | **pin**
+ | **load** | **help** }
+
+ *CGROUP-COMMANDS* := { **show** | **list** | **attach** | **detach** | **help** }
DESCRIPTION
===========
@@ -53,4 +55,4 @@ OPTIONS
SEE ALSO
========
- **bpftool-map**\ (8), **bpftool-prog**\ (8)
+ **bpftool-map**\ (8), **bpftool-prog**\ (8), **bpftool-cgroup**\ (8)
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index 813826c50936..26901ec87361 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -1,25 +1,10 @@
include ../../scripts/Makefile.include
-
include ../../scripts/utilities.mak
ifeq ($(srctree),)
srctree := $(patsubst %/,%,$(dir $(CURDIR)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
-#$(info Determined 'srctree' to be $(srctree))
-endif
-
-ifneq ($(objtree),)
-#$(info Determined 'objtree' to be $(objtree))
-endif
-
-ifneq ($(OUTPUT),)
-#$(info Determined 'OUTPUT' to be $(OUTPUT))
-# Adding $(OUTPUT) as a directory to look for source files,
-# because use generated output files as sources dependency
-# for flex/bison parsers.
-VPATH += $(OUTPUT)
-export VPATH
endif
ifeq ($(V),1)
@@ -28,16 +13,18 @@ else
Q = @
endif
-BPF_DIR = $(srctree)/tools/lib/bpf/
+BPF_DIR = $(srctree)/tools/lib/bpf/
ifneq ($(OUTPUT),)
- BPF_PATH=$(OUTPUT)
+ BPF_PATH = $(OUTPUT)
else
- BPF_PATH=$(BPF_DIR)
+ BPF_PATH = $(BPF_DIR)
endif
LIBBPF = $(BPF_PATH)libbpf.a
+BPFTOOL_VERSION=$(shell make --no-print-directory -sC ../../.. kernelversion)
+
$(LIBBPF): FORCE
$(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) $(OUTPUT)libbpf.a FEATURES_DUMP=$(FEATURE_DUMP_EXPORT)
@@ -45,22 +32,50 @@ $(LIBBPF)-clean:
$(call QUIET_CLEAN, libbpf)
$(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) clean >/dev/null
-prefix = /usr
-bash_compdir ?= $(prefix)/share/bash-completion/completions
+prefix ?= /usr/local
+bash_compdir ?= /usr/share/bash-completion/completions
CC = gcc
CFLAGS += -O2
CFLAGS += -W -Wall -Wextra -Wno-unused-parameter -Wshadow
-CFLAGS += -D__EXPORTED_HEADERS__ -I$(srctree)/tools/include/uapi -I$(srctree)/tools/include -I$(srctree)/tools/lib/bpf -I$(srctree)/kernel/bpf/
+CFLAGS += -DPACKAGE='"bpftool"' -D__EXPORTED_HEADERS__ -I$(srctree)/tools/include/uapi -I$(srctree)/tools/include -I$(srctree)/tools/lib/bpf -I$(srctree)/kernel/bpf/
+CFLAGS += -DBPFTOOL_VERSION='"$(BPFTOOL_VERSION)"'
LIBS = -lelf -lbfd -lopcodes $(LIBBPF)
+INSTALL ?= install
+RM ?= rm -f
+
+FEATURE_USER = .bpftool
+FEATURE_TESTS = libbfd disassembler-four-args
+FEATURE_DISPLAY = libbfd disassembler-four-args
+
+check_feat := 1
+NON_CHECK_FEAT_TARGETS := clean uninstall doc doc-clean doc-install doc-uninstall
+ifdef MAKECMDGOALS
+ifeq ($(filter-out $(NON_CHECK_FEAT_TARGETS),$(MAKECMDGOALS)),)
+ check_feat := 0
+endif
+endif
+
+ifeq ($(check_feat),1)
+ifeq ($(FEATURES_DUMP),)
+include $(srctree)/tools/build/Makefile.feature
+else
+include $(FEATURES_DUMP)
+endif
+endif
+
+ifeq ($(feature-disassembler-four-args), 1)
+CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
+endif
+
include $(wildcard *.d)
all: $(OUTPUT)bpftool
-SRCS=$(wildcard *.c)
-OBJS=$(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
+SRCS = $(wildcard *.c)
+OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
$(OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
$(QUIET_CC)$(COMPILE.c) -MMD -o $@ $<
@@ -73,20 +88,34 @@ $(OUTPUT)%.o: %.c
clean: $(LIBBPF)-clean
$(call QUIET_CLEAN, bpftool)
- $(Q)rm -rf $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d
+ $(Q)$(RM) $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d
+
+install: $(OUTPUT)bpftool
+ $(call QUIET_INSTALL, bpftool)
+ $(Q)$(INSTALL) -m 0755 -d $(DESTDIR)$(prefix)/sbin
+ $(Q)$(INSTALL) $(OUTPUT)bpftool $(DESTDIR)$(prefix)/sbin/bpftool
+ $(Q)$(INSTALL) -m 0755 -d $(DESTDIR)$(bash_compdir)
+ $(Q)$(INSTALL) -m 0644 bash-completion/bpftool $(DESTDIR)$(bash_compdir)
-install:
- install $(OUTPUT)bpftool $(prefix)/sbin/bpftool
- install -m 0755 -d $(bash_compdir)
- install -m 0644 bash-completion/bpftool $(bash_compdir)
+uninstall:
+ $(call QUIET_UNINST, bpftool)
+ $(Q)$(RM) $(DESTDIR)$(prefix)/sbin/bpftool
+ $(Q)$(RM) $(DESTDIR)$(bash_compdir)/bpftool
doc:
- $(Q)$(MAKE) -C Documentation/
+ $(call descend,Documentation)
+
+doc-clean:
+ $(call descend,Documentation,clean)
doc-install:
- $(Q)$(MAKE) -C Documentation/ install
+ $(call descend,Documentation,install)
+
+doc-uninstall:
+ $(call descend,Documentation,uninstall)
FORCE:
-.PHONY: all clean FORCE
+.PHONY: all FORCE clean install uninstall
+.PHONY: doc doc-clean doc-install doc-uninstall
.DEFAULT_GOAL := all
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index 7febee05c8e7..08719c54a614 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -52,16 +52,24 @@ _bpftool_once_attr()
done
}
-# Takes a list of words in argument; adds them all to COMPREPLY if none of them
-# is already present on the command line. Returns no value.
-_bpftool_one_of_list()
+# Takes a list of words as argument; if any of those words is present on the
+# command line, return 0. Otherwise, return 1.
+_bpftool_search_list()
{
local w idx
for w in $*; do
for (( idx=3; idx < ${#words[@]}-1; idx++ )); do
- [[ $w == ${words[idx]} ]] && return 1
+ [[ $w == ${words[idx]} ]] && return 0
done
done
+ return 1
+}
+
+# Takes a list of words in argument; adds them all to COMPREPLY if none of them
+# is already present on the command line. Returns no value.
+_bpftool_one_of_list()
+{
+ _bpftool_search_list $* && return 1
COMPREPLY+=( $( compgen -W "$*" -- "$cur" ) )
}
@@ -197,7 +205,7 @@ _bpftool()
local PROG_TYPE='id pinned tag'
case $command in
- show)
+ show|list)
[[ $prev != "$command" ]] && return 0
COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) )
return 0
@@ -230,17 +238,21 @@ _bpftool()
fi
return 0
;;
+ load)
+ _filedir
+ return 0
+ ;;
*)
[[ $prev == $object ]] && \
- COMPREPLY=( $( compgen -W 'dump help pin show' -- \
- "$cur" ) )
+ COMPREPLY=( $( compgen -W 'dump help pin load \
+ show list' -- "$cur" ) )
;;
esac
;;
map)
local MAP_TYPE='id pinned'
case $command in
- show|dump)
+ show|list|dump)
case $prev in
$command)
COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
@@ -343,7 +355,55 @@ _bpftool()
*)
[[ $prev == $object ]] && \
COMPREPLY=( $( compgen -W 'delete dump getnext help \
- lookup pin show update' -- "$cur" ) )
+ lookup pin show list update' -- "$cur" ) )
+ ;;
+ esac
+ ;;
+ cgroup)
+ case $command in
+ show|list)
+ _filedir
+ return 0
+ ;;
+ attach|detach)
+ local ATTACH_TYPES='ingress egress sock_create sock_ops \
+ device'
+ local ATTACH_FLAGS='multi override'
+ local PROG_TYPE='id pinned tag'
+ case $prev in
+ $command)
+ _filedir
+ return 0
+ ;;
+ ingress|egress|sock_create|sock_ops|device)
+ COMPREPLY=( $( compgen -W "$PROG_TYPE" -- \
+ "$cur" ) )
+ return 0
+ ;;
+ id)
+ _bpftool_get_prog_ids
+ return 0
+ ;;
+ *)
+ if ! _bpftool_search_list "$ATTACH_TYPES"; then
+ COMPREPLY=( $( compgen -W "$ATTACH_TYPES" -- \
+ "$cur" ) )
+ elif [[ "$command" == "attach" ]]; then
+ # We have an attach type on the command line,
+ # but it is not the previous word, or
+ # "id|pinned|tag" (we already checked for
+ # that). This should only leave the case when
+ # we need attach flags for "attach" command.
+ _bpftool_one_of_list "$ATTACH_FLAGS"
+ fi
+ return 0
+ ;;
+ esac
+ ;;
+ *)
+ [[ $prev == $object ]] && \
+ COMPREPLY=( $( compgen -W 'help attach detach \
+ show list' -- "$cur" ) )
;;
esac
;;
diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c
new file mode 100644
index 000000000000..cae32a61cb18
--- /dev/null
+++ b/tools/bpf/bpftool/cgroup.c
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (C) 2017 Facebook
+// Author: Roman Gushchin <guro@fb.com>
+
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <bpf.h>
+
+#include "main.h"
+
+#define HELP_SPEC_ATTACH_FLAGS \
+ "ATTACH_FLAGS := { multi | override }"
+
+#define HELP_SPEC_ATTACH_TYPES \
+ "ATTACH_TYPE := { ingress | egress | sock_create | sock_ops | device }"
+
+static const char * const attach_type_strings[] = {
+ [BPF_CGROUP_INET_INGRESS] = "ingress",
+ [BPF_CGROUP_INET_EGRESS] = "egress",
+ [BPF_CGROUP_INET_SOCK_CREATE] = "sock_create",
+ [BPF_CGROUP_SOCK_OPS] = "sock_ops",
+ [BPF_CGROUP_DEVICE] = "device",
+ [__MAX_BPF_ATTACH_TYPE] = NULL,
+};
+
+static enum bpf_attach_type parse_attach_type(const char *str)
+{
+ enum bpf_attach_type type;
+
+ for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
+ if (attach_type_strings[type] &&
+ is_prefix(str, attach_type_strings[type]))
+ return type;
+ }
+
+ return __MAX_BPF_ATTACH_TYPE;
+}
+
+static int show_bpf_prog(int id, const char *attach_type_str,
+ const char *attach_flags_str)
+{
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ int prog_fd;
+
+ prog_fd = bpf_prog_get_fd_by_id(id);
+ if (prog_fd < 0)
+ return -1;
+
+ if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len)) {
+ close(prog_fd);
+ return -1;
+ }
+
+ if (json_output) {
+ jsonw_start_object(json_wtr);
+ jsonw_uint_field(json_wtr, "id", info.id);
+ jsonw_string_field(json_wtr, "attach_type",
+ attach_type_str);
+ jsonw_string_field(json_wtr, "attach_flags",
+ attach_flags_str);
+ jsonw_string_field(json_wtr, "name", info.name);
+ jsonw_end_object(json_wtr);
+ } else {
+ printf("%-8u %-15s %-15s %-15s\n", info.id,
+ attach_type_str,
+ attach_flags_str,
+ info.name);
+ }
+
+ close(prog_fd);
+ return 0;
+}
+
+static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type)
+{
+ __u32 prog_ids[1024] = {0};
+ char *attach_flags_str;
+ __u32 prog_cnt, iter;
+ __u32 attach_flags;
+ char buf[32];
+ int ret;
+
+ prog_cnt = ARRAY_SIZE(prog_ids);
+ ret = bpf_prog_query(cgroup_fd, type, 0, &attach_flags, prog_ids,
+ &prog_cnt);
+ if (ret)
+ return ret;
+
+ if (prog_cnt == 0)
+ return 0;
+
+ switch (attach_flags) {
+ case BPF_F_ALLOW_MULTI:
+ attach_flags_str = "multi";
+ break;
+ case BPF_F_ALLOW_OVERRIDE:
+ attach_flags_str = "override";
+ break;
+ case 0:
+ attach_flags_str = "";
+ break;
+ default:
+ snprintf(buf, sizeof(buf), "unknown(%x)", attach_flags);
+ attach_flags_str = buf;
+ }
+
+ for (iter = 0; iter < prog_cnt; iter++)
+ show_bpf_prog(prog_ids[iter], attach_type_strings[type],
+ attach_flags_str);
+
+ return 0;
+}
+
+static int do_show(int argc, char **argv)
+{
+ enum bpf_attach_type type;
+ int cgroup_fd;
+ int ret = -1;
+
+ if (argc < 1) {
+ p_err("too few parameters for cgroup show");
+ goto exit;
+ } else if (argc > 1) {
+ p_err("too many parameters for cgroup show");
+ goto exit;
+ }
+
+ cgroup_fd = open(argv[0], O_RDONLY);
+ if (cgroup_fd < 0) {
+ p_err("can't open cgroup %s", argv[1]);
+ goto exit;
+ }
+
+ if (json_output)
+ jsonw_start_array(json_wtr);
+ else
+ printf("%-8s %-15s %-15s %-15s\n", "ID", "AttachType",
+ "AttachFlags", "Name");
+
+ for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
+ /*
+ * Not all attach types may be supported, so it is expected that
+ * some of these queries will fail. If we were able to query at
+ * least one attach type successfully, return 0.
+ */
+ if (show_attached_bpf_progs(cgroup_fd, type) == 0)
+ ret = 0;
+ }
+
+ if (json_output)
+ jsonw_end_array(json_wtr);
+
+ close(cgroup_fd);
+exit:
+ return ret;
+}
+
+static int do_attach(int argc, char **argv)
+{
+ enum bpf_attach_type attach_type;
+ int cgroup_fd, prog_fd;
+ int attach_flags = 0;
+ int ret = -1;
+ int i;
+
+ if (argc < 4) {
+ p_err("too few parameters for cgroup attach");
+ goto exit;
+ }
+
+ cgroup_fd = open(argv[0], O_RDONLY);
+ if (cgroup_fd < 0) {
+ p_err("can't open cgroup %s", argv[1]);
+ goto exit;
+ }
+
+ attach_type = parse_attach_type(argv[1]);
+ if (attach_type == __MAX_BPF_ATTACH_TYPE) {
+ p_err("invalid attach type");
+ goto exit_cgroup;
+ }
+
+ argc -= 2;
+ argv = &argv[2];
+ prog_fd = prog_parse_fd(&argc, &argv);
+ if (prog_fd < 0)
+ goto exit_cgroup;
+
+ for (i = 0; i < argc; i++) {
+ if (is_prefix(argv[i], "multi")) {
+ attach_flags |= BPF_F_ALLOW_MULTI;
+ } else if (is_prefix(argv[i], "override")) {
+ attach_flags |= BPF_F_ALLOW_OVERRIDE;
+ } else {
+ p_err("unknown option: %s", argv[i]);
+ goto exit_cgroup;
+ }
+ }
+
+ if (bpf_prog_attach(prog_fd, cgroup_fd, attach_type, attach_flags)) {
+ p_err("failed to attach program");
+ goto exit_prog;
+ }
+
+ if (json_output)
+ jsonw_null(json_wtr);
+
+ ret = 0;
+
+exit_prog:
+ close(prog_fd);
+exit_cgroup:
+ close(cgroup_fd);
+exit:
+ return ret;
+}
+
+static int do_detach(int argc, char **argv)
+{
+ enum bpf_attach_type attach_type;
+ int prog_fd, cgroup_fd;
+ int ret = -1;
+
+ if (argc < 4) {
+ p_err("too few parameters for cgroup detach");
+ goto exit;
+ }
+
+ cgroup_fd = open(argv[0], O_RDONLY);
+ if (cgroup_fd < 0) {
+ p_err("can't open cgroup %s", argv[1]);
+ goto exit;
+ }
+
+ attach_type = parse_attach_type(argv[1]);
+ if (attach_type == __MAX_BPF_ATTACH_TYPE) {
+ p_err("invalid attach type");
+ goto exit_cgroup;
+ }
+
+ argc -= 2;
+ argv = &argv[2];
+ prog_fd = prog_parse_fd(&argc, &argv);
+ if (prog_fd < 0)
+ goto exit_cgroup;
+
+ if (bpf_prog_detach2(prog_fd, cgroup_fd, attach_type)) {
+ p_err("failed to detach program");
+ goto exit_prog;
+ }
+
+ if (json_output)
+ jsonw_null(json_wtr);
+
+ ret = 0;
+
+exit_prog:
+ close(prog_fd);
+exit_cgroup:
+ close(cgroup_fd);
+exit:
+ return ret;
+}
+
+static int do_help(int argc, char **argv)
+{
+ if (json_output) {
+ jsonw_null(json_wtr);
+ return 0;
+ }
+
+ fprintf(stderr,
+ "Usage: %s %s { show | list } CGROUP\n"
+ " %s %s attach CGROUP ATTACH_TYPE PROG [ATTACH_FLAGS]\n"
+ " %s %s detach CGROUP ATTACH_TYPE PROG\n"
+ " %s %s help\n"
+ "\n"
+ " " HELP_SPEC_ATTACH_TYPES "\n"
+ " " HELP_SPEC_ATTACH_FLAGS "\n"
+ " " HELP_SPEC_PROGRAM "\n"
+ " " HELP_SPEC_OPTIONS "\n"
+ "",
+ bin_name, argv[-2], bin_name, argv[-2],
+ bin_name, argv[-2], bin_name, argv[-2]);
+
+ return 0;
+}
+
+static const struct cmd cmds[] = {
+ { "show", do_show },
+ { "list", do_show },
+ { "attach", do_attach },
+ { "detach", do_detach },
+ { "help", do_help },
+ { 0 }
+};
+
+int do_cgroup(int argc, char **argv)
+{
+ return cmd_select(cmds, argc, argv, do_help);
+}
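A condensed usage sketch of the query side, in case the flow above is hard to follow at a glance: bpf_prog_query() fills an array of program IDs plus the attach flags for one attach type, which is exactly what show_attached_bpf_progs() then walks (the cgroup path below is illustrative):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <linux/bpf.h>
    #include <bpf.h>

    static void count_ingress_progs(const char *cgroup_path)
    {
            __u32 prog_ids[64], prog_cnt = 64, attach_flags = 0;
            int fd;

            fd = open(cgroup_path, O_RDONLY);
            if (fd < 0)
                    return;
            if (!bpf_prog_query(fd, BPF_CGROUP_INET_INGRESS, 0, &attach_flags,
                                prog_ids, &prog_cnt))
                    printf("%u program(s) attached, flags 0x%x\n",
                           prog_cnt, attach_flags);
            close(fd);
    }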
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 2bd3b280e6dd..0b482c0070e0 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -34,6 +34,7 @@
/* Author: Jakub Kicinski <kubakici@wp.pl> */
#include <errno.h>
+#include <fcntl.h>
#include <fts.h>
#include <libgen.h>
#include <mntent.h>
@@ -44,7 +45,9 @@
#include <unistd.h>
#include <linux/limits.h>
#include <linux/magic.h>
+#include <net/if.h>
#include <sys/mount.h>
+#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
@@ -163,13 +166,49 @@ int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type)
return fd;
}
-int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
+int do_pin_fd(int fd, const char *name)
{
char err_str[ERR_MAX_LEN];
- unsigned int id;
- char *endptr;
char *file;
char *dir;
+ int err = 0;
+
+ err = bpf_obj_pin(fd, name);
+ if (!err)
+ goto out;
+
+ file = malloc(strlen(name) + 1);
+ strcpy(file, name);
+ dir = dirname(file);
+
+ if (errno != EPERM || is_bpffs(dir)) {
+ p_err("can't pin the object (%s): %s", name, strerror(errno));
+ goto out_free;
+ }
+
+ /* Attempt to mount bpffs, then retry pinning. */
+ err = mnt_bpffs(dir, err_str, ERR_MAX_LEN);
+ if (!err) {
+ err = bpf_obj_pin(fd, name);
+ if (err)
+ p_err("can't pin the object (%s): %s", name,
+ strerror(errno));
+ } else {
+ err_str[ERR_MAX_LEN - 1] = '\0';
+ p_err("can't mount BPF file system to pin the object (%s): %s",
+ name, err_str);
+ }
+
+out_free:
+ free(file);
+out:
+ return err;
+}
+
+int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
+{
+ unsigned int id;
+ char *endptr;
int err;
int fd;
@@ -195,35 +234,8 @@ int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
return -1;
}
- err = bpf_obj_pin(fd, *argv);
- if (!err)
- goto out_close;
-
- file = malloc(strlen(*argv) + 1);
- strcpy(file, *argv);
- dir = dirname(file);
-
- if (errno != EPERM || is_bpffs(dir)) {
- p_err("can't pin the object (%s): %s", *argv, strerror(errno));
- goto out_free;
- }
-
- /* Attempt to mount bpffs, then retry pinning. */
- err = mnt_bpffs(dir, err_str, ERR_MAX_LEN);
- if (!err) {
- err = bpf_obj_pin(fd, *argv);
- if (err)
- p_err("can't pin the object (%s): %s", *argv,
- strerror(errno));
- } else {
- err_str[ERR_MAX_LEN - 1] = '\0';
- p_err("can't mount BPF file system to pin the object (%s): %s",
- *argv, err_str);
- }
+ err = do_pin_fd(fd, *argv);
-out_free:
- free(file);
-out_close:
close(fd);
return err;
}
@@ -403,3 +415,124 @@ void delete_pinned_obj_table(struct pinned_obj_table *tab)
free(obj);
}
}
+
+static char *
+ifindex_to_name_ns(__u32 ifindex, __u32 ns_dev, __u32 ns_ino, char *buf)
+{
+ struct stat st;
+ int err;
+
+ err = stat("/proc/self/ns/net", &st);
+ if (err) {
+ p_err("Can't stat /proc/self: %s", strerror(errno));
+ return NULL;
+ }
+
+ if (st.st_dev != ns_dev || st.st_ino != ns_ino)
+ return NULL;
+
+ return if_indextoname(ifindex, buf);
+}
+
+static int read_sysfs_hex_int(char *path)
+{
+ char vendor_id_buf[8];
+ int len;
+ int fd;
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0) {
+ p_err("Can't open %s: %s", path, strerror(errno));
+ return -1;
+ }
+
+ len = read(fd, vendor_id_buf, sizeof(vendor_id_buf));
+ close(fd);
+ if (len < 0) {
+ p_err("Can't read %s: %s", path, strerror(errno));
+ return -1;
+ }
+ if (len >= (int)sizeof(vendor_id_buf)) {
+ p_err("Value in %s too long", path);
+ return -1;
+ }
+
+ vendor_id_buf[len] = 0;
+
+ return strtol(vendor_id_buf, NULL, 0);
+}
+
+static int read_sysfs_netdev_hex_int(char *devname, const char *entry_name)
+{
+ char full_path[64];
+
+ snprintf(full_path, sizeof(full_path), "/sys/class/net/%s/device/%s",
+ devname, entry_name);
+
+ return read_sysfs_hex_int(full_path);
+}
+
+const char *ifindex_to_bfd_name_ns(__u32 ifindex, __u64 ns_dev, __u64 ns_ino)
+{
+ char devname[IF_NAMESIZE];
+ int vendor_id;
+ int device_id;
+
+ if (!ifindex_to_name_ns(ifindex, ns_dev, ns_ino, devname)) {
+ p_err("Can't get net device name for ifindex %d: %s", ifindex,
+ strerror(errno));
+ return NULL;
+ }
+
+ vendor_id = read_sysfs_netdev_hex_int(devname, "vendor");
+ if (vendor_id < 0) {
+ p_err("Can't get device vendor id for %s", devname);
+ return NULL;
+ }
+
+ switch (vendor_id) {
+ case 0x19ee:
+ device_id = read_sysfs_netdev_hex_int(devname, "device");
+ if (device_id != 0x4000 &&
+ device_id != 0x6000 &&
+ device_id != 0x6003)
+ p_info("Unknown NFP device ID, assuming it is NFP-6xxx arch");
+ return "NFP-6xxx";
+ default:
+ p_err("Can't get bfd arch name for device vendor id 0x%04x",
+ vendor_id);
+ return NULL;
+ }
+}
+
+void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode)
+{
+ char name[IF_NAMESIZE];
+
+ if (!ifindex)
+ return;
+
+ printf(" dev ");
+ if (ifindex_to_name_ns(ifindex, ns_dev, ns_inode, name))
+ printf("%s", name);
+ else
+ printf("ifindex %u ns_dev %llu ns_ino %llu",
+ ifindex, ns_dev, ns_inode);
+}
+
+void print_dev_json(__u32 ifindex, __u64 ns_dev, __u64 ns_inode)
+{
+ char name[IF_NAMESIZE];
+
+ if (!ifindex)
+ return;
+
+ jsonw_name(json_wtr, "dev");
+ jsonw_start_object(json_wtr);
+ jsonw_uint_field(json_wtr, "ifindex", ifindex);
+ jsonw_uint_field(json_wtr, "ns_dev", ns_dev);
+ jsonw_uint_field(json_wtr, "ns_inode", ns_inode);
+ if (ifindex_to_name_ns(ifindex, ns_dev, ns_inode, name))
+ jsonw_string_field(json_wtr, "ifname", name);
+ jsonw_end_object(json_wtr);
+}
diff --git a/tools/bpf/bpftool/jit_disasm.c b/tools/bpf/bpftool/jit_disasm.c
index 1551d3918d4c..87439320ef70 100644
--- a/tools/bpf/bpftool/jit_disasm.c
+++ b/tools/bpf/bpftool/jit_disasm.c
@@ -76,7 +76,8 @@ static int fprintf_json(void *out, const char *fmt, ...)
return 0;
}
-void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes)
+void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
+ const char *arch)
{
disassembler_ftype disassemble;
struct disassemble_info info;
@@ -100,6 +101,19 @@ void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes)
else
init_disassemble_info(&info, stdout,
(fprintf_ftype) fprintf);
+
+ /* Update architecture info for offload. */
+ if (arch) {
+ const bfd_arch_info_type *inf = bfd_scan_arch(arch);
+
+ if (inf) {
+ bfdf->arch_info = inf;
+ } else {
+ p_err("No libfd support for %s", arch);
+ return;
+ }
+ }
+
info.arch = bfd_get_arch(bfdf);
info.mach = bfd_get_mach(bfdf);
info.buffer = image;
@@ -107,7 +121,14 @@ void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes)
disassemble_init_for_target(&info);
+#ifdef DISASM_FOUR_ARGS_SIGNATURE
+ disassemble = disassembler(info.arch,
+ bfd_big_endian(bfdf),
+ info.mach,
+ bfdf);
+#else
disassemble = disassembler(bfdf);
+#endif
assert(disassemble);
if (json_output)
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index d6e4762170a4..3a0396d87c42 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -38,7 +38,6 @@
#include <errno.h>
#include <getopt.h>
#include <linux/bpf.h>
-#include <linux/version.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -58,11 +57,19 @@ bool show_pinned;
struct pinned_obj_table prog_table;
struct pinned_obj_table map_table;
+static void __noreturn clean_and_exit(int i)
+{
+ if (json_output)
+ jsonw_destroy(&json_wtr);
+
+ exit(i);
+}
+
void usage(void)
{
last_do_help(last_argc - 1, last_argv + 1);
- exit(-1);
+ clean_and_exit(-1);
}
static int do_help(int argc, char **argv)
@@ -77,7 +84,7 @@ static int do_help(int argc, char **argv)
" %s batch file FILE\n"
" %s version\n"
"\n"
- " OBJECT := { prog | map }\n"
+ " OBJECT := { prog | map | cgroup }\n"
" " HELP_SPEC_OPTIONS "\n"
"",
bin_name, bin_name, bin_name);
@@ -87,21 +94,13 @@ static int do_help(int argc, char **argv)
static int do_version(int argc, char **argv)
{
- unsigned int version[3];
-
- version[0] = LINUX_VERSION_CODE >> 16;
- version[1] = LINUX_VERSION_CODE >> 8 & 0xf;
- version[2] = LINUX_VERSION_CODE & 0xf;
-
if (json_output) {
jsonw_start_object(json_wtr);
jsonw_name(json_wtr, "version");
- jsonw_printf(json_wtr, "\"%u.%u.%u\"",
- version[0], version[1], version[2]);
+ jsonw_printf(json_wtr, "\"%s\"", BPFTOOL_VERSION);
jsonw_end_object(json_wtr);
} else {
- printf("%s v%u.%u.%u\n", bin_name,
- version[0], version[1], version[2]);
+ printf("%s v%s\n", bin_name, BPFTOOL_VERSION);
}
return 0;
}
@@ -165,6 +164,7 @@ static const struct cmd cmds[] = {
{ "batch", do_batch },
{ "prog", do_prog },
{ "map", do_map },
+ { "cgroup", do_cgroup },
{ "version", do_version },
{ 0 }
};
@@ -280,6 +280,7 @@ int main(int argc, char **argv)
hash_init(prog_table.table);
hash_init(map_table.table);
+ opterr = 0;
while ((opt = getopt_long(argc, argv, "Vhpjf",
options, NULL)) >= 0) {
switch (opt) {
@@ -291,13 +292,25 @@ int main(int argc, char **argv)
pretty_output = true;
/* fall through */
case 'j':
- json_output = true;
+ if (!json_output) {
+ json_wtr = jsonw_new(stdout);
+ if (!json_wtr) {
+ p_err("failed to create JSON writer");
+ return -1;
+ }
+ json_output = true;
+ }
+ jsonw_pretty(json_wtr, pretty_output);
break;
case 'f':
show_pinned = true;
break;
default:
- usage();
+ p_err("unrecognized option '%s'", argv[optind - 1]);
+ if (json_output)
+ clean_and_exit(-1);
+ else
+ usage();
}
}
@@ -306,15 +319,6 @@ int main(int argc, char **argv)
if (argc < 0)
usage();
- if (json_output) {
- json_wtr = jsonw_new(stdout);
- if (!json_wtr) {
- p_err("failed to create JSON writer");
- return -1;
- }
- jsonw_pretty(json_wtr, pretty_output);
- }
-
bfd_init();
ret = cmd_select(cmds, argc, argv, do_help);
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index 9c191e222d6f..b8e9584d6246 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -41,6 +41,7 @@
#include <stdbool.h>
#include <stdio.h>
#include <linux/bpf.h>
+#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/hashtable.h>
@@ -50,7 +51,7 @@
#define NEXT_ARG() ({ argc--; argv++; if (argc < 0) usage(); })
#define NEXT_ARGP() ({ (*argc)--; (*argv)++; if (*argc < 0) usage(); })
-#define BAD_ARG() ({ p_err("what is '%s'?\n", *argv); -1; })
+#define BAD_ARG() ({ p_err("what is '%s'?", *argv); -1; })
#define ERR_MAX_LEN 1024
@@ -80,7 +81,7 @@ void p_info(const char *fmt, ...);
bool is_prefix(const char *pfx, const char *str);
void fprint_hex(FILE *f, void *arg, unsigned int n, const char *sep);
-void usage(void) __attribute__((noreturn));
+void usage(void) __noreturn;
struct pinned_obj_table {
DECLARE_HASHTABLE(table, 16);
@@ -95,6 +96,8 @@ struct pinned_obj {
int build_pinned_obj_table(struct pinned_obj_table *table,
enum bpf_obj_type type);
void delete_pinned_obj_table(struct pinned_obj_table *tab);
+void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode);
+void print_dev_json(__u32 ifindex, __u64 ns_dev, __u64 ns_inode);
struct cmd {
const char *cmd;
@@ -110,13 +113,18 @@ char *get_fdinfo(int fd, const char *key);
int open_obj_pinned(char *path);
int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type);
int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32));
+int do_pin_fd(int fd, const char *name);
int do_prog(int argc, char **arg);
int do_map(int argc, char **arg);
+int do_cgroup(int argc, char **arg);
int prog_parse_fd(int *argc, char ***argv);
-void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes);
+void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
+ const char *arch);
void print_hex_data_json(uint8_t *data, size_t len);
+const char *ifindex_to_bfd_name_ns(__u32 ifindex, __u64 ns_dev, __u64 ns_ino);
+
#endif
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index e2450c8e88e6..f95fa67bb498 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -66,6 +66,7 @@ static const char * const map_type_name[] = {
[BPF_MAP_TYPE_HASH_OF_MAPS] = "hash_of_maps",
[BPF_MAP_TYPE_DEVMAP] = "devmap",
[BPF_MAP_TYPE_SOCKMAP] = "sockmap",
+ [BPF_MAP_TYPE_CPUMAP] = "cpumap",
};
static unsigned int get_possible_cpus(void)
@@ -428,6 +429,9 @@ static int show_map_close_json(int fd, struct bpf_map_info *info)
jsonw_name(json_wtr, "flags");
jsonw_printf(json_wtr, "%#x", info->map_flags);
+
+ print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);
+
jsonw_uint_field(json_wtr, "bytes_key", info->key_size);
jsonw_uint_field(json_wtr, "bytes_value", info->value_size);
jsonw_uint_field(json_wtr, "max_entries", info->max_entries);
@@ -469,7 +473,9 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
if (*info->name)
printf("name %s ", info->name);
- printf("flags 0x%x\n", info->map_flags);
+ printf("flags 0x%x", info->map_flags);
+ print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino);
+ printf("\n");
printf("\tkey %uB value %uB max_entries %u",
info->key_size, info->value_size, info->max_entries);
@@ -523,21 +529,23 @@ static int do_show(int argc, char **argv)
break;
p_err("can't get next map: %s%s", strerror(errno),
errno == EINVAL ? " -- kernel too old?" : "");
- return -1;
+ break;
}
fd = bpf_map_get_fd_by_id(id);
if (fd < 0) {
+ if (errno == ENOENT)
+ continue;
p_err("can't get map by id (%u): %s",
id, strerror(errno));
- return -1;
+ break;
}
err = bpf_obj_get_info_by_fd(fd, &info, &len);
if (err) {
p_err("can't get map info: %s", strerror(errno));
close(fd);
- return -1;
+ break;
}
if (json_output)
@@ -859,7 +867,7 @@ static int do_help(int argc, char **argv)
}
fprintf(stderr,
- "Usage: %s %s show [MAP]\n"
+ "Usage: %s %s { show | list } [MAP]\n"
" %s %s dump MAP\n"
" %s %s update MAP key BYTES value VALUE [UPDATE_FLAGS]\n"
" %s %s lookup MAP key BYTES\n"
@@ -883,6 +891,7 @@ static int do_help(int argc, char **argv)
static const struct cmd cmds[] = {
{ "show", do_show },
+ { "list", do_show },
{ "help", do_help },
{ "dump", do_dump },
{ "update", do_update },
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index ad619b96c276..e8e2baaf93c2 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -45,6 +45,7 @@
#include <sys/stat.h>
#include <bpf.h>
+#include <libbpf.h>
#include "main.h"
#include "disasm.h"
@@ -65,6 +66,7 @@ static const char * const prog_type_name[] = {
[BPF_PROG_TYPE_LWT_XMIT] = "lwt_xmit",
[BPF_PROG_TYPE_SOCK_OPS] = "sock_ops",
[BPF_PROG_TYPE_SK_SKB] = "sk_skb",
+ [BPF_PROG_TYPE_CGROUP_DEVICE] = "cgroup_device",
};
static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
@@ -229,6 +231,8 @@ static void print_prog_json(struct bpf_prog_info *info, int fd)
info->tag[0], info->tag[1], info->tag[2], info->tag[3],
info->tag[4], info->tag[5], info->tag[6], info->tag[7]);
+ print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);
+
if (info->load_time) {
char buf[32];
@@ -286,6 +290,7 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
printf("tag ");
fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
+ print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino);
printf("\n");
if (info->load_time) {
@@ -382,6 +387,8 @@ static int do_show(int argc, char **argv)
fd = bpf_prog_get_fd_by_id(id);
if (fd < 0) {
+ if (errno == ENOENT)
+ continue;
p_err("can't get prog by id (%u): %s",
id, strerror(errno));
err = -1;
@@ -400,6 +407,88 @@ static int do_show(int argc, char **argv)
return err;
}
+#define SYM_MAX_NAME 256
+
+struct kernel_sym {
+ unsigned long address;
+ char name[SYM_MAX_NAME];
+};
+
+struct dump_data {
+ unsigned long address_call_base;
+ struct kernel_sym *sym_mapping;
+ __u32 sym_count;
+ char scratch_buff[SYM_MAX_NAME];
+};
+
+static int kernel_syms_cmp(const void *sym_a, const void *sym_b)
+{
+ return ((struct kernel_sym *)sym_a)->address -
+ ((struct kernel_sym *)sym_b)->address;
+}
+
+static void kernel_syms_load(struct dump_data *dd)
+{
+ struct kernel_sym *sym;
+ char buff[256];
+ void *tmp, *address;
+ FILE *fp;
+
+ fp = fopen("/proc/kallsyms", "r");
+ if (!fp)
+ return;
+
+ while (!feof(fp)) {
+ if (!fgets(buff, sizeof(buff), fp))
+ break;
+ tmp = realloc(dd->sym_mapping,
+ (dd->sym_count + 1) *
+ sizeof(*dd->sym_mapping));
+ if (!tmp) {
+out:
+ free(dd->sym_mapping);
+ dd->sym_mapping = NULL;
+ fclose(fp);
+ return;
+ }
+ dd->sym_mapping = tmp;
+ sym = &dd->sym_mapping[dd->sym_count];
+ if (sscanf(buff, "%p %*c %s", &address, sym->name) != 2)
+ continue;
+ sym->address = (unsigned long)address;
+ if (!strcmp(sym->name, "__bpf_call_base")) {
+ dd->address_call_base = sym->address;
+ /* sysctl kernel.kptr_restrict was set */
+ if (!sym->address)
+ goto out;
+ }
+ if (sym->address)
+ dd->sym_count++;
+ }
+
+ fclose(fp);
+
+ qsort(dd->sym_mapping, dd->sym_count,
+ sizeof(*dd->sym_mapping), kernel_syms_cmp);
+}
+
+static void kernel_syms_destroy(struct dump_data *dd)
+{
+ free(dd->sym_mapping);
+}
+
+static struct kernel_sym *kernel_syms_search(struct dump_data *dd,
+ unsigned long key)
+{
+ struct kernel_sym sym = {
+ .address = key,
+ };
+
+ return dd->sym_mapping ?
+ bsearch(&sym, dd->sym_mapping, dd->sym_count,
+ sizeof(*dd->sym_mapping), kernel_syms_cmp) : NULL;
+}
+
static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...)
{
va_list args;
@@ -409,8 +498,71 @@ static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...)
va_end(args);
}
-static void dump_xlated_plain(void *buf, unsigned int len, bool opcodes)
+static const char *print_call_pcrel(struct dump_data *dd,
+ struct kernel_sym *sym,
+ unsigned long address,
+ const struct bpf_insn *insn)
+{
+ if (sym)
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "%+d#%s", insn->off, sym->name);
+ else
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "%+d#0x%lx", insn->off, address);
+ return dd->scratch_buff;
+}
+
+static const char *print_call_helper(struct dump_data *dd,
+ struct kernel_sym *sym,
+ unsigned long address)
+{
+ if (sym)
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "%s", sym->name);
+ else
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "0x%lx", address);
+ return dd->scratch_buff;
+}
+
+static const char *print_call(void *private_data,
+ const struct bpf_insn *insn)
+{
+ struct dump_data *dd = private_data;
+ unsigned long address = dd->address_call_base + insn->imm;
+ struct kernel_sym *sym;
+
+ sym = kernel_syms_search(dd, address);
+ if (insn->src_reg == BPF_PSEUDO_CALL)
+ return print_call_pcrel(dd, sym, address, insn);
+ else
+ return print_call_helper(dd, sym, address);
+}
+
+static const char *print_imm(void *private_data,
+ const struct bpf_insn *insn,
+ __u64 full_imm)
+{
+ struct dump_data *dd = private_data;
+
+ if (insn->src_reg == BPF_PSEUDO_MAP_FD)
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "map[id:%u]", insn->imm);
+ else
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "0x%llx", (unsigned long long)full_imm);
+ return dd->scratch_buff;
+}
+
+static void dump_xlated_plain(struct dump_data *dd, void *buf,
+ unsigned int len, bool opcodes)
{
+ const struct bpf_insn_cbs cbs = {
+ .cb_print = print_insn,
+ .cb_call = print_call,
+ .cb_imm = print_imm,
+ .private_data = dd,
+ };
struct bpf_insn *insn = buf;
bool double_insn = false;
unsigned int i;
@@ -424,7 +576,7 @@ static void dump_xlated_plain(void *buf, unsigned int len, bool opcodes)
double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
printf("% 4d: ", i);
- print_bpf_insn(print_insn, NULL, insn + i, true);
+ print_bpf_insn(&cbs, NULL, insn + i, true);
if (opcodes) {
printf(" ");
@@ -453,8 +605,15 @@ static void print_insn_json(struct bpf_verifier_env *env, const char *fmt, ...)
va_end(args);
}
-static void dump_xlated_json(void *buf, unsigned int len, bool opcodes)
+static void dump_xlated_json(struct dump_data *dd, void *buf,
+ unsigned int len, bool opcodes)
{
+ const struct bpf_insn_cbs cbs = {
+ .cb_print = print_insn_json,
+ .cb_call = print_call,
+ .cb_imm = print_imm,
+ .private_data = dd,
+ };
struct bpf_insn *insn = buf;
bool double_insn = false;
unsigned int i;
@@ -469,7 +628,7 @@ static void dump_xlated_json(void *buf, unsigned int len, bool opcodes)
jsonw_start_object(json_wtr);
jsonw_name(json_wtr, "disasm");
- print_bpf_insn(print_insn_json, NULL, insn + i, true);
+ print_bpf_insn(&cbs, NULL, insn + i, true);
if (opcodes) {
jsonw_name(json_wtr, "opcodes");
@@ -504,6 +663,7 @@ static void dump_xlated_json(void *buf, unsigned int len, bool opcodes)
static int do_dump(int argc, char **argv)
{
struct bpf_prog_info info = {};
+ struct dump_data dd = {};
__u32 len = sizeof(info);
unsigned int buf_size;
char *filepath = NULL;
@@ -591,6 +751,14 @@ static int do_dump(int argc, char **argv)
goto err_free;
}
+ if ((member_len == &info.jited_prog_len &&
+ info.jited_prog_insns == 0) ||
+ (member_len == &info.xlated_prog_len &&
+ info.xlated_prog_insns == 0)) {
+ p_err("error retrieving insn dump: kernel.kptr_restrict set?");
+ goto err_free;
+ }
+
if (filepath) {
fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
if (fd < 0) {
@@ -607,17 +775,29 @@ static int do_dump(int argc, char **argv)
goto err_free;
}
} else {
- if (member_len == &info.jited_prog_len)
- disasm_print_insn(buf, *member_len, opcodes);
- else
+ if (member_len == &info.jited_prog_len) {
+ const char *name = NULL;
+
+ if (info.ifindex) {
+ name = ifindex_to_bfd_name_ns(info.ifindex,
+ info.netns_dev,
+ info.netns_ino);
+ if (!name)
+ goto err_free;
+ }
+
+ disasm_print_insn(buf, *member_len, opcodes, name);
+ } else {
+ kernel_syms_load(&dd);
if (json_output)
- dump_xlated_json(buf, *member_len, opcodes);
+ dump_xlated_json(&dd, buf, *member_len, opcodes);
else
- dump_xlated_plain(buf, *member_len, opcodes);
+ dump_xlated_plain(&dd, buf, *member_len, opcodes);
+ kernel_syms_destroy(&dd);
+ }
}
free(buf);
-
return 0;
err_free:
@@ -635,6 +815,30 @@ static int do_pin(int argc, char **argv)
return err;
}
+static int do_load(int argc, char **argv)
+{
+ struct bpf_object *obj;
+ int prog_fd;
+
+ if (argc != 2)
+ usage();
+
+ if (bpf_prog_load(argv[0], BPF_PROG_TYPE_UNSPEC, &obj, &prog_fd)) {
+ p_err("failed to load program");
+ return -1;
+ }
+
+ if (do_pin_fd(prog_fd, argv[1])) {
+ p_err("failed to pin program");
+ return -1;
+ }
+
+ if (json_output)
+ jsonw_null(json_wtr);
+
+ return 0;
+}
+
static int do_help(int argc, char **argv)
{
if (json_output) {
@@ -643,26 +847,29 @@ static int do_help(int argc, char **argv)
}
fprintf(stderr,
- "Usage: %s %s show [PROG]\n"
+ "Usage: %s %s { show | list } [PROG]\n"
" %s %s dump xlated PROG [{ file FILE | opcodes }]\n"
" %s %s dump jited PROG [{ file FILE | opcodes }]\n"
" %s %s pin PROG FILE\n"
+ " %s %s load OBJ FILE\n"
" %s %s help\n"
"\n"
" " HELP_SPEC_PROGRAM "\n"
" " HELP_SPEC_OPTIONS "\n"
"",
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
- bin_name, argv[-2], bin_name, argv[-2]);
+ bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2]);
return 0;
}
static const struct cmd cmds[] = {
{ "show", do_show },
+ { "list", do_show },
{ "help", do_help },
{ "dump", do_dump },
{ "pin", do_pin },
+ { "load", do_load },
{ 0 }
};
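To make the kallsyms plumbing above more concrete: for helper calls the verifier rewrites insn->imm so that __bpf_call_base + imm is the helper's kernel address, which is why print_call() can symbolize it with a plain lookup into the sorted /proc/kallsyms snapshot. A hedged sketch of that resolution step on its own (helper_name is not part of the patch):

    /* Assumes a dump_data that kernel_syms_load() has already populated. */
    static const char *helper_name(struct dump_data *dd,
                                   const struct bpf_insn *insn)
    {
            unsigned long addr = dd->address_call_base + insn->imm;
            struct kernel_sym *sym = kernel_syms_search(dd, addr);

            return sym ? sym->name : "<unknown helper>";
    }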
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index c71a05b9c984..c378f003b007 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -56,6 +56,7 @@ FEATURE_TESTS_BASIC := \
libunwind-arm \
libunwind-aarch64 \
pthread-attr-setaffinity-np \
+ pthread-barrier \
stackprotector-all \
timerfd \
libdw-dwarf-unwind \
@@ -65,7 +66,8 @@ FEATURE_TESTS_BASIC := \
bpf \
sched_getcpu \
sdt \
- setns
+ setns \
+ libopencsd
# FEATURE_TESTS_BASIC + FEATURE_TESTS_EXTRA is the complete list
# of all feature tests
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 96982640fbf8..0a490cb15149 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -13,6 +13,7 @@ FILES= \
test-hello.bin \
test-libaudit.bin \
test-libbfd.bin \
+ test-disassembler-four-args.bin \
test-liberty.bin \
test-liberty-z.bin \
test-cplus-demangle.bin \
@@ -37,6 +38,7 @@ FILES= \
test-libunwind-debug-frame-arm.bin \
test-libunwind-debug-frame-aarch64.bin \
test-pthread-attr-setaffinity-np.bin \
+ test-pthread-barrier.bin \
test-stackprotector-all.bin \
test-timerfd.bin \
test-libdw-dwarf-unwind.bin \
@@ -51,7 +53,8 @@ FILES= \
test-cxx.bin \
test-jvmti.bin \
test-sched_getcpu.bin \
- test-setns.bin
+ test-setns.bin \
+ test-libopencsd.bin
FILES := $(addprefix $(OUTPUT),$(FILES))
@@ -79,6 +82,9 @@ $(OUTPUT)test-hello.bin:
$(OUTPUT)test-pthread-attr-setaffinity-np.bin:
$(BUILD) -D_GNU_SOURCE -lpthread
+$(OUTPUT)test-pthread-barrier.bin:
+ $(BUILD) -lpthread
+
$(OUTPUT)test-stackprotector-all.bin:
$(BUILD) -fstack-protector-all
@@ -100,6 +106,10 @@ $(OUTPUT)test-sched_getcpu.bin:
$(OUTPUT)test-setns.bin:
$(BUILD)
+$(OUTPUT)test-libopencsd.bin:
+ $(BUILD) # -lopencsd_c_api -lopencsd provided by
+ # $(FEATURE_CHECK_LDFLAGS-libopencsd)
+
DWARFLIBS := -ldw
ifeq ($(findstring -static,${LDFLAGS}),-static)
DWARFLIBS += -lelf -lebl -lz -llzma -lbz2
@@ -188,6 +198,9 @@ $(OUTPUT)test-libpython-version.bin:
$(OUTPUT)test-libbfd.bin:
$(BUILD) -DPACKAGE='"perf"' -lbfd -lz -liberty -ldl
+$(OUTPUT)test-disassembler-four-args.bin:
+ $(BUILD) -DPACKAGE='"perf"' -lbfd -lopcodes
+
$(OUTPUT)test-liberty.bin:
$(CC) $(CFLAGS) -Wall -Werror -o $@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty
diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
index 4112702e4aed..8dc20a61341f 100644
--- a/tools/build/feature/test-all.c
+++ b/tools/build/feature/test-all.c
@@ -118,6 +118,10 @@
# include "test-pthread-attr-setaffinity-np.c"
#undef main
+#define main main_test_pthread_barrier
+# include "test-pthread-barrier.c"
+#undef main
+
#define main main_test_sched_getcpu
# include "test-sched_getcpu.c"
#undef main
@@ -158,6 +162,10 @@
# include "test-setns.c"
#undef main
+#define main main_test_libopencsd
+# include "test-libopencsd.c"
+#undef main
+
int main(int argc, char *argv[])
{
main_test_libpython();
@@ -187,6 +195,7 @@ int main(int argc, char *argv[])
main_test_sync_compare_and_swap(argc, argv);
main_test_zlib();
main_test_pthread_attr_setaffinity_np();
+ main_test_pthread_barrier();
main_test_lzma();
main_test_get_cpuid();
main_test_bpf();
@@ -194,6 +203,7 @@ int main(int argc, char *argv[])
main_test_sched_getcpu();
main_test_sdt();
main_test_setns();
+ main_test_libopencsd();
return 0;
}
diff --git a/tools/build/feature/test-disassembler-four-args.c b/tools/build/feature/test-disassembler-four-args.c
new file mode 100644
index 000000000000..45ce65cfddf0
--- /dev/null
+++ b/tools/build/feature/test-disassembler-four-args.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <bfd.h>
+#include <dis-asm.h>
+
+int main(void)
+{
+ bfd *abfd = bfd_openr(NULL, NULL);
+
+ disassembler(bfd_get_arch(abfd),
+ bfd_big_endian(abfd),
+ bfd_get_mach(abfd),
+ abfd);
+
+ return 0;
+}
diff --git a/tools/build/feature/test-libopencsd.c b/tools/build/feature/test-libopencsd.c
new file mode 100644
index 000000000000..5ff1246e6194
--- /dev/null
+++ b/tools/build/feature/test-libopencsd.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <opencsd/c_api/opencsd_c_api.h>
+
+int main(void)
+{
+ (void)ocsd_get_version();
+ return 0;
+}
diff --git a/tools/build/feature/test-pthread-barrier.c b/tools/build/feature/test-pthread-barrier.c
new file mode 100644
index 000000000000..0558d9334d97
--- /dev/null
+++ b/tools/build/feature/test-pthread-barrier.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdint.h>
+#include <pthread.h>
+
+int main(void)
+{
+ pthread_barrier_t barrier;
+
+ pthread_barrier_init(&barrier, NULL, 1);
+ pthread_barrier_wait(&barrier);
+ return pthread_barrier_destroy(&barrier);
+}
diff --git a/tools/gpio/gpio-event-mon.c b/tools/gpio/gpio-event-mon.c
index 1c14c2595158..dac4d4131d9b 100644
--- a/tools/gpio/gpio-event-mon.c
+++ b/tools/gpio/gpio-event-mon.c
@@ -14,6 +14,7 @@
#include <unistd.h>
#include <stdlib.h>
#include <stdbool.h>
+#include <stdint.h>
#include <stdio.h>
#include <dirent.h>
#include <errno.h>
@@ -23,12 +24,13 @@
#include <getopt.h>
#include <inttypes.h>
#include <sys/ioctl.h>
+#include <sys/types.h>
#include <linux/gpio.h>
int monitor_device(const char *device_name,
unsigned int line,
- u_int32_t handleflags,
- u_int32_t eventflags,
+ uint32_t handleflags,
+ uint32_t eventflags,
unsigned int loops)
{
struct gpioevent_request req;
@@ -145,8 +147,8 @@ int main(int argc, char **argv)
const char *device_name = NULL;
unsigned int line = -1;
unsigned int loops = 0;
- u_int32_t handleflags = GPIOHANDLE_REQUEST_INPUT;
- u_int32_t eventflags = 0;
+ uint32_t handleflags = GPIOHANDLE_REQUEST_INPUT;
+ uint32_t eventflags = 0;
int c;
while ((c = getopt(argc, argv, "c:n:o:dsrf?")) != -1) {
diff --git a/tools/hv/Makefile b/tools/hv/Makefile
index 31503819454d..1139d71fa0cf 100644
--- a/tools/hv/Makefile
+++ b/tools/hv/Makefile
@@ -7,9 +7,30 @@ CFLAGS = $(WARNINGS) -g $(shell getconf LFS_CFLAGS)
CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
-all: hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon
+sbindir ?= /usr/sbin
+libexecdir ?= /usr/libexec
+sharedstatedir ?= /var/lib
+
+ALL_PROGRAMS := hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon
+
+ALL_SCRIPTS := hv_get_dhcp_info.sh hv_get_dns_info.sh hv_set_ifconfig.sh
+
+all: $(ALL_PROGRAMS)
+
%: %.c
$(CC) $(CFLAGS) -o $@ $^
clean:
$(RM) hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon
+
+install: all
+ install -d -m 755 $(DESTDIR)$(sbindir); \
+ install -d -m 755 $(DESTDIR)$(libexecdir)/hypervkvpd; \
+ install -d -m 755 $(DESTDIR)$(sharedstatedir); \
+ for program in $(ALL_PROGRAMS); do \
+ install $$program -m 755 $(DESTDIR)$(sbindir); \
+ done; \
+ install -m 755 lsvmbus $(DESTDIR)$(sbindir); \
+ for script in $(ALL_SCRIPTS); do \
+ install $$script -m 755 $(DESTDIR)$(libexecdir)/hypervkvpd/$${script%.sh}; \
+ done
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index eaa3bec273c8..4c99c57736ce 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -193,11 +193,14 @@ static void kvp_update_mem_state(int pool)
for (;;) {
readp = &record[records_read];
records_read += fread(readp, sizeof(struct kvp_record),
- ENTRIES_PER_BLOCK * num_blocks,
- filep);
+ ENTRIES_PER_BLOCK * num_blocks - records_read,
+ filep);
if (ferror(filep)) {
- syslog(LOG_ERR, "Failed to read file, pool: %d", pool);
+ syslog(LOG_ERR,
+ "Failed to read file, pool: %d; error: %d %s",
+ pool, errno, strerror(errno));
+ kvp_release_lock(pool);
exit(EXIT_FAILURE);
}
@@ -210,6 +213,7 @@ static void kvp_update_mem_state(int pool)
if (record == NULL) {
syslog(LOG_ERR, "malloc failed");
+ kvp_release_lock(pool);
exit(EXIT_FAILURE);
}
continue;
@@ -224,15 +228,11 @@ static void kvp_update_mem_state(int pool)
fclose(filep);
kvp_release_lock(pool);
}
+
static int kvp_file_init(void)
{
int fd;
- FILE *filep;
- size_t records_read;
char *fname;
- struct kvp_record *record;
- struct kvp_record *readp;
- int num_blocks;
int i;
int alloc_unit = sizeof(struct kvp_record) * ENTRIES_PER_BLOCK;
@@ -246,61 +246,19 @@ static int kvp_file_init(void)
for (i = 0; i < KVP_POOL_COUNT; i++) {
fname = kvp_file_info[i].fname;
- records_read = 0;
- num_blocks = 1;
sprintf(fname, "%s/.kvp_pool_%d", KVP_CONFIG_LOC, i);
fd = open(fname, O_RDWR | O_CREAT | O_CLOEXEC, 0644 /* rw-r--r-- */);
if (fd == -1)
return 1;
-
- filep = fopen(fname, "re");
- if (!filep) {
- close(fd);
- return 1;
- }
-
- record = malloc(alloc_unit * num_blocks);
- if (record == NULL) {
- fclose(filep);
- close(fd);
- return 1;
- }
- for (;;) {
- readp = &record[records_read];
- records_read += fread(readp, sizeof(struct kvp_record),
- ENTRIES_PER_BLOCK,
- filep);
-
- if (ferror(filep)) {
- syslog(LOG_ERR, "Failed to read file, pool: %d",
- i);
- exit(EXIT_FAILURE);
- }
-
- if (!feof(filep)) {
- /*
- * We have more data to read.
- */
- num_blocks++;
- record = realloc(record, alloc_unit *
- num_blocks);
- if (record == NULL) {
- fclose(filep);
- close(fd);
- return 1;
- }
- continue;
- }
- break;
- }
kvp_file_info[i].fd = fd;
- kvp_file_info[i].num_blocks = num_blocks;
- kvp_file_info[i].records = record;
- kvp_file_info[i].num_records = records_read;
- fclose(filep);
-
+ kvp_file_info[i].num_blocks = 1;
+ kvp_file_info[i].records = malloc(alloc_unit);
+ if (kvp_file_info[i].records == NULL)
+ return 1;
+ kvp_file_info[i].num_records = 0;
+ kvp_update_mem_state(i);
}
return 0;
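kvp_file_init() now allocates a single block and defers the actual file read to kvp_update_mem_state(), whose loop both subtracts the records already read from the fread() count (so it can never write past the current allocation) and releases the pool lock before exiting on error. A self-contained sketch of the corrected grow-and-read pattern, using an illustrative record type and block size:

#include <stdio.h>
#include <stdlib.h>

struct rec { char key[64]; char val[64]; };	/* illustrative record */
#define RECS_PER_BLOCK 64

/*
 * Read up to the currently allocated capacity, grow the buffer while EOF
 * has not been reached, and account for records already read so fread()
 * never overruns the allocation.
 */
static struct rec *read_all(FILE *filep, size_t *out_n)
{
	size_t num_blocks = 1, records_read = 0;
	struct rec *buf = malloc(sizeof(*buf) * RECS_PER_BLOCK);

	if (!buf)
		return NULL;

	for (;;) {
		records_read += fread(&buf[records_read], sizeof(*buf),
				      RECS_PER_BLOCK * num_blocks - records_read,
				      filep);
		if (ferror(filep)) {
			free(buf);
			return NULL;
		}
		if (feof(filep))
			break;

		struct rec *tmp;

		num_blocks++;
		tmp = realloc(buf, sizeof(*buf) * RECS_PER_BLOCK * num_blocks);
		if (!tmp) {
			free(buf);
			return NULL;
		}
		buf = tmp;
	}
	*out_n = records_read;
	return buf;
}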
diff --git a/tools/include/asm-generic/bitops/find.h b/tools/include/asm-generic/bitops/find.h
index 9311fadaaab2..16ed1982cb34 100644
--- a/tools/include/asm-generic/bitops/find.h
+++ b/tools/include/asm-generic/bitops/find.h
@@ -16,6 +16,22 @@ extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
size, unsigned long offset);
#endif
+#ifndef find_next_and_bit
+/**
+ * find_next_and_bit - find the next set bit in both memory regions
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The bitmap size in bits
+ *
+ * Returns the bit number for the next set bit
+ * If no bits are set, returns @size.
+ */
+extern unsigned long find_next_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long size,
+ unsigned long offset);
+#endif
+
#ifndef find_next_zero_bit
/**
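find_next_and_bit() scans two bitmaps in lockstep and returns the lowest position at or above @offset that is set in both, or @size if there is none; the real helper is implemented out of line for arbitrary-length unsigned long arrays. A small illustrative equivalent restricted to single-word bitmaps:

#include <stdio.h>

/*
 * Illustrative stand-in for find_next_and_bit() on one word: return the
 * lowest bit index >= offset set in both masks, or size when no such
 * bit exists.
 */
static unsigned long next_and_bit(unsigned long a, unsigned long b,
				  unsigned long size, unsigned long offset)
{
	unsigned long i;

	for (i = offset; i < size; i++)
		if ((a >> i) & (b >> i) & 1UL)
			return i;
	return size;
}

int main(void)
{
	/* bits 1 and 3 are set in both words; searching from 2 finds 3 */
	printf("%lu\n", next_and_bit(0xaUL, 0xeUL, 8, 2));
	return 0;
}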
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
index 07fd03c74a77..04e32f965ad7 100644
--- a/tools/include/linux/compiler.h
+++ b/tools/include/linux/compiler.h
@@ -84,8 +84,6 @@
#define uninitialized_var(x) x = *(&(x))
-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
-
#include <linux/types.h>
/*
@@ -135,20 +133,19 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
/*
* Prevent the compiler from merging or refetching reads or writes. The
* compiler is also forbidden from reordering successive instances of
- * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
- * compiler is aware of some particular ordering. One way to make the
- * compiler aware of ordering is to put the two invocations of READ_ONCE,
- * WRITE_ONCE or ACCESS_ONCE() in different C statements.
+ * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
+ * particular ordering. One way to make the compiler aware of ordering is to
+ * put the two invocations of READ_ONCE or WRITE_ONCE in different C
+ * statements.
*
- * In contrast to ACCESS_ONCE these two macros will also work on aggregate
- * data types like structs or unions. If the size of the accessed data
- * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
- * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
- * compile-time warning.
+ * These two macros will also work on aggregate data types like structs or
+ * unions. If the size of the accessed data type exceeds the word size of
+ * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
+ * fall back to memcpy and print a compile-time warning.
*
* Their two major use cases are: (1) Mediating communication between
* process-level code and irq/NMI handlers, all running on the same CPU,
- * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
+ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
* mutilate accesses that either do not require ordering or that interact
* with an explicit memory barrier or atomic instruction that provides the
* required ordering.
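With ACCESS_ONCE() removed, tools code is expected to use READ_ONCE()/WRITE_ONCE() exclusively; as the updated comment says, keeping the two invocations in separate C statements is what makes the ordering visible to the compiler. A minimal sketch, assuming the tools copy of this header is on the include path (-Itools/include):

#include <linux/compiler.h>	/* the tools/include copy of this header */

static int done;

/* One context publishes the flag... */
void publish(void)
{
	WRITE_ONCE(done, 1);
}

/* ...another polls it; READ_ONCE() forces a fresh load each iteration. */
void wait_for_done(void)
{
	while (!READ_ONCE(done))
		;
}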
diff --git a/tools/include/linux/kmemcheck.h b/tools/include/linux/kmemcheck.h
deleted file mode 100644
index ea32a7d3cf1b..000000000000
--- a/tools/include/linux/kmemcheck.h
+++ /dev/null
@@ -1 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
diff --git a/tools/include/linux/lockdep.h b/tools/include/linux/lockdep.h
index 940c1b075659..6b0c36a58fcb 100644
--- a/tools/include/linux/lockdep.h
+++ b/tools/include/linux/lockdep.h
@@ -48,6 +48,7 @@ static inline int debug_locks_off(void)
#define printk(...) dprintf(STDOUT_FILENO, __VA_ARGS__)
#define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__)
#define pr_warn pr_err
+#define pr_cont pr_err
#define list_del_rcu list_del
diff --git a/tools/include/uapi/asm-generic/bpf_perf_event.h b/tools/include/uapi/asm-generic/bpf_perf_event.h
new file mode 100644
index 000000000000..53815d2cd047
--- /dev/null
+++ b/tools/include/uapi/asm-generic/bpf_perf_event.h
@@ -0,0 +1,9 @@
+#ifndef _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__
+
+#include <linux/ptrace.h>
+
+/* Export kernel pt_regs structure */
+typedef struct pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__ */
diff --git a/tools/include/uapi/asm-generic/errno-base.h b/tools/include/uapi/asm-generic/errno-base.h
new file mode 100644
index 000000000000..9653140bff92
--- /dev/null
+++ b/tools/include/uapi/asm-generic/errno-base.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_GENERIC_ERRNO_BASE_H
+#define _ASM_GENERIC_ERRNO_BASE_H
+
+#define EPERM 1 /* Operation not permitted */
+#define ENOENT 2 /* No such file or directory */
+#define ESRCH 3 /* No such process */
+#define EINTR 4 /* Interrupted system call */
+#define EIO 5 /* I/O error */
+#define ENXIO 6 /* No such device or address */
+#define E2BIG 7 /* Argument list too long */
+#define ENOEXEC 8 /* Exec format error */
+#define EBADF 9 /* Bad file number */
+#define ECHILD 10 /* No child processes */
+#define EAGAIN 11 /* Try again */
+#define ENOMEM 12 /* Out of memory */
+#define EACCES 13 /* Permission denied */
+#define EFAULT 14 /* Bad address */
+#define ENOTBLK 15 /* Block device required */
+#define EBUSY 16 /* Device or resource busy */
+#define EEXIST 17 /* File exists */
+#define EXDEV 18 /* Cross-device link */
+#define ENODEV 19 /* No such device */
+#define ENOTDIR 20 /* Not a directory */
+#define EISDIR 21 /* Is a directory */
+#define EINVAL 22 /* Invalid argument */
+#define ENFILE 23 /* File table overflow */
+#define EMFILE 24 /* Too many open files */
+#define ENOTTY 25 /* Not a typewriter */
+#define ETXTBSY 26 /* Text file busy */
+#define EFBIG 27 /* File too large */
+#define ENOSPC 28 /* No space left on device */
+#define ESPIPE 29 /* Illegal seek */
+#define EROFS 30 /* Read-only file system */
+#define EMLINK 31 /* Too many links */
+#define EPIPE 32 /* Broken pipe */
+#define EDOM 33 /* Math argument out of domain of func */
+#define ERANGE 34 /* Math result not representable */
+
+#endif
diff --git a/tools/include/uapi/asm-generic/errno.h b/tools/include/uapi/asm-generic/errno.h
new file mode 100644
index 000000000000..cf9c51ac49f9
--- /dev/null
+++ b/tools/include/uapi/asm-generic/errno.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_GENERIC_ERRNO_H
+#define _ASM_GENERIC_ERRNO_H
+
+#include <asm-generic/errno-base.h>
+
+#define EDEADLK 35 /* Resource deadlock would occur */
+#define ENAMETOOLONG 36 /* File name too long */
+#define ENOLCK 37 /* No record locks available */
+
+/*
+ * This error code is special: arch syscall entry code will return
+ * -ENOSYS if users try to call a syscall that doesn't exist. To keep
+ * failures of syscalls that really do exist distinguishable from
+ * failures due to attempts to use a nonexistent syscall, syscall
+ * implementations should refrain from returning -ENOSYS.
+ */
+#define ENOSYS 38 /* Invalid system call number */
+
+#define ENOTEMPTY 39 /* Directory not empty */
+#define ELOOP 40 /* Too many symbolic links encountered */
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define ENOMSG 42 /* No message of desired type */
+#define EIDRM 43 /* Identifier removed */
+#define ECHRNG 44 /* Channel number out of range */
+#define EL2NSYNC 45 /* Level 2 not synchronized */
+#define EL3HLT 46 /* Level 3 halted */
+#define EL3RST 47 /* Level 3 reset */
+#define ELNRNG 48 /* Link number out of range */
+#define EUNATCH 49 /* Protocol driver not attached */
+#define ENOCSI 50 /* No CSI structure available */
+#define EL2HLT 51 /* Level 2 halted */
+#define EBADE 52 /* Invalid exchange */
+#define EBADR 53 /* Invalid request descriptor */
+#define EXFULL 54 /* Exchange full */
+#define ENOANO 55 /* No anode */
+#define EBADRQC 56 /* Invalid request code */
+#define EBADSLT 57 /* Invalid slot */
+
+#define EDEADLOCK EDEADLK
+
+#define EBFONT 59 /* Bad font file format */
+#define ENOSTR 60 /* Device not a stream */
+#define ENODATA 61 /* No data available */
+#define ETIME 62 /* Timer expired */
+#define ENOSR 63 /* Out of streams resources */
+#define ENONET 64 /* Machine is not on the network */
+#define ENOPKG 65 /* Package not installed */
+#define EREMOTE 66 /* Object is remote */
+#define ENOLINK 67 /* Link has been severed */
+#define EADV 68 /* Advertise error */
+#define ESRMNT 69 /* Srmount error */
+#define ECOMM 70 /* Communication error on send */
+#define EPROTO 71 /* Protocol error */
+#define EMULTIHOP 72 /* Multihop attempted */
+#define EDOTDOT 73 /* RFS specific error */
+#define EBADMSG 74 /* Not a data message */
+#define EOVERFLOW 75 /* Value too large for defined data type */
+#define ENOTUNIQ 76 /* Name not unique on network */
+#define EBADFD 77 /* File descriptor in bad state */
+#define EREMCHG 78 /* Remote address changed */
+#define ELIBACC 79 /* Can not access a needed shared library */
+#define ELIBBAD 80 /* Accessing a corrupted shared library */
+#define ELIBSCN 81 /* .lib section in a.out corrupted */
+#define ELIBMAX 82 /* Attempting to link in too many shared libraries */
+#define ELIBEXEC 83 /* Cannot exec a shared library directly */
+#define EILSEQ 84 /* Illegal byte sequence */
+#define ERESTART 85 /* Interrupted system call should be restarted */
+#define ESTRPIPE 86 /* Streams pipe error */
+#define EUSERS 87 /* Too many users */
+#define ENOTSOCK 88 /* Socket operation on non-socket */
+#define EDESTADDRREQ 89 /* Destination address required */
+#define EMSGSIZE 90 /* Message too long */
+#define EPROTOTYPE 91 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 92 /* Protocol not available */
+#define EPROTONOSUPPORT 93 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 94 /* Socket type not supported */
+#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
+#define EPFNOSUPPORT 96 /* Protocol family not supported */
+#define EAFNOSUPPORT 97 /* Address family not supported by protocol */
+#define EADDRINUSE 98 /* Address already in use */
+#define EADDRNOTAVAIL 99 /* Cannot assign requested address */
+#define ENETDOWN 100 /* Network is down */
+#define ENETUNREACH 101 /* Network is unreachable */
+#define ENETRESET 102 /* Network dropped connection because of reset */
+#define ECONNABORTED 103 /* Software caused connection abort */
+#define ECONNRESET 104 /* Connection reset by peer */
+#define ENOBUFS 105 /* No buffer space available */
+#define EISCONN 106 /* Transport endpoint is already connected */
+#define ENOTCONN 107 /* Transport endpoint is not connected */
+#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
+#define ETOOMANYREFS 109 /* Too many references: cannot splice */
+#define ETIMEDOUT 110 /* Connection timed out */
+#define ECONNREFUSED 111 /* Connection refused */
+#define EHOSTDOWN 112 /* Host is down */
+#define EHOSTUNREACH 113 /* No route to host */
+#define EALREADY 114 /* Operation already in progress */
+#define EINPROGRESS 115 /* Operation now in progress */
+#define ESTALE 116 /* Stale file handle */
+#define EUCLEAN 117 /* Structure needs cleaning */
+#define ENOTNAM 118 /* Not a XENIX named type file */
+#define ENAVAIL 119 /* No XENIX semaphores available */
+#define EISNAM 120 /* Is a named type file */
+#define EREMOTEIO 121 /* Remote I/O error */
+#define EDQUOT 122 /* Quota exceeded */
+
+#define ENOMEDIUM 123 /* No medium found */
+#define EMEDIUMTYPE 124 /* Wrong medium type */
+#define ECANCELED 125 /* Operation Canceled */
+#define ENOKEY 126 /* Required key not available */
+#define EKEYEXPIRED 127 /* Key has expired */
+#define EKEYREVOKED 128 /* Key has been revoked */
+#define EKEYREJECTED 129 /* Key was rejected by service */
+
+/* for robust mutexes */
+#define EOWNERDEAD 130 /* Owner died */
+#define ENOTRECOVERABLE 131 /* State not recoverable */
+
+#define ERFKILL 132 /* Operation not possible due to RF-kill */
+
+#define EHWPOISON 133 /* Memory page has hardware error */
+
+#endif
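Carrying these asm-generic errno definitions under tools/ lets userspace tools translate raw error numbers without depending on installed kernel headers (perf, for instance, builds errno-name tables from headers like these). A hand-written sketch of such a mapping for a few of the values above; a generated table would of course cover them all:

#include <stdio.h>

static const char *errno_name(int err)
{
	switch (err) {
	case 1:   return "EPERM";
	case 2:   return "ENOENT";
	case 38:  return "ENOSYS";
	case 132: return "ERFKILL";
	default:  return "UNKNOWN";
	}
}

int main(void)
{
	printf("38 -> %s\n", errno_name(38));	/* ENOSYS */
	return 0;
}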
diff --git a/tools/include/uapi/asm-generic/mman.h b/tools/include/uapi/asm-generic/mman.h
index 2dffcbf705b3..653687d9771b 100644
--- a/tools/include/uapi/asm-generic/mman.h
+++ b/tools/include/uapi/asm-generic/mman.h
@@ -13,6 +13,7 @@
#define MAP_NONBLOCK 0x10000 /* do not block on IO */
#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
#define MAP_HUGETLB 0x40000 /* create a huge page mapping */
+#define MAP_SYNC 0x80000 /* perform synchronous page faults for the mapping */
/* Bits [26:31] are reserved, see mman-common.h for MAP_HUGETLB usage */
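MAP_SYNC requests synchronous page faults: on a DAX mapping the kernel makes the file metadata durable before a write fault completes. It has to be combined with MAP_SHARED_VALIDATE so that a kernel which does not know the flag rejects it instead of silently ignoring it. A hedged sketch, with a hypothetical pmem path:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MAP_SYNC
#define MAP_SYNC 0x80000
#endif
#ifndef MAP_SHARED_VALIDATE
#define MAP_SHARED_VALIDATE 0x03
#endif

int main(void)
{
	int fd = open("/mnt/pmem/data", O_RDWR);	/* hypothetical DAX file */
	char *p;

	if (fd < 0)
		return 1;
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		 MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");	/* EOPNOTSUPP if the fs cannot honour MAP_SYNC */
		close(fd);
		return 1;
	}
	strcpy(p, "metadata made durable at fault time");
	munmap(p, 4096);
	close(fd);
	return 0;
}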
diff --git a/tools/include/uapi/asm/bpf_perf_event.h b/tools/include/uapi/asm/bpf_perf_event.h
new file mode 100644
index 000000000000..13a58531e6fa
--- /dev/null
+++ b/tools/include/uapi/asm/bpf_perf_event.h
@@ -0,0 +1,7 @@
+#if defined(__aarch64__)
+#include "../../arch/arm64/include/uapi/asm/bpf_perf_event.h"
+#elif defined(__s390__)
+#include "../../arch/s390/include/uapi/asm/bpf_perf_event.h"
+#else
+#include <uapi/asm-generic/bpf_perf_event.h>
+#endif
diff --git a/tools/include/uapi/drm/drm.h b/tools/include/uapi/drm/drm.h
index 97677cd6964d..6fdff5945c8a 100644
--- a/tools/include/uapi/drm/drm.h
+++ b/tools/include/uapi/drm/drm.h
@@ -737,6 +737,28 @@ struct drm_syncobj_array {
__u32 pad;
};
+/* Query current scanout sequence number */
+struct drm_crtc_get_sequence {
+ __u32 crtc_id; /* requested crtc_id */
+ __u32 active; /* return: crtc output is active */
+ __u64 sequence; /* return: most recent vblank sequence */
+ __s64 sequence_ns; /* return: most recent time of first pixel out */
+};
+
+/* Queue event to be delivered at specified sequence. Time stamp marks
+ * when the first pixel of the refresh cycle leaves the display engine
+ * for the display
+ */
+#define DRM_CRTC_SEQUENCE_RELATIVE 0x00000001 /* sequence is relative to current */
+#define DRM_CRTC_SEQUENCE_NEXT_ON_MISS 0x00000002 /* Use next sequence if we've missed */
+
+struct drm_crtc_queue_sequence {
+ __u32 crtc_id;
+ __u32 flags;
+ __u64 sequence; /* on input, target sequence. on output, actual sequence */
+ __u64 user_data; /* user data passed to event */
+};
+
#if defined(__cplusplus)
}
#endif
@@ -819,6 +841,9 @@ extern "C" {
#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)
+#define DRM_IOCTL_CRTC_GET_SEQUENCE DRM_IOWR(0x3b, struct drm_crtc_get_sequence)
+#define DRM_IOCTL_CRTC_QUEUE_SEQUENCE DRM_IOWR(0x3c, struct drm_crtc_queue_sequence)
+
#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res)
@@ -863,6 +888,11 @@ extern "C" {
#define DRM_IOCTL_SYNCOBJ_RESET DRM_IOWR(0xC4, struct drm_syncobj_array)
#define DRM_IOCTL_SYNCOBJ_SIGNAL DRM_IOWR(0xC5, struct drm_syncobj_array)
+#define DRM_IOCTL_MODE_CREATE_LEASE DRM_IOWR(0xC6, struct drm_mode_create_lease)
+#define DRM_IOCTL_MODE_LIST_LESSEES DRM_IOWR(0xC7, struct drm_mode_list_lessees)
+#define DRM_IOCTL_MODE_GET_LEASE DRM_IOWR(0xC8, struct drm_mode_get_lease)
+#define DRM_IOCTL_MODE_REVOKE_LEASE DRM_IOWR(0xC9, struct drm_mode_revoke_lease)
+
/**
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x9f.
@@ -893,6 +923,7 @@ struct drm_event {
#define DRM_EVENT_VBLANK 0x01
#define DRM_EVENT_FLIP_COMPLETE 0x02
+#define DRM_EVENT_CRTC_SEQUENCE 0x03
struct drm_event_vblank {
struct drm_event base;
@@ -903,6 +934,16 @@ struct drm_event_vblank {
__u32 crtc_id; /* 0 on older kernels that do not support this */
};
+/* Event delivered at sequence. Time stamp marks when the first pixel
+ * of the refresh cycle leaves the display engine for the display
+ */
+struct drm_event_crtc_sequence {
+ struct drm_event base;
+ __u64 user_data;
+ __s64 time_ns;
+ __u64 sequence;
+};
+
/* typedef area */
#ifndef __KERNEL__
typedef struct drm_clip_rect drm_clip_rect_t;
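DRM_IOCTL_CRTC_GET_SEQUENCE reads the current scanout sequence and its timestamp for a CRTC, and DRM_IOCTL_CRTC_QUEUE_SEQUENCE queues a DRM_EVENT_CRTC_SEQUENCE event for a target sequence. A hedged sketch, assuming the uapi header is reachable as <drm/drm.h> and using a hypothetical crtc_id:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	struct drm_crtc_get_sequence get = { .crtc_id = 42 };	/* hypothetical id */
	struct drm_crtc_queue_sequence queue;

	if (fd < 0)
		return 1;
	if (ioctl(fd, DRM_IOCTL_CRTC_GET_SEQUENCE, &get) == 0)
		printf("sequence %llu at %lld ns (active=%u)\n",
		       (unsigned long long)get.sequence,
		       (long long)get.sequence_ns, get.active);

	memset(&queue, 0, sizeof(queue));
	queue.crtc_id = get.crtc_id;
	queue.flags = DRM_CRTC_SEQUENCE_RELATIVE;
	queue.sequence = 1;		/* one vblank from now */
	queue.user_data = 0xdecafbad;	/* echoed back in the event */
	ioctl(fd, DRM_IOCTL_CRTC_QUEUE_SEQUENCE, &queue);

	close(fd);
	return 0;
}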
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index 9816590d3ad2..536ee4febd74 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -86,6 +86,62 @@ enum i915_mocs_table_index {
I915_MOCS_CACHED,
};
+/*
+ * Different engines serve different roles, and there may be more than one
+ * engine serving each role. enum drm_i915_gem_engine_class provides a
+ * classification of the role of the engine, which may be used when requesting
+ * operations to be performed on a certain subset of engines, or for providing
+ * information about that group.
+ */
+enum drm_i915_gem_engine_class {
+ I915_ENGINE_CLASS_RENDER = 0,
+ I915_ENGINE_CLASS_COPY = 1,
+ I915_ENGINE_CLASS_VIDEO = 2,
+ I915_ENGINE_CLASS_VIDEO_ENHANCE = 3,
+
+ I915_ENGINE_CLASS_INVALID = -1
+};
+
+/**
+ * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
+ *
+ */
+
+enum drm_i915_pmu_engine_sample {
+ I915_SAMPLE_BUSY = 0,
+ I915_SAMPLE_WAIT = 1,
+ I915_SAMPLE_SEMA = 2
+};
+
+#define I915_PMU_SAMPLE_BITS (4)
+#define I915_PMU_SAMPLE_MASK (0xf)
+#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
+#define I915_PMU_CLASS_SHIFT \
+ (I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)
+
+#define __I915_PMU_ENGINE(class, instance, sample) \
+ ((class) << I915_PMU_CLASS_SHIFT | \
+ (instance) << I915_PMU_SAMPLE_BITS | \
+ (sample))
+
+#define I915_PMU_ENGINE_BUSY(class, instance) \
+ __I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)
+
+#define I915_PMU_ENGINE_WAIT(class, instance) \
+ __I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)
+
+#define I915_PMU_ENGINE_SEMA(class, instance) \
+ __I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
+
+#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))
+
+#define I915_PMU_ACTUAL_FREQUENCY __I915_PMU_OTHER(0)
+#define I915_PMU_REQUESTED_FREQUENCY __I915_PMU_OTHER(1)
+#define I915_PMU_INTERRUPTS __I915_PMU_OTHER(2)
+#define I915_PMU_RC6_RESIDENCY __I915_PMU_OTHER(3)
+
+#define I915_PMU_LAST I915_PMU_RC6_RESIDENCY
+
/* Each region is a minimum of 16k, and there are at most 255 of them.
*/
#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
@@ -397,10 +453,20 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_MIN_EU_IN_POOL 39
#define I915_PARAM_MMAP_GTT_VERSION 40
-/* Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
+/*
+ * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
* priorities and the driver will attempt to execute batches in priority order.
+ * The param returns a capability bitmask, nonzero implies that the scheduler
+ * is enabled, with different features present according to the mask.
+ *
+ * The initial priority for each batch is supplied by the context and is
+ * controlled via I915_CONTEXT_PARAM_PRIORITY.
*/
#define I915_PARAM_HAS_SCHEDULER 41
+#define I915_SCHEDULER_CAP_ENABLED (1ul << 0)
+#define I915_SCHEDULER_CAP_PRIORITY (1ul << 1)
+#define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2)
+
#define I915_PARAM_HUC_STATUS 42
/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
@@ -440,6 +506,27 @@ typedef struct drm_i915_irq_wait {
*/
#define I915_PARAM_HAS_EXEC_FENCE_ARRAY 49
+/*
+ * Query whether every context (both per-file default and user created) is
+ * isolated (insofar as HW supports). If this parameter is not true, then
+ * freshly created contexts may inherit values from an existing context,
+ * rather than default HW values. If true, it also ensures (insofar as HW
+ * supports) that all state set by this context will not leak to any other
+ * context.
+ *
+ * As not every engine across every gen supports contexts, the returned
+ * value reports the support of context isolation for individual engines by
+ * returning a bitmask of each engine class set to true if that class supports
+ * isolation.
+ */
+#define I915_PARAM_HAS_CONTEXT_ISOLATION 50
+
+/* Frequency of the command streamer timestamps given by the *_TIMESTAMP
+ * registers. This used to be fixed per platform but from CNL onwards, this
+ * might vary depending on the parts.
+ */
+#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51
+
typedef struct drm_i915_getparam {
__s32 param;
/*
@@ -1309,14 +1396,16 @@ struct drm_i915_reg_read {
* be specified
*/
__u64 offset;
+#define I915_REG_READ_8B_WA (1ul << 0)
+
__u64 val; /* Return value */
};
/* Known registers:
*
* Render engine timestamp - 0x2358 + 64bit - gen7+
* - Note this register returns an invalid value if using the default
- * single instruction 8byte read, in order to workaround that use
- * offset (0x2538 | 1) instead.
+ * single instruction 8-byte read; to work around that, pass the
+ * I915_REG_READ_8B_WA flag in the offset field.
*
*/
@@ -1359,6 +1448,10 @@ struct drm_i915_gem_context_param {
#define I915_CONTEXT_PARAM_GTT_SIZE 0x3
#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4
#define I915_CONTEXT_PARAM_BANNABLE 0x5
+#define I915_CONTEXT_PARAM_PRIORITY 0x6
+#define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */
+#define I915_CONTEXT_DEFAULT_PRIORITY 0
+#define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */
__u64 value;
};
@@ -1510,9 +1603,14 @@ struct drm_i915_perf_oa_config {
__u32 n_boolean_regs;
__u32 n_flex_regs;
- __u64 __user mux_regs_ptr;
- __u64 __user boolean_regs_ptr;
- __u64 __user flex_regs_ptr;
+ /*
+ * These fields are pointers to tuples of u32 values (register
+ * address, value). For example the expected length of the buffer
+ * pointed by mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
+ */
+ __u64 mux_regs_ptr;
+ __u64 boolean_regs_ptr;
+ __u64 flex_regs_ptr;
};
#if defined(__cplusplus)
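The __I915_PMU_ENGINE() encoding packs engine class, instance and sample type into the value that goes into perf_event_attr.config for the i915 event source. A small sketch that reproduces the encoding locally for the video engine busy counter; actually opening the event would additionally need the i915 PMU type number from sysfs, which is not shown:

#include <stdio.h>

#define SAMPLE_BITS		4
#define SAMPLE_INSTANCE_BITS	8
#define CLASS_SHIFT		(SAMPLE_BITS + SAMPLE_INSTANCE_BITS)
#define ENGINE_CFG(class, instance, sample) \
	(((unsigned long long)(class) << CLASS_SHIFT) | \
	 ((unsigned long long)(instance) << SAMPLE_BITS) | (sample))

int main(void)
{
	/* I915_ENGINE_CLASS_VIDEO = 2, instance 0, I915_SAMPLE_BUSY = 0 */
	printf("config = 0x%llx\n", ENGINE_CFG(2, 0, 0));	/* 0x2000 */
	return 0;
}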
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 4c223ab30293..db6bdc375126 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -17,7 +17,7 @@
#define BPF_ALU64 0x07 /* alu mode in double word width */
/* ld/ldx fields */
-#define BPF_DW 0x18 /* double word */
+#define BPF_DW 0x18 /* double word (64-bit) */
#define BPF_XADD 0xc0 /* exclusive add */
/* alu/jmp fields */
@@ -197,8 +197,14 @@ enum bpf_attach_type {
*/
#define BPF_F_STRICT_ALIGNMENT (1U << 0)
+/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
#define BPF_PSEUDO_MAP_FD 1
+/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
+ * offset to another bpf function
+ */
+#define BPF_PSEUDO_CALL 1
+
/* flags for BPF_MAP_UPDATE_ELEM command */
#define BPF_ANY 0 /* create new element or update existing */
#define BPF_NOEXIST 1 /* create new element if it didn't exist */
@@ -239,6 +245,7 @@ union bpf_attr {
* BPF_F_NUMA_NODE is set).
*/
char map_name[BPF_OBJ_NAME_LEN];
+ __u32 map_ifindex; /* ifindex of netdev to create on */
};
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@@ -635,6 +642,14 @@ union bpf_attr {
* @optlen: length of optval in bytes
* Return: 0 or negative error
*
+ * int bpf_sock_ops_cb_flags_set(bpf_sock_ops, flags)
+ * Set callback flags for sock_ops
+ * @bpf_sock_ops: pointer to bpf_sock_ops_kern struct
+ * @flags: flags value
+ * Return: 0 for no error
+ * -EINVAL if there is no full tcp socket
+ * bits in flags that are not supported by current kernel
+ *
* int bpf_skb_adjust_room(skb, len_diff, mode, flags)
* Grow or shrink room in sk_buff.
* @skb: pointer to skb
@@ -677,6 +692,10 @@ union bpf_attr {
* @buf: buf to fill
* @buf_size: size of the buf
* Return : 0 on success or negative error code
+ *
+ * int bpf_override_return(pt_regs, rc)
+ * @pt_regs: pointer to struct pt_regs
+ * @rc: the return value to set
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -736,7 +755,9 @@ union bpf_attr {
FN(xdp_adjust_meta), \
FN(perf_event_read_value), \
FN(perf_prog_read_value), \
- FN(getsockopt),
+ FN(getsockopt), \
+ FN(override_return), \
+ FN(sock_ops_cb_flags_set),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -888,6 +909,9 @@ struct xdp_md {
__u32 data;
__u32 data_end;
__u32 data_meta;
+ /* Below access go through struct xdp_rxq_info */
+ __u32 ingress_ifindex; /* rxq->dev->ifindex */
+ __u32 rx_queue_index; /* rxq->queue_index */
};
enum sk_action {
@@ -910,6 +934,9 @@ struct bpf_prog_info {
__u32 nr_map_ids;
__aligned_u64 map_ids;
char name[BPF_OBJ_NAME_LEN];
+ __u32 ifindex;
+ __u64 netns_dev;
+ __u64 netns_ino;
} __attribute__((aligned(8)));
struct bpf_map_info {
@@ -920,6 +947,9 @@ struct bpf_map_info {
__u32 max_entries;
__u32 map_flags;
char name[BPF_OBJ_NAME_LEN];
+ __u32 ifindex;
+ __u64 netns_dev;
+ __u64 netns_ino;
} __attribute__((aligned(8)));
/* User bpf_sock_ops struct to access socket values and specify request ops
@@ -931,8 +961,9 @@ struct bpf_map_info {
struct bpf_sock_ops {
__u32 op;
union {
- __u32 reply;
- __u32 replylong[4];
+ __u32 args[4]; /* Optionally passed to bpf program */
+ __u32 reply; /* Returned by bpf program */
+ __u32 replylong[4]; /* Optionally returned by bpf prog */
};
__u32 family;
__u32 remote_ip4; /* Stored in network byte order */
@@ -941,8 +972,45 @@ struct bpf_sock_ops {
__u32 local_ip6[4]; /* Stored in network byte order */
__u32 remote_port; /* Stored in network byte order */
__u32 local_port; /* stored in host byte order */
+ __u32 is_fullsock; /* Some TCP fields are only valid if
+ * there is a full socket. If not, the
+ * fields read as zero.
+ */
+ __u32 snd_cwnd;
+ __u32 srtt_us; /* Averaged RTT << 3 in usecs */
+ __u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */
+ __u32 state;
+ __u32 rtt_min;
+ __u32 snd_ssthresh;
+ __u32 rcv_nxt;
+ __u32 snd_nxt;
+ __u32 snd_una;
+ __u32 mss_cache;
+ __u32 ecn_flags;
+ __u32 rate_delivered;
+ __u32 rate_interval_us;
+ __u32 packets_out;
+ __u32 retrans_out;
+ __u32 total_retrans;
+ __u32 segs_in;
+ __u32 data_segs_in;
+ __u32 segs_out;
+ __u32 data_segs_out;
+ __u32 lost_out;
+ __u32 sacked_out;
+ __u32 sk_txhash;
+ __u64 bytes_received;
+ __u64 bytes_acked;
};
+/* Definitions for bpf_sock_ops_cb_flags */
+#define BPF_SOCK_OPS_RTO_CB_FLAG (1<<0)
+#define BPF_SOCK_OPS_RETRANS_CB_FLAG (1<<1)
+#define BPF_SOCK_OPS_STATE_CB_FLAG (1<<2)
+#define BPF_SOCK_OPS_ALL_CB_FLAGS 0x7 /* Mask of all currently
+ * supported cb flags
+ */
+
/* List of known BPF sock_ops operators.
* New entries can only be added at the end
*/
@@ -976,6 +1044,43 @@ enum {
* a congestion threshold. RTTs above
* this indicate congestion
*/
+ BPF_SOCK_OPS_RTO_CB, /* Called when an RTO has triggered.
+ * Arg1: value of icsk_retransmits
+ * Arg2: value of icsk_rto
+ * Arg3: whether RTO has expired
+ */
+ BPF_SOCK_OPS_RETRANS_CB, /* Called when skb is retransmitted.
+ * Arg1: sequence number of 1st byte
+ * Arg2: # segments
+ * Arg3: return value of
+ * tcp_transmit_skb (0 => success)
+ */
+ BPF_SOCK_OPS_STATE_CB, /* Called when TCP changes state.
+ * Arg1: old_state
+ * Arg2: new_state
+ */
+};
+
+/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
+ * changes between the TCP and BPF versions. Ideally this should never happen.
+ * If it does, we need to add code to convert them before calling
+ * the BPF sock_ops function.
+ */
+enum {
+ BPF_TCP_ESTABLISHED = 1,
+ BPF_TCP_SYN_SENT,
+ BPF_TCP_SYN_RECV,
+ BPF_TCP_FIN_WAIT1,
+ BPF_TCP_FIN_WAIT2,
+ BPF_TCP_TIME_WAIT,
+ BPF_TCP_CLOSE,
+ BPF_TCP_CLOSE_WAIT,
+ BPF_TCP_LAST_ACK,
+ BPF_TCP_LISTEN,
+ BPF_TCP_CLOSING, /* Now a valid state */
+ BPF_TCP_NEW_SYN_RECV,
+
+ BPF_TCP_MAX_STATES /* Leave at the end! */
};
#define TCP_BPF_IW 1001 /* Set TCP initial congestion window */
@@ -995,7 +1100,8 @@ struct bpf_perf_event_value {
#define BPF_DEVCG_DEV_CHAR (1ULL << 1)
struct bpf_cgroup_dev_ctx {
- __u32 access_type; /* (access << 16) | type */
+ /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
+ __u32 access_type;
__u32 major;
__u32 minor;
};
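The new bpf_sock_ops fields and the BPF_SOCK_OPS_*_CB operations are opt-in per connection: a sock_ops program sets bpf_sock_ops_cb_flags through the new bpf_sock_ops_cb_flags_set() helper. A hedged sketch of such a program (built with clang -target bpf); SEC() and the helper declaration follow the usual samples/bpf convention, and loading/attaching with BPF_CGROUP_SOCK_OPS is omitted:

#include <linux/bpf.h>

#define SEC(name) __attribute__((section(name), used))

static int (*bpf_sock_ops_cb_flags_set)(struct bpf_sock_ops *ops, int flags) =
	(void *)(long)BPF_FUNC_sock_ops_cb_flags_set;

SEC("sockops")
int track_tcp_states(struct bpf_sock_ops *skops)
{
	switch (skops->op) {
	case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
	case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
		/* ask for a callback on every TCP state change */
		bpf_sock_ops_cb_flags_set(skops, BPF_SOCK_OPS_STATE_CB_FLAG);
		break;
	case BPF_SOCK_OPS_STATE_CB:
		/* skops->args[0] = old state, args[1] = new BPF_TCP_* state */
		break;
	}
	return 1;
}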
diff --git a/tools/include/uapi/linux/bpf_common.h b/tools/include/uapi/linux/bpf_common.h
index 18be90725ab0..ee97668bdadb 100644
--- a/tools/include/uapi/linux/bpf_common.h
+++ b/tools/include/uapi/linux/bpf_common.h
@@ -15,9 +15,10 @@
/* ld/ldx fields */
#define BPF_SIZE(code) ((code) & 0x18)
-#define BPF_W 0x00
-#define BPF_H 0x08
-#define BPF_B 0x10
+#define BPF_W 0x00 /* 32-bit */
+#define BPF_H 0x08 /* 16-bit */
+#define BPF_B 0x10 /* 8-bit */
+/* eBPF BPF_DW 0x18 64-bit */
#define BPF_MODE(code) ((code) & 0xe0)
#define BPF_IMM 0x00
#define BPF_ABS 0x20
diff --git a/tools/include/uapi/linux/bpf_perf_event.h b/tools/include/uapi/linux/bpf_perf_event.h
index 067427259820..8f95303f9d80 100644
--- a/tools/include/uapi/linux/bpf_perf_event.h
+++ b/tools/include/uapi/linux/bpf_perf_event.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/* Copyright (c) 2016 Facebook
*
* This program is free software; you can redistribute it and/or
@@ -7,11 +8,10 @@
#ifndef _UAPI__LINUX_BPF_PERF_EVENT_H__
#define _UAPI__LINUX_BPF_PERF_EVENT_H__
-#include <linux/types.h>
-#include <linux/ptrace.h>
+#include <asm/bpf_perf_event.h>
struct bpf_perf_event_data {
- struct pt_regs regs;
+ bpf_user_pt_regs_t regs;
__u64 sample_period;
};
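bpf_perf_event_data now exposes bpf_user_pt_regs_t instead of the full kernel pt_regs, so the context seen by a perf_event program matches what the architecture actually makes visible to userspace (arm64 and s390 override the generic definition above). A minimal hedged sketch of a program body against the new layout; SEC() is the usual loader convention, not part of this header:

#include <linux/types.h>
#include <linux/bpf_perf_event.h>

#define SEC(name) __attribute__((section(name), used))

SEC("perf_event")
int on_sample(struct bpf_perf_event_data *ctx)
{
	/* ctx->regs is bpf_user_pt_regs_t; act only on long-period events */
	return ctx->sample_period >= 100000 ? 1 : 0;
}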
diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
new file mode 100644
index 000000000000..6d9447700e18
--- /dev/null
+++ b/tools/include/uapi/linux/if_link.h
@@ -0,0 +1,944 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_IF_LINK_H
+#define _UAPI_LINUX_IF_LINK_H
+
+#include <linux/types.h>
+#include <linux/netlink.h>
+
+/* This struct should be in sync with struct rtnl_link_stats64 */
+struct rtnl_link_stats {
+ __u32 rx_packets; /* total packets received */
+ __u32 tx_packets; /* total packets transmitted */
+ __u32 rx_bytes; /* total bytes received */
+ __u32 tx_bytes; /* total bytes transmitted */
+ __u32 rx_errors; /* bad packets received */
+ __u32 tx_errors; /* packet transmit problems */
+ __u32 rx_dropped; /* no space in linux buffers */
+ __u32 tx_dropped; /* no space available in linux */
+ __u32 multicast; /* multicast packets received */
+ __u32 collisions;
+
+ /* detailed rx_errors: */
+ __u32 rx_length_errors;
+ __u32 rx_over_errors; /* receiver ring buff overflow */
+ __u32 rx_crc_errors; /* recved pkt with crc error */
+ __u32 rx_frame_errors; /* recv'd frame alignment error */
+ __u32 rx_fifo_errors; /* recv'r fifo overrun */
+ __u32 rx_missed_errors; /* receiver missed packet */
+
+ /* detailed tx_errors */
+ __u32 tx_aborted_errors;
+ __u32 tx_carrier_errors;
+ __u32 tx_fifo_errors;
+ __u32 tx_heartbeat_errors;
+ __u32 tx_window_errors;
+
+ /* for cslip etc */
+ __u32 rx_compressed;
+ __u32 tx_compressed;
+
+ __u32 rx_nohandler; /* dropped, no handler found */
+};
+
+/* The main device statistics structure */
+struct rtnl_link_stats64 {
+ __u64 rx_packets; /* total packets received */
+ __u64 tx_packets; /* total packets transmitted */
+ __u64 rx_bytes; /* total bytes received */
+ __u64 tx_bytes; /* total bytes transmitted */
+ __u64 rx_errors; /* bad packets received */
+ __u64 tx_errors; /* packet transmit problems */
+ __u64 rx_dropped; /* no space in linux buffers */
+ __u64 tx_dropped; /* no space available in linux */
+ __u64 multicast; /* multicast packets received */
+ __u64 collisions;
+
+ /* detailed rx_errors: */
+ __u64 rx_length_errors;
+ __u64 rx_over_errors; /* receiver ring buff overflow */
+ __u64 rx_crc_errors; /* recved pkt with crc error */
+ __u64 rx_frame_errors; /* recv'd frame alignment error */
+ __u64 rx_fifo_errors; /* recv'r fifo overrun */
+ __u64 rx_missed_errors; /* receiver missed packet */
+
+ /* detailed tx_errors */
+ __u64 tx_aborted_errors;
+ __u64 tx_carrier_errors;
+ __u64 tx_fifo_errors;
+ __u64 tx_heartbeat_errors;
+ __u64 tx_window_errors;
+
+ /* for cslip etc */
+ __u64 rx_compressed;
+ __u64 tx_compressed;
+
+ __u64 rx_nohandler; /* dropped, no handler found */
+};
+
+/* The struct should be in sync with struct ifmap */
+struct rtnl_link_ifmap {
+ __u64 mem_start;
+ __u64 mem_end;
+ __u64 base_addr;
+ __u16 irq;
+ __u8 dma;
+ __u8 port;
+};
+
+/*
+ * IFLA_AF_SPEC
+ * Contains nested attributes for address family specific attributes.
+ * Each address family may create a attribute with the address family
+ * number as type and create its own attribute structure in it.
+ *
+ * Example:
+ * [IFLA_AF_SPEC] = {
+ * [AF_INET] = {
+ * [IFLA_INET_CONF] = ...,
+ * },
+ * [AF_INET6] = {
+ * [IFLA_INET6_FLAGS] = ...,
+ * [IFLA_INET6_CONF] = ...,
+ * }
+ * }
+ */
+
+enum {
+ IFLA_UNSPEC,
+ IFLA_ADDRESS,
+ IFLA_BROADCAST,
+ IFLA_IFNAME,
+ IFLA_MTU,
+ IFLA_LINK,
+ IFLA_QDISC,
+ IFLA_STATS,
+ IFLA_COST,
+#define IFLA_COST IFLA_COST
+ IFLA_PRIORITY,
+#define IFLA_PRIORITY IFLA_PRIORITY
+ IFLA_MASTER,
+#define IFLA_MASTER IFLA_MASTER
+ IFLA_WIRELESS, /* Wireless Extension event - see wireless.h */
+#define IFLA_WIRELESS IFLA_WIRELESS
+ IFLA_PROTINFO, /* Protocol specific information for a link */
+#define IFLA_PROTINFO IFLA_PROTINFO
+ IFLA_TXQLEN,
+#define IFLA_TXQLEN IFLA_TXQLEN
+ IFLA_MAP,
+#define IFLA_MAP IFLA_MAP
+ IFLA_WEIGHT,
+#define IFLA_WEIGHT IFLA_WEIGHT
+ IFLA_OPERSTATE,
+ IFLA_LINKMODE,
+ IFLA_LINKINFO,
+#define IFLA_LINKINFO IFLA_LINKINFO
+ IFLA_NET_NS_PID,
+ IFLA_IFALIAS,
+ IFLA_NUM_VF, /* Number of VFs if device is SR-IOV PF */
+ IFLA_VFINFO_LIST,
+ IFLA_STATS64,
+ IFLA_VF_PORTS,
+ IFLA_PORT_SELF,
+ IFLA_AF_SPEC,
+ IFLA_GROUP, /* Group the device belongs to */
+ IFLA_NET_NS_FD,
+ IFLA_EXT_MASK, /* Extended info mask, VFs, etc */
+ IFLA_PROMISCUITY, /* Promiscuity count: > 0 means acts PROMISC */
+#define IFLA_PROMISCUITY IFLA_PROMISCUITY
+ IFLA_NUM_TX_QUEUES,
+ IFLA_NUM_RX_QUEUES,
+ IFLA_CARRIER,
+ IFLA_PHYS_PORT_ID,
+ IFLA_CARRIER_CHANGES,
+ IFLA_PHYS_SWITCH_ID,
+ IFLA_LINK_NETNSID,
+ IFLA_PHYS_PORT_NAME,
+ IFLA_PROTO_DOWN,
+ IFLA_GSO_MAX_SEGS,
+ IFLA_GSO_MAX_SIZE,
+ IFLA_PAD,
+ IFLA_XDP,
+ IFLA_EVENT,
+ IFLA_NEW_NETNSID,
+ IFLA_IF_NETNSID,
+ IFLA_CARRIER_UP_COUNT,
+ IFLA_CARRIER_DOWN_COUNT,
+ IFLA_NEW_IFINDEX,
+ __IFLA_MAX
+};
+
+
+#define IFLA_MAX (__IFLA_MAX - 1)
+
+/* backwards compatibility for userspace */
+#ifndef __KERNEL__
+#define IFLA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct ifinfomsg))))
+#define IFLA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifinfomsg))
+#endif
+
+enum {
+ IFLA_INET_UNSPEC,
+ IFLA_INET_CONF,
+ __IFLA_INET_MAX,
+};
+
+#define IFLA_INET_MAX (__IFLA_INET_MAX - 1)
+
+/* ifi_flags.
+
+ IFF_* flags.
+
+ The only change is:
+ IFF_LOOPBACK, IFF_BROADCAST and IFF_POINTOPOINT are
+ more not changeable by user. They describe link media
+ characteristics and set by device driver.
+
+ Comments:
+ - Combination IFF_BROADCAST|IFF_POINTOPOINT is invalid
+ - If neither of these three flags are set;
+ the interface is NBMA.
+
+ - IFF_MULTICAST does not mean anything special:
+ multicasts can be used on all not-NBMA links.
+ IFF_MULTICAST means that this media uses special encapsulation
+ for multicast frames. Apparently, all IFF_POINTOPOINT and
+ IFF_BROADCAST devices are able to use multicasts too.
+ */
+
+/* IFLA_LINK.
+ For usual devices it is equal ifi_index.
+ If it is a "virtual interface" (f.e. tunnel), ifi_link
+ can point to real physical interface (f.e. for bandwidth calculations),
+ or maybe 0, what means, that real media is unknown (usual
+ for IPIP tunnels, when route to endpoint is allowed to change)
+ */
+
+/* Subtype attributes for IFLA_PROTINFO */
+enum {
+ IFLA_INET6_UNSPEC,
+ IFLA_INET6_FLAGS, /* link flags */
+ IFLA_INET6_CONF, /* sysctl parameters */
+ IFLA_INET6_STATS, /* statistics */
+ IFLA_INET6_MCAST, /* MC things. What of them? */
+ IFLA_INET6_CACHEINFO, /* time values and max reasm size */
+ IFLA_INET6_ICMP6STATS, /* statistics (icmpv6) */
+ IFLA_INET6_TOKEN, /* device token */
+ IFLA_INET6_ADDR_GEN_MODE, /* implicit address generator mode */
+ __IFLA_INET6_MAX
+};
+
+#define IFLA_INET6_MAX (__IFLA_INET6_MAX - 1)
+
+enum in6_addr_gen_mode {
+ IN6_ADDR_GEN_MODE_EUI64,
+ IN6_ADDR_GEN_MODE_NONE,
+ IN6_ADDR_GEN_MODE_STABLE_PRIVACY,
+ IN6_ADDR_GEN_MODE_RANDOM,
+};
+
+/* Bridge section */
+
+enum {
+ IFLA_BR_UNSPEC,
+ IFLA_BR_FORWARD_DELAY,
+ IFLA_BR_HELLO_TIME,
+ IFLA_BR_MAX_AGE,
+ IFLA_BR_AGEING_TIME,
+ IFLA_BR_STP_STATE,
+ IFLA_BR_PRIORITY,
+ IFLA_BR_VLAN_FILTERING,
+ IFLA_BR_VLAN_PROTOCOL,
+ IFLA_BR_GROUP_FWD_MASK,
+ IFLA_BR_ROOT_ID,
+ IFLA_BR_BRIDGE_ID,
+ IFLA_BR_ROOT_PORT,
+ IFLA_BR_ROOT_PATH_COST,
+ IFLA_BR_TOPOLOGY_CHANGE,
+ IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
+ IFLA_BR_HELLO_TIMER,
+ IFLA_BR_TCN_TIMER,
+ IFLA_BR_TOPOLOGY_CHANGE_TIMER,
+ IFLA_BR_GC_TIMER,
+ IFLA_BR_GROUP_ADDR,
+ IFLA_BR_FDB_FLUSH,
+ IFLA_BR_MCAST_ROUTER,
+ IFLA_BR_MCAST_SNOOPING,
+ IFLA_BR_MCAST_QUERY_USE_IFADDR,
+ IFLA_BR_MCAST_QUERIER,
+ IFLA_BR_MCAST_HASH_ELASTICITY,
+ IFLA_BR_MCAST_HASH_MAX,
+ IFLA_BR_MCAST_LAST_MEMBER_CNT,
+ IFLA_BR_MCAST_STARTUP_QUERY_CNT,
+ IFLA_BR_MCAST_LAST_MEMBER_INTVL,
+ IFLA_BR_MCAST_MEMBERSHIP_INTVL,
+ IFLA_BR_MCAST_QUERIER_INTVL,
+ IFLA_BR_MCAST_QUERY_INTVL,
+ IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
+ IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
+ IFLA_BR_NF_CALL_IPTABLES,
+ IFLA_BR_NF_CALL_IP6TABLES,
+ IFLA_BR_NF_CALL_ARPTABLES,
+ IFLA_BR_VLAN_DEFAULT_PVID,
+ IFLA_BR_PAD,
+ IFLA_BR_VLAN_STATS_ENABLED,
+ IFLA_BR_MCAST_STATS_ENABLED,
+ IFLA_BR_MCAST_IGMP_VERSION,
+ IFLA_BR_MCAST_MLD_VERSION,
+ __IFLA_BR_MAX,
+};
+
+#define IFLA_BR_MAX (__IFLA_BR_MAX - 1)
+
+struct ifla_bridge_id {
+ __u8 prio[2];
+ __u8 addr[6]; /* ETH_ALEN */
+};
+
+enum {
+ BRIDGE_MODE_UNSPEC,
+ BRIDGE_MODE_HAIRPIN,
+};
+
+enum {
+ IFLA_BRPORT_UNSPEC,
+ IFLA_BRPORT_STATE, /* Spanning tree state */
+ IFLA_BRPORT_PRIORITY, /* " priority */
+ IFLA_BRPORT_COST, /* " cost */
+ IFLA_BRPORT_MODE, /* mode (hairpin) */
+ IFLA_BRPORT_GUARD, /* bpdu guard */
+ IFLA_BRPORT_PROTECT, /* root port protection */
+ IFLA_BRPORT_FAST_LEAVE, /* multicast fast leave */
+ IFLA_BRPORT_LEARNING, /* mac learning */
+ IFLA_BRPORT_UNICAST_FLOOD, /* flood unicast traffic */
+ IFLA_BRPORT_PROXYARP, /* proxy ARP */
+ IFLA_BRPORT_LEARNING_SYNC, /* mac learning sync from device */
+ IFLA_BRPORT_PROXYARP_WIFI, /* proxy ARP for Wi-Fi */
+ IFLA_BRPORT_ROOT_ID, /* designated root */
+ IFLA_BRPORT_BRIDGE_ID, /* designated bridge */
+ IFLA_BRPORT_DESIGNATED_PORT,
+ IFLA_BRPORT_DESIGNATED_COST,
+ IFLA_BRPORT_ID,
+ IFLA_BRPORT_NO,
+ IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
+ IFLA_BRPORT_CONFIG_PENDING,
+ IFLA_BRPORT_MESSAGE_AGE_TIMER,
+ IFLA_BRPORT_FORWARD_DELAY_TIMER,
+ IFLA_BRPORT_HOLD_TIMER,
+ IFLA_BRPORT_FLUSH,
+ IFLA_BRPORT_MULTICAST_ROUTER,
+ IFLA_BRPORT_PAD,
+ IFLA_BRPORT_MCAST_FLOOD,
+ IFLA_BRPORT_MCAST_TO_UCAST,
+ IFLA_BRPORT_VLAN_TUNNEL,
+ IFLA_BRPORT_BCAST_FLOOD,
+ IFLA_BRPORT_GROUP_FWD_MASK,
+ IFLA_BRPORT_NEIGH_SUPPRESS,
+ __IFLA_BRPORT_MAX
+};
+#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
+
+struct ifla_cacheinfo {
+ __u32 max_reasm_len;
+ __u32 tstamp; /* ipv6InterfaceTable updated timestamp */
+ __u32 reachable_time;
+ __u32 retrans_time;
+};
+
+enum {
+ IFLA_INFO_UNSPEC,
+ IFLA_INFO_KIND,
+ IFLA_INFO_DATA,
+ IFLA_INFO_XSTATS,
+ IFLA_INFO_SLAVE_KIND,
+ IFLA_INFO_SLAVE_DATA,
+ __IFLA_INFO_MAX,
+};
+
+#define IFLA_INFO_MAX (__IFLA_INFO_MAX - 1)
+
+/* VLAN section */
+
+enum {
+ IFLA_VLAN_UNSPEC,
+ IFLA_VLAN_ID,
+ IFLA_VLAN_FLAGS,
+ IFLA_VLAN_EGRESS_QOS,
+ IFLA_VLAN_INGRESS_QOS,
+ IFLA_VLAN_PROTOCOL,
+ __IFLA_VLAN_MAX,
+};
+
+#define IFLA_VLAN_MAX (__IFLA_VLAN_MAX - 1)
+
+struct ifla_vlan_flags {
+ __u32 flags;
+ __u32 mask;
+};
+
+enum {
+ IFLA_VLAN_QOS_UNSPEC,
+ IFLA_VLAN_QOS_MAPPING,
+ __IFLA_VLAN_QOS_MAX
+};
+
+#define IFLA_VLAN_QOS_MAX (__IFLA_VLAN_QOS_MAX - 1)
+
+struct ifla_vlan_qos_mapping {
+ __u32 from;
+ __u32 to;
+};
+
+/* MACVLAN section */
+enum {
+ IFLA_MACVLAN_UNSPEC,
+ IFLA_MACVLAN_MODE,
+ IFLA_MACVLAN_FLAGS,
+ IFLA_MACVLAN_MACADDR_MODE,
+ IFLA_MACVLAN_MACADDR,
+ IFLA_MACVLAN_MACADDR_DATA,
+ IFLA_MACVLAN_MACADDR_COUNT,
+ __IFLA_MACVLAN_MAX,
+};
+
+#define IFLA_MACVLAN_MAX (__IFLA_MACVLAN_MAX - 1)
+
+enum macvlan_mode {
+ MACVLAN_MODE_PRIVATE = 1, /* don't talk to other macvlans */
+ MACVLAN_MODE_VEPA = 2, /* talk to other ports through ext bridge */
+ MACVLAN_MODE_BRIDGE = 4, /* talk to bridge ports directly */
+ MACVLAN_MODE_PASSTHRU = 8,/* take over the underlying device */
+ MACVLAN_MODE_SOURCE = 16,/* use source MAC address list to assign */
+};
+
+enum macvlan_macaddr_mode {
+ MACVLAN_MACADDR_ADD,
+ MACVLAN_MACADDR_DEL,
+ MACVLAN_MACADDR_FLUSH,
+ MACVLAN_MACADDR_SET,
+};
+
+#define MACVLAN_FLAG_NOPROMISC 1
+
+/* VRF section */
+enum {
+ IFLA_VRF_UNSPEC,
+ IFLA_VRF_TABLE,
+ __IFLA_VRF_MAX
+};
+
+#define IFLA_VRF_MAX (__IFLA_VRF_MAX - 1)
+
+enum {
+ IFLA_VRF_PORT_UNSPEC,
+ IFLA_VRF_PORT_TABLE,
+ __IFLA_VRF_PORT_MAX
+};
+
+#define IFLA_VRF_PORT_MAX (__IFLA_VRF_PORT_MAX - 1)
+
+/* MACSEC section */
+enum {
+ IFLA_MACSEC_UNSPEC,
+ IFLA_MACSEC_SCI,
+ IFLA_MACSEC_PORT,
+ IFLA_MACSEC_ICV_LEN,
+ IFLA_MACSEC_CIPHER_SUITE,
+ IFLA_MACSEC_WINDOW,
+ IFLA_MACSEC_ENCODING_SA,
+ IFLA_MACSEC_ENCRYPT,
+ IFLA_MACSEC_PROTECT,
+ IFLA_MACSEC_INC_SCI,
+ IFLA_MACSEC_ES,
+ IFLA_MACSEC_SCB,
+ IFLA_MACSEC_REPLAY_PROTECT,
+ IFLA_MACSEC_VALIDATION,
+ IFLA_MACSEC_PAD,
+ __IFLA_MACSEC_MAX,
+};
+
+#define IFLA_MACSEC_MAX (__IFLA_MACSEC_MAX - 1)
+
+enum macsec_validation_type {
+ MACSEC_VALIDATE_DISABLED = 0,
+ MACSEC_VALIDATE_CHECK = 1,
+ MACSEC_VALIDATE_STRICT = 2,
+ __MACSEC_VALIDATE_END,
+ MACSEC_VALIDATE_MAX = __MACSEC_VALIDATE_END - 1,
+};
+
+/* IPVLAN section */
+enum {
+ IFLA_IPVLAN_UNSPEC,
+ IFLA_IPVLAN_MODE,
+ IFLA_IPVLAN_FLAGS,
+ __IFLA_IPVLAN_MAX
+};
+
+#define IFLA_IPVLAN_MAX (__IFLA_IPVLAN_MAX - 1)
+
+enum ipvlan_mode {
+ IPVLAN_MODE_L2 = 0,
+ IPVLAN_MODE_L3,
+ IPVLAN_MODE_L3S,
+ IPVLAN_MODE_MAX
+};
+
+#define IPVLAN_F_PRIVATE 0x01
+#define IPVLAN_F_VEPA 0x02
+
+/* VXLAN section */
+enum {
+ IFLA_VXLAN_UNSPEC,
+ IFLA_VXLAN_ID,
+ IFLA_VXLAN_GROUP, /* group or remote address */
+ IFLA_VXLAN_LINK,
+ IFLA_VXLAN_LOCAL,
+ IFLA_VXLAN_TTL,
+ IFLA_VXLAN_TOS,
+ IFLA_VXLAN_LEARNING,
+ IFLA_VXLAN_AGEING,
+ IFLA_VXLAN_LIMIT,
+ IFLA_VXLAN_PORT_RANGE, /* source port */
+ IFLA_VXLAN_PROXY,
+ IFLA_VXLAN_RSC,
+ IFLA_VXLAN_L2MISS,
+ IFLA_VXLAN_L3MISS,
+ IFLA_VXLAN_PORT, /* destination port */
+ IFLA_VXLAN_GROUP6,
+ IFLA_VXLAN_LOCAL6,
+ IFLA_VXLAN_UDP_CSUM,
+ IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
+ IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
+ IFLA_VXLAN_REMCSUM_TX,
+ IFLA_VXLAN_REMCSUM_RX,
+ IFLA_VXLAN_GBP,
+ IFLA_VXLAN_REMCSUM_NOPARTIAL,
+ IFLA_VXLAN_COLLECT_METADATA,
+ IFLA_VXLAN_LABEL,
+ IFLA_VXLAN_GPE,
+ __IFLA_VXLAN_MAX
+};
+#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
+
+struct ifla_vxlan_port_range {
+ __be16 low;
+ __be16 high;
+};
+
+/* GENEVE section */
+enum {
+ IFLA_GENEVE_UNSPEC,
+ IFLA_GENEVE_ID,
+ IFLA_GENEVE_REMOTE,
+ IFLA_GENEVE_TTL,
+ IFLA_GENEVE_TOS,
+ IFLA_GENEVE_PORT, /* destination port */
+ IFLA_GENEVE_COLLECT_METADATA,
+ IFLA_GENEVE_REMOTE6,
+ IFLA_GENEVE_UDP_CSUM,
+ IFLA_GENEVE_UDP_ZERO_CSUM6_TX,
+ IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
+ IFLA_GENEVE_LABEL,
+ __IFLA_GENEVE_MAX
+};
+#define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1)
+
+/* PPP section */
+enum {
+ IFLA_PPP_UNSPEC,
+ IFLA_PPP_DEV_FD,
+ __IFLA_PPP_MAX
+};
+#define IFLA_PPP_MAX (__IFLA_PPP_MAX - 1)
+
+/* GTP section */
+
+enum ifla_gtp_role {
+ GTP_ROLE_GGSN = 0,
+ GTP_ROLE_SGSN,
+};
+
+enum {
+ IFLA_GTP_UNSPEC,
+ IFLA_GTP_FD0,
+ IFLA_GTP_FD1,
+ IFLA_GTP_PDP_HASHSIZE,
+ IFLA_GTP_ROLE,
+ __IFLA_GTP_MAX,
+};
+#define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1)
+
+/* Bonding section */
+
+enum {
+ IFLA_BOND_UNSPEC,
+ IFLA_BOND_MODE,
+ IFLA_BOND_ACTIVE_SLAVE,
+ IFLA_BOND_MIIMON,
+ IFLA_BOND_UPDELAY,
+ IFLA_BOND_DOWNDELAY,
+ IFLA_BOND_USE_CARRIER,
+ IFLA_BOND_ARP_INTERVAL,
+ IFLA_BOND_ARP_IP_TARGET,
+ IFLA_BOND_ARP_VALIDATE,
+ IFLA_BOND_ARP_ALL_TARGETS,
+ IFLA_BOND_PRIMARY,
+ IFLA_BOND_PRIMARY_RESELECT,
+ IFLA_BOND_FAIL_OVER_MAC,
+ IFLA_BOND_XMIT_HASH_POLICY,
+ IFLA_BOND_RESEND_IGMP,
+ IFLA_BOND_NUM_PEER_NOTIF,
+ IFLA_BOND_ALL_SLAVES_ACTIVE,
+ IFLA_BOND_MIN_LINKS,
+ IFLA_BOND_LP_INTERVAL,
+ IFLA_BOND_PACKETS_PER_SLAVE,
+ IFLA_BOND_AD_LACP_RATE,
+ IFLA_BOND_AD_SELECT,
+ IFLA_BOND_AD_INFO,
+ IFLA_BOND_AD_ACTOR_SYS_PRIO,
+ IFLA_BOND_AD_USER_PORT_KEY,
+ IFLA_BOND_AD_ACTOR_SYSTEM,
+ IFLA_BOND_TLB_DYNAMIC_LB,
+ __IFLA_BOND_MAX,
+};
+
+#define IFLA_BOND_MAX (__IFLA_BOND_MAX - 1)
+
+enum {
+ IFLA_BOND_AD_INFO_UNSPEC,
+ IFLA_BOND_AD_INFO_AGGREGATOR,
+ IFLA_BOND_AD_INFO_NUM_PORTS,
+ IFLA_BOND_AD_INFO_ACTOR_KEY,
+ IFLA_BOND_AD_INFO_PARTNER_KEY,
+ IFLA_BOND_AD_INFO_PARTNER_MAC,
+ __IFLA_BOND_AD_INFO_MAX,
+};
+
+#define IFLA_BOND_AD_INFO_MAX (__IFLA_BOND_AD_INFO_MAX - 1)
+
+enum {
+ IFLA_BOND_SLAVE_UNSPEC,
+ IFLA_BOND_SLAVE_STATE,
+ IFLA_BOND_SLAVE_MII_STATUS,
+ IFLA_BOND_SLAVE_LINK_FAILURE_COUNT,
+ IFLA_BOND_SLAVE_PERM_HWADDR,
+ IFLA_BOND_SLAVE_QUEUE_ID,
+ IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
+ IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE,
+ IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE,
+ __IFLA_BOND_SLAVE_MAX,
+};
+
+#define IFLA_BOND_SLAVE_MAX (__IFLA_BOND_SLAVE_MAX - 1)
+
+/* SR-IOV virtual function management section */
+
+enum {
+ IFLA_VF_INFO_UNSPEC,
+ IFLA_VF_INFO,
+ __IFLA_VF_INFO_MAX,
+};
+
+#define IFLA_VF_INFO_MAX (__IFLA_VF_INFO_MAX - 1)
+
+enum {
+ IFLA_VF_UNSPEC,
+ IFLA_VF_MAC, /* Hardware queue specific attributes */
+ IFLA_VF_VLAN, /* VLAN ID and QoS */
+ IFLA_VF_TX_RATE, /* Max TX Bandwidth Allocation */
+ IFLA_VF_SPOOFCHK, /* Spoof Checking on/off switch */
+ IFLA_VF_LINK_STATE, /* link state enable/disable/auto switch */
+ IFLA_VF_RATE, /* Min and Max TX Bandwidth Allocation */
+ IFLA_VF_RSS_QUERY_EN, /* RSS Redirection Table and Hash Key query
+ * on/off switch
+ */
+ IFLA_VF_STATS, /* network device statistics */
+ IFLA_VF_TRUST, /* Trust VF */
+ IFLA_VF_IB_NODE_GUID, /* VF Infiniband node GUID */
+ IFLA_VF_IB_PORT_GUID, /* VF Infiniband port GUID */
+ IFLA_VF_VLAN_LIST, /* nested list of vlans, option for QinQ */
+ __IFLA_VF_MAX,
+};
+
+#define IFLA_VF_MAX (__IFLA_VF_MAX - 1)
+
+struct ifla_vf_mac {
+ __u32 vf;
+ __u8 mac[32]; /* MAX_ADDR_LEN */
+};
+
+struct ifla_vf_vlan {
+ __u32 vf;
+ __u32 vlan; /* 0 - 4095, 0 disables VLAN filter */
+ __u32 qos;
+};
+
+enum {
+ IFLA_VF_VLAN_INFO_UNSPEC,
+ IFLA_VF_VLAN_INFO, /* VLAN ID, QoS and VLAN protocol */
+ __IFLA_VF_VLAN_INFO_MAX,
+};
+
+#define IFLA_VF_VLAN_INFO_MAX (__IFLA_VF_VLAN_INFO_MAX - 1)
+#define MAX_VLAN_LIST_LEN 1
+
+struct ifla_vf_vlan_info {
+ __u32 vf;
+ __u32 vlan; /* 0 - 4095, 0 disables VLAN filter */
+ __u32 qos;
+ __be16 vlan_proto; /* VLAN protocol either 802.1Q or 802.1ad */
+};
+
+struct ifla_vf_tx_rate {
+ __u32 vf;
+ __u32 rate; /* Max TX bandwidth in Mbps, 0 disables throttling */
+};
+
+struct ifla_vf_rate {
+ __u32 vf;
+ __u32 min_tx_rate; /* Min Bandwidth in Mbps */
+ __u32 max_tx_rate; /* Max Bandwidth in Mbps */
+};
+
+struct ifla_vf_spoofchk {
+ __u32 vf;
+ __u32 setting;
+};
+
+struct ifla_vf_guid {
+ __u32 vf;
+ __u64 guid;
+};
+
+enum {
+ IFLA_VF_LINK_STATE_AUTO, /* link state of the uplink */
+ IFLA_VF_LINK_STATE_ENABLE, /* link always up */
+ IFLA_VF_LINK_STATE_DISABLE, /* link always down */
+ __IFLA_VF_LINK_STATE_MAX,
+};
+
+struct ifla_vf_link_state {
+ __u32 vf;
+ __u32 link_state;
+};
+
+struct ifla_vf_rss_query_en {
+ __u32 vf;
+ __u32 setting;
+};
+
+enum {
+ IFLA_VF_STATS_RX_PACKETS,
+ IFLA_VF_STATS_TX_PACKETS,
+ IFLA_VF_STATS_RX_BYTES,
+ IFLA_VF_STATS_TX_BYTES,
+ IFLA_VF_STATS_BROADCAST,
+ IFLA_VF_STATS_MULTICAST,
+ IFLA_VF_STATS_PAD,
+ IFLA_VF_STATS_RX_DROPPED,
+ IFLA_VF_STATS_TX_DROPPED,
+ __IFLA_VF_STATS_MAX,
+};
+
+#define IFLA_VF_STATS_MAX (__IFLA_VF_STATS_MAX - 1)
+
+struct ifla_vf_trust {
+ __u32 vf;
+ __u32 setting;
+};
+
+/* VF ports management section
+ *
+ * Nested layout of set/get msg is:
+ *
+ * [IFLA_NUM_VF]
+ * [IFLA_VF_PORTS]
+ * [IFLA_VF_PORT]
+ * [IFLA_PORT_*], ...
+ * [IFLA_VF_PORT]
+ * [IFLA_PORT_*], ...
+ * ...
+ * [IFLA_PORT_SELF]
+ * [IFLA_PORT_*], ...
+ */
+
+enum {
+ IFLA_VF_PORT_UNSPEC,
+ IFLA_VF_PORT, /* nest */
+ __IFLA_VF_PORT_MAX,
+};
+
+#define IFLA_VF_PORT_MAX (__IFLA_VF_PORT_MAX - 1)
+
+enum {
+ IFLA_PORT_UNSPEC,
+ IFLA_PORT_VF, /* __u32 */
+ IFLA_PORT_PROFILE, /* string */
+ IFLA_PORT_VSI_TYPE, /* 802.1Qbg (pre-)standard VDP */
+ IFLA_PORT_INSTANCE_UUID, /* binary UUID */
+ IFLA_PORT_HOST_UUID, /* binary UUID */
+ IFLA_PORT_REQUEST, /* __u8 */
+ IFLA_PORT_RESPONSE, /* __u16, output only */
+ __IFLA_PORT_MAX,
+};
+
+#define IFLA_PORT_MAX (__IFLA_PORT_MAX - 1)
+
+#define PORT_PROFILE_MAX 40
+#define PORT_UUID_MAX 16
+#define PORT_SELF_VF -1
+
+enum {
+ PORT_REQUEST_PREASSOCIATE = 0,
+ PORT_REQUEST_PREASSOCIATE_RR,
+ PORT_REQUEST_ASSOCIATE,
+ PORT_REQUEST_DISASSOCIATE,
+};
+
+enum {
+ PORT_VDP_RESPONSE_SUCCESS = 0,
+ PORT_VDP_RESPONSE_INVALID_FORMAT,
+ PORT_VDP_RESPONSE_INSUFFICIENT_RESOURCES,
+ PORT_VDP_RESPONSE_UNUSED_VTID,
+ PORT_VDP_RESPONSE_VTID_VIOLATION,
+ PORT_VDP_RESPONSE_VTID_VERSION_VIOALTION,
+ PORT_VDP_RESPONSE_OUT_OF_SYNC,
+ /* 0x08-0xFF reserved for future VDP use */
+ PORT_PROFILE_RESPONSE_SUCCESS = 0x100,
+ PORT_PROFILE_RESPONSE_INPROGRESS,
+ PORT_PROFILE_RESPONSE_INVALID,
+ PORT_PROFILE_RESPONSE_BADSTATE,
+ PORT_PROFILE_RESPONSE_INSUFFICIENT_RESOURCES,
+ PORT_PROFILE_RESPONSE_ERROR,
+};
+
+struct ifla_port_vsi {
+ __u8 vsi_mgr_id;
+ __u8 vsi_type_id[3];
+ __u8 vsi_type_version;
+ __u8 pad[3];
+};
+
+
+/* IPoIB section */
+
+enum {
+ IFLA_IPOIB_UNSPEC,
+ IFLA_IPOIB_PKEY,
+ IFLA_IPOIB_MODE,
+ IFLA_IPOIB_UMCAST,
+ __IFLA_IPOIB_MAX
+};
+
+enum {
+ IPOIB_MODE_DATAGRAM = 0, /* using unreliable datagram QPs */
+ IPOIB_MODE_CONNECTED = 1, /* using connected QPs */
+};
+
+#define IFLA_IPOIB_MAX (__IFLA_IPOIB_MAX - 1)
+
+
+/* HSR section */
+
+enum {
+ IFLA_HSR_UNSPEC,
+ IFLA_HSR_SLAVE1,
+ IFLA_HSR_SLAVE2,
+ IFLA_HSR_MULTICAST_SPEC, /* Last byte of supervision addr */
+ IFLA_HSR_SUPERVISION_ADDR, /* Supervision frame multicast addr */
+ IFLA_HSR_SEQ_NR,
+ IFLA_HSR_VERSION, /* HSR version */
+ __IFLA_HSR_MAX,
+};
+
+#define IFLA_HSR_MAX (__IFLA_HSR_MAX - 1)
+
+/* STATS section */
+
+struct if_stats_msg {
+ __u8 family;
+ __u8 pad1;
+ __u16 pad2;
+ __u32 ifindex;
+ __u32 filter_mask;
+};
+
+/* A stats attribute can be netdev specific or a global stat.
+ * For netdev stats, lets use the prefix IFLA_STATS_LINK_*
+ */
+enum {
+ IFLA_STATS_UNSPEC, /* also used as 64bit pad attribute */
+ IFLA_STATS_LINK_64,
+ IFLA_STATS_LINK_XSTATS,
+ IFLA_STATS_LINK_XSTATS_SLAVE,
+ IFLA_STATS_LINK_OFFLOAD_XSTATS,
+ IFLA_STATS_AF_SPEC,
+ __IFLA_STATS_MAX,
+};
+
+#define IFLA_STATS_MAX (__IFLA_STATS_MAX - 1)
+
+#define IFLA_STATS_FILTER_BIT(ATTR) (1 << (ATTR - 1))
+
+/* These are embedded into IFLA_STATS_LINK_XSTATS:
+ * [IFLA_STATS_LINK_XSTATS]
+ * -> [LINK_XSTATS_TYPE_xxx]
+ * -> [rtnl link type specific attributes]
+ */
+enum {
+ LINK_XSTATS_TYPE_UNSPEC,
+ LINK_XSTATS_TYPE_BRIDGE,
+ __LINK_XSTATS_TYPE_MAX
+};
+#define LINK_XSTATS_TYPE_MAX (__LINK_XSTATS_TYPE_MAX - 1)
+
+/* These are stats embedded into IFLA_STATS_LINK_OFFLOAD_XSTATS */
+enum {
+ IFLA_OFFLOAD_XSTATS_UNSPEC,
+ IFLA_OFFLOAD_XSTATS_CPU_HIT, /* struct rtnl_link_stats64 */
+ __IFLA_OFFLOAD_XSTATS_MAX
+};
+#define IFLA_OFFLOAD_XSTATS_MAX (__IFLA_OFFLOAD_XSTATS_MAX - 1)
+
+/* XDP section */
+
+#define XDP_FLAGS_UPDATE_IF_NOEXIST (1U << 0)
+#define XDP_FLAGS_SKB_MODE (1U << 1)
+#define XDP_FLAGS_DRV_MODE (1U << 2)
+#define XDP_FLAGS_HW_MODE (1U << 3)
+#define XDP_FLAGS_MODES (XDP_FLAGS_SKB_MODE | \
+ XDP_FLAGS_DRV_MODE | \
+ XDP_FLAGS_HW_MODE)
+#define XDP_FLAGS_MASK (XDP_FLAGS_UPDATE_IF_NOEXIST | \
+ XDP_FLAGS_MODES)
+
+/* These are stored into IFLA_XDP_ATTACHED on dump. */
+enum {
+ XDP_ATTACHED_NONE = 0,
+ XDP_ATTACHED_DRV,
+ XDP_ATTACHED_SKB,
+ XDP_ATTACHED_HW,
+};
+
+enum {
+ IFLA_XDP_UNSPEC,
+ IFLA_XDP_FD,
+ IFLA_XDP_ATTACHED,
+ IFLA_XDP_FLAGS,
+ IFLA_XDP_PROG_ID,
+ __IFLA_XDP_MAX,
+};
+
+#define IFLA_XDP_MAX (__IFLA_XDP_MAX - 1)
+
+enum {
+ IFLA_EVENT_NONE,
+ IFLA_EVENT_REBOOT, /* internal reset / reboot */
+ IFLA_EVENT_FEATURES, /* change in offload features */
+ IFLA_EVENT_BONDING_FAILOVER, /* change in active slave */
+ IFLA_EVENT_NOTIFY_PEERS, /* re-sent grat. arp/ndisc */
+ IFLA_EVENT_IGMP_RESEND, /* re-sent IGMP JOIN */
+ IFLA_EVENT_BONDING_OPTIONS, /* change in bonding options */
+};
+
+#endif /* _UAPI_LINUX_IF_LINK_H */
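Having if_link.h under tools/include lets bpftool and libbpf build their netlink code (IFLA_XDP attach, link dumps) without depending on the headers installed on the build host. A small sketch that only builds a request: an RTM_GETSTATS dump asking for the 64-bit link counters of every interface, using if_stats_msg and IFLA_STATS_FILTER_BIT() from this header (RTM_GETSTATS itself comes from <linux/rtnetlink.h>):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>	/* RTM_GETSTATS */
#include <linux/if_link.h>

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct if_stats_msg ifsm;
	} req;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.ifsm));
	req.nlh.nlmsg_type = RTM_GETSTATS;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.ifsm.family = AF_UNSPEC;
	req.ifsm.filter_mask = IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64);

	printf("request is %u bytes, filter_mask=0x%x\n",
	       req.nlh.nlmsg_len, req.ifsm.filter_mask);
	return 0;
}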
diff --git a/tools/include/uapi/linux/kcmp.h b/tools/include/uapi/linux/kcmp.h
index 481e103da78e..ef1305010925 100644
--- a/tools/include/uapi/linux/kcmp.h
+++ b/tools/include/uapi/linux/kcmp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_LINUX_KCMP_H
#define _UAPI_LINUX_KCMP_H
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 7e99999d6236..0fb5ef939732 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -630,9 +630,9 @@ struct kvm_s390_irq {
struct kvm_s390_irq_state {
__u64 buf;
- __u32 flags;
+ __u32 flags; /* will stay unused for compatibility reasons */
__u32 len;
- __u32 reserved[4];
+ __u32 reserved[4]; /* will stay unused for compatibility reasons */
};
/* for KVM_SET_GUEST_DEBUG */
@@ -931,6 +931,9 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_PPC_SMT_POSSIBLE 147
#define KVM_CAP_HYPERV_SYNIC2 148
#define KVM_CAP_HYPERV_VP_INDEX 149
+#define KVM_CAP_S390_AIS_MIGRATION 150
+#define KVM_CAP_PPC_GET_CPU_CHAR 151
+#define KVM_CAP_S390_BPB 152
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1260,6 +1263,8 @@ struct kvm_s390_ucas_mapping {
#define KVM_PPC_CONFIGURE_V3_MMU _IOW(KVMIO, 0xaf, struct kvm_ppc_mmuv3_cfg)
/* Available with KVM_CAP_PPC_RADIX_MMU */
#define KVM_PPC_GET_RMMU_INFO _IOW(KVMIO, 0xb0, struct kvm_ppc_rmmu_info)
+/* Available with KVM_CAP_PPC_GET_CPU_CHAR */
+#define KVM_PPC_GET_CPU_CHAR _IOR(KVMIO, 0xb1, struct kvm_ppc_cpu_char)
/* ioctl for vm fd */
#define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device)
@@ -1357,6 +1362,96 @@ struct kvm_s390_ucas_mapping {
/* Available with KVM_CAP_S390_CMMA_MIGRATION */
#define KVM_S390_GET_CMMA_BITS _IOWR(KVMIO, 0xb8, struct kvm_s390_cmma_log)
#define KVM_S390_SET_CMMA_BITS _IOW(KVMIO, 0xb9, struct kvm_s390_cmma_log)
+/* Memory Encryption Commands */
+#define KVM_MEMORY_ENCRYPT_OP _IOWR(KVMIO, 0xba, unsigned long)
+
+struct kvm_enc_region {
+ __u64 addr;
+ __u64 size;
+};
+
+#define KVM_MEMORY_ENCRYPT_REG_REGION _IOR(KVMIO, 0xbb, struct kvm_enc_region)
+#define KVM_MEMORY_ENCRYPT_UNREG_REGION _IOR(KVMIO, 0xbc, struct kvm_enc_region)
+
+/* Secure Encrypted Virtualization command */
+enum sev_cmd_id {
+ /* Guest initialization commands */
+ KVM_SEV_INIT = 0,
+ KVM_SEV_ES_INIT,
+ /* Guest launch commands */
+ KVM_SEV_LAUNCH_START,
+ KVM_SEV_LAUNCH_UPDATE_DATA,
+ KVM_SEV_LAUNCH_UPDATE_VMSA,
+ KVM_SEV_LAUNCH_SECRET,
+ KVM_SEV_LAUNCH_MEASURE,
+ KVM_SEV_LAUNCH_FINISH,
+ /* Guest migration commands (outgoing) */
+ KVM_SEV_SEND_START,
+ KVM_SEV_SEND_UPDATE_DATA,
+ KVM_SEV_SEND_UPDATE_VMSA,
+ KVM_SEV_SEND_FINISH,
+ /* Guest migration commands (incoming) */
+ KVM_SEV_RECEIVE_START,
+ KVM_SEV_RECEIVE_UPDATE_DATA,
+ KVM_SEV_RECEIVE_UPDATE_VMSA,
+ KVM_SEV_RECEIVE_FINISH,
+ /* Guest status and debug commands */
+ KVM_SEV_GUEST_STATUS,
+ KVM_SEV_DBG_DECRYPT,
+ KVM_SEV_DBG_ENCRYPT,
+ /* Guest certificates commands */
+ KVM_SEV_CERT_EXPORT,
+
+ KVM_SEV_NR_MAX,
+};
+
+struct kvm_sev_cmd {
+ __u32 id;
+ __u64 data;
+ __u32 error;
+ __u32 sev_fd;
+};
+
+struct kvm_sev_launch_start {
+ __u32 handle;
+ __u32 policy;
+ __u64 dh_uaddr;
+ __u32 dh_len;
+ __u64 session_uaddr;
+ __u32 session_len;
+};
+
+struct kvm_sev_launch_update_data {
+ __u64 uaddr;
+ __u32 len;
+};
+
+
+struct kvm_sev_launch_secret {
+ __u64 hdr_uaddr;
+ __u32 hdr_len;
+ __u64 guest_uaddr;
+ __u32 guest_len;
+ __u64 trans_uaddr;
+ __u32 trans_len;
+};
+
+struct kvm_sev_launch_measure {
+ __u64 uaddr;
+ __u32 len;
+};
+
+struct kvm_sev_guest_status {
+ __u32 handle;
+ __u32 policy;
+ __u32 state;
+};
+
+struct kvm_sev_dbg {
+ __u64 src_uaddr;
+ __u64 dst_uaddr;
+ __u32 len;
+};
#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)
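The memory-encryption ioctls are issued on the VM fd: KVM_MEMORY_ENCRYPT_OP carries a struct kvm_sev_cmd whose id selects one of enum sev_cmd_id, and kvm_enc_region registers guest memory ranges that remain encrypted. A hedged sketch of the very first step of an SEV launch; creating the VM and opening /dev/sev are shown in minimal form, and a real flow continues with KVM_SEV_LAUNCH_START and friends:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int sev = open("/dev/sev", O_RDWR);
	int vm, ret;
	struct kvm_sev_cmd cmd;

	if (kvm < 0 || sev < 0)
		return 1;
	vm = ioctl(kvm, KVM_CREATE_VM, 0);
	if (vm < 0)
		return 1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.id = KVM_SEV_INIT;
	cmd.sev_fd = sev;	/* handle to the SEV firmware device */
	ret = ioctl(vm, KVM_MEMORY_ENCRYPT_OP, &cmd);
	printf("KVM_SEV_INIT: ret=%d fw_error=%u\n", ret, cmd.error);
	return 0;
}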
diff --git a/tools/include/uapi/linux/netlink.h b/tools/include/uapi/linux/netlink.h
new file mode 100644
index 000000000000..776bc92e9118
--- /dev/null
+++ b/tools/include/uapi/linux/netlink.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI__LINUX_NETLINK_H
+#define _UAPI__LINUX_NETLINK_H
+
+#include <linux/kernel.h>
+#include <linux/socket.h> /* for __kernel_sa_family_t */
+#include <linux/types.h>
+
+#define NETLINK_ROUTE 0 /* Routing/device hook */
+#define NETLINK_UNUSED 1 /* Unused number */
+#define NETLINK_USERSOCK 2 /* Reserved for user mode socket protocols */
+#define NETLINK_FIREWALL 3 /* Unused number, formerly ip_queue */
+#define NETLINK_SOCK_DIAG 4 /* socket monitoring */
+#define NETLINK_NFLOG 5 /* netfilter/iptables ULOG */
+#define NETLINK_XFRM 6 /* ipsec */
+#define NETLINK_SELINUX 7 /* SELinux event notifications */
+#define NETLINK_ISCSI 8 /* Open-iSCSI */
+#define NETLINK_AUDIT 9 /* auditing */
+#define NETLINK_FIB_LOOKUP 10
+#define NETLINK_CONNECTOR 11
+#define NETLINK_NETFILTER 12 /* netfilter subsystem */
+#define NETLINK_IP6_FW 13
+#define NETLINK_DNRTMSG 14 /* DECnet routing messages */
+#define NETLINK_KOBJECT_UEVENT 15 /* Kernel messages to userspace */
+#define NETLINK_GENERIC 16
+/* leave room for NETLINK_DM (DM Events) */
+#define NETLINK_SCSITRANSPORT 18 /* SCSI Transports */
+#define NETLINK_ECRYPTFS 19
+#define NETLINK_RDMA 20
+#define NETLINK_CRYPTO 21 /* Crypto layer */
+#define NETLINK_SMC 22 /* SMC monitoring */
+
+#define NETLINK_INET_DIAG NETLINK_SOCK_DIAG
+
+#define MAX_LINKS 32
+
+struct sockaddr_nl {
+ __kernel_sa_family_t nl_family; /* AF_NETLINK */
+ unsigned short nl_pad; /* zero */
+ __u32 nl_pid; /* port ID */
+ __u32 nl_groups; /* multicast groups mask */
+};
+
+struct nlmsghdr {
+ __u32 nlmsg_len; /* Length of message including header */
+ __u16 nlmsg_type; /* Message content */
+ __u16 nlmsg_flags; /* Additional flags */
+ __u32 nlmsg_seq; /* Sequence number */
+ __u32 nlmsg_pid; /* Sending process port ID */
+};
+
+/* Flags values */
+
+#define NLM_F_REQUEST 0x01 /* It is request message. */
+#define NLM_F_MULTI 0x02 /* Multipart message, terminated by NLMSG_DONE */
+#define NLM_F_ACK 0x04 /* Reply with ack, with zero or error code */
+#define NLM_F_ECHO 0x08 /* Echo this request */
+#define NLM_F_DUMP_INTR 0x10 /* Dump was inconsistent due to sequence change */
+#define NLM_F_DUMP_FILTERED 0x20 /* Dump was filtered as requested */
+
+/* Modifiers to GET request */
+#define NLM_F_ROOT 0x100 /* specify tree root */
+#define NLM_F_MATCH 0x200 /* return all matching */
+#define NLM_F_ATOMIC 0x400 /* atomic GET */
+#define NLM_F_DUMP (NLM_F_ROOT|NLM_F_MATCH)
+
+/* Modifiers to NEW request */
+#define NLM_F_REPLACE 0x100 /* Override existing */
+#define NLM_F_EXCL 0x200 /* Do not touch, if it exists */
+#define NLM_F_CREATE 0x400 /* Create, if it does not exist */
+#define NLM_F_APPEND 0x800 /* Add to end of list */
+
+/* Modifiers to DELETE request */
+#define NLM_F_NONREC 0x100 /* Do not delete recursively */
+
+/* Flags for ACK message */
+#define NLM_F_CAPPED 0x100 /* request was capped */
+#define NLM_F_ACK_TLVS 0x200 /* extended ACK TLVs were included */
+
+/*
+ 4.4BSD ADD NLM_F_CREATE|NLM_F_EXCL
+ 4.4BSD CHANGE NLM_F_REPLACE
+
+ True CHANGE NLM_F_CREATE|NLM_F_REPLACE
+ Append NLM_F_CREATE
+ Check NLM_F_EXCL
+ */
+
+#define NLMSG_ALIGNTO 4U
+#define NLMSG_ALIGN(len) ( ((len)+NLMSG_ALIGNTO-1) & ~(NLMSG_ALIGNTO-1) )
+#define NLMSG_HDRLEN ((int) NLMSG_ALIGN(sizeof(struct nlmsghdr)))
+#define NLMSG_LENGTH(len) ((len) + NLMSG_HDRLEN)
+#define NLMSG_SPACE(len) NLMSG_ALIGN(NLMSG_LENGTH(len))
+#define NLMSG_DATA(nlh) ((void*)(((char*)nlh) + NLMSG_LENGTH(0)))
+#define NLMSG_NEXT(nlh,len) ((len) -= NLMSG_ALIGN((nlh)->nlmsg_len), \
+ (struct nlmsghdr*)(((char*)(nlh)) + NLMSG_ALIGN((nlh)->nlmsg_len)))
+#define NLMSG_OK(nlh,len) ((len) >= (int)sizeof(struct nlmsghdr) && \
+ (nlh)->nlmsg_len >= sizeof(struct nlmsghdr) && \
+ (nlh)->nlmsg_len <= (len))
+#define NLMSG_PAYLOAD(nlh,len) ((nlh)->nlmsg_len - NLMSG_SPACE((len)))
+
+#define NLMSG_NOOP 0x1 /* Nothing. */
+#define NLMSG_ERROR 0x2 /* Error */
+#define NLMSG_DONE 0x3 /* End of a dump */
+#define NLMSG_OVERRUN 0x4 /* Data lost */
+
+#define NLMSG_MIN_TYPE 0x10 /* < 0x10: reserved control messages */
+
+struct nlmsgerr {
+ int error;
+ struct nlmsghdr msg;
+ /*
+ * followed by the message contents unless NETLINK_CAP_ACK was set
+ * or the ACK indicates success (error == 0)
+ * message length is aligned with NLMSG_ALIGN()
+ */
+ /*
+ * followed by TLVs defined in enum nlmsgerr_attrs
+ * if NETLINK_EXT_ACK was set
+ */
+};
+
+/**
+ * enum nlmsgerr_attrs - nlmsgerr attributes
+ * @NLMSGERR_ATTR_UNUSED: unused
+ * @NLMSGERR_ATTR_MSG: error message string (string)
+ * @NLMSGERR_ATTR_OFFS: offset of the invalid attribute in the original
+ * message, counting from the beginning of the header (u32)
+ * @NLMSGERR_ATTR_COOKIE: arbitrary subsystem specific cookie to
+ * be used - in the success case - to identify a created
+ * object or operation or similar (binary)
+ * @__NLMSGERR_ATTR_MAX: number of attributes
+ * @NLMSGERR_ATTR_MAX: highest attribute number
+ */
+enum nlmsgerr_attrs {
+ NLMSGERR_ATTR_UNUSED,
+ NLMSGERR_ATTR_MSG,
+ NLMSGERR_ATTR_OFFS,
+ NLMSGERR_ATTR_COOKIE,
+
+ __NLMSGERR_ATTR_MAX,
+ NLMSGERR_ATTR_MAX = __NLMSGERR_ATTR_MAX - 1
+};
+
+#define NETLINK_ADD_MEMBERSHIP 1
+#define NETLINK_DROP_MEMBERSHIP 2
+#define NETLINK_PKTINFO 3
+#define NETLINK_BROADCAST_ERROR 4
+#define NETLINK_NO_ENOBUFS 5
+#ifndef __KERNEL__
+#define NETLINK_RX_RING 6
+#define NETLINK_TX_RING 7
+#endif
+#define NETLINK_LISTEN_ALL_NSID 8
+#define NETLINK_LIST_MEMBERSHIPS 9
+#define NETLINK_CAP_ACK 10
+#define NETLINK_EXT_ACK 11
+
+struct nl_pktinfo {
+ __u32 group;
+};
+
+struct nl_mmap_req {
+ unsigned int nm_block_size;
+ unsigned int nm_block_nr;
+ unsigned int nm_frame_size;
+ unsigned int nm_frame_nr;
+};
+
+struct nl_mmap_hdr {
+ unsigned int nm_status;
+ unsigned int nm_len;
+ __u32 nm_group;
+ /* credentials */
+ __u32 nm_pid;
+ __u32 nm_uid;
+ __u32 nm_gid;
+};
+
+#ifndef __KERNEL__
+enum nl_mmap_status {
+ NL_MMAP_STATUS_UNUSED,
+ NL_MMAP_STATUS_RESERVED,
+ NL_MMAP_STATUS_VALID,
+ NL_MMAP_STATUS_COPY,
+ NL_MMAP_STATUS_SKIP,
+};
+
+#define NL_MMAP_MSG_ALIGNMENT NLMSG_ALIGNTO
+#define NL_MMAP_MSG_ALIGN(sz) __ALIGN_KERNEL(sz, NL_MMAP_MSG_ALIGNMENT)
+#define NL_MMAP_HDRLEN NL_MMAP_MSG_ALIGN(sizeof(struct nl_mmap_hdr))
+#endif
+
+#define NET_MAJOR 36 /* Major 36 is reserved for networking */
+
+enum {
+ NETLINK_UNCONNECTED = 0,
+ NETLINK_CONNECTED,
+};
+
+/*
+ * <------- NLA_HDRLEN ------> <-- NLA_ALIGN(payload)-->
+ * +---------------------+- - -+- - - - - - - - - -+- - -+
+ * | Header | Pad | Payload | Pad |
+ * | (struct nlattr) | ing | | ing |
+ * +---------------------+- - -+- - - - - - - - - -+- - -+
+ * <-------------- nlattr->nla_len -------------->
+ */
+
+struct nlattr {
+ __u16 nla_len;
+ __u16 nla_type;
+};
+
+/*
+ * nla_type (16 bits)
+ * +---+---+-------------------------------+
+ * | N | O | Attribute Type |
+ * +---+---+-------------------------------+
+ * N := Carries nested attributes
+ * O := Payload stored in network byte order
+ *
+ * Note: The N and O flag are mutually exclusive.
+ */
+#define NLA_F_NESTED (1 << 15)
+#define NLA_F_NET_BYTEORDER (1 << 14)
+#define NLA_TYPE_MASK ~(NLA_F_NESTED | NLA_F_NET_BYTEORDER)
+
+#define NLA_ALIGNTO 4
+#define NLA_ALIGN(len) (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
+#define NLA_HDRLEN ((int) NLA_ALIGN(sizeof(struct nlattr)))
+
+/* Generic 32 bitflags attribute content sent to the kernel.
+ *
+ * The value is a bitmap that defines the values being set
+ * The selector is a bitmask that defines which value is legit
+ *
+ * Examples:
+ * value = 0x0, and selector = 0x1
+ * implies we are selecting bit 1 and we want to set its value to 0.
+ *
+ * value = 0x2, and selector = 0x2
+ * implies we are selecting bit 2 and we want to set its value to 1.
+ *
+ */
+struct nla_bitfield32 {
+ __u32 value;
+ __u32 selector;
+};
+
+#endif /* _UAPI__LINUX_NETLINK_H */
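
Editor's note: the NLMSG_*/NLA_* macros are the only parsing helpers this header provides, so consumers walk receive buffers with them directly (libbpf's bpf_set_link_xdp_fd() below does exactly this). A small sketch of that loop, assuming buf/len came from recv() on a netlink socket and that the payload is a bare attribute stream; rtnetlink families prepend a fixed header (e.g. struct ifinfomsg) before the attributes:

#include <stdio.h>
#include <linux/netlink.h>

static void walk(char *buf, int len)
{
	struct nlmsghdr *nh;

	for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len); nh = NLMSG_NEXT(nh, len)) {
		int rem = NLMSG_PAYLOAD(nh, 0);
		struct nlattr *nla = NLMSG_DATA(nh);

		if (nh->nlmsg_type == NLMSG_ERROR) {
			struct nlmsgerr *err = NLMSG_DATA(nh);
			fprintf(stderr, "kernel replied %d\n", err->error);
			continue;
		}
		/* attribute iteration: header, then NLA_ALIGN'ed payload */
		while (rem >= (int)sizeof(*nla) && nla->nla_len >= sizeof(*nla) &&
		       nla->nla_len <= rem) {
			printf("attr type %u len %u\n",
			       nla->nla_type & NLA_TYPE_MASK, nla->nla_len);
			rem -= NLA_ALIGN(nla->nla_len);
			nla = (struct nlattr *)((char *)nla + NLA_ALIGN(nla->nla_len));
		}
	}
}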
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index 362493a2f950..e0739a1aa4b2 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -418,6 +418,27 @@ struct perf_event_attr {
__u16 __reserved_2; /* align to __u64 */
};
+/*
+ * Structure used by below PERF_EVENT_IOC_QUERY_BPF command
+ * to query bpf programs attached to the same perf tracepoint
+ * as the given perf event.
+ */
+struct perf_event_query_bpf {
+ /*
+ * The below ids array length
+ */
+ __u32 ids_len;
+ /*
+ * Set by the kernel to indicate the number of
+ * available programs
+ */
+ __u32 prog_cnt;
+ /*
+ * User provided buffer to store program ids
+ */
+ __u32 ids[0];
+};
+
#define perf_flags(attr) (*(&(attr)->read_format + 1))
/*
@@ -433,6 +454,7 @@ struct perf_event_attr {
#define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *)
#define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32)
#define PERF_EVENT_IOC_PAUSE_OUTPUT _IOW('$', 9, __u32)
+#define PERF_EVENT_IOC_QUERY_BPF _IOWR('$', 10, struct perf_event_query_bpf *)
enum perf_event_ioc_flags {
PERF_IOC_FLAG_GROUP = 1U << 0,
@@ -612,9 +634,12 @@ struct perf_event_mmap_page {
*/
#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT (1 << 12)
/*
- * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
- * different events so can reuse the same bit position.
- * Ditto PERF_RECORD_MISC_SWITCH_OUT.
+ * Following PERF_RECORD_MISC_* are used on different
+ * events, so can reuse the same bit position:
+ *
+ * PERF_RECORD_MISC_MMAP_DATA - PERF_RECORD_MMAP* events
+ * PERF_RECORD_MISC_COMM_EXEC - PERF_RECORD_COMM event
+ * PERF_RECORD_MISC_SWITCH_OUT - PERF_RECORD_SWITCH* events
*/
#define PERF_RECORD_MISC_MMAP_DATA (1 << 13)
#define PERF_RECORD_MISC_COMM_EXEC (1 << 13)
@@ -864,6 +889,7 @@ enum perf_event_type {
* struct perf_event_header header;
* u32 pid;
* u32 tid;
+ * struct sample_id sample_id;
* };
*/
PERF_RECORD_ITRACE_START = 12,
@@ -942,6 +968,7 @@ enum perf_callchain_context {
#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */
#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */
#define PERF_AUX_FLAG_PARTIAL 0x04 /* record contains gaps */
+#define PERF_AUX_FLAG_COLLISION 0x08 /* sample collided with another */
#define PERF_FLAG_FD_NO_GROUP (1UL << 0)
#define PERF_FLAG_FD_OUTPUT (1UL << 1)
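
Editor's note: the new query ioctl takes a caller-sized struct perf_event_query_bpf; the kernel reports the total in prog_cnt and fills at most ids_len entries of ids[]. A minimal sketch, assuming perf_fd is an already-open tracepoint perf event fd obtained elsewhere:

#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static void query_attached_bpf(int perf_fd)
{
	__u32 n = 16;   /* room for up to 16 program ids */
	struct perf_event_query_bpf *q;

	q = calloc(1, sizeof(*q) + n * sizeof(__u32));
	if (!q)
		return;
	q->ids_len = n;

	if (ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, q) == 0) {
		for (__u32 i = 0; i < q->prog_cnt && i < n; i++)
			printf("bpf prog id %u\n", q->ids[i]);
	}
	free(q);
}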
diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h
index a8d0759a9e40..af5f8c2df87a 100644
--- a/tools/include/uapi/linux/prctl.h
+++ b/tools/include/uapi/linux/prctl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _LINUX_PRCTL_H
#define _LINUX_PRCTL_H
@@ -197,4 +198,13 @@ struct prctl_mm_map {
# define PR_CAP_AMBIENT_LOWER 3
# define PR_CAP_AMBIENT_CLEAR_ALL 4
+/* arm64 Scalable Vector Extension controls */
+/* Flag values must be kept in sync with ptrace NT_ARM_SVE interface */
+#define PR_SVE_SET_VL 50 /* set task vector length */
+# define PR_SVE_SET_VL_ONEXEC (1 << 18) /* defer effect until exec */
+#define PR_SVE_GET_VL 51 /* get task vector length */
+/* Bits common to PR_SVE_SET_VL and PR_SVE_GET_VL */
+# define PR_SVE_VL_LEN_MASK 0xffff
+# define PR_SVE_VL_INHERIT (1 << 17) /* inherit across exec */
+
#endif /* _LINUX_PRCTL_H */
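
Editor's note: on the tools side these constants are only mirrored for decoding, but the call sequence they describe is a plain prctl() pair. A sketch, assuming an arm64 kernel with SVE support; 32 is a vector length in bytes and must be one the hardware actually implements:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/* Request a 32-byte vector length and keep it across exec. */
	if (prctl(PR_SVE_SET_VL, 32 | PR_SVE_VL_INHERIT) < 0)
		perror("PR_SVE_SET_VL");

	int vl = prctl(PR_SVE_GET_VL);
	if (vl >= 0)
		printf("vector length %d bytes, inherit=%d\n",
		       vl & PR_SVE_VL_LEN_MASK, !!(vl & PR_SVE_VL_INHERIT));
	return 0;
}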
diff --git a/tools/include/uapi/linux/sched.h b/tools/include/uapi/linux/sched.h
index 30a9e51bbb1e..22627f80063e 100644
--- a/tools/include/uapi/linux/sched.h
+++ b/tools/include/uapi/linux/sched.h
@@ -49,5 +49,10 @@
*/
#define SCHED_FLAG_RESET_ON_FORK 0x01
#define SCHED_FLAG_RECLAIM 0x02
+#define SCHED_FLAG_DL_OVERRUN 0x04
+
+#define SCHED_FLAG_ALL (SCHED_FLAG_RESET_ON_FORK | \
+ SCHED_FLAG_RECLAIM | \
+ SCHED_FLAG_DL_OVERRUN)
#endif /* _UAPI_LINUX_SCHED_H */
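
Editor's note: SCHED_FLAG_DL_OVERRUN asks the kernel to deliver SIGXCPU when a SCHED_DEADLINE task overruns its runtime budget. A hedged sketch of requesting it via sched_setattr(); glibc has no wrapper, so the raw syscall and the uapi struct layout are spelled out here and are assumptions of this example:

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/sched.h>
#include <linux/types.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE 6
#endif

struct sched_attr {              /* uapi layout, assumed here */
	__u32 size;
	__u32 sched_policy;
	__u64 sched_flags;
	__s32 sched_nice;
	__u32 sched_priority;
	__u64 sched_runtime;     /* ns */
	__u64 sched_deadline;    /* ns */
	__u64 sched_period;      /* ns */
};

static void on_overrun(int sig) { (void)sig; write(2, "runtime overrun\n", 16); }

int main(void)
{
	struct sched_attr attr;

	signal(SIGXCPU, on_overrun);
	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.sched_policy = SCHED_DEADLINE;
	attr.sched_flags = SCHED_FLAG_DL_OVERRUN;
	attr.sched_runtime  =  2 * 1000 * 1000;  /* 2ms budget      */
	attr.sched_deadline = 10 * 1000 * 1000;  /* 10ms deadline   */
	attr.sched_period   = 10 * 1000 * 1000;  /* 10ms period     */

	if (syscall(SYS_sched_setattr, 0, &attr, 0))
		perror("sched_setattr");
	return 0;
}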
diff --git a/tools/include/uapi/sound/asound.h b/tools/include/uapi/sound/asound.h
index c227ccba60ae..07d61583fd02 100644
--- a/tools/include/uapi/sound/asound.h
+++ b/tools/include/uapi/sound/asound.h
@@ -214,6 +214,11 @@ typedef int __bitwise snd_pcm_format_t;
#define SNDRV_PCM_FORMAT_IMA_ADPCM ((__force snd_pcm_format_t) 22)
#define SNDRV_PCM_FORMAT_MPEG ((__force snd_pcm_format_t) 23)
#define SNDRV_PCM_FORMAT_GSM ((__force snd_pcm_format_t) 24)
+#define SNDRV_PCM_FORMAT_S20_LE ((__force snd_pcm_format_t) 25) /* in four bytes, LSB justified */
+#define SNDRV_PCM_FORMAT_S20_BE ((__force snd_pcm_format_t) 26) /* in four bytes, LSB justified */
+#define SNDRV_PCM_FORMAT_U20_LE ((__force snd_pcm_format_t) 27) /* in four bytes, LSB justified */
+#define SNDRV_PCM_FORMAT_U20_BE ((__force snd_pcm_format_t) 28) /* in four bytes, LSB justified */
+/* gap in the numbering for a future standard linear format */
#define SNDRV_PCM_FORMAT_SPECIAL ((__force snd_pcm_format_t) 31)
#define SNDRV_PCM_FORMAT_S24_3LE ((__force snd_pcm_format_t) 32) /* in three bytes */
#define SNDRV_PCM_FORMAT_S24_3BE ((__force snd_pcm_format_t) 33) /* in three bytes */
@@ -248,6 +253,8 @@ typedef int __bitwise snd_pcm_format_t;
#define SNDRV_PCM_FORMAT_FLOAT SNDRV_PCM_FORMAT_FLOAT_LE
#define SNDRV_PCM_FORMAT_FLOAT64 SNDRV_PCM_FORMAT_FLOAT64_LE
#define SNDRV_PCM_FORMAT_IEC958_SUBFRAME SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE
+#define SNDRV_PCM_FORMAT_S20 SNDRV_PCM_FORMAT_S20_LE
+#define SNDRV_PCM_FORMAT_U20 SNDRV_PCM_FORMAT_U20_LE
#endif
#ifdef SNDRV_BIG_ENDIAN
#define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_BE
@@ -259,6 +266,8 @@ typedef int __bitwise snd_pcm_format_t;
#define SNDRV_PCM_FORMAT_FLOAT SNDRV_PCM_FORMAT_FLOAT_BE
#define SNDRV_PCM_FORMAT_FLOAT64 SNDRV_PCM_FORMAT_FLOAT64_BE
#define SNDRV_PCM_FORMAT_IEC958_SUBFRAME SNDRV_PCM_FORMAT_IEC958_SUBFRAME_BE
+#define SNDRV_PCM_FORMAT_S20 SNDRV_PCM_FORMAT_S20_BE
+#define SNDRV_PCM_FORMAT_U20 SNDRV_PCM_FORMAT_U20_BE
#endif
typedef int __bitwise snd_pcm_subformat_t;
diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat
index 217cf6f95c36..a5684d0968b4 100755
--- a/tools/kvm/kvm_stat/kvm_stat
+++ b/tools/kvm/kvm_stat/kvm_stat
@@ -478,7 +478,7 @@ class Provider(object):
@staticmethod
def is_field_wanted(fields_filter, field):
"""Indicate whether field is valid according to fields_filter."""
- if not fields_filter or fields_filter == "help":
+ if not fields_filter:
return True
return re.match(fields_filter, field) is not None
@@ -549,8 +549,8 @@ class TracepointProvider(Provider):
def update_fields(self, fields_filter):
"""Refresh fields, applying fields_filter"""
- self._fields = [field for field in self.get_available_fields()
- if self.is_field_wanted(fields_filter, field)]
+ self.fields = [field for field in self.get_available_fields()
+ if self.is_field_wanted(fields_filter, field)]
@staticmethod
def get_online_cpus():
@@ -950,7 +950,8 @@ class Tui(object):
curses.nocbreak()
curses.endwin()
- def get_all_gnames(self):
+ @staticmethod
+ def get_all_gnames():
"""Returns a list of (pid, gname) tuples of all running guests"""
res = []
try:
@@ -963,7 +964,7 @@ class Tui(object):
# perform a sanity check before calling the more expensive
# function to possibly extract the guest name
if ' -name ' in line[1]:
- res.append((line[0], self.get_gname_from_pid(line[0])))
+ res.append((line[0], Tui.get_gname_from_pid(line[0])))
child.stdout.close()
return res
@@ -984,7 +985,8 @@ class Tui(object):
except Exception:
self.screen.addstr(row + 1, 2, 'Not available')
- def get_pid_from_gname(self, gname):
+ @staticmethod
+ def get_pid_from_gname(gname):
"""Fuzzy function to convert guest name to QEMU process pid.
Returns a list of potential pids, can be empty if no match found.
@@ -992,7 +994,7 @@ class Tui(object):
"""
pids = []
- for line in self.get_all_gnames():
+ for line in Tui.get_all_gnames():
if gname == line[1]:
pids.append(int(line[0]))
@@ -1090,15 +1092,16 @@ class Tui(object):
# sort by totals
return (0, -stats[x][0])
total = 0.
- for val in stats.values():
- total += val[0]
+ for key in stats.keys():
+ if key.find('(') is -1:
+ total += stats[key][0]
if self._sorting == SORT_DEFAULT:
sortkey = sortCurAvg
else:
sortkey = sortTotal
+ tavg = 0
for key in sorted(stats.keys(), key=sortkey):
-
- if row >= self.screen.getmaxyx()[0]:
+ if row >= self.screen.getmaxyx()[0] - 1:
break
values = stats[key]
if not values[0] and not values[1]:
@@ -1110,9 +1113,15 @@ class Tui(object):
self.screen.addstr(row, 1, '%-40s %10d%7.1f %8s' %
(key, values[0], values[0] * 100 / total,
cur))
+ if cur is not '' and key.find('(') is -1:
+ tavg += cur
row += 1
if row == 3:
self.screen.addstr(4, 1, 'No matching events reported yet')
+ else:
+ self.screen.addstr(row, 1, '%-40s %10d %8s' %
+ ('Total', total, tavg if tavg else ''),
+ curses.A_BOLD)
self.screen.refresh()
def show_msg(self, text):
@@ -1358,7 +1367,7 @@ class Tui(object):
if char == 'x':
self.update_drilldown()
# prevents display of current values on next refresh
- self.stats.get()
+ self.stats.get(self._display_guests)
except KeyboardInterrupt:
break
except curses.error:
@@ -1451,16 +1460,13 @@ Press any other key to refresh statistics immediately.
try:
pids = Tui.get_pid_from_gname(val)
except:
- raise optparse.OptionValueError('Error while searching for guest '
- '"{}", use "-p" to specify a pid '
- 'instead'.format(val))
+ sys.exit('Error while searching for guest "{}". Use "-p" to '
+ 'specify a pid instead?'.format(val))
if len(pids) == 0:
- raise optparse.OptionValueError('No guest by the name "{}" '
- 'found'.format(val))
+ sys.exit('Error: No guest by the name "{}" found'.format(val))
if len(pids) > 1:
- raise optparse.OptionValueError('Multiple processes found (pids: '
- '{}) - use "-p" to specify a pid '
- 'instead'.format(" ".join(pids)))
+ sys.exit('Error: Multiple processes found (pids: {}). Use "-p" '
+ 'to specify the desired pid'.format(" ".join(pids)))
parser.values.pid = pids[0]
optparser = optparse.OptionParser(description=description_text,
@@ -1518,7 +1524,16 @@ Press any other key to refresh statistics immediately.
help='restrict statistics to guest by name',
callback=cb_guest_to_pid,
)
- (options, _) = optparser.parse_args(sys.argv)
+ options, unkn = optparser.parse_args(sys.argv)
+ if len(unkn) != 1:
+ sys.exit('Error: Extra argument(s): ' + ' '.join(unkn[1:]))
+ try:
+ # verify that we were passed a valid regex up front
+ re.compile(options.fields)
+ except re.error:
+ sys.exit('Error: "' + options.fields + '" is not a valid regular '
+ 'expression')
+
return options
@@ -1564,16 +1579,13 @@ def main():
stats = Stats(options)
- if options.fields == "help":
- event_list = "\n"
- s = stats.get()
- for key in s.keys():
- if key.find('(') != -1:
- key = key[0:key.find('(')]
- if event_list.find('\n' + key + '\n') == -1:
- event_list += key + '\n'
- sys.stdout.write(event_list)
- return ""
+ if options.fields == 'help':
+ stats.fields_filter = None
+ event_list = []
+ for key in stats.get().keys():
+ event_list.append(key.split('(', 1)[0])
+ sys.stdout.write(' ' + '\n '.join(sorted(set(event_list))) + '\n')
+ sys.exit(0)
if options.log:
log(stats)
diff --git a/tools/kvm/kvm_stat/kvm_stat.txt b/tools/kvm/kvm_stat/kvm_stat.txt
index e5cf836be8a1..b5b3810c9e94 100644
--- a/tools/kvm/kvm_stat/kvm_stat.txt
+++ b/tools/kvm/kvm_stat/kvm_stat.txt
@@ -50,6 +50,8 @@ INTERACTIVE COMMANDS
*s*:: set update interval
*x*:: toggle reporting of stats for child trace events
+ :: *Note*: The stats for the parents summarize the respective child trace
+ events
Press any other key to refresh statistics immediately.
@@ -86,7 +88,7 @@ OPTIONS
-f<fields>::
--fields=<fields>::
- fields to display (regex)
+ fields to display (regex), "-f help" for a list of available events
-h::
--help::
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
index d8749756352d..64c679d67109 100644
--- a/tools/lib/bpf/Build
+++ b/tools/lib/bpf/Build
@@ -1 +1 @@
-libbpf-y := libbpf.o bpf.o
+libbpf-y := libbpf.o bpf.o nlattr.o
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index 4555304dc18e..e6d5f8d1477f 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -93,7 +93,6 @@ export prefix libdir src obj
# Shell quotes
libdir_SQ = $(subst ','\'',$(libdir))
libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
-plugin_dir_SQ = $(subst ','\'',$(plugin_dir))
LIB_FILE = libbpf.a libbpf.so
@@ -150,7 +149,7 @@ CMD_TARGETS = $(LIB_FILE)
TARGETS = $(CMD_TARGETS)
-all: fixdep $(VERSION_FILES) all_cmd
+all: fixdep all_cmd
all_cmd: $(CMD_TARGETS)
@@ -161,6 +160,12 @@ $(BPF_IN): force elfdep bpfdep
@(test -f ../../include/uapi/linux/bpf_common.h -a -f ../../../include/uapi/linux/bpf_common.h && ( \
(diff -B ../../include/uapi/linux/bpf_common.h ../../../include/uapi/linux/bpf_common.h >/dev/null) || \
echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/bpf_common.h' differs from latest version at 'include/uapi/linux/bpf_common.h'" >&2 )) || true
+ @(test -f ../../include/uapi/linux/netlink.h -a -f ../../../include/uapi/linux/netlink.h && ( \
+ (diff -B ../../include/uapi/linux/netlink.h ../../../include/uapi/linux/netlink.h >/dev/null) || \
+ echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/netlink.h' differs from latest version at 'include/uapi/linux/netlink.h'" >&2 )) || true
+ @(test -f ../../include/uapi/linux/if_link.h -a -f ../../../include/uapi/linux/if_link.h && ( \
+ (diff -B ../../include/uapi/linux/if_link.h ../../../include/uapi/linux/if_link.h >/dev/null) || \
+ echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_link.h' differs from latest version at 'include/uapi/linux/if_link.h'" >&2 )) || true
$(Q)$(MAKE) $(build)=libbpf
$(OUTPUT)libbpf.so: $(BPF_IN)
@@ -169,21 +174,11 @@ $(OUTPUT)libbpf.so: $(BPF_IN)
$(OUTPUT)libbpf.a: $(BPF_IN)
$(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
-define update_dir
- (echo $1 > $@.tmp; \
- if [ -r $@ ] && cmp -s $@ $@.tmp; then \
- rm -f $@.tmp; \
- else \
- echo ' UPDATE $@'; \
- mv -f $@.tmp $@; \
- fi);
-endef
-
define do_install
if [ ! -d '$(DESTDIR_SQ)$2' ]; then \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
fi; \
- $(INSTALL) $1 '$(DESTDIR_SQ)$2'
+ $(INSTALL) $1 $(if $3,-m $3,) '$(DESTDIR_SQ)$2'
endef
install_lib: all_cmd
@@ -192,7 +187,8 @@ install_lib: all_cmd
install_headers:
$(call QUIET_INSTALL, headers) \
- $(call do_install,bpf.h,$(prefix)/include/bpf,644)
+ $(call do_install,bpf.h,$(prefix)/include/bpf,644); \
+ $(call do_install,libbpf.h,$(prefix)/include/bpf,644);
install: install_lib
@@ -203,7 +199,7 @@ config-clean:
$(Q)$(MAKE) -C $(srctree)/tools/build/feature/ clean >/dev/null
clean:
- $(call QUIET_CLEAN, libbpf) $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d .*.cmd \
+ $(call QUIET_CLEAN, libbpf) $(RM) *.o *~ $(TARGETS) *.a *.so .*.d .*.cmd \
$(RM) LIBBPF-CFLAGS
$(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf
@@ -213,10 +209,10 @@ PHONY += force elfdep bpfdep
force:
elfdep:
- @if [ "$(feature-libelf)" != "1" ]; then echo "No libelf found"; exit -1 ; fi
+ @if [ "$(feature-libelf)" != "1" ]; then echo "No libelf found"; exit 1 ; fi
bpfdep:
- @if [ "$(feature-bpf)" != "1" ]; then echo "BPF API too old"; exit -1 ; fi
+ @if [ "$(feature-bpf)" != "1" ]; then echo "BPF API too old"; exit 1 ; fi
# Declare the contents of the .PHONY variable as phony. We keep that
# information in a variable so we can use it in if_changed and friends.
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 5128677e4117..592a58a2b681 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -1,3 +1,5 @@
+// SPDX-License-Identifier: LGPL-2.1
+
/*
* common eBPF ELF operations.
*
@@ -25,6 +27,16 @@
#include <asm/unistd.h>
#include <linux/bpf.h>
#include "bpf.h"
+#include "libbpf.h"
+#include "nlattr.h"
+#include <linux/rtnetlink.h>
+#include <linux/if_link.h>
+#include <sys/socket.h>
+#include <errno.h>
+
+#ifndef SOL_NETLINK
+#define SOL_NETLINK 270
+#endif
/*
* When building perf, unistd.h is overridden. __NR_bpf is
@@ -46,7 +58,9 @@
# endif
#endif
+#ifndef min
#define min(x, y) ((x) < (y) ? (x) : (y))
+#endif
static inline __u64 ptr_to_u64(const void *ptr)
{
@@ -413,3 +427,124 @@ int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len)
return err;
}
+
+int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags)
+{
+ struct sockaddr_nl sa;
+ int sock, seq = 0, len, ret = -1;
+ char buf[4096];
+ struct nlattr *nla, *nla_xdp;
+ struct {
+ struct nlmsghdr nh;
+ struct ifinfomsg ifinfo;
+ char attrbuf[64];
+ } req;
+ struct nlmsghdr *nh;
+ struct nlmsgerr *err;
+ socklen_t addrlen;
+ int one = 1;
+
+ memset(&sa, 0, sizeof(sa));
+ sa.nl_family = AF_NETLINK;
+
+ sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
+ if (sock < 0) {
+ return -errno;
+ }
+
+ if (setsockopt(sock, SOL_NETLINK, NETLINK_EXT_ACK,
+ &one, sizeof(one)) < 0) {
+ fprintf(stderr, "Netlink error reporting not supported\n");
+ }
+
+ if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
+ ret = -errno;
+ goto cleanup;
+ }
+
+ addrlen = sizeof(sa);
+ if (getsockname(sock, (struct sockaddr *)&sa, &addrlen) < 0) {
+ ret = -errno;
+ goto cleanup;
+ }
+
+ if (addrlen != sizeof(sa)) {
+ ret = -LIBBPF_ERRNO__INTERNAL;
+ goto cleanup;
+ }
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
+ req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ req.nh.nlmsg_type = RTM_SETLINK;
+ req.nh.nlmsg_pid = 0;
+ req.nh.nlmsg_seq = ++seq;
+ req.ifinfo.ifi_family = AF_UNSPEC;
+ req.ifinfo.ifi_index = ifindex;
+
+ /* started nested attribute for XDP */
+ nla = (struct nlattr *)(((char *)&req)
+ + NLMSG_ALIGN(req.nh.nlmsg_len));
+ nla->nla_type = NLA_F_NESTED | IFLA_XDP;
+ nla->nla_len = NLA_HDRLEN;
+
+ /* add XDP fd */
+ nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
+ nla_xdp->nla_type = IFLA_XDP_FD;
+ nla_xdp->nla_len = NLA_HDRLEN + sizeof(int);
+ memcpy((char *)nla_xdp + NLA_HDRLEN, &fd, sizeof(fd));
+ nla->nla_len += nla_xdp->nla_len;
+
+ /* if user passed in any flags, add those too */
+ if (flags) {
+ nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
+ nla_xdp->nla_type = IFLA_XDP_FLAGS;
+ nla_xdp->nla_len = NLA_HDRLEN + sizeof(flags);
+ memcpy((char *)nla_xdp + NLA_HDRLEN, &flags, sizeof(flags));
+ nla->nla_len += nla_xdp->nla_len;
+ }
+
+ req.nh.nlmsg_len += NLA_ALIGN(nla->nla_len);
+
+ if (send(sock, &req, req.nh.nlmsg_len, 0) < 0) {
+ ret = -errno;
+ goto cleanup;
+ }
+
+ len = recv(sock, buf, sizeof(buf), 0);
+ if (len < 0) {
+ ret = -errno;
+ goto cleanup;
+ }
+
+ for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
+ nh = NLMSG_NEXT(nh, len)) {
+ if (nh->nlmsg_pid != sa.nl_pid) {
+ ret = -LIBBPF_ERRNO__WRNGPID;
+ goto cleanup;
+ }
+ if (nh->nlmsg_seq != seq) {
+ ret = -LIBBPF_ERRNO__INVSEQ;
+ goto cleanup;
+ }
+ switch (nh->nlmsg_type) {
+ case NLMSG_ERROR:
+ err = (struct nlmsgerr *)NLMSG_DATA(nh);
+ if (!err->error)
+ continue;
+ ret = err->error;
+ nla_dump_errormsg(nh);
+ goto cleanup;
+ case NLMSG_DONE:
+ break;
+ default:
+ break;
+ }
+ }
+
+ ret = 0;
+
+cleanup:
+ close(sock);
+ return ret;
+}
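
Editor's note: with the netlink plumbing above in place, attaching an XDP object becomes a two-call affair for the caller. A minimal sketch, assuming xdp_prog.o is an ELF object containing an XDP program and that eth0 exists:

#include <stdio.h>
#include <net/if.h>
#include <linux/bpf.h>
#include "bpf.h"
#include "libbpf.h"

int main(void)
{
	struct bpf_object *obj;
	int prog_fd, ifindex;

	if (bpf_prog_load("xdp_prog.o", BPF_PROG_TYPE_XDP, &obj, &prog_fd))
		return 1;

	ifindex = if_nametoindex("eth0");
	if (!ifindex)
		return 1;

	/* flags == 0: replace whatever XDP program is currently attached */
	if (bpf_set_link_xdp_fd(ifindex, prog_fd, 0)) {
		fprintf(stderr, "failed to attach XDP program\n");
		return 1;
	}
	return 0;
}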
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 6534889e2b2f..8d18fb73d7fb 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: LGPL-2.1 */
+
/*
* common eBPF ELF operations.
*
@@ -40,7 +42,7 @@ int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
__u32 map_flags);
/* Recommend log buffer size */
-#define BPF_LOG_BUF_SIZE 65536
+#define BPF_LOG_BUF_SIZE (256 * 1024)
int bpf_load_program_name(enum bpf_prog_type type, const char *name,
const struct bpf_insn *insns,
size_t insns_cnt, const char *license,
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 5aa45f89da93..97073d649c1a 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -1,3 +1,5 @@
+// SPDX-License-Identifier: LGPL-2.1
+
/*
* Common eBPF ELF object loading operations.
*
@@ -106,6 +108,8 @@ static const char *libbpf_strerror_table[NR_ERRNO] = {
[ERRCODE_OFFSET(PROG2BIG)] = "Program too big",
[ERRCODE_OFFSET(KVER)] = "Incorrect kernel version",
[ERRCODE_OFFSET(PROGTYPE)] = "Kernel doesn't support this program type",
+ [ERRCODE_OFFSET(WRNGPID)] = "Wrong pid in netlink message",
+ [ERRCODE_OFFSET(INVSEQ)] = "Invalid netlink sequence",
};
int libbpf_strerror(int err, char *buf, size_t size)
@@ -174,12 +178,19 @@ struct bpf_program {
char *name;
char *section_name;
struct bpf_insn *insns;
- size_t insns_cnt;
+ size_t insns_cnt, main_prog_cnt;
enum bpf_prog_type type;
- struct {
+ struct reloc_desc {
+ enum {
+ RELO_LD64,
+ RELO_CALL,
+ } type;
int insn_idx;
- int map_idx;
+ union {
+ int map_idx;
+ int text_off;
+ };
} *reloc_desc;
int nr_reloc;
@@ -234,6 +245,7 @@ struct bpf_object {
} *reloc;
int nr_reloc;
int maps_shndx;
+ int text_shndx;
} efile;
/*
* All loaded bpf_object is linked in a list, which is
@@ -307,8 +319,8 @@ bpf_program__init(void *data, size_t size, char *section_name, int idx,
prog->section_name = strdup(section_name);
if (!prog->section_name) {
- pr_warning("failed to alloc name for prog under section %s\n",
- section_name);
+ pr_warning("failed to alloc name for prog under section(%d) %s\n",
+ idx, section_name);
goto errout;
}
@@ -375,9 +387,13 @@ bpf_object__init_prog_names(struct bpf_object *obj)
size_t pi, si;
for (pi = 0; pi < obj->nr_programs; pi++) {
- char *name = NULL;
+ const char *name = NULL;
prog = &obj->programs[pi];
+ if (prog->idx == obj->efile.text_shndx) {
+ name = ".text";
+ goto skip_search;
+ }
for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
si++) {
@@ -387,6 +403,8 @@ bpf_object__init_prog_names(struct bpf_object *obj)
continue;
if (sym.st_shndx != prog->idx)
continue;
+ if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
+ continue;
name = elf_strptr(obj->efile.elf,
obj->efile.strtabidx,
@@ -403,7 +421,7 @@ bpf_object__init_prog_names(struct bpf_object *obj)
prog->section_name);
return -EINVAL;
}
-
+skip_search:
prog->name = strdup(name);
if (!prog->name) {
pr_warning("failed to allocate memory for prog sym %s\n",
@@ -724,6 +742,24 @@ bpf_object__init_maps(struct bpf_object *obj)
return 0;
}
+static bool section_have_execinstr(struct bpf_object *obj, int idx)
+{
+ Elf_Scn *scn;
+ GElf_Shdr sh;
+
+ scn = elf_getscn(obj->efile.elf, idx);
+ if (!scn)
+ return false;
+
+ if (gelf_getshdr(scn, &sh) != &sh)
+ return false;
+
+ if (sh.sh_flags & SHF_EXECINSTR)
+ return true;
+
+ return false;
+}
+
static int bpf_object__elf_collect(struct bpf_object *obj)
{
Elf *elf = obj->efile.elf;
@@ -745,29 +781,29 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
idx++;
if (gelf_getshdr(scn, &sh) != &sh) {
- pr_warning("failed to get section header from %s\n",
- obj->path);
+ pr_warning("failed to get section(%d) header from %s\n",
+ idx, obj->path);
err = -LIBBPF_ERRNO__FORMAT;
goto out;
}
name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
if (!name) {
- pr_warning("failed to get section name from %s\n",
- obj->path);
+ pr_warning("failed to get section(%d) name from %s\n",
+ idx, obj->path);
err = -LIBBPF_ERRNO__FORMAT;
goto out;
}
data = elf_getdata(scn, 0);
if (!data) {
- pr_warning("failed to get section data from %s(%s)\n",
- name, obj->path);
+ pr_warning("failed to get section(%d) data from %s(%s)\n",
+ idx, name, obj->path);
err = -LIBBPF_ERRNO__FORMAT;
goto out;
}
- pr_debug("section %s, size %ld, link %d, flags %lx, type=%d\n",
- name, (unsigned long)data->d_size,
+ pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
+ idx, name, (unsigned long)data->d_size,
(int)sh.sh_link, (unsigned long)sh.sh_flags,
(int)sh.sh_type);
@@ -793,6 +829,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
} else if ((sh.sh_type == SHT_PROGBITS) &&
(sh.sh_flags & SHF_EXECINSTR) &&
(data->d_size > 0)) {
+ if (strcmp(name, ".text") == 0)
+ obj->efile.text_shndx = idx;
err = bpf_object__add_program(obj, data->d_buf,
data->d_size, name, idx);
if (err) {
@@ -805,6 +843,14 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
} else if (sh.sh_type == SHT_REL) {
void *reloc = obj->efile.reloc;
int nr_reloc = obj->efile.nr_reloc + 1;
+ int sec = sh.sh_info; /* points to other section */
+
+ /* Only do relo for section with exec instructions */
+ if (!section_have_execinstr(obj, sec)) {
+ pr_debug("skip relo %s(%d) for section(%d)\n",
+ name, idx, sec);
+ continue;
+ }
reloc = realloc(reloc,
sizeof(*obj->efile.reloc) * nr_reloc);
@@ -820,6 +866,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
obj->efile.reloc[n].shdr = sh;
obj->efile.reloc[n].data = data;
}
+ } else {
+ pr_debug("skip section(%d) %s\n", idx, name);
}
if (err)
goto out;
@@ -854,11 +902,14 @@ bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
}
static int
-bpf_program__collect_reloc(struct bpf_program *prog,
- size_t nr_maps, GElf_Shdr *shdr,
- Elf_Data *data, Elf_Data *symbols,
- int maps_shndx, struct bpf_map *maps)
+bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
+ Elf_Data *data, struct bpf_object *obj)
{
+ Elf_Data *symbols = obj->efile.symbols;
+ int text_shndx = obj->efile.text_shndx;
+ int maps_shndx = obj->efile.maps_shndx;
+ struct bpf_map *maps = obj->maps;
+ size_t nr_maps = obj->nr_maps;
int i, nrels;
pr_debug("collecting relocating info for: '%s'\n",
@@ -891,8 +942,11 @@ bpf_program__collect_reloc(struct bpf_program *prog,
GELF_R_SYM(rel.r_info));
return -LIBBPF_ERRNO__FORMAT;
}
+ pr_debug("relo for %lld value %lld name %d\n",
+ (long long) (rel.r_info >> 32),
+ (long long) sym.st_value, sym.st_name);
- if (sym.st_shndx != maps_shndx) {
+ if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
prog->section_name, sym.st_shndx);
return -LIBBPF_ERRNO__RELOC;
@@ -901,6 +955,17 @@ bpf_program__collect_reloc(struct bpf_program *prog,
insn_idx = rel.r_offset / sizeof(struct bpf_insn);
pr_debug("relocation: insn_idx=%u\n", insn_idx);
+ if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
+ if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
+ pr_warning("incorrect bpf_call opcode\n");
+ return -LIBBPF_ERRNO__RELOC;
+ }
+ prog->reloc_desc[i].type = RELO_CALL;
+ prog->reloc_desc[i].insn_idx = insn_idx;
+ prog->reloc_desc[i].text_off = sym.st_value;
+ continue;
+ }
+
if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
insn_idx, insns[insn_idx].code);
@@ -922,6 +987,7 @@ bpf_program__collect_reloc(struct bpf_program *prog,
return -LIBBPF_ERRNO__RELOC;
}
+ prog->reloc_desc[i].type = RELO_LD64;
prog->reloc_desc[i].insn_idx = insn_idx;
prog->reloc_desc[i].map_idx = map_idx;
}
@@ -961,27 +1027,76 @@ bpf_object__create_maps(struct bpf_object *obj)
}
static int
+bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
+ struct reloc_desc *relo)
+{
+ struct bpf_insn *insn, *new_insn;
+ struct bpf_program *text;
+ size_t new_cnt;
+
+ if (relo->type != RELO_CALL)
+ return -LIBBPF_ERRNO__RELOC;
+
+ if (prog->idx == obj->efile.text_shndx) {
+ pr_warning("relo in .text insn %d into off %d\n",
+ relo->insn_idx, relo->text_off);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+
+ if (prog->main_prog_cnt == 0) {
+ text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
+ if (!text) {
+ pr_warning("no .text section found yet relo into text exist\n");
+ return -LIBBPF_ERRNO__RELOC;
+ }
+ new_cnt = prog->insns_cnt + text->insns_cnt;
+ new_insn = realloc(prog->insns, new_cnt * sizeof(*insn));
+ if (!new_insn) {
+ pr_warning("oom in prog realloc\n");
+ return -ENOMEM;
+ }
+ memcpy(new_insn + prog->insns_cnt, text->insns,
+ text->insns_cnt * sizeof(*insn));
+ prog->insns = new_insn;
+ prog->main_prog_cnt = prog->insns_cnt;
+ prog->insns_cnt = new_cnt;
+ }
+ insn = &prog->insns[relo->insn_idx];
+ insn->imm += prog->main_prog_cnt - relo->insn_idx;
+ pr_debug("added %zd insn from %s to prog %s\n",
+ text->insns_cnt, text->section_name, prog->section_name);
+ return 0;
+}
+
+static int
bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
{
- int i;
+ int i, err;
if (!prog || !prog->reloc_desc)
return 0;
for (i = 0; i < prog->nr_reloc; i++) {
- int insn_idx, map_idx;
- struct bpf_insn *insns = prog->insns;
+ if (prog->reloc_desc[i].type == RELO_LD64) {
+ struct bpf_insn *insns = prog->insns;
+ int insn_idx, map_idx;
- insn_idx = prog->reloc_desc[i].insn_idx;
- map_idx = prog->reloc_desc[i].map_idx;
+ insn_idx = prog->reloc_desc[i].insn_idx;
+ map_idx = prog->reloc_desc[i].map_idx;
- if (insn_idx >= (int)prog->insns_cnt) {
- pr_warning("relocation out of range: '%s'\n",
- prog->section_name);
- return -LIBBPF_ERRNO__RELOC;
+ if (insn_idx >= (int)prog->insns_cnt) {
+ pr_warning("relocation out of range: '%s'\n",
+ prog->section_name);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+ insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
+ insns[insn_idx].imm = obj->maps[map_idx].fd;
+ } else {
+ err = bpf_program__reloc_text(prog, obj,
+ &prog->reloc_desc[i]);
+ if (err)
+ return err;
}
- insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
- insns[insn_idx].imm = obj->maps[map_idx].fd;
}
zfree(&prog->reloc_desc);
@@ -1024,7 +1139,6 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
Elf_Data *data = obj->efile.reloc[i].data;
int idx = shdr->sh_info;
struct bpf_program *prog;
- size_t nr_maps = obj->nr_maps;
if (shdr->sh_type != SHT_REL) {
pr_warning("internal error at %d\n", __LINE__);
@@ -1033,16 +1147,13 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
prog = bpf_object__find_prog_by_idx(obj, idx);
if (!prog) {
- pr_warning("relocation failed: no %d section\n",
- idx);
+ pr_warning("relocation failed: no section(%d)\n", idx);
return -LIBBPF_ERRNO__RELOC;
}
- err = bpf_program__collect_reloc(prog, nr_maps,
+ err = bpf_program__collect_reloc(prog,
shdr, data,
- obj->efile.symbols,
- obj->efile.maps_shndx,
- obj->maps);
+ obj);
if (err)
return err;
}
@@ -1195,6 +1306,8 @@ bpf_object__load_progs(struct bpf_object *obj)
int err;
for (i = 0; i < obj->nr_programs; i++) {
+ if (obj->programs[i].idx == obj->efile.text_shndx)
+ continue;
err = bpf_program__load(&obj->programs[i],
obj->license,
obj->kern_version);
@@ -1721,6 +1834,50 @@ BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
+#define BPF_PROG_SEC(string, type) { string, sizeof(string) - 1, type }
+static const struct {
+ const char *sec;
+ size_t len;
+ enum bpf_prog_type prog_type;
+} section_names[] = {
+ BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
+ BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE),
+ BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE),
+ BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
+ BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
+ BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT),
+ BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
+ BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
+ BPF_PROG_SEC("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
+ BPF_PROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK),
+ BPF_PROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE),
+ BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
+ BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
+ BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
+ BPF_PROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS),
+ BPF_PROG_SEC("sk_skb", BPF_PROG_TYPE_SK_SKB),
+};
+#undef BPF_PROG_SEC
+
+static enum bpf_prog_type bpf_program__guess_type(struct bpf_program *prog)
+{
+ int i;
+
+ if (!prog->section_name)
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(section_names); i++)
+ if (strncmp(prog->section_name, section_names[i].sec,
+ section_names[i].len) == 0)
+ return section_names[i].prog_type;
+
+err:
+ pr_warning("failed to guess program type based on section name %s\n",
+ prog->section_name);
+
+ return BPF_PROG_TYPE_UNSPEC;
+}
+
int bpf_map__fd(struct bpf_map *map)
{
return map ? map->fd : -EINVAL;
@@ -1818,7 +1975,7 @@ long libbpf_get_error(const void *ptr)
int bpf_prog_load(const char *file, enum bpf_prog_type type,
struct bpf_object **pobj, int *prog_fd)
{
- struct bpf_program *prog;
+ struct bpf_program *prog, *first_prog = NULL;
struct bpf_object *obj;
int err;
@@ -1826,13 +1983,30 @@ int bpf_prog_load(const char *file, enum bpf_prog_type type,
if (IS_ERR(obj))
return -ENOENT;
- prog = bpf_program__next(NULL, obj);
- if (!prog) {
+ bpf_object__for_each_program(prog, obj) {
+ /*
+ * If type is not specified, try to guess it based on
+ * section name.
+ */
+ if (type == BPF_PROG_TYPE_UNSPEC) {
+ type = bpf_program__guess_type(prog);
+ if (type == BPF_PROG_TYPE_UNSPEC) {
+ bpf_object__close(obj);
+ return -EINVAL;
+ }
+ }
+
+ bpf_program__set_type(prog, type);
+ if (prog->idx != obj->efile.text_shndx && !first_prog)
+ first_prog = prog;
+ }
+
+ if (!first_prog) {
+ pr_warning("object file doesn't contain bpf program\n");
bpf_object__close(obj);
return -ENOENT;
}
- bpf_program__set_type(prog, type);
err = bpf_object__load(obj);
if (err) {
bpf_object__close(obj);
@@ -1840,6 +2014,6 @@ int bpf_prog_load(const char *file, enum bpf_prog_type type,
}
*pobj = obj;
- *prog_fd = bpf_program__fd(prog);
+ *prog_fd = bpf_program__fd(first_prog);
return 0;
}
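
Editor's note: the two libbpf features added here meet in the program source: section names drive bpf_program__guess_type(), and a non-inlined helper lands in .text, where the new RELO_CALL handling patches its call sites. A sketch of what such an object might look like on the BPF C side; the SEC macro is the usual clang attribute shorthand and is an assumption of this example, not part of libbpf:

/* built with: clang -O2 -target bpf -c xdp_prog.c -o xdp_prog.o */
#include <linux/bpf.h>

#define SEC(name) __attribute__((section(name), used))

/* Lands in .text; calls to it become BPF_PSEUDO_CALL relocations. */
static __attribute__((noinline)) int parse_pkt(void *data, void *data_end)
{
	return data < data_end ? XDP_PASS : XDP_DROP;
}

/* The "xdp" section name lets bpf_prog_load() guess BPF_PROG_TYPE_XDP. */
SEC("xdp")
int xdp_main(struct xdp_md *ctx)
{
	return parse_pkt((void *)(long)ctx->data, (void *)(long)ctx->data_end);
}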
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 6e20003109e0..f85906533cdd 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: LGPL-2.1 */
+
/*
* Common eBPF ELF object loading operations.
*
@@ -42,6 +44,8 @@ enum libbpf_errno {
LIBBPF_ERRNO__PROG2BIG, /* Program too big */
LIBBPF_ERRNO__KVER, /* Incorrect kernel version */
LIBBPF_ERRNO__PROGTYPE, /* Kernel doesn't support this program type */
+ LIBBPF_ERRNO__WRNGPID, /* Wrong pid in netlink message */
+ LIBBPF_ERRNO__INVSEQ, /* Invalid netlink sequence */
__LIBBPF_ERRNO__END,
};
@@ -246,4 +250,6 @@ long libbpf_get_error(const void *ptr);
int bpf_prog_load(const char *file, enum bpf_prog_type type,
struct bpf_object **pobj, int *prog_fd);
+
+int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags);
#endif
diff --git a/tools/lib/bpf/nlattr.c b/tools/lib/bpf/nlattr.c
new file mode 100644
index 000000000000..4719434278b2
--- /dev/null
+++ b/tools/lib/bpf/nlattr.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: LGPL-2.1
+
+/*
+ * NETLINK Netlink attributes
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation version 2.1
+ * of the License.
+ *
+ * Copyright (c) 2003-2013 Thomas Graf <tgraf@suug.ch>
+ */
+
+#include <errno.h>
+#include "nlattr.h"
+#include <linux/rtnetlink.h>
+#include <string.h>
+#include <stdio.h>
+
+static uint16_t nla_attr_minlen[NLA_TYPE_MAX+1] = {
+ [NLA_U8] = sizeof(uint8_t),
+ [NLA_U16] = sizeof(uint16_t),
+ [NLA_U32] = sizeof(uint32_t),
+ [NLA_U64] = sizeof(uint64_t),
+ [NLA_STRING] = 1,
+ [NLA_FLAG] = 0,
+};
+
+static int nla_len(const struct nlattr *nla)
+{
+ return nla->nla_len - NLA_HDRLEN;
+}
+
+static struct nlattr *nla_next(const struct nlattr *nla, int *remaining)
+{
+ int totlen = NLA_ALIGN(nla->nla_len);
+
+ *remaining -= totlen;
+ return (struct nlattr *) ((char *) nla + totlen);
+}
+
+static int nla_ok(const struct nlattr *nla, int remaining)
+{
+ return remaining >= sizeof(*nla) &&
+ nla->nla_len >= sizeof(*nla) &&
+ nla->nla_len <= remaining;
+}
+
+static void *nla_data(const struct nlattr *nla)
+{
+ return (char *) nla + NLA_HDRLEN;
+}
+
+static int nla_type(const struct nlattr *nla)
+{
+ return nla->nla_type & NLA_TYPE_MASK;
+}
+
+static int validate_nla(struct nlattr *nla, int maxtype,
+ struct nla_policy *policy)
+{
+ struct nla_policy *pt;
+ unsigned int minlen = 0;
+ int type = nla_type(nla);
+
+ if (type < 0 || type > maxtype)
+ return 0;
+
+ pt = &policy[type];
+
+ if (pt->type > NLA_TYPE_MAX)
+ return 0;
+
+ if (pt->minlen)
+ minlen = pt->minlen;
+ else if (pt->type != NLA_UNSPEC)
+ minlen = nla_attr_minlen[pt->type];
+
+ if (nla_len(nla) < minlen)
+ return -1;
+
+ if (pt->maxlen && nla_len(nla) > pt->maxlen)
+ return -1;
+
+ if (pt->type == NLA_STRING) {
+ char *data = nla_data(nla);
+ if (data[nla_len(nla) - 1] != '\0')
+ return -1;
+ }
+
+ return 0;
+}
+
+static inline int nlmsg_len(const struct nlmsghdr *nlh)
+{
+ return nlh->nlmsg_len - NLMSG_HDRLEN;
+}
+
+/**
+ * Create attribute index based on a stream of attributes.
+ * @arg tb Index array to be filled (maxtype+1 elements).
+ * @arg maxtype Maximum attribute type expected and accepted.
+ * @arg head Head of attribute stream.
+ * @arg len Length of attribute stream.
+ * @arg policy Attribute validation policy.
+ *
+ * Iterates over the stream of attributes and stores a pointer to each
+ * attribute in the index array using the attribute type as index to
+ * the array. Attribute with a type greater than the maximum type
+ * specified will be silently ignored in order to maintain backwards
+ * compatibility. If \a policy is not NULL, the attribute will be
+ * validated using the specified policy.
+ *
+ * @see nla_validate
+ * @return 0 on success or a negative error code.
+ */
+static int nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len,
+ struct nla_policy *policy)
+{
+ struct nlattr *nla;
+ int rem, err;
+
+ memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
+
+ nla_for_each_attr(nla, head, len, rem) {
+ int type = nla_type(nla);
+
+ if (type > maxtype)
+ continue;
+
+ if (policy) {
+ err = validate_nla(nla, maxtype, policy);
+ if (err < 0)
+ goto errout;
+ }
+
+ if (tb[type])
+ fprintf(stderr, "Attribute of type %#x found multiple times in message, "
+ "previous attribute is being ignored.\n", type);
+
+ tb[type] = nla;
+ }
+
+ err = 0;
+errout:
+ return err;
+}
+
+/* dump netlink extended ack error message */
+int nla_dump_errormsg(struct nlmsghdr *nlh)
+{
+ struct nla_policy extack_policy[NLMSGERR_ATTR_MAX + 1] = {
+ [NLMSGERR_ATTR_MSG] = { .type = NLA_STRING },
+ [NLMSGERR_ATTR_OFFS] = { .type = NLA_U32 },
+ };
+ struct nlattr *tb[NLMSGERR_ATTR_MAX + 1], *attr;
+ struct nlmsgerr *err;
+ char *errmsg = NULL;
+ int hlen, alen;
+
+ /* no TLVs, nothing to do here */
+ if (!(nlh->nlmsg_flags & NLM_F_ACK_TLVS))
+ return 0;
+
+ err = (struct nlmsgerr *)NLMSG_DATA(nlh);
+ hlen = sizeof(*err);
+
+ /* if NLM_F_CAPPED is set then the inner err msg was capped */
+ if (!(nlh->nlmsg_flags & NLM_F_CAPPED))
+ hlen += nlmsg_len(&err->msg);
+
+ attr = (struct nlattr *) ((void *) err + hlen);
+ alen = nlh->nlmsg_len - hlen;
+
+ if (nla_parse(tb, NLMSGERR_ATTR_MAX, attr, alen, extack_policy) != 0) {
+ fprintf(stderr,
+ "Failed to parse extended error attributes\n");
+ return 0;
+ }
+
+ if (tb[NLMSGERR_ATTR_MSG])
+ errmsg = (char *) nla_data(tb[NLMSGERR_ATTR_MSG]);
+
+ fprintf(stderr, "Kernel error message: %s\n", errmsg);
+
+ return 0;
+}
diff --git a/tools/lib/bpf/nlattr.h b/tools/lib/bpf/nlattr.h
new file mode 100644
index 000000000000..931a71f68f93
--- /dev/null
+++ b/tools/lib/bpf/nlattr.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: LGPL-2.1 */
+
+/*
+ * NETLINK Netlink attributes
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation version 2.1
+ * of the License.
+ *
+ * Copyright (c) 2003-2013 Thomas Graf <tgraf@suug.ch>
+ */
+
+#ifndef __NLATTR_H
+#define __NLATTR_H
+
+#include <stdint.h>
+#include <linux/netlink.h>
+/* avoid multiple definition of netlink features */
+#define __LINUX_NETLINK_H
+
+/**
+ * Standard attribute types to specify validation policy
+ */
+enum {
+ NLA_UNSPEC, /**< Unspecified type, binary data chunk */
+ NLA_U8, /**< 8 bit integer */
+ NLA_U16, /**< 16 bit integer */
+ NLA_U32, /**< 32 bit integer */
+ NLA_U64, /**< 64 bit integer */
+ NLA_STRING, /**< NUL terminated character string */
+ NLA_FLAG, /**< Flag */
+ NLA_MSECS, /**< Micro seconds (64bit) */
+ NLA_NESTED, /**< Nested attributes */
+ __NLA_TYPE_MAX,
+};
+
+#define NLA_TYPE_MAX (__NLA_TYPE_MAX - 1)
+
+/**
+ * @ingroup attr
+ * Attribute validation policy.
+ *
+ * See section @core_doc{core_attr_parse,Attribute Parsing} for more details.
+ */
+struct nla_policy {
+ /** Type of attribute or NLA_UNSPEC */
+ uint16_t type;
+
+ /** Minimal length of payload required */
+ uint16_t minlen;
+
+ /** Maximal length of payload allowed */
+ uint16_t maxlen;
+};
+
+/**
+ * @ingroup attr
+ * Iterate over a stream of attributes
+ * @arg pos loop counter, set to current attribute
+ * @arg head head of attribute stream
+ * @arg len length of attribute stream
+ * @arg rem initialized to len, holds bytes currently remaining in stream
+ */
+#define nla_for_each_attr(pos, head, len, rem) \
+ for (pos = head, rem = len; \
+ nla_ok(pos, rem); \
+ pos = nla_next(pos, &(rem)))
+
+int nla_dump_errormsg(struct nlmsghdr *nlh);
+
+#endif /* __NLATTR_H */
diff --git a/tools/lib/find_bit.c b/tools/lib/find_bit.c
index 42c15f906aac..a88bd507091e 100644
--- a/tools/lib/find_bit.c
+++ b/tools/lib/find_bit.c
@@ -22,22 +22,29 @@
#include <linux/bitmap.h>
#include <linux/kernel.h>
-#if !defined(find_next_bit)
+#if !defined(find_next_bit) || !defined(find_next_zero_bit) || \
+ !defined(find_next_and_bit)
/*
- * This is a common helper function for find_next_bit and
- * find_next_zero_bit. The difference is the "invert" argument, which
- * is XORed with each fetched word before searching it for one bits.
+ * This is a common helper function for find_next_bit, find_next_zero_bit, and
+ * find_next_and_bit. The differences are:
+ * - The "invert" argument, which is XORed with each fetched word before
+ * searching it for one bits.
+ * - The optional "addr2", which is anded with "addr1" if present.
*/
-static unsigned long _find_next_bit(const unsigned long *addr,
- unsigned long nbits, unsigned long start, unsigned long invert)
+static inline unsigned long _find_next_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long nbits,
+ unsigned long start, unsigned long invert)
{
unsigned long tmp;
if (unlikely(start >= nbits))
return nbits;
- tmp = addr[start / BITS_PER_LONG] ^ invert;
+ tmp = addr1[start / BITS_PER_LONG];
+ if (addr2)
+ tmp &= addr2[start / BITS_PER_LONG];
+ tmp ^= invert;
/* Handle 1st word. */
tmp &= BITMAP_FIRST_WORD_MASK(start);
@@ -48,7 +55,10 @@ static unsigned long _find_next_bit(const unsigned long *addr,
if (start >= nbits)
return nbits;
- tmp = addr[start / BITS_PER_LONG] ^ invert;
+ tmp = addr1[start / BITS_PER_LONG];
+ if (addr2)
+ tmp &= addr2[start / BITS_PER_LONG];
+ tmp ^= invert;
}
return min(start + __ffs(tmp), nbits);
@@ -62,7 +72,7 @@ static unsigned long _find_next_bit(const unsigned long *addr,
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
- return _find_next_bit(addr, size, offset, 0UL);
+ return _find_next_bit(addr, NULL, size, offset, 0UL);
}
#endif
@@ -104,6 +114,15 @@ unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
- return _find_next_bit(addr, size, offset, ~0UL);
+ return _find_next_bit(addr, NULL, size, offset, ~0UL);
+}
+#endif
+
+#ifndef find_next_and_bit
+unsigned long find_next_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long size,
+ unsigned long offset)
+{
+ return _find_next_bit(addr1, addr2, size, offset, 0UL);
}
#endif
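
Editor's note: find_next_and_bit() visits bits set in both bitmaps without materializing the intersection. A small sketch of the usual iteration pattern, assuming the declaration from tools/include/asm-generic/bitops/find.h is in scope:

#include <stdio.h>
#include <linux/bitmap.h>

static void walk_common(const unsigned long *a, const unsigned long *b,
			unsigned long nbits)
{
	unsigned long bit;

	for (bit = find_next_and_bit(a, b, nbits, 0);
	     bit < nbits;
	     bit = find_next_and_bit(a, b, nbits, bit + 1))
		printf("bit %lu set in both\n", bit);
}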
diff --git a/tools/lib/subcmd/pager.c b/tools/lib/subcmd/pager.c
index 5ba754d17952..9997a8805a82 100644
--- a/tools/lib/subcmd/pager.c
+++ b/tools/lib/subcmd/pager.c
@@ -30,10 +30,13 @@ static void pager_preexec(void)
* have real input
*/
fd_set in;
+ fd_set exception;
FD_ZERO(&in);
+ FD_ZERO(&exception);
FD_SET(0, &in);
- select(1, &in, NULL, &in, NULL);
+ FD_SET(0, &exception);
+ select(1, &in, NULL, &exception, NULL);
setenv("LESS", "FRSX", 0);
}
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 7ce724fc0544..e5f2acbb70cc 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -1094,7 +1094,7 @@ static enum event_type __read_token(char **tok)
if (strcmp(*tok, "LOCAL_PR_FMT") == 0) {
free(*tok);
*tok = NULL;
- return force_token("\"\%s\" ", tok);
+ return force_token("\"%s\" ", tok);
} else if (strcmp(*tok, "STA_PR_FMT") == 0) {
free(*tok);
*tok = NULL;
@@ -3970,6 +3970,11 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
val &= ~fval;
}
}
+ if (val) {
+ if (print && arg->flags.delim)
+ trace_seq_puts(s, arg->flags.delim);
+ trace_seq_printf(s, "0x%llx", val);
+ }
break;
case PRINT_SYMBOL:
val = eval_num_arg(data, size, event, arg->symbol.field);
@@ -3980,6 +3985,8 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
break;
}
}
+ if (!flag)
+ trace_seq_printf(s, "0x%llx", val);
break;
case PRINT_HEX:
case PRINT_HEX_STR:
@@ -4293,6 +4300,26 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc
goto process_again;
case 'p':
ls = 1;
+ if (isalnum(ptr[1])) {
+ ptr++;
+ /* Check for special pointers */
+ switch (*ptr) {
+ case 's':
+ case 'S':
+ case 'f':
+ case 'F':
+ break;
+ default:
+ /*
+ * Older kernels do not process
+ * dereferenced pointers.
+ * Only process if the pointer
+ * value is a printable.
+ */
+ if (isprint(*(char *)bptr))
+ goto process_string;
+ }
+ }
/* fall through */
case 'd':
case 'u':
@@ -4345,6 +4372,7 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc
break;
case 's':
+ process_string:
arg = alloc_arg();
if (!arg) {
do_warning_event(event, "%s(%d): not enough memory!",
@@ -4949,21 +4977,27 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
else
ls = 2;
- if (*(ptr+1) == 'F' || *(ptr+1) == 'f' ||
- *(ptr+1) == 'S' || *(ptr+1) == 's') {
+ if (isalnum(ptr[1]))
ptr++;
+
+ if (arg->type == PRINT_BSTRING) {
+ trace_seq_puts(s, arg->string.string);
+ break;
+ }
+
+ if (*ptr == 'F' || *ptr == 'f' ||
+ *ptr == 'S' || *ptr == 's') {
show_func = *ptr;
- } else if (*(ptr+1) == 'M' || *(ptr+1) == 'm') {
- print_mac_arg(s, *(ptr+1), data, size, event, arg);
- ptr++;
+ } else if (*ptr == 'M' || *ptr == 'm') {
+ print_mac_arg(s, *ptr, data, size, event, arg);
arg = arg->next;
break;
- } else if (*(ptr+1) == 'I' || *(ptr+1) == 'i') {
+ } else if (*ptr == 'I' || *ptr == 'i') {
int n;
- n = print_ip_arg(s, ptr+1, data, size, event, arg);
+ n = print_ip_arg(s, ptr, data, size, event, arg);
if (n > 0) {
- ptr += n;
+ ptr += n - 1;
arg = arg->next;
break;
}
@@ -5532,8 +5566,14 @@ void pevent_print_event(struct pevent *pevent, struct trace_seq *s,
event = pevent_find_event_by_record(pevent, record);
if (!event) {
- do_warning("ug! no event found for type %d",
- trace_parse_common_type(pevent, record->data));
+ int i;
+ int type = trace_parse_common_type(pevent, record->data);
+
+ do_warning("ug! no event found for type %d", type);
+ trace_seq_printf(s, "[UNKNOWN TYPE %d]", type);
+ for (i = 0; i < record->size; i++)
+ trace_seq_printf(s, " %02x",
+ ((unsigned char *)record->data)[i]);
return;
}
diff --git a/tools/lib/traceevent/event-plugin.c b/tools/lib/traceevent/event-plugin.c
index a16756ae3526..d542cb60ca1a 100644
--- a/tools/lib/traceevent/event-plugin.c
+++ b/tools/lib/traceevent/event-plugin.c
@@ -120,12 +120,12 @@ char **traceevent_plugin_list_options(void)
for (op = reg->options; op->name; op++) {
char *alias = op->plugin_alias ? op->plugin_alias : op->file;
char **temp = list;
+ int ret;
- name = malloc(strlen(op->name) + strlen(alias) + 2);
- if (!name)
+ ret = asprintf(&name, "%s:%s", alias, op->name);
+ if (ret < 0)
goto err;
- sprintf(name, "%s:%s", alias, op->name);
list = realloc(list, count + 2);
if (!list) {
list = temp;
@@ -290,17 +290,14 @@ load_plugin(struct pevent *pevent, const char *path,
const char *alias;
char *plugin;
void *handle;
+ int ret;
- plugin = malloc(strlen(path) + strlen(file) + 2);
- if (!plugin) {
+ ret = asprintf(&plugin, "%s/%s", path, file);
+ if (ret < 0) {
warning("could not allocate plugin memory\n");
return;
}
- strcpy(plugin, path);
- strcat(plugin, "/");
- strcat(plugin, file);
-
handle = dlopen(plugin, RTLD_NOW | RTLD_GLOBAL);
if (!handle) {
warning("could not load plugin '%s'\n%s\n",
@@ -391,6 +388,7 @@ load_plugins(struct pevent *pevent, const char *suffix,
char *home;
char *path;
char *envdir;
+ int ret;
if (pevent->flags & PEVENT_DISABLE_PLUGINS)
return;
@@ -421,16 +419,12 @@ load_plugins(struct pevent *pevent, const char *suffix,
if (!home)
return;
- path = malloc(strlen(home) + strlen(LOCAL_PLUGIN_DIR) + 2);
- if (!path) {
+ ret = asprintf(&path, "%s/%s", home, LOCAL_PLUGIN_DIR);
+ if (ret < 0) {
warning("could not allocate plugin memory\n");
return;
}
- strcpy(path, home);
- strcat(path, "/");
- strcat(path, LOCAL_PLUGIN_DIR);
-
load_plugins_dir(pevent, suffix, path, load_plugin, data);
free(path);
diff --git a/tools/lib/traceevent/kbuffer-parse.c b/tools/lib/traceevent/kbuffer-parse.c
index c94e3641b046..ca424b157e46 100644
--- a/tools/lib/traceevent/kbuffer-parse.c
+++ b/tools/lib/traceevent/kbuffer-parse.c
@@ -24,8 +24,8 @@
#include "kbuffer.h"
-#define MISSING_EVENTS (1 << 31)
-#define MISSING_STORED (1 << 30)
+#define MISSING_EVENTS (1UL << 31)
+#define MISSING_STORED (1UL << 30)
#define COMMIT_MASK ((1 << 27) - 1)
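
MISSING_EVENTS and MISSING_STORED are bit-31 flags, and shifting 1 (a signed int) into bit 31 overflows; when the result is then widened to an unsigned long it sign-extends and no longer compares equal to 0x80000000. A small illustration of why the 1UL form is used, assuming 32-bit int and 64-bit long:

	#include <stdio.h>

	int main(void)
	{
		unsigned long bad  = 1 << 31;	/* signed shift: undefined, typically sign-extends to 0xffffffff80000000 */
		unsigned long good = 1UL << 31;	/* unsigned long shift: always 0x0000000080000000 */

		printf("bad=%lx good=%lx\n", bad, good);
		return 0;
	}
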
diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
index 315df0a70265..431e8b309f6e 100644
--- a/tools/lib/traceevent/parse-filter.c
+++ b/tools/lib/traceevent/parse-filter.c
@@ -287,12 +287,10 @@ find_event(struct pevent *pevent, struct event_list **events,
sys_name = NULL;
}
- reg = malloc(strlen(event_name) + 3);
- if (reg == NULL)
+ ret = asprintf(&reg, "^%s$", event_name);
+ if (ret < 0)
return PEVENT_ERRNO__MEM_ALLOC_FAILED;
- sprintf(reg, "^%s$", event_name);
-
ret = regcomp(&ereg, reg, REG_ICASE|REG_NOSUB);
free(reg);
@@ -300,13 +298,12 @@ find_event(struct pevent *pevent, struct event_list **events,
return PEVENT_ERRNO__INVALID_EVENT_NAME;
if (sys_name) {
- reg = malloc(strlen(sys_name) + 3);
- if (reg == NULL) {
+ ret = asprintf(&reg, "^%s$", sys_name);
+ if (ret < 0) {
regfree(&ereg);
return PEVENT_ERRNO__MEM_ALLOC_FAILED;
}
- sprintf(reg, "^%s$", sys_name);
ret = regcomp(&sreg, reg, REG_ICASE|REG_NOSUB);
free(reg);
if (ret) {
@@ -1634,6 +1631,7 @@ int pevent_filter_clear_trivial(struct event_filter *filter,
case FILTER_TRIVIAL_FALSE:
if (filter_type->filter->boolean.value)
continue;
+ break;
case FILTER_TRIVIAL_TRUE:
if (!filter_type->filter->boolean.value)
continue;
@@ -1879,17 +1877,25 @@ static const char *get_field_str(struct filter_arg *arg, struct pevent_record *r
struct pevent *pevent;
unsigned long long addr;
const char *val = NULL;
+ unsigned int size;
char hex[64];
/* If the field is not a string convert it */
if (arg->str.field->flags & FIELD_IS_STRING) {
val = record->data + arg->str.field->offset;
+ size = arg->str.field->size;
+
+ if (arg->str.field->flags & FIELD_IS_DYNAMIC) {
+ addr = *(unsigned int *)val;
+ val = record->data + (addr & 0xffff);
+ size = addr >> 16;
+ }
/*
* We need to copy the data since we can't be sure the field
* is null terminated.
*/
- if (*(val + arg->str.field->size - 1)) {
+ if (*(val + size - 1)) {
/* copy it */
memcpy(arg->str.buffer, val, arg->str.field->size);
/* the buffer is already NULL terminated */
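
For FIELD_IS_DYNAMIC fields the record does not hold the string directly; it holds a 32-bit descriptor whose low 16 bits are the offset of the data within the record and whose high 16 bits are its length, which is what the new (addr & 0xffff) and (addr >> 16) lines unpack. A stand-alone sketch of that decoding, with illustrative struct and function names:

	#include <stdint.h>
	#include <stddef.h>

	struct dyn_str {
		const char	*data;	/* points into the record payload */
		unsigned int	 len;
	};

	static struct dyn_str decode_dynamic_field(const void *record, size_t field_offset)
	{
		uint32_t desc = *(const uint32_t *)((const char *)record + field_offset);
		struct dyn_str s;

		s.data = (const char *)record + (desc & 0xffff);	/* low 16 bits: offset */
		s.len  = desc >> 16;					/* high 16 bits: length */
		return s;
	}
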
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index 0f94af3ccaaa..e6acc281dd37 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -7,9 +7,11 @@ ARCH := x86
endif
# always use the host compiler
-CC = gcc
-LD = ld
-AR = ar
+HOSTCC ?= gcc
+HOSTLD ?= ld
+CC = $(HOSTCC)
+LD = $(HOSTLD)
+AR = ar
ifeq ($(srctree),)
srctree := $(patsubst %/,%,$(dir $(CURDIR)))
@@ -44,7 +46,7 @@ $(OBJTOOL_IN): fixdep FORCE
@$(MAKE) $(build)=objtool
$(OBJTOOL): $(LIBSUBCMD) $(OBJTOOL_IN)
- @./sync-check.sh
+ @$(CONFIG_SHELL) ./sync-check.sh
$(QUIET_LINK)$(CC) $(OBJTOOL_IN) $(LDFLAGS) -o $@
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 8acfc47af70e..540a209b78ab 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -138,7 +138,7 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
*type = INSN_STACK;
op->src.type = OP_SRC_ADD;
op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
- op->dest.type = OP_SRC_REG;
+ op->dest.type = OP_DEST_REG;
op->dest.reg = CFI_SP;
}
break;
diff --git a/tools/objtool/arch/x86/lib/x86-opcode-map.txt b/tools/objtool/arch/x86/lib/x86-opcode-map.txt
index 12e377184ee4..e0b85930dd77 100644
--- a/tools/objtool/arch/x86/lib/x86-opcode-map.txt
+++ b/tools/objtool/arch/x86/lib/x86-opcode-map.txt
@@ -607,7 +607,7 @@ fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)
fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)
fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)
fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)
-ff:
+ff: UD0
EndTable
Table: 3-byte opcode 1 (0x0f 0x38)
@@ -717,7 +717,7 @@ AVXcode: 2
7e: vpermt2d/q Vx,Hx,Wx (66),(ev)
7f: vpermt2ps/d Vx,Hx,Wx (66),(ev)
80: INVEPT Gy,Mdq (66)
-81: INVPID Gy,Mdq (66)
+81: INVVPID Gy,Mdq (66)
82: INVPCID Gy,Mdq (66)
83: vpmultishiftqb Vx,Hx,Wx (66),(ev)
88: vexpandps/d Vpd,Wpd (66),(ev)
@@ -896,7 +896,7 @@ EndTable
GrpTable: Grp3_1
0: TEST Eb,Ib
-1:
+1: TEST Eb,Ib
2: NOT Eb
3: NEG Eb
4: MUL AL,Eb
@@ -970,6 +970,15 @@ GrpTable: Grp9
EndTable
GrpTable: Grp10
+# all are UD1
+0: UD1
+1: UD1
+2: UD1
+3: UD1
+4: UD1
+5: UD1
+6: UD1
+7: UD1
EndTable
# Grp11A and Grp11B are expressed as Grp11 in Intel SDM
diff --git a/tools/objtool/builtin-orc.c b/tools/objtool/builtin-orc.c
index 4c6b5c9ef073..91e8e19ff5e0 100644
--- a/tools/objtool/builtin-orc.c
+++ b/tools/objtool/builtin-orc.c
@@ -44,6 +44,9 @@ int cmd_orc(int argc, const char **argv)
const char *objname;
argc--; argv++;
+ if (argc <= 0)
+ usage_with_options(orc_usage, check_options);
+
if (!strncmp(argv[0], "gen", 3)) {
argc = parse_options(argc, argv, check_options, orc_usage, 0);
if (argc != 1)
@@ -52,7 +55,6 @@ int cmd_orc(int argc, const char **argv)
objname = argv[0];
return check(objname, no_fp, no_unreachable, true);
-
}
if (!strcmp(argv[0], "dump")) {
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 9b341584eb1b..a8cb69a26576 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -138,6 +138,7 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
"__reiserfs_panic",
"lbug_with_loc",
"fortify_panic",
+ "usercopy_abort",
};
if (func->bind == STB_WEAK)
@@ -428,6 +429,40 @@ static void add_ignores(struct objtool_file *file)
}
/*
+ * FIXME: For now, just ignore any alternatives which add retpolines. This is
+ * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
+ * But it at least allows objtool to understand the control flow *around* the
+ * retpoline.
+ */
+static int add_nospec_ignores(struct objtool_file *file)
+{
+ struct section *sec;
+ struct rela *rela;
+ struct instruction *insn;
+
+ sec = find_section_by_name(file->elf, ".rela.discard.nospec");
+ if (!sec)
+ return 0;
+
+ list_for_each_entry(rela, &sec->rela_list, list) {
+ if (rela->sym->type != STT_SECTION) {
+ WARN("unexpected relocation symbol type in %s", sec->name);
+ return -1;
+ }
+
+ insn = find_insn(file, rela->sym->sec, rela->addend);
+ if (!insn) {
+ WARN("bad .discard.nospec entry");
+ return -1;
+ }
+
+ insn->ignore_alts = true;
+ }
+
+ return 0;
+}
+
+/*
* Find the destination instructions for all jumps.
*/
static int add_jump_destinations(struct objtool_file *file)
@@ -456,6 +491,13 @@ static int add_jump_destinations(struct objtool_file *file)
} else if (rela->sym->sec->idx) {
dest_sec = rela->sym->sec;
dest_off = rela->sym->sym.st_value + rela->addend + 4;
+ } else if (strstr(rela->sym->name, "_indirect_thunk_")) {
+ /*
+ * Retpoline jumps are really dynamic jumps in
+ * disguise, so convert them accordingly.
+ */
+ insn->type = INSN_JUMP_DYNAMIC;
+ continue;
} else {
/* sibling call */
insn->jump_dest = 0;
@@ -502,11 +544,14 @@ static int add_call_destinations(struct objtool_file *file)
dest_off = insn->offset + insn->len + insn->immediate;
insn->call_dest = find_symbol_by_offset(insn->sec,
dest_off);
- if (!insn->call_dest) {
- WARN_FUNC("can't find call dest symbol at offset 0x%lx",
- insn->sec, insn->offset, dest_off);
+
+ if (!insn->call_dest && !insn->ignore) {
+ WARN_FUNC("unsupported intra-function call",
+ insn->sec, insn->offset);
+ WARN("If this is a retpoline, please patch it in with alternatives and annotate it with ANNOTATE_NOSPEC_ALTERNATIVE.");
return -1;
}
+
} else if (rela->sym->type == STT_SECTION) {
insn->call_dest = find_symbol_by_offset(rela->sym->sec,
rela->addend+4);
@@ -550,7 +595,7 @@ static int handle_group_alt(struct objtool_file *file,
struct instruction *orig_insn,
struct instruction **new_insn)
{
- struct instruction *last_orig_insn, *last_new_insn, *insn, *fake_jump;
+ struct instruction *last_orig_insn, *last_new_insn, *insn, *fake_jump = NULL;
unsigned long dest_off;
last_orig_insn = NULL;
@@ -566,28 +611,30 @@ static int handle_group_alt(struct objtool_file *file,
last_orig_insn = insn;
}
- if (!next_insn_same_sec(file, last_orig_insn)) {
- WARN("%s: don't know how to handle alternatives at end of section",
- special_alt->orig_sec->name);
- return -1;
- }
-
- fake_jump = malloc(sizeof(*fake_jump));
- if (!fake_jump) {
- WARN("malloc failed");
- return -1;
+ if (next_insn_same_sec(file, last_orig_insn)) {
+ fake_jump = malloc(sizeof(*fake_jump));
+ if (!fake_jump) {
+ WARN("malloc failed");
+ return -1;
+ }
+ memset(fake_jump, 0, sizeof(*fake_jump));
+ INIT_LIST_HEAD(&fake_jump->alts);
+ clear_insn_state(&fake_jump->state);
+
+ fake_jump->sec = special_alt->new_sec;
+ fake_jump->offset = -1;
+ fake_jump->type = INSN_JUMP_UNCONDITIONAL;
+ fake_jump->jump_dest = list_next_entry(last_orig_insn, list);
+ fake_jump->ignore = true;
}
- memset(fake_jump, 0, sizeof(*fake_jump));
- INIT_LIST_HEAD(&fake_jump->alts);
- clear_insn_state(&fake_jump->state);
-
- fake_jump->sec = special_alt->new_sec;
- fake_jump->offset = -1;
- fake_jump->type = INSN_JUMP_UNCONDITIONAL;
- fake_jump->jump_dest = list_next_entry(last_orig_insn, list);
- fake_jump->ignore = true;
if (!special_alt->new_len) {
+ if (!fake_jump) {
+ WARN("%s: empty alternative at end of section",
+ special_alt->orig_sec->name);
+ return -1;
+ }
+
*new_insn = fake_jump;
return 0;
}
@@ -600,6 +647,8 @@ static int handle_group_alt(struct objtool_file *file,
last_new_insn = insn;
+ insn->ignore = orig_insn->ignore_alts;
+
if (insn->type != INSN_JUMP_CONDITIONAL &&
insn->type != INSN_JUMP_UNCONDITIONAL)
continue;
@@ -608,8 +657,14 @@ static int handle_group_alt(struct objtool_file *file,
continue;
dest_off = insn->offset + insn->len + insn->immediate;
- if (dest_off == special_alt->new_off + special_alt->new_len)
+ if (dest_off == special_alt->new_off + special_alt->new_len) {
+ if (!fake_jump) {
+ WARN("%s: alternative jump to end of section",
+ special_alt->orig_sec->name);
+ return -1;
+ }
insn->jump_dest = fake_jump;
+ }
if (!insn->jump_dest) {
WARN_FUNC("can't find alternative jump destination",
@@ -624,7 +679,8 @@ static int handle_group_alt(struct objtool_file *file,
return -1;
}
- list_add(&fake_jump->list, &last_new_insn->list);
+ if (fake_jump)
+ list_add(&fake_jump->list, &last_new_insn->list);
return 0;
}
@@ -671,12 +727,6 @@ static int add_special_section_alts(struct objtool_file *file)
return ret;
list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
- alt = malloc(sizeof(*alt));
- if (!alt) {
- WARN("malloc failed");
- ret = -1;
- goto out;
- }
orig_insn = find_insn(file, special_alt->orig_sec,
special_alt->orig_off);
@@ -712,6 +762,13 @@ static int add_special_section_alts(struct objtool_file *file)
goto out;
}
+ alt = malloc(sizeof(*alt));
+ if (!alt) {
+ WARN("malloc failed");
+ ret = -1;
+ goto out;
+ }
+
alt->insn = new_insn;
list_add_tail(&alt->list, &orig_insn->alts);
@@ -795,8 +852,14 @@ static int add_switch_table(struct objtool_file *file, struct symbol *func,
* This is a fairly uncommon pattern which is new for GCC 6. As of this
* writing, there are 11 occurrences of it in the allmodconfig kernel.
*
+ * As of GCC 7 there are quite a few more of these and the 'in between' code
+ * is significant. Esp. with KASAN enabled some of the code between the mov
+ * and jmpq uses .rodata itself, which can confuse things.
+ *
* TODO: Once we have DWARF CFI and smarter instruction decoding logic,
* ensure the same register is used in the mov and jump instructions.
+ *
+ * NOTE: RETPOLINE made it harder still to decode dynamic jumps.
*/
static struct rela *find_switch_table(struct objtool_file *file,
struct symbol *func,
@@ -818,12 +881,25 @@ static struct rela *find_switch_table(struct objtool_file *file,
text_rela->addend + 4);
if (!rodata_rela)
return NULL;
+
file->ignore_unreachables = true;
return rodata_rela;
}
/* case 3 */
- func_for_each_insn_continue_reverse(file, func, insn) {
+ /*
+ * Backward search using the @first_jump_src links; these help avoid
+ * much of the 'in between' code, which keeps us from getting confused
+ * by it.
+ */
+ for (insn = list_prev_entry(insn, list);
+
+ &insn->list != &file->insn_list &&
+ insn->sec == func->sec &&
+ insn->offset >= func->offset;
+
+ insn = insn->first_jump_src ?: list_prev_entry(insn, list)) {
+
if (insn->type == INSN_JUMP_DYNAMIC)
break;
@@ -853,14 +929,32 @@ static struct rela *find_switch_table(struct objtool_file *file,
return NULL;
}
+
static int add_func_switch_tables(struct objtool_file *file,
struct symbol *func)
{
- struct instruction *insn, *prev_jump = NULL;
+ struct instruction *insn, *last = NULL, *prev_jump = NULL;
struct rela *rela, *prev_rela = NULL;
int ret;
func_for_each_insn(file, func, insn) {
+ if (!last)
+ last = insn;
+
+ /*
+ * Store back-pointers for unconditional forward jumps such
+ * that find_switch_table() can back-track using those and
+ * avoid some potentially confusing code.
+ */
+ if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
+ insn->offset > last->offset &&
+ insn->jump_dest->offset > insn->offset &&
+ !insn->jump_dest->first_jump_src) {
+
+ insn->jump_dest->first_jump_src = insn;
+ last = insn->jump_dest;
+ }
+
if (insn->type != INSN_JUMP_DYNAMIC)
continue;
@@ -1028,11 +1122,11 @@ static int decode_sections(struct objtool_file *file)
add_ignores(file);
- ret = add_jump_destinations(file);
+ ret = add_nospec_ignores(file);
if (ret)
return ret;
- ret = add_call_destinations(file);
+ ret = add_jump_destinations(file);
if (ret)
return ret;
@@ -1040,6 +1134,10 @@ static int decode_sections(struct objtool_file *file)
if (ret)
return ret;
+ ret = add_call_destinations(file);
+ if (ret)
+ return ret;
+
ret = add_switch_table_alts(file);
if (ret)
return ret;
@@ -1663,10 +1761,12 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
insn->visited = true;
- list_for_each_entry(alt, &insn->alts, list) {
- ret = validate_branch(file, alt->insn, state);
- if (ret)
- return 1;
+ if (!insn->ignore_alts) {
+ list_for_each_entry(alt, &insn->alts, list) {
+ ret = validate_branch(file, alt->insn, state);
+ if (ret)
+ return 1;
+ }
}
switch (insn->type) {
@@ -1836,13 +1936,19 @@ static bool ignore_unreachable_insn(struct instruction *insn)
if (is_kasan_insn(insn) || is_ubsan_insn(insn))
return true;
- if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest) {
- insn = insn->jump_dest;
- continue;
+ if (insn->type == INSN_JUMP_UNCONDITIONAL) {
+ if (insn->jump_dest &&
+ insn->jump_dest->func == insn->func) {
+ insn = insn->jump_dest;
+ continue;
+ }
+
+ break;
}
if (insn->offset + insn->len >= insn->func->offset + insn->func->len)
break;
+
insn = list_next_entry(insn, list);
}
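
The check.c changes above record, for every target of an unconditional forward jump, the first jump that reaches it (first_jump_src), and the switch-table search then walks backwards by following those links instead of stepping through every in-between instruction. A stripped-down sketch of that backward walk, using simplified structures rather than objtool's own:

	#include <stdbool.h>
	#include <stddef.h>

	struct insn {
		struct insn *prev;		/* previous instruction in layout order */
		struct insn *first_jump_src;	/* first unconditional jump targeting this insn, if any */
		bool	     dynamic_jump;	/* e.g. an indirect jmp through a table */
	};

	/* Search backwards from 'from' for a dynamic jump, hopping over code
	 * that only sits between a forward jump and its target. */
	static struct insn *find_prior_dynamic_jump(struct insn *from)
	{
		struct insn *insn;

		for (insn = from->prev; insn;
		     insn = insn->first_jump_src ? insn->first_jump_src : insn->prev) {
			if (insn->dynamic_jump)
				return insn;
		}
		return NULL;
	}
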
diff --git a/tools/objtool/check.h b/tools/objtool/check.h
index 47d9ea70a83d..23a1d065cae1 100644
--- a/tools/objtool/check.h
+++ b/tools/objtool/check.h
@@ -44,9 +44,10 @@ struct instruction {
unsigned int len;
unsigned char type;
unsigned long immediate;
- bool alt_group, visited, dead_end, ignore, hint, save, restore;
+ bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts;
struct symbol *call_dest;
struct instruction *jump_dest;
+ struct instruction *first_jump_src;
struct list_head alts;
struct symbol *func;
struct stack_op stack_op;
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index 24460155c82c..c1c338661699 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -26,6 +26,7 @@
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
+#include <errno.h>
#include "elf.h"
#include "warn.h"
@@ -358,7 +359,8 @@ struct elf *elf_open(const char *name, int flags)
elf->fd = open(name, flags);
if (elf->fd == -1) {
- perror("open");
+ fprintf(stderr, "objtool: Can't open '%s': %s\n",
+ name, strerror(errno));
goto err;
}
diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c
index 36c5bf6a2675..c3343820916a 100644
--- a/tools/objtool/orc_dump.c
+++ b/tools/objtool/orc_dump.c
@@ -76,7 +76,8 @@ int orc_dump(const char *_objname)
int fd, nr_entries, i, *orc_ip = NULL, orc_size = 0;
struct orc_entry *orc = NULL;
char *name;
- unsigned long nr_sections, orc_ip_addr = 0;
+ size_t nr_sections;
+ Elf64_Addr orc_ip_addr = 0;
size_t shstrtab_idx;
Elf *elf;
Elf_Scn *scn;
@@ -187,10 +188,10 @@ int orc_dump(const char *_objname)
return -1;
}
- printf("%s+%lx:", name, rela.r_addend);
+ printf("%s+%llx:", name, (unsigned long long)rela.r_addend);
} else {
- printf("%lx:", orc_ip_addr + (i * sizeof(int)) + orc_ip[i]);
+ printf("%llx:", (unsigned long long)(orc_ip_addr + (i * sizeof(int)) + orc_ip[i]));
}
diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c
index e5ca31429c9b..18384d9be4e1 100644
--- a/tools/objtool/orc_gen.c
+++ b/tools/objtool/orc_gen.c
@@ -98,6 +98,11 @@ static int create_orc_entry(struct section *u_sec, struct section *ip_relasec,
struct orc_entry *orc;
struct rela *rela;
+ if (!insn_sec->sym) {
+ WARN("missing symbol for section %s", insn_sec->name);
+ return -1;
+ }
+
/* populate ORC data */
orc = (struct orc_entry *)u_sec->data->d_buf + idx;
memcpy(orc, o, sizeof(*orc));
@@ -165,6 +170,8 @@ int create_orc_sections(struct objtool_file *file)
/* create .orc_unwind_ip and .rela.orc_unwind_ip sections */
sec = elf_create_section(file->elf, ".orc_unwind_ip", sizeof(int), idx);
+ if (!sec)
+ return -1;
ip_relasec = elf_create_rela_section(file->elf, sec);
if (!ip_relasec)
diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore
index 643cc4ba6872..3e5135dded16 100644
--- a/tools/perf/.gitignore
+++ b/tools/perf/.gitignore
@@ -31,5 +31,6 @@ config.mak.autogen
.config-detected
util/intel-pt-decoder/inat-tables.c
arch/*/include/generated/
+trace/beauty/generated/
pmu-events/pmu-events.c
pmu-events/jevents
diff --git a/tools/perf/Build b/tools/perf/Build
index b48ca40fccf9..e5232d567611 100644
--- a/tools/perf/Build
+++ b/tools/perf/Build
@@ -25,7 +25,7 @@ perf-y += builtin-data.o
perf-y += builtin-version.o
perf-y += builtin-c2c.o
-perf-$(CONFIG_AUDIT) += builtin-trace.o
+perf-$(CONFIG_TRACE) += builtin-trace.o
perf-$(CONFIG_LIBELF) += builtin-probe.o
perf-y += bench/
@@ -50,6 +50,6 @@ libperf-y += util/
libperf-y += arch/
libperf-y += ui/
libperf-y += scripts/
-libperf-$(CONFIG_AUDIT) += trace/beauty/
+libperf-$(CONFIG_TRACE) += trace/beauty/
gtk-y += ui/gtk/
diff --git a/tools/perf/Documentation/perf-buildid-cache.txt b/tools/perf/Documentation/perf-buildid-cache.txt
index 84681007f80f..73c2650bd0db 100644
--- a/tools/perf/Documentation/perf-buildid-cache.txt
+++ b/tools/perf/Documentation/perf-buildid-cache.txt
@@ -24,6 +24,9 @@ OPTIONS
-a::
--add=::
Add specified file to the cache.
+-f::
+--force::
+ Don't complain, do it.
-k::
--kcore::
Add specified kcore file to the cache. For the current host that is
diff --git a/tools/perf/Documentation/perf-data.txt b/tools/perf/Documentation/perf-data.txt
index f0796a47dfa3..90bb4aabe4f8 100644
--- a/tools/perf/Documentation/perf-data.txt
+++ b/tools/perf/Documentation/perf-data.txt
@@ -30,6 +30,10 @@ OPTIONS for 'convert'
-i::
Specify input perf data file path.
+-f::
+--force::
+ Don't complain, do it.
+
-v::
--verbose::
Be more verbose (show counter open errors, etc).
diff --git a/tools/perf/Documentation/perf-evlist.txt b/tools/perf/Documentation/perf-evlist.txt
index 6f7200fb85cf..c0a66400a960 100644
--- a/tools/perf/Documentation/perf-evlist.txt
+++ b/tools/perf/Documentation/perf-evlist.txt
@@ -20,6 +20,10 @@ OPTIONS
--input=::
Input file name. (default: perf.data unless stdin is a fifo)
+-f::
+--force::
+ Don't complain, do it.
+
-F::
--freq=::
Show just the sample frequency used for each event.
diff --git a/tools/perf/Documentation/perf-inject.txt b/tools/perf/Documentation/perf-inject.txt
index 87b2588d1cbd..a64d6588470e 100644
--- a/tools/perf/Documentation/perf-inject.txt
+++ b/tools/perf/Documentation/perf-inject.txt
@@ -60,6 +60,10 @@ include::itrace.txt[]
found in the jitdumps files captured in the input perf.data file. Use this option
if you are monitoring environment using JIT runtimes, such as Java, DART or V8.
+-f::
+--force::
+ Don't complain, do it.
+
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-report[1], linkperf:perf-archive[1]
diff --git a/tools/perf/Documentation/perf-lock.txt b/tools/perf/Documentation/perf-lock.txt
index ab25be28c9dc..74d774592196 100644
--- a/tools/perf/Documentation/perf-lock.txt
+++ b/tools/perf/Documentation/perf-lock.txt
@@ -42,6 +42,10 @@ COMMON OPTIONS
--dump-raw-trace::
Dump raw trace in ASCII.
+-f::
+--force::
+ Don't complain, do it.
+
REPORT OPTIONS
--------------
diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt
index d7e4869905f1..b6866a05edd2 100644
--- a/tools/perf/Documentation/perf-probe.txt
+++ b/tools/perf/Documentation/perf-probe.txt
@@ -170,7 +170,7 @@ Probe points are defined by following syntax.
or,
sdt_PROVIDER:SDTEVENT
-'EVENT' specifies the name of new event, if omitted, it will be set the name of the probed function. You can also specify a group name by 'GROUP', if omitted, set 'probe' is used for kprobe and 'probe_<bin>' is used for uprobe.
+'EVENT' specifies the name of the new event; if omitted, it will be set to the name of the probed function, and for return probes, a "\_\_return" suffix is automatically added to the function name. You can also specify a group name by 'GROUP'; if omitted, 'probe' is used for kprobe and 'probe_<bin>' is used for uprobe.
Note that using existing group name can conflict with other events. Especially, using the group name reserved for kernel modules can hide embedded events in the
modules.
'FUNC' specifies a probed function name, and it may have one of the following options; '+OFFS' is the offset from function entry address in bytes, ':RLN' is the relative-line number from function entry line, and '%return' means that it probes function return. And ';PTN' means lazy matching pattern (see LAZY MATCHING). Note that ';PTN' must be the end of the probe point definition. In addition, '@SRC' specifies a source file which has that function.
@@ -182,6 +182,14 @@ Note that before using the SDT event, the target binary (on which SDT events are
For details of the SDT, see below.
https://sourceware.org/gdb/onlinedocs/gdb/Static-Probe-Points.html
+ESCAPED CHARACTER
+-----------------
+
+In the probe syntax, '=', '@', '+', ':' and ';' are treated as special characters. You can use a backslash ('\') to escape them.
+This is useful if you need to probe a specific versioned symbol, like the @GLIBC_... suffixes, or to specify a source file whose name includes the special characters.
+Note that a single backslash is usually consumed by the shell, so you might need to pass a double backslash (\\) or wrap the argument in single quotes (\'AAA\@BBB').
+See EXAMPLES for how it is used.
+
PROBE ARGUMENT
--------------
Each probe argument follows below syntax.
@@ -277,6 +285,14 @@ Add a USDT probe to a target process running in a different mount namespace
./perf probe --target-ns <target pid> -x /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.121-0.b13.el7_3.x86_64/jre/lib/amd64/server/libjvm.so %sdt_hotspot:thread__sleep__end
+Add a probe on specific versioned symbol by backslash escape
+
+ ./perf probe -x /lib64/libc-2.25.so 'malloc_get_state\@GLIBC_2.2.5'
+
+Add a probe in a source file using special characters by backslash escape
+
+ ./perf probe -x /opt/test/a.out 'foo\+bar.c:4'
+
SEE ALSO
--------
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 5a626ef666c2..3eea6de35a38 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -430,6 +430,9 @@ Configure all used events to run in user space.
--timestamp-filename
Append timestamp to output file name.
+--timestamp-boundary::
+Record timestamp boundary (time of first/last samples).
+
--switch-output[=mode]::
Generate multiple perf.data files, timestamp prefixed, switching to a new one
based on 'mode' value:
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index ddde2b54af57..907e505b6309 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -402,6 +402,26 @@ OPTIONS
stop time is not given (i.e, time string is 'x.y,') then analysis goes
to end of file.
+ Also supports time percent with multiple time ranges. Time string is
+ 'a%/n,b%/m,...' or 'a%-b%,c%-d%,...'.
+
+ For example:
+ Select the second 10% time slice:
+
+ perf report --time 10%/2
+
+ Select from 0% to 10% time slice:
+
+ perf report --time 0%-10%
+
+ Select the first and second 10% time slices:
+
+ perf report --time 10%/1,10%/2
+
+ Select from 0% to 10% and 30% to 40% slices:
+
+ perf report --time 0%-10%,30%-40%
+
--itrace::
Options for decoding instruction tracing data. The options are:
@@ -437,8 +457,23 @@ include::itrace.txt[]
will be printed. Each entry is function name or file/line. Enabled by
default, disable with --no-inline.
+--mmaps::
+ Show --tasks output plus mmap information in a format similar to
+ /proc/<PID>/maps.
+
+ Please note that not all mmaps are stored; options affecting which ones
+ are stored include 'perf record --data', for instance.
+
+--stats::
+ Display overall events statistics without any further processing.
+ (like the one at the end of the perf report -D command)
+
+--tasks::
+ Display monitored tasks stored in perf data. Displays pid/tid/ppid
+ plus the command string, aligned to distinguish parent and child tasks.
+
include::callchain-overhead-calculation.txt[]
SEE ALSO
--------
-linkperf:perf-stat[1], linkperf:perf-annotate[1]
+linkperf:perf-stat[1], linkperf:perf-annotate[1], linkperf:perf-record[1]
diff --git a/tools/perf/Documentation/perf-sched.txt b/tools/perf/Documentation/perf-sched.txt
index 55b67338548e..c7e50f263887 100644
--- a/tools/perf/Documentation/perf-sched.txt
+++ b/tools/perf/Documentation/perf-sched.txt
@@ -74,6 +74,10 @@ OPTIONS
--dump-raw-trace=::
Display verbose dump of the sched data.
+-f::
+--force::
+ Don't complain, do it.
+
OPTIONS for 'perf sched map'
----------------------------
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 2811fcf684cb..7730c1d2b5d3 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -117,7 +117,7 @@ OPTIONS
Comma separated list of fields to print. Options are:
comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff,
srcline, period, iregs, uregs, brstack, brstacksym, flags, bpf-output, brstackinsn,
- brstackoff, callindent, insn, insnlen, synth, phys_addr.
+ brstackoff, callindent, insn, insnlen, synth, phys_addr, metric, misc.
Field list can be prepended with the type, trace, sw or hw,
to indicate to which event type the field list applies.
e.g., -F sw:comm,tid,time,ip,sym and -F trace:time,cpu,trace
@@ -217,6 +217,32 @@ OPTIONS
The brstackoff field will print an offset into a specific dso/binary.
+ With the metric option perf script can compute metrics for
+ sampling periods, similar to perf stat. This requires
+ specifying a group with multiple metrics with the :S option
+ for perf record. perf will sample on the first event, and
+ compute metrics for all the events in the group. Please note
+ that the metric computed is averaged over the whole sampling
+ period, not just for the sample point.
+
+ For sample events it's possible to display the misc field with the -F +misc option;
+ the following letters are displayed for each bit:
+
+ PERF_RECORD_MISC_KERNEL K
+ PERF_RECORD_MISC_USER U
+ PERF_RECORD_MISC_HYPERVISOR H
+ PERF_RECORD_MISC_GUEST_KERNEL G
+ PERF_RECORD_MISC_GUEST_USER g
+ PERF_RECORD_MISC_MMAP_DATA* M
+ PERF_RECORD_MISC_COMM_EXEC E
+ PERF_RECORD_MISC_SWITCH_OUT S
+
+ $ perf script -F +misc ...
+ sched-messaging 1414 K 28690.636582: 4590 cycles ...
+ sched-messaging 1407 U 28690.636600: 325620 cycles ...
+ sched-messaging 1414 K 28690.636608: 19473 cycles ...
+ misc field ___________/
+
-k::
--vmlinux=<file>::
vmlinux pathname
@@ -274,6 +300,9 @@ OPTIONS
Display context switch events i.e. events of type PERF_RECORD_SWITCH or
PERF_RECORD_SWITCH_CPU_WIDE.
+--show-lost-events
+ Display lost events i.e. events of type PERF_RECORD_LOST.
+
--demangle::
Demangle symbol names to human readable form. It's enabled by default,
disable with --no-demangle.
@@ -321,6 +350,22 @@ include::itrace.txt[]
stop time is not given (i.e, time string is 'x.y,') then analysis goes
to end of file.
+ Also supports time percent with multiple time ranges. Time string is
+ 'a%/n,b%/m,...' or 'a%-b%,c%-d%,...'.
+
+ For example:
+ Select the second 10% time slice:
+ perf script --time 10%/2
+
+ Select from 0% to 10% time slice:
+ perf script --time 0%-10%
+
+ Select the first and second 10% time slices:
+ perf script --time 10%/1,10%/2
+
+ Select from 0% to 10% and 30% to 40% slices:
+ perf script --time 0%-10%,30%-40%
+
--max-blocks::
Set the maximum number of program blocks to print with brstackasm for
each sample.
diff --git a/tools/perf/Documentation/perf-timechart.txt b/tools/perf/Documentation/perf-timechart.txt
index df98d1c82688..ef0c7565bd5c 100644
--- a/tools/perf/Documentation/perf-timechart.txt
+++ b/tools/perf/Documentation/perf-timechart.txt
@@ -50,7 +50,9 @@ TIMECHART OPTIONS
-p::
--process::
Select the processes to display, by name or PID
-
+-f::
+--force::
+ Don't complain, do it.
--symfs=<directory>::
Look for files with symbols relative to this directory.
-n::
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index 4353262bc462..8a32cc77bead 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -268,6 +268,12 @@ INTERACTIVE PROMPTING KEYS
[S]::
Stop annotation, return to full profile display.
+[K]::
+ Hide kernel symbols.
+
+[U]::
+ Hide user symbols.
+
[z]::
Toggle event count zeroing across display updates.
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index d53bea6bd571..33a88e984e66 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -86,18 +86,18 @@ comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0-
In per-thread mode with inheritance mode on (default), Events are captured only when
the thread executes on the designated CPUs. Default is to monitor all CPUs.
---duration:
+--duration::
Show only events that had a duration greater than N.M ms.
---sched:
+--sched::
Accrue thread runtime and provide a summary at the end of the session.
--i
---input
+-i::
+--input::
Process events from a given perf data file.
--T
---time
+-T::
+--time::
Print full timestamp rather time relative to first sample.
--comm::
@@ -117,6 +117,10 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
Show tool stats such as number of times fd->pathname was discovered thru
hooking the open syscall return + vfs_getname or via reading /proc/pid/fd, etc.
+-f::
+--force::
+ Don't complain, do it.
+
-F=[all|min|maj]::
--pf=[all|min|maj]::
Trace pagefaults. Optionally, you can specify whether you want minor,
@@ -159,6 +163,10 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
Implies '--call-graph dwarf' when --call-graph not present on the
command line, on systems where DWARF unwinding was built in.
+--print-sample::
+ Print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info for the
+ raw_syscalls:sys_{enter,exit} tracepoints, for debugging.
+
--proc-map-timeout::
When processing pre-existing threads /proc/XXX/mmap, it may take a long time,
because the file may be huge. A time out is needed in such cases.
diff --git a/tools/perf/Documentation/perf.data-file-format.txt b/tools/perf/Documentation/perf.data-file-format.txt
index e90c59c6d815..f7d85e89a98a 100644
--- a/tools/perf/Documentation/perf.data-file-format.txt
+++ b/tools/perf/Documentation/perf.data-file-format.txt
@@ -238,6 +238,33 @@ struct auxtrace_index {
struct auxtrace_index_entry entries[PERF_AUXTRACE_INDEX_ENTRY_COUNT];
};
+ HEADER_STAT = 19,
+
+This is merely a flag signifying that the data section contains data
+recorded from perf stat record.
+
+ HEADER_CACHE = 20,
+
+Description of the cache hierarchy. Based on the Linux sysfs format
+in /sys/devices/system/cpu/cpu*/cache/
+
+ u32 version Currently always 1
+ u32 number_of_cache_levels
+
+struct {
+ u32 level;
+ u32 line_size;
+ u32 sets;
+ u32 ways;
+ struct perf_header_string type;
+ struct perf_header_string size;
+ struct perf_header_string map;
+}[number_of_cache_levels];
+
+ HEADER_SAMPLE_TIME = 21,
+
+Two uint64_t for the time of first sample and the time of last sample.
+
other bits are reserved and should be ignored for now
HEADER_FEAT_BITS = 256,
diff --git a/tools/perf/Documentation/tips.txt b/tools/perf/Documentation/tips.txt
index db0ca3063eae..849599f39c5e 100644
--- a/tools/perf/Documentation/tips.txt
+++ b/tools/perf/Documentation/tips.txt
@@ -32,3 +32,5 @@ Order by the overhead of source file name and line number: perf report -s srclin
System-wide collection from all CPUs: perf record -a
Show current config key-value pairs: perf config --list
Show user configuration overrides: perf config --user --list
+To add Node.js USDT (User-Level Statically Defined Tracing): perf buildid-cache --add `which node`
+To report cacheline events from previous recording: perf c2c report
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index ed65e82f034e..0dfdaa9fa81e 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -22,6 +22,7 @@ include $(srctree)/tools/scripts/Makefile.arch
$(call detected_var,SRCARCH)
NO_PERF_REGS := 1
+NO_SYSCALL_TABLE := 1
# Additional ARCH settings for ppc
ifeq ($(SRCARCH),powerpc)
@@ -33,7 +34,8 @@ endif
ifeq ($(SRCARCH),x86)
$(call detected,CONFIG_X86)
ifeq (${IS_64_BIT}, 1)
- CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT -DHAVE_SYSCALL_TABLE -I$(OUTPUT)arch/x86/include/generated
+ NO_SYSCALL_TABLE := 0
+ CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT -I$(OUTPUT)arch/x86/include/generated
ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memset_64.S
LIBUNWIND_LIBS = -lunwind-x86_64 -lunwind -llzma
$(call detected,CONFIG_X86_64)
@@ -55,12 +57,18 @@ endif
ifeq ($(ARCH),s390)
NO_PERF_REGS := 0
+ NO_SYSCALL_TABLE := 0
+ CFLAGS += -fPIC -I$(OUTPUT)arch/s390/include/generated
endif
ifeq ($(NO_PERF_REGS),0)
$(call detected,CONFIG_PERF_REGS)
endif
+ifneq ($(NO_SYSCALL_TABLE),1)
+ CFLAGS += -DHAVE_SYSCALL_TABLE
+endif
+
# So far there's only x86 and arm libdw unwind support merged in perf.
# Disable it on all other architectures in case libdw unwind
# support is detected in system. Add supported architectures
@@ -97,6 +105,16 @@ FEATURE_CHECK_LDFLAGS-libunwind = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
FEATURE_CHECK_CFLAGS-libunwind-debug-frame = $(LIBUNWIND_CFLAGS)
FEATURE_CHECK_LDFLAGS-libunwind-debug-frame = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
+ifdef CSINCLUDES
+ LIBOPENCSD_CFLAGS := -I$(CSINCLUDES)
+endif
+OPENCSDLIBS := -lopencsd_c_api -lopencsd
+ifdef CSLIBS
+ LIBOPENCSD_LDFLAGS := -L$(CSLIBS)
+endif
+FEATURE_CHECK_CFLAGS-libopencsd := $(LIBOPENCSD_CFLAGS)
+FEATURE_CHECK_LDFLAGS-libopencsd := $(LIBOPENCSD_LDFLAGS) $(OPENCSDLIBS)
+
ifeq ($(NO_PERF_REGS),0)
CFLAGS += -DHAVE_PERF_REGS_SUPPORT
endif
@@ -188,9 +206,7 @@ ifdef PYTHON_CONFIG
PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS))
PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil
PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null)
- ifeq ($(CC_NO_CLANG), 1)
- PYTHON_EMBED_CCOPTS := $(filter-out -specs=%,$(PYTHON_EMBED_CCOPTS))
- endif
+ PYTHON_EMBED_CCOPTS := $(filter-out -specs=%,$(PYTHON_EMBED_CCOPTS))
FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
endif
@@ -267,6 +283,10 @@ ifeq ($(feature-pthread-attr-setaffinity-np), 1)
CFLAGS += -DHAVE_PTHREAD_ATTR_SETAFFINITY_NP
endif
+ifeq ($(feature-pthread-barrier), 1)
+ CFLAGS += -DHAVE_PTHREAD_BARRIER
+endif
+
ifndef NO_BIONIC
$(call feature_check,bionic)
ifeq ($(feature-bionic), 1)
@@ -343,6 +363,21 @@ ifeq ($(feature-setns), 1)
$(call detected,CONFIG_SETNS)
endif
+ifndef NO_CORESIGHT
+ ifeq ($(feature-libopencsd), 1)
+ CFLAGS += -DHAVE_CSTRACE_SUPPORT $(LIBOPENCSD_CFLAGS)
+ LDFLAGS += $(LIBOPENCSD_LDFLAGS)
+ EXTLIBS += $(OPENCSDLIBS)
+ $(call detected,CONFIG_LIBOPENCSD)
+ ifdef CSTRACE_RAW
+ CFLAGS += -DCS_DEBUG_RAW
+ ifeq (${CSTRACE_RAW}, packed)
+ CFLAGS += -DCS_RAW_PACKED
+ endif
+ endif
+ endif
+endif
+
ifndef NO_LIBELF
CFLAGS += -DHAVE_LIBELF_SUPPORT
EXTLIBS += -lelf
@@ -521,14 +556,18 @@ ifndef NO_LIBUNWIND
EXTLIBS += $(EXTLIBS_LIBUNWIND)
endif
-ifndef NO_LIBAUDIT
- ifneq ($(feature-libaudit), 1)
- msg := $(warning No libaudit.h found, disables 'trace' tool, please install audit-libs-devel or libaudit-dev);
- NO_LIBAUDIT := 1
- else
- CFLAGS += -DHAVE_LIBAUDIT_SUPPORT
- EXTLIBS += -laudit
- $(call detected,CONFIG_AUDIT)
+ifeq ($(NO_SYSCALL_TABLE),0)
+ $(call detected,CONFIG_TRACE)
+else
+ ifndef NO_LIBAUDIT
+ ifneq ($(feature-libaudit), 1)
+ msg := $(warning No libaudit.h found, disables 'trace' tool, please install audit-libs-devel or libaudit-dev);
+ NO_LIBAUDIT := 1
+ else
+ CFLAGS += -DHAVE_LIBAUDIT_SUPPORT
+ EXTLIBS += -laudit
+ $(call detected,CONFIG_TRACE)
+ endif
endif
endif
@@ -576,14 +615,15 @@ ifndef NO_GTK2
endif
endif
-
ifdef NO_LIBPERL
CFLAGS += -DNO_LIBPERL
else
PERL_EMBED_LDOPTS = $(shell perl -MExtUtils::Embed -e ldopts 2>/dev/null)
PERL_EMBED_LDFLAGS = $(call strip-libs,$(PERL_EMBED_LDOPTS))
PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS))
- PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null`
+ PERL_EMBED_CCOPTS = $(shell perl -MExtUtils::Embed -e ccopts 2>/dev/null)
+ PERL_EMBED_CCOPTS := $(filter-out -specs=%,$(PERL_EMBED_CCOPTS))
+ PERL_EMBED_LDOPTS := $(filter-out -specs=%,$(PERL_EMBED_LDOPTS))
FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)
ifneq ($(feature-libperl), 1)
@@ -769,7 +809,7 @@ else
NO_PERF_READ_VDSOX32 := 1
endif
-ifdef LIBBABELTRACE
+ifndef NO_LIBBABELTRACE
$(call feature_check,libbabeltrace)
ifeq ($(feature-libbabeltrace), 1)
CFLAGS += -DHAVE_LIBBABELTRACE_SUPPORT $(LIBBABELTRACE_CFLAGS)
@@ -936,6 +976,10 @@ define print_var_code
endef
ifeq ($(VF),1)
+ # Display EXTRA features which are detected manually
+ # from here with a feature_check call and thus cannot
+ # be part of the global state output.
+ $(foreach feat,$(FEATURE_TESTS_EXTRA),$(call feature_print_status,$(feat),))
$(call print_var,prefix)
$(call print_var,bindir)
$(call print_var,libdir)
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 68cf1360a3f3..9b0351d3ce34 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -77,7 +77,7 @@ include ../scripts/utilities.mak
#
# Define NO_ZLIB if you do not want to support compressed kernel modules
#
-# Define LIBBABELTRACE if you DO want libbabeltrace support
+# Define NO_LIBBABELTRACE if you do not want libbabeltrace support
# for CTF data format.
#
# Define NO_LZMA if you do not want to support compressed (xz) kernel modules
@@ -98,6 +98,8 @@ include ../scripts/utilities.mak
# When selected, pass LLVM_CONFIG=/path/to/llvm-config to `make' if
# llvm-config is not in $PATH.
+# Define NO_CORESIGHT if you do not want support for CoreSight trace decoding.
+
# As per kernel Makefile, avoid funny character set dependencies
unexport LC_ALL
LC_COLLATE=C
@@ -462,6 +464,13 @@ prctl_option_tbl := $(srctree)/tools/perf/trace/beauty/prctl_option.sh
$(prctl_option_array): $(prctl_hdr_dir)/prctl.h $(prctl_option_tbl)
$(Q)$(SHELL) '$(prctl_option_tbl)' $(prctl_hdr_dir) > $@
+arch_errno_name_array := $(beauty_outdir)/arch_errno_name_array.c
+arch_errno_hdr_dir := $(srctree)/tools
+arch_errno_tbl := $(srctree)/tools/perf/trace/beauty/arch_errno_names.sh
+
+$(arch_errno_name_array): $(arch_errno_tbl)
+ $(Q)$(SHELL) '$(arch_errno_tbl)' $(CC) $(arch_errno_hdr_dir) > $@
+
all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS)
$(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(LIBTRACEEVENT_DYNAMIC_LIST)
@@ -565,7 +574,8 @@ prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders $(drm_ioc
$(vhost_virtio_ioctl_array) \
$(madvise_behavior_array) \
$(perf_ioctl_array) \
- $(prctl_option_array)
+ $(prctl_option_array) \
+ $(arch_errno_name_array)
$(OUTPUT)%.o: %.c prepare FORCE
$(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
@@ -847,7 +857,8 @@ clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clea
$(OUTPUT)$(kcmp_type_array) \
$(OUTPUT)$(vhost_virtio_ioctl_array) \
$(OUTPUT)$(perf_ioctl_array) \
- $(OUTPUT)$(prctl_option_array)
+ $(OUTPUT)$(prctl_option_array) \
+ $(OUTPUT)$(arch_errno_name_array)
$(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean
#
diff --git a/tools/perf/arch/arm/util/auxtrace.c b/tools/perf/arch/arm/util/auxtrace.c
index 8edf2cb71564..2323581b157d 100644
--- a/tools/perf/arch/arm/util/auxtrace.c
+++ b/tools/perf/arch/arm/util/auxtrace.c
@@ -22,6 +22,42 @@
#include "../../util/evlist.h"
#include "../../util/pmu.h"
#include "cs-etm.h"
+#include "arm-spe.h"
+
+static struct perf_pmu **find_all_arm_spe_pmus(int *nr_spes, int *err)
+{
+ struct perf_pmu **arm_spe_pmus = NULL;
+ int ret, i, nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
+ /* arm_spe_xxxxxxxxx\0 */
+ char arm_spe_pmu_name[sizeof(ARM_SPE_PMU_NAME) + 10];
+
+ arm_spe_pmus = zalloc(sizeof(struct perf_pmu *) * nr_cpus);
+ if (!arm_spe_pmus) {
+ pr_err("spes alloc failed\n");
+ *err = -ENOMEM;
+ return NULL;
+ }
+
+ for (i = 0; i < nr_cpus; i++) {
+ ret = sprintf(arm_spe_pmu_name, "%s%d", ARM_SPE_PMU_NAME, i);
+ if (ret < 0) {
+ pr_err("sprintf failed\n");
+ *err = -ENOMEM;
+ return NULL;
+ }
+
+ arm_spe_pmus[*nr_spes] = perf_pmu__find(arm_spe_pmu_name);
+ if (arm_spe_pmus[*nr_spes]) {
+ pr_debug2("%s %d: arm_spe_pmu %d type %d name %s\n",
+ __func__, __LINE__, *nr_spes,
+ arm_spe_pmus[*nr_spes]->type,
+ arm_spe_pmus[*nr_spes]->name);
+ (*nr_spes)++;
+ }
+ }
+
+ return arm_spe_pmus;
+}
struct auxtrace_record
*auxtrace_record__init(struct perf_evlist *evlist, int *err)
@@ -29,22 +65,51 @@ struct auxtrace_record
struct perf_pmu *cs_etm_pmu;
struct perf_evsel *evsel;
bool found_etm = false;
+ bool found_spe = false;
+ static struct perf_pmu **arm_spe_pmus = NULL;
+ static int nr_spes = 0;
+ int i;
+
+ if (!evlist)
+ return NULL;
cs_etm_pmu = perf_pmu__find(CORESIGHT_ETM_PMU_NAME);
- if (evlist) {
- evlist__for_each_entry(evlist, evsel) {
- if (cs_etm_pmu &&
- evsel->attr.type == cs_etm_pmu->type)
- found_etm = true;
+ if (!arm_spe_pmus)
+ arm_spe_pmus = find_all_arm_spe_pmus(&nr_spes, err);
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (cs_etm_pmu &&
+ evsel->attr.type == cs_etm_pmu->type)
+ found_etm = true;
+
+ if (!nr_spes)
+ continue;
+
+ for (i = 0; i < nr_spes; i++) {
+ if (evsel->attr.type == arm_spe_pmus[i]->type) {
+ found_spe = true;
+ break;
+ }
}
}
+ if (found_etm && found_spe) {
+ pr_err("Concurrent ARM Coresight ETM and SPE operation not currently supported\n");
+ *err = -EOPNOTSUPP;
+ return NULL;
+ }
+
if (found_etm)
return cs_etm_record_init(err);
+#if defined(__aarch64__)
+ if (found_spe)
+ return arm_spe_recording_init(err, arm_spe_pmus[i]);
+#endif
+
/*
- * Clear 'err' even if we haven't found a cs_etm event - that way perf
+ * Clear 'err' even if we haven't found an event - that way perf
* record can still be used even if tracers aren't present. The NULL
* return value will take care of telling the infrastructure HW tracing
* isn't available.
diff --git a/tools/perf/arch/arm/util/pmu.c b/tools/perf/arch/arm/util/pmu.c
index 98d67399a0d6..ac4dffc807b8 100644
--- a/tools/perf/arch/arm/util/pmu.c
+++ b/tools/perf/arch/arm/util/pmu.c
@@ -20,6 +20,7 @@
#include <linux/perf_event.h>
#include "cs-etm.h"
+#include "arm-spe.h"
#include "../../util/pmu.h"
struct perf_event_attr
@@ -30,7 +31,12 @@ struct perf_event_attr
/* add ETM default config here */
pmu->selectable = true;
pmu->set_drv_config = cs_etm_set_drv_config;
+#if defined(__aarch64__)
+ } else if (strstarts(pmu->name, ARM_SPE_PMU_NAME)) {
+ return arm_spe_pmu_default_config(pmu);
+#endif
}
+
#endif
return NULL;
}
diff --git a/tools/perf/arch/arm64/util/Build b/tools/perf/arch/arm64/util/Build
index cef6fb38d17e..c0b8dfef98ba 100644
--- a/tools/perf/arch/arm64/util/Build
+++ b/tools/perf/arch/arm64/util/Build
@@ -1,6 +1,9 @@
+libperf-y += header.o
+libperf-y += sym-handling.o
libperf-$(CONFIG_DWARF) += dwarf-regs.o
libperf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
libperf-$(CONFIG_AUXTRACE) += ../../arm/util/pmu.o \
../../arm/util/auxtrace.o \
- ../../arm/util/cs-etm.o
+ ../../arm/util/cs-etm.o \
+ arm-spe.o
diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c
new file mode 100644
index 000000000000..1120e39c1b00
--- /dev/null
+++ b/tools/perf/arch/arm64/util/arm-spe.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Arm Statistical Profiling Extensions (SPE) support
+ * Copyright (c) 2017-2018, Arm Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/log2.h>
+#include <time.h>
+
+#include "../../util/cpumap.h"
+#include "../../util/evsel.h"
+#include "../../util/evlist.h"
+#include "../../util/session.h"
+#include "../../util/util.h"
+#include "../../util/pmu.h"
+#include "../../util/debug.h"
+#include "../../util/auxtrace.h"
+#include "../../util/arm-spe.h"
+
+#define KiB(x) ((x) * 1024)
+#define MiB(x) ((x) * 1024 * 1024)
+
+struct arm_spe_recording {
+ struct auxtrace_record itr;
+ struct perf_pmu *arm_spe_pmu;
+ struct perf_evlist *evlist;
+};
+
+static size_t
+arm_spe_info_priv_size(struct auxtrace_record *itr __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ return ARM_SPE_AUXTRACE_PRIV_SIZE;
+}
+
+static int arm_spe_info_fill(struct auxtrace_record *itr,
+ struct perf_session *session,
+ struct auxtrace_info_event *auxtrace_info,
+ size_t priv_size)
+{
+ struct arm_spe_recording *sper =
+ container_of(itr, struct arm_spe_recording, itr);
+ struct perf_pmu *arm_spe_pmu = sper->arm_spe_pmu;
+
+ if (priv_size != ARM_SPE_AUXTRACE_PRIV_SIZE)
+ return -EINVAL;
+
+ if (!session->evlist->nr_mmaps)
+ return -EINVAL;
+
+ auxtrace_info->type = PERF_AUXTRACE_ARM_SPE;
+ auxtrace_info->priv[ARM_SPE_PMU_TYPE] = arm_spe_pmu->type;
+
+ return 0;
+}
+
+static int arm_spe_recording_options(struct auxtrace_record *itr,
+ struct perf_evlist *evlist,
+ struct record_opts *opts)
+{
+ struct arm_spe_recording *sper =
+ container_of(itr, struct arm_spe_recording, itr);
+ struct perf_pmu *arm_spe_pmu = sper->arm_spe_pmu;
+ struct perf_evsel *evsel, *arm_spe_evsel = NULL;
+ bool privileged = geteuid() == 0 || perf_event_paranoid() < 0;
+ struct perf_evsel *tracking_evsel;
+ int err;
+
+ sper->evlist = evlist;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel->attr.type == arm_spe_pmu->type) {
+ if (arm_spe_evsel) {
+ pr_err("There may be only one " ARM_SPE_PMU_NAME "x event\n");
+ return -EINVAL;
+ }
+ evsel->attr.freq = 0;
+ evsel->attr.sample_period = 1;
+ arm_spe_evsel = evsel;
+ opts->full_auxtrace = true;
+ }
+ }
+
+ if (!opts->full_auxtrace)
+ return 0;
+
+ /* We are in full trace mode but '-m,xyz' wasn't specified */
+ if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
+ if (privileged) {
+ opts->auxtrace_mmap_pages = MiB(4) / page_size;
+ } else {
+ opts->auxtrace_mmap_pages = KiB(128) / page_size;
+ if (opts->mmap_pages == UINT_MAX)
+ opts->mmap_pages = KiB(256) / page_size;
+ }
+ }
+
+ /* Validate auxtrace_mmap_pages */
+ if (opts->auxtrace_mmap_pages) {
+ size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
+ size_t min_sz = KiB(8);
+
+ if (sz < min_sz || !is_power_of_2(sz)) {
+ pr_err("Invalid mmap size for ARM SPE: must be at least %zuKiB and a power of 2\n",
+ min_sz / 1024);
+ return -EINVAL;
+ }
+ }
+
+
+ /*
+ * To obtain the auxtrace buffer file descriptor, the auxtrace event
+ * must come first.
+ */
+ perf_evlist__to_front(evlist, arm_spe_evsel);
+
+ perf_evsel__set_sample_bit(arm_spe_evsel, CPU);
+ perf_evsel__set_sample_bit(arm_spe_evsel, TIME);
+ perf_evsel__set_sample_bit(arm_spe_evsel, TID);
+
+ /* Add dummy event to keep tracking */
+ err = parse_events(evlist, "dummy:u", NULL);
+ if (err)
+ return err;
+
+ tracking_evsel = perf_evlist__last(evlist);
+ perf_evlist__set_tracking_event(evlist, tracking_evsel);
+
+ tracking_evsel->attr.freq = 0;
+ tracking_evsel->attr.sample_period = 1;
+ perf_evsel__set_sample_bit(tracking_evsel, TIME);
+ perf_evsel__set_sample_bit(tracking_evsel, CPU);
+ perf_evsel__reset_sample_bit(tracking_evsel, BRANCH_STACK);
+
+ return 0;
+}
+
+static u64 arm_spe_reference(struct auxtrace_record *itr __maybe_unused)
+{
+ struct timespec ts;
+
+ clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
+
+ return ts.tv_sec ^ ts.tv_nsec;
+}
+
+static void arm_spe_recording_free(struct auxtrace_record *itr)
+{
+ struct arm_spe_recording *sper =
+ container_of(itr, struct arm_spe_recording, itr);
+
+ free(sper);
+}
+
+static int arm_spe_read_finish(struct auxtrace_record *itr, int idx)
+{
+ struct arm_spe_recording *sper =
+ container_of(itr, struct arm_spe_recording, itr);
+ struct perf_evsel *evsel;
+
+ evlist__for_each_entry(sper->evlist, evsel) {
+ if (evsel->attr.type == sper->arm_spe_pmu->type)
+ return perf_evlist__enable_event_idx(sper->evlist,
+ evsel, idx);
+ }
+ return -EINVAL;
+}
+
+struct auxtrace_record *arm_spe_recording_init(int *err,
+ struct perf_pmu *arm_spe_pmu)
+{
+ struct arm_spe_recording *sper;
+
+ if (!arm_spe_pmu) {
+ *err = -ENODEV;
+ return NULL;
+ }
+
+ sper = zalloc(sizeof(struct arm_spe_recording));
+ if (!sper) {
+ *err = -ENOMEM;
+ return NULL;
+ }
+
+ sper->arm_spe_pmu = arm_spe_pmu;
+ sper->itr.recording_options = arm_spe_recording_options;
+ sper->itr.info_priv_size = arm_spe_info_priv_size;
+ sper->itr.info_fill = arm_spe_info_fill;
+ sper->itr.free = arm_spe_recording_free;
+ sper->itr.reference = arm_spe_reference;
+ sper->itr.read_finish = arm_spe_read_finish;
+ sper->itr.alignment = 0;
+
+ return &sper->itr;
+}
+
+struct perf_event_attr
+*arm_spe_pmu_default_config(struct perf_pmu *arm_spe_pmu)
+{
+ struct perf_event_attr *attr;
+
+ attr = zalloc(sizeof(struct perf_event_attr));
+ if (!attr) {
+ pr_err("arm_spe default config cannot allocate a perf_event_attr\n");
+ return NULL;
+ }
+
+ /*
+ * If kernel driver doesn't advertise a minimum,
+ * use max allowable by PMSIDR_EL1.INTERVAL
+ */
+ if (perf_pmu__scan_file(arm_spe_pmu, "caps/min_interval", "%llu",
+ &attr->sample_period) != 1) {
+ pr_debug("arm_spe driver doesn't advertise a min. interval. Using 4096\n");
+ attr->sample_period = 4096;
+ }
+
+ arm_spe_pmu->selectable = true;
+ arm_spe_pmu->is_uncore = false;
+
+ return attr;
+}
diff --git a/tools/perf/arch/arm64/util/header.c b/tools/perf/arch/arm64/util/header.c
new file mode 100644
index 000000000000..534cd2507d83
--- /dev/null
+++ b/tools/perf/arch/arm64/util/header.c
@@ -0,0 +1,65 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <api/fs/fs.h>
+#include "header.h"
+
+#define MIDR "/regs/identification/midr_el1"
+#define MIDR_SIZE 19
+#define MIDR_REVISION_MASK 0xf
+#define MIDR_VARIANT_SHIFT 20
+#define MIDR_VARIANT_MASK (0xf << MIDR_VARIANT_SHIFT)
+
+char *get_cpuid_str(struct perf_pmu *pmu)
+{
+ char *buf = NULL;
+ char path[PATH_MAX];
+ const char *sysfs = sysfs__mountpoint();
+ int cpu;
+ u64 midr = 0;
+ struct cpu_map *cpus;
+ FILE *file;
+
+ if (!sysfs || !pmu || !pmu->cpus)
+ return NULL;
+
+ buf = malloc(MIDR_SIZE);
+ if (!buf)
+ return NULL;
+
+ /* read midr from list of cpus mapped to this pmu */
+ cpus = cpu_map__get(pmu->cpus);
+ for (cpu = 0; cpu < cpus->nr; cpu++) {
+ scnprintf(path, PATH_MAX, "%s/devices/system/cpu/cpu%d"MIDR,
+ sysfs, cpus->map[cpu]);
+
+ file = fopen(path, "r");
+ if (!file) {
+ pr_debug("fopen failed for file %s\n", path);
+ continue;
+ }
+
+ if (!fgets(buf, MIDR_SIZE, file)) {
+ fclose(file);
+ continue;
+ }
+ fclose(file);
+
+ /* Ignore/clear Variant[23:20] and
+ * Revision[3:0] of MIDR
+ */
+ midr = strtoul(buf, NULL, 16);
+ midr &= (~(MIDR_VARIANT_MASK | MIDR_REVISION_MASK));
+ scnprintf(buf, MIDR_SIZE, "0x%016lx", midr);
+ /* got midr break loop */
+ break;
+ }
+
+ if (!midr) {
+ pr_err("failed to get cpuid string for PMU %s\n", pmu->name);
+ free(buf);
+ buf = NULL;
+ }
+
+ cpu_map__put(cpus);
+ return buf;
+}
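
get_cpuid_str() reads MIDR_EL1 from sysfs and masks out Variant[23:20] and Revision[3:0] so that different revisions of the same core produce the same CPUID string. A small worked example of that masking, using a hypothetical raw MIDR value:

	#include <stdio.h>
	#include <stdint.h>

	#define MIDR_REVISION_MASK	0xf
	#define MIDR_VARIANT_SHIFT	20
	#define MIDR_VARIANT_MASK	(0xf << MIDR_VARIANT_SHIFT)

	int main(void)
	{
		uint64_t midr = 0x410fd034;	/* hypothetical raw MIDR_EL1 (variant 0, revision 4) */

		midr &= ~(uint64_t)(MIDR_VARIANT_MASK | MIDR_REVISION_MASK);
		printf("0x%016llx\n", (unsigned long long)midr);	/* prints 0x00000000410fd030 */
		return 0;
	}
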
diff --git a/tools/perf/arch/arm64/util/sym-handling.c b/tools/perf/arch/arm64/util/sym-handling.c
new file mode 100644
index 000000000000..0051b1ee8450
--- /dev/null
+++ b/tools/perf/arch/arm64/util/sym-handling.c
@@ -0,0 +1,22 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2015 Naveen N. Rao, IBM Corporation
+ */
+
+#include "debug.h"
+#include "symbol.h"
+#include "map.h"
+#include "probe-event.h"
+#include "probe-file.h"
+
+#ifdef HAVE_LIBELF_SUPPORT
+bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
+{
+ return ehdr.e_type == ET_EXEC ||
+ ehdr.e_type == ET_REL ||
+ ehdr.e_type == ET_DYN;
+}
+#endif
diff --git a/tools/perf/arch/common.c b/tools/perf/arch/common.c
index 8c0cfeb55f8e..c6f373508a4f 100644
--- a/tools/perf/arch/common.c
+++ b/tools/perf/arch/common.c
@@ -1,12 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
-#include <sys/utsname.h>
#include "common.h"
+#include "../util/env.h"
#include "../util/util.h"
#include "../util/debug.h"
-#include "sane_ctype.h"
-
const char *const arm_triplets[] = {
"arm-eabi-",
"arm-linux-androideabi-",
@@ -120,55 +118,19 @@ static int lookup_triplets(const char *const *triplets, const char *name)
return -1;
}
-/*
- * Return architecture name in a normalized form.
- * The conversion logic comes from the Makefile.
- */
-const char *normalize_arch(char *arch)
-{
- if (!strcmp(arch, "x86_64"))
- return "x86";
- if (arch[0] == 'i' && arch[2] == '8' && arch[3] == '6')
- return "x86";
- if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5))
- return "sparc";
- if (!strcmp(arch, "aarch64") || !strcmp(arch, "arm64"))
- return "arm64";
- if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110"))
- return "arm";
- if (!strncmp(arch, "s390", 4))
- return "s390";
- if (!strncmp(arch, "parisc", 6))
- return "parisc";
- if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3))
- return "powerpc";
- if (!strncmp(arch, "mips", 4))
- return "mips";
- if (!strncmp(arch, "sh", 2) && isdigit(arch[2]))
- return "sh";
-
- return arch;
-}
-
static int perf_env__lookup_binutils_path(struct perf_env *env,
const char *name, const char **path)
{
int idx;
- const char *arch, *cross_env;
- struct utsname uts;
+ const char *arch = perf_env__arch(env), *cross_env;
const char *const *path_list;
char *buf = NULL;
- arch = normalize_arch(env->arch);
-
- if (uname(&uts) < 0)
- goto out;
-
/*
* We don't need to try to find objdump path for native system.
* Just use default binutils path (e.g.: "objdump").
*/
- if (!strcmp(normalize_arch(uts.machine), arch))
+ if (!strcmp(perf_env__arch(NULL), arch))
goto out;
cross_env = getenv("CROSS_COMPILE");
diff --git a/tools/perf/arch/common.h b/tools/perf/arch/common.h
index a1546509ad24..2d875baa92e6 100644
--- a/tools/perf/arch/common.h
+++ b/tools/perf/arch/common.h
@@ -7,6 +7,5 @@
extern const char *objdump_path;
int perf_env__lookup_objdump(struct perf_env *env);
-const char *normalize_arch(char *arch);
#endif /* ARCH_PERF_COMMON_H */
diff --git a/tools/perf/arch/powerpc/util/header.c b/tools/perf/arch/powerpc/util/header.c
index 7a4cf80c207a..0b242664f5ea 100644
--- a/tools/perf/arch/powerpc/util/header.c
+++ b/tools/perf/arch/powerpc/util/header.c
@@ -35,7 +35,7 @@ get_cpuid(char *buffer, size_t sz)
}
char *
-get_cpuid_str(void)
+get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
{
char *bufp;
diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c
index 9c4e23d8c8ce..53d83d7e6a09 100644
--- a/tools/perf/arch/powerpc/util/sym-handling.c
+++ b/tools/perf/arch/powerpc/util/sym-handling.c
@@ -64,6 +64,14 @@ int arch__compare_symbol_names_n(const char *namea, const char *nameb,
return strncmp(namea, nameb, n);
}
+
+const char *arch__normalize_symbol_name(const char *name)
+{
+ /* Skip over initial dot */
+ if (name && *name == '.')
+ name++;
+ return name;
+}
#endif
#if defined(_CALL_ELF) && _CALL_ELF == 2
diff --git a/tools/perf/arch/s390/Makefile b/tools/perf/arch/s390/Makefile
index 21322e0385b8..dfa6e3103437 100644
--- a/tools/perf/arch/s390/Makefile
+++ b/tools/perf/arch/s390/Makefile
@@ -2,3 +2,29 @@ ifndef NO_DWARF
PERF_HAVE_DWARF_REGS := 1
endif
HAVE_KVM_STAT_SUPPORT := 1
+PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
+
+#
+# Syscall table generation for perf
+#
+
+out := $(OUTPUT)arch/s390/include/generated/asm
+header := $(out)/syscalls_64.c
+syskrn := $(srctree)/arch/s390/kernel/syscalls/syscall.tbl
+sysprf := $(srctree)/tools/perf/arch/s390/entry/syscalls
+sysdef := $(sysprf)/syscall.tbl
+systbl := $(sysprf)/mksyscalltbl
+
+# Create output directory if not already present
+_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
+
+$(header): $(sysdef) $(systbl)
+ @(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \
+ (diff -B $(sysdef) $(syskrn) >/dev/null) \
+ || echo "Warning: Kernel ABI header at '$(sysdef)' differs from latest version at '$(syskrn)'" >&2 )) || true
+ $(Q)$(SHELL) '$(systbl)' $(sysdef) > $@
+
+clean::
+ $(call QUIET_CLEAN, s390) $(RM) $(header)
+
+archheaders: $(header)
diff --git a/tools/perf/arch/s390/annotate/instructions.c b/tools/perf/arch/s390/annotate/instructions.c
index e0e466c650df..8c72b44444cb 100644
--- a/tools/perf/arch/s390/annotate/instructions.c
+++ b/tools/perf/arch/s390/annotate/instructions.c
@@ -18,7 +18,8 @@ static struct ins_ops *s390__associate_ins_ops(struct arch *arch, const char *na
if (!strcmp(name, "br"))
ops = &ret_ops;
- arch__associate_ins_ops(arch, name, ops);
+ if (ops)
+ arch__associate_ins_ops(arch, name, ops);
return ops;
}
diff --git a/tools/perf/arch/s390/entry/syscalls/mksyscalltbl b/tools/perf/arch/s390/entry/syscalls/mksyscalltbl
new file mode 100755
index 000000000000..72ecbb676370
--- /dev/null
+++ b/tools/perf/arch/s390/entry/syscalls/mksyscalltbl
@@ -0,0 +1,32 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Generate system call table for perf
+#
+# Copyright IBM Corp. 2017, 2018
+# Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+#
+
+SYSCALL_TBL=$1
+
+if ! test -r "$SYSCALL_TBL"; then
+ echo "Cannot read input file: $SYSCALL_TBL" >&2
+ exit 1
+fi
+
+create_table()
+{
+ local max_nr nr abi sc discard
+
+ echo 'static const char *syscalltbl_s390_64[] = {'
+ while read nr abi sc discard; do
+ printf '\t[%d] = "%s",\n' $nr $sc
+ max_nr=$nr
+ done
+ echo '};'
+ echo "#define SYSCALLTBL_S390_64_MAX_ID $max_nr"
+}
+
+grep -E "^[[:digit:]]+[[:space:]]+(common|64)" $SYSCALL_TBL \
+ |sort -k1 -n \
+ |create_table
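The script above turns the table that follows into a plain C array indexed by syscall number. A hand-abridged illustration of the expected output shape (numbers and names taken from the table below; not the real generated file):

/* Illustrative only: gaps in the numbering stay NULL in a static array. */
static const char *syscalltbl_s390_64[] = {
	[1]   = "exit",
	[2]   = "fork",
	[3]   = "read",
	[4]   = "write",
	/* ... */
	[380] = "s390_sthyi",
};
#define SYSCALLTBL_S390_64_MAX_ID 380

/* One possible lookup pattern on top of it: */
static const char *s390_syscall_name(int nr)
{
	if (nr < 0 || nr > SYSCALLTBL_S390_64_MAX_ID)
		return NULL;
	return syscalltbl_s390_64[nr];
}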
diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
new file mode 100644
index 000000000000..b38d48464368
--- /dev/null
+++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
@@ -0,0 +1,390 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# System call table for s390
+#
+# Format:
+#
+# <nr> <abi> <syscall> <entry-64bit> <compat-entry>
+#
+# where <abi> can be common, 64, or 32
+
+1 common exit sys_exit sys_exit
+2 common fork sys_fork sys_fork
+3 common read sys_read compat_sys_s390_read
+4 common write sys_write compat_sys_s390_write
+5 common open sys_open compat_sys_open
+6 common close sys_close sys_close
+7 common restart_syscall sys_restart_syscall sys_restart_syscall
+8 common creat sys_creat compat_sys_creat
+9 common link sys_link compat_sys_link
+10 common unlink sys_unlink compat_sys_unlink
+11 common execve sys_execve compat_sys_execve
+12 common chdir sys_chdir compat_sys_chdir
+13 32 time - compat_sys_time
+14 common mknod sys_mknod compat_sys_mknod
+15 common chmod sys_chmod compat_sys_chmod
+16 32 lchown - compat_sys_s390_lchown16
+19 common lseek sys_lseek compat_sys_lseek
+20 common getpid sys_getpid sys_getpid
+21 common mount sys_mount compat_sys_mount
+22 common umount sys_oldumount compat_sys_oldumount
+23 32 setuid - compat_sys_s390_setuid16
+24 32 getuid - compat_sys_s390_getuid16
+25 32 stime - compat_sys_stime
+26 common ptrace sys_ptrace compat_sys_ptrace
+27 common alarm sys_alarm sys_alarm
+29 common pause sys_pause sys_pause
+30 common utime sys_utime compat_sys_utime
+33 common access sys_access compat_sys_access
+34 common nice sys_nice sys_nice
+36 common sync sys_sync sys_sync
+37 common kill sys_kill sys_kill
+38 common rename sys_rename compat_sys_rename
+39 common mkdir sys_mkdir compat_sys_mkdir
+40 common rmdir sys_rmdir compat_sys_rmdir
+41 common dup sys_dup sys_dup
+42 common pipe sys_pipe compat_sys_pipe
+43 common times sys_times compat_sys_times
+45 common brk sys_brk compat_sys_brk
+46 32 setgid - compat_sys_s390_setgid16
+47 32 getgid - compat_sys_s390_getgid16
+48 common signal sys_signal compat_sys_signal
+49 32 geteuid - compat_sys_s390_geteuid16
+50 32 getegid - compat_sys_s390_getegid16
+51 common acct sys_acct compat_sys_acct
+52 common umount2 sys_umount compat_sys_umount
+54 common ioctl sys_ioctl compat_sys_ioctl
+55 common fcntl sys_fcntl compat_sys_fcntl
+57 common setpgid sys_setpgid sys_setpgid
+60 common umask sys_umask sys_umask
+61 common chroot sys_chroot compat_sys_chroot
+62 common ustat sys_ustat compat_sys_ustat
+63 common dup2 sys_dup2 sys_dup2
+64 common getppid sys_getppid sys_getppid
+65 common getpgrp sys_getpgrp sys_getpgrp
+66 common setsid sys_setsid sys_setsid
+67 common sigaction sys_sigaction compat_sys_sigaction
+70 32 setreuid - compat_sys_s390_setreuid16
+71 32 setregid - compat_sys_s390_setregid16
+72 common sigsuspend sys_sigsuspend compat_sys_sigsuspend
+73 common sigpending sys_sigpending compat_sys_sigpending
+74 common sethostname sys_sethostname compat_sys_sethostname
+75 common setrlimit sys_setrlimit compat_sys_setrlimit
+76 32 getrlimit - compat_sys_old_getrlimit
+77 common getrusage sys_getrusage compat_sys_getrusage
+78 common gettimeofday sys_gettimeofday compat_sys_gettimeofday
+79 common settimeofday sys_settimeofday compat_sys_settimeofday
+80 32 getgroups - compat_sys_s390_getgroups16
+81 32 setgroups - compat_sys_s390_setgroups16
+83 common symlink sys_symlink compat_sys_symlink
+85 common readlink sys_readlink compat_sys_readlink
+86 common uselib sys_uselib compat_sys_uselib
+87 common swapon sys_swapon compat_sys_swapon
+88 common reboot sys_reboot compat_sys_reboot
+89 common readdir - compat_sys_old_readdir
+90 common mmap sys_old_mmap compat_sys_s390_old_mmap
+91 common munmap sys_munmap compat_sys_munmap
+92 common truncate sys_truncate compat_sys_truncate
+93 common ftruncate sys_ftruncate compat_sys_ftruncate
+94 common fchmod sys_fchmod sys_fchmod
+95 32 fchown - compat_sys_s390_fchown16
+96 common getpriority sys_getpriority sys_getpriority
+97 common setpriority sys_setpriority sys_setpriority
+99 common statfs sys_statfs compat_sys_statfs
+100 common fstatfs sys_fstatfs compat_sys_fstatfs
+101 32 ioperm - -
+102 common socketcall sys_socketcall compat_sys_socketcall
+103 common syslog sys_syslog compat_sys_syslog
+104 common setitimer sys_setitimer compat_sys_setitimer
+105 common getitimer sys_getitimer compat_sys_getitimer
+106 common stat sys_newstat compat_sys_newstat
+107 common lstat sys_newlstat compat_sys_newlstat
+108 common fstat sys_newfstat compat_sys_newfstat
+110 common lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
+111 common vhangup sys_vhangup sys_vhangup
+112 common idle - -
+114 common wait4 sys_wait4 compat_sys_wait4
+115 common swapoff sys_swapoff compat_sys_swapoff
+116 common sysinfo sys_sysinfo compat_sys_sysinfo
+117 common ipc sys_s390_ipc compat_sys_s390_ipc
+118 common fsync sys_fsync sys_fsync
+119 common sigreturn sys_sigreturn compat_sys_sigreturn
+120 common clone sys_clone compat_sys_clone
+121 common setdomainname sys_setdomainname compat_sys_setdomainname
+122 common uname sys_newuname compat_sys_newuname
+124 common adjtimex sys_adjtimex compat_sys_adjtimex
+125 common mprotect sys_mprotect compat_sys_mprotect
+126 common sigprocmask sys_sigprocmask compat_sys_sigprocmask
+127 common create_module - -
+128 common init_module sys_init_module compat_sys_init_module
+129 common delete_module sys_delete_module compat_sys_delete_module
+130 common get_kernel_syms - -
+131 common quotactl sys_quotactl compat_sys_quotactl
+132 common getpgid sys_getpgid sys_getpgid
+133 common fchdir sys_fchdir sys_fchdir
+134 common bdflush sys_bdflush compat_sys_bdflush
+135 common sysfs sys_sysfs compat_sys_sysfs
+136 common personality sys_s390_personality sys_s390_personality
+137 common afs_syscall - -
+138 32 setfsuid - compat_sys_s390_setfsuid16
+139 32 setfsgid - compat_sys_s390_setfsgid16
+140 32 _llseek - compat_sys_llseek
+141 common getdents sys_getdents compat_sys_getdents
+142 32 _newselect - compat_sys_select
+142 64 select sys_select -
+143 common flock sys_flock sys_flock
+144 common msync sys_msync compat_sys_msync
+145 common readv sys_readv compat_sys_readv
+146 common writev sys_writev compat_sys_writev
+147 common getsid sys_getsid sys_getsid
+148 common fdatasync sys_fdatasync sys_fdatasync
+149 common _sysctl sys_sysctl compat_sys_sysctl
+150 common mlock sys_mlock compat_sys_mlock
+151 common munlock sys_munlock compat_sys_munlock
+152 common mlockall sys_mlockall sys_mlockall
+153 common munlockall sys_munlockall sys_munlockall
+154 common sched_setparam sys_sched_setparam compat_sys_sched_setparam
+155 common sched_getparam sys_sched_getparam compat_sys_sched_getparam
+156 common sched_setscheduler sys_sched_setscheduler compat_sys_sched_setscheduler
+157 common sched_getscheduler sys_sched_getscheduler sys_sched_getscheduler
+158 common sched_yield sys_sched_yield sys_sched_yield
+159 common sched_get_priority_max sys_sched_get_priority_max sys_sched_get_priority_max
+160 common sched_get_priority_min sys_sched_get_priority_min sys_sched_get_priority_min
+161 common sched_rr_get_interval sys_sched_rr_get_interval compat_sys_sched_rr_get_interval
+162 common nanosleep sys_nanosleep compat_sys_nanosleep
+163 common mremap sys_mremap compat_sys_mremap
+164 32 setresuid - compat_sys_s390_setresuid16
+165 32 getresuid - compat_sys_s390_getresuid16
+167 common query_module - -
+168 common poll sys_poll compat_sys_poll
+169 common nfsservctl - -
+170 32 setresgid - compat_sys_s390_setresgid16
+171 32 getresgid - compat_sys_s390_getresgid16
+172 common prctl sys_prctl compat_sys_prctl
+173 common rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn
+174 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
+175 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
+176 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
+177 common rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait
+178 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
+179 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
+180 common pread64 sys_pread64 compat_sys_s390_pread64
+181 common pwrite64 sys_pwrite64 compat_sys_s390_pwrite64
+182 32 chown - compat_sys_s390_chown16
+183 common getcwd sys_getcwd compat_sys_getcwd
+184 common capget sys_capget compat_sys_capget
+185 common capset sys_capset compat_sys_capset
+186 common sigaltstack sys_sigaltstack compat_sys_sigaltstack
+187 common sendfile sys_sendfile64 compat_sys_sendfile
+188 common getpmsg - -
+189 common putpmsg - -
+190 common vfork sys_vfork sys_vfork
+191 32 ugetrlimit - compat_sys_getrlimit
+191 64 getrlimit sys_getrlimit -
+192 32 mmap2 - compat_sys_s390_mmap2
+193 32 truncate64 - compat_sys_s390_truncate64
+194 32 ftruncate64 - compat_sys_s390_ftruncate64
+195 32 stat64 - compat_sys_s390_stat64
+196 32 lstat64 - compat_sys_s390_lstat64
+197 32 fstat64 - compat_sys_s390_fstat64
+198 32 lchown32 - compat_sys_lchown
+198 64 lchown sys_lchown -
+199 32 getuid32 - sys_getuid
+199 64 getuid sys_getuid -
+200 32 getgid32 - sys_getgid
+200 64 getgid sys_getgid -
+201 32 geteuid32 - sys_geteuid
+201 64 geteuid sys_geteuid -
+202 32 getegid32 - sys_getegid
+202 64 getegid sys_getegid -
+203 32 setreuid32 - sys_setreuid
+203 64 setreuid sys_setreuid -
+204 32 setregid32 - sys_setregid
+204 64 setregid sys_setregid -
+205 32 getgroups32 - compat_sys_getgroups
+205 64 getgroups sys_getgroups -
+206 32 setgroups32 - compat_sys_setgroups
+206 64 setgroups sys_setgroups -
+207 32 fchown32 - sys_fchown
+207 64 fchown sys_fchown -
+208 32 setresuid32 - sys_setresuid
+208 64 setresuid sys_setresuid -
+209 32 getresuid32 - compat_sys_getresuid
+209 64 getresuid sys_getresuid -
+210 32 setresgid32 - sys_setresgid
+210 64 setresgid sys_setresgid -
+211 32 getresgid32 - compat_sys_getresgid
+211 64 getresgid sys_getresgid -
+212 32 chown32 - compat_sys_chown
+212 64 chown sys_chown -
+213 32 setuid32 - sys_setuid
+213 64 setuid sys_setuid -
+214 32 setgid32 - sys_setgid
+214 64 setgid sys_setgid -
+215 32 setfsuid32 - sys_setfsuid
+215 64 setfsuid sys_setfsuid -
+216 32 setfsgid32 - sys_setfsgid
+216 64 setfsgid sys_setfsgid -
+217 common pivot_root sys_pivot_root compat_sys_pivot_root
+218 common mincore sys_mincore compat_sys_mincore
+219 common madvise sys_madvise compat_sys_madvise
+220 common getdents64 sys_getdents64 compat_sys_getdents64
+221 32 fcntl64 - compat_sys_fcntl64
+222 common readahead sys_readahead compat_sys_s390_readahead
+223 32 sendfile64 - compat_sys_sendfile64
+224 common setxattr sys_setxattr compat_sys_setxattr
+225 common lsetxattr sys_lsetxattr compat_sys_lsetxattr
+226 common fsetxattr sys_fsetxattr compat_sys_fsetxattr
+227 common getxattr sys_getxattr compat_sys_getxattr
+228 common lgetxattr sys_lgetxattr compat_sys_lgetxattr
+229 common fgetxattr sys_fgetxattr compat_sys_fgetxattr
+230 common listxattr sys_listxattr compat_sys_listxattr
+231 common llistxattr sys_llistxattr compat_sys_llistxattr
+232 common flistxattr sys_flistxattr compat_sys_flistxattr
+233 common removexattr sys_removexattr compat_sys_removexattr
+234 common lremovexattr sys_lremovexattr compat_sys_lremovexattr
+235 common fremovexattr sys_fremovexattr compat_sys_fremovexattr
+236 common gettid sys_gettid sys_gettid
+237 common tkill sys_tkill sys_tkill
+238 common futex sys_futex compat_sys_futex
+239 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
+240 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
+241 common tgkill sys_tgkill sys_tgkill
+243 common io_setup sys_io_setup compat_sys_io_setup
+244 common io_destroy sys_io_destroy compat_sys_io_destroy
+245 common io_getevents sys_io_getevents compat_sys_io_getevents
+246 common io_submit sys_io_submit compat_sys_io_submit
+247 common io_cancel sys_io_cancel compat_sys_io_cancel
+248 common exit_group sys_exit_group sys_exit_group
+249 common epoll_create sys_epoll_create sys_epoll_create
+250 common epoll_ctl sys_epoll_ctl compat_sys_epoll_ctl
+251 common epoll_wait sys_epoll_wait compat_sys_epoll_wait
+252 common set_tid_address sys_set_tid_address compat_sys_set_tid_address
+253 common fadvise64 sys_fadvise64_64 compat_sys_s390_fadvise64
+254 common timer_create sys_timer_create compat_sys_timer_create
+255 common timer_settime sys_timer_settime compat_sys_timer_settime
+256 common timer_gettime sys_timer_gettime compat_sys_timer_gettime
+257 common timer_getoverrun sys_timer_getoverrun sys_timer_getoverrun
+258 common timer_delete sys_timer_delete sys_timer_delete
+259 common clock_settime sys_clock_settime compat_sys_clock_settime
+260 common clock_gettime sys_clock_gettime compat_sys_clock_gettime
+261 common clock_getres sys_clock_getres compat_sys_clock_getres
+262 common clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep
+264 32 fadvise64_64 - compat_sys_s390_fadvise64_64
+265 common statfs64 sys_statfs64 compat_sys_statfs64
+266 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
+267 common remap_file_pages sys_remap_file_pages compat_sys_remap_file_pages
+268 common mbind sys_mbind compat_sys_mbind
+269 common get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
+270 common set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy
+271 common mq_open sys_mq_open compat_sys_mq_open
+272 common mq_unlink sys_mq_unlink compat_sys_mq_unlink
+273 common mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend
+274 common mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive
+275 common mq_notify sys_mq_notify compat_sys_mq_notify
+276 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
+277 common kexec_load sys_kexec_load compat_sys_kexec_load
+278 common add_key sys_add_key compat_sys_add_key
+279 common request_key sys_request_key compat_sys_request_key
+280 common keyctl sys_keyctl compat_sys_keyctl
+281 common waitid sys_waitid compat_sys_waitid
+282 common ioprio_set sys_ioprio_set sys_ioprio_set
+283 common ioprio_get sys_ioprio_get sys_ioprio_get
+284 common inotify_init sys_inotify_init sys_inotify_init
+285 common inotify_add_watch sys_inotify_add_watch compat_sys_inotify_add_watch
+286 common inotify_rm_watch sys_inotify_rm_watch sys_inotify_rm_watch
+287 common migrate_pages sys_migrate_pages compat_sys_migrate_pages
+288 common openat sys_openat compat_sys_openat
+289 common mkdirat sys_mkdirat compat_sys_mkdirat
+290 common mknodat sys_mknodat compat_sys_mknodat
+291 common fchownat sys_fchownat compat_sys_fchownat
+292 common futimesat sys_futimesat compat_sys_futimesat
+293 32 fstatat64 - compat_sys_s390_fstatat64
+293 64 newfstatat sys_newfstatat -
+294 common unlinkat sys_unlinkat compat_sys_unlinkat
+295 common renameat sys_renameat compat_sys_renameat
+296 common linkat sys_linkat compat_sys_linkat
+297 common symlinkat sys_symlinkat compat_sys_symlinkat
+298 common readlinkat sys_readlinkat compat_sys_readlinkat
+299 common fchmodat sys_fchmodat compat_sys_fchmodat
+300 common faccessat sys_faccessat compat_sys_faccessat
+301 common pselect6 sys_pselect6 compat_sys_pselect6
+302 common ppoll sys_ppoll compat_sys_ppoll
+303 common unshare sys_unshare compat_sys_unshare
+304 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
+305 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
+306 common splice sys_splice compat_sys_splice
+307 common sync_file_range sys_sync_file_range compat_sys_s390_sync_file_range
+308 common tee sys_tee compat_sys_tee
+309 common vmsplice sys_vmsplice compat_sys_vmsplice
+310 common move_pages sys_move_pages compat_sys_move_pages
+311 common getcpu sys_getcpu compat_sys_getcpu
+312 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
+313 common utimes sys_utimes compat_sys_utimes
+314 common fallocate sys_fallocate compat_sys_s390_fallocate
+315 common utimensat sys_utimensat compat_sys_utimensat
+316 common signalfd sys_signalfd compat_sys_signalfd
+317 common timerfd - -
+318 common eventfd sys_eventfd sys_eventfd
+319 common timerfd_create sys_timerfd_create sys_timerfd_create
+320 common timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime
+321 common timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime
+322 common signalfd4 sys_signalfd4 compat_sys_signalfd4
+323 common eventfd2 sys_eventfd2 sys_eventfd2
+324 common inotify_init1 sys_inotify_init1 sys_inotify_init1
+325 common pipe2 sys_pipe2 compat_sys_pipe2
+326 common dup3 sys_dup3 sys_dup3
+327 common epoll_create1 sys_epoll_create1 sys_epoll_create1
+328 common preadv sys_preadv compat_sys_preadv
+329 common pwritev sys_pwritev compat_sys_pwritev
+330 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
+331 common perf_event_open sys_perf_event_open compat_sys_perf_event_open
+332 common fanotify_init sys_fanotify_init sys_fanotify_init
+333 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
+334 common prlimit64 sys_prlimit64 compat_sys_prlimit64
+335 common name_to_handle_at sys_name_to_handle_at compat_sys_name_to_handle_at
+336 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at
+337 common clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime
+338 common syncfs sys_syncfs sys_syncfs
+339 common setns sys_setns sys_setns
+340 common process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv
+341 common process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev
+342 common s390_runtime_instr sys_s390_runtime_instr sys_s390_runtime_instr
+343 common kcmp sys_kcmp compat_sys_kcmp
+344 common finit_module sys_finit_module compat_sys_finit_module
+345 common sched_setattr sys_sched_setattr compat_sys_sched_setattr
+346 common sched_getattr sys_sched_getattr compat_sys_sched_getattr
+347 common renameat2 sys_renameat2 compat_sys_renameat2
+348 common seccomp sys_seccomp compat_sys_seccomp
+349 common getrandom sys_getrandom compat_sys_getrandom
+350 common memfd_create sys_memfd_create compat_sys_memfd_create
+351 common bpf sys_bpf compat_sys_bpf
+352 common s390_pci_mmio_write sys_s390_pci_mmio_write compat_sys_s390_pci_mmio_write
+353 common s390_pci_mmio_read sys_s390_pci_mmio_read compat_sys_s390_pci_mmio_read
+354 common execveat sys_execveat compat_sys_execveat
+355 common userfaultfd sys_userfaultfd sys_userfaultfd
+356 common membarrier sys_membarrier sys_membarrier
+357 common recvmmsg sys_recvmmsg compat_sys_recvmmsg
+358 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
+359 common socket sys_socket sys_socket
+360 common socketpair sys_socketpair compat_sys_socketpair
+361 common bind sys_bind compat_sys_bind
+362 common connect sys_connect compat_sys_connect
+363 common listen sys_listen sys_listen
+364 common accept4 sys_accept4 compat_sys_accept4
+365 common getsockopt sys_getsockopt compat_sys_getsockopt
+366 common setsockopt sys_setsockopt compat_sys_setsockopt
+367 common getsockname sys_getsockname compat_sys_getsockname
+368 common getpeername sys_getpeername compat_sys_getpeername
+369 common sendto sys_sendto compat_sys_sendto
+370 common sendmsg sys_sendmsg compat_sys_sendmsg
+371 common recvfrom sys_recvfrom compat_sys_recvfrom
+372 common recvmsg sys_recvmsg compat_sys_recvmsg
+373 common shutdown sys_shutdown sys_shutdown
+374 common mlock2 sys_mlock2 compat_sys_mlock2
+375 common copy_file_range sys_copy_file_range compat_sys_copy_file_range
+376 common preadv2 sys_preadv2 compat_sys_preadv2
+377 common pwritev2 sys_pwritev2 compat_sys_pwritev2
+378 common s390_guarded_storage sys_s390_guarded_storage compat_sys_s390_guarded_storage
+379 common statx sys_statx compat_sys_statx
+380 common s390_sthyi sys_s390_sthyi compat_sys_s390_sthyi
diff --git a/tools/perf/arch/s390/include/perf_regs.h b/tools/perf/arch/s390/include/perf_regs.h
index d2df54a6bc5a..bcfbaed78cc2 100644
--- a/tools/perf/arch/s390/include/perf_regs.h
+++ b/tools/perf/arch/s390/include/perf_regs.h
@@ -3,7 +3,7 @@
#include <stdlib.h>
#include <linux/types.h>
-#include <../../../../arch/s390/include/uapi/asm/perf_regs.h>
+#include <asm/perf_regs.h>
void perf_regs_load(u64 *regs);
diff --git a/tools/perf/arch/s390/util/dwarf-regs.c b/tools/perf/arch/s390/util/dwarf-regs.c
index f47576ce13ea..a8ace5cc6301 100644
--- a/tools/perf/arch/s390/util/dwarf-regs.c
+++ b/tools/perf/arch/s390/util/dwarf-regs.c
@@ -2,17 +2,43 @@
/*
* Mapping of DWARF debug register numbers into register names.
*
- * Copyright IBM Corp. 2010
- * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
+ * Copyright IBM Corp. 2010, 2017
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
+ * Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
*
*/
+#include <errno.h>
#include <stddef.h>
-#include <dwarf-regs.h>
+#include <stdlib.h>
#include <linux/kernel.h>
+#include <asm/ptrace.h>
+#include <string.h>
+#include <dwarf-regs.h>
#include "dwarf-regs-table.h"
const char *get_arch_regstr(unsigned int n)
{
return (n >= ARRAY_SIZE(s390_dwarf_regs)) ? NULL : s390_dwarf_regs[n];
}
+
+/*
+ * Convert a register name into its offset within struct pt_regs (kernel).
+ * This is required by the BPF prologue generator. The BPF
+ * program is called in the BPF overflow handler in the perf
+ * core.
+ */
+int regs_query_register_offset(const char *name)
+{
+ unsigned long gpr;
+
+ if (!name || strncmp(name, "%r", 2))
+ return -EINVAL;
+
+ errno = 0;
+ gpr = strtoul(name + 2, NULL, 10);
+ if (errno || gpr >= 16)
+ return -EINVAL;
+
+ return offsetof(user_pt_regs, gprs) + 8 * gpr;
+}
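A small hedged usage sketch of the new helper: a probe argument such as "%r11" should resolve to the byte offset of gprs[11] inside user_pt_regs (8 bytes per GPR), and anything outside "%r0".."%r15" yields -EINVAL. The names regs_query_register_offset() and user_pt_regs come from the files touched above; the checking function itself is hypothetical:

static void check_s390_gpr_offsets(void)	/* illustration only */
{
	int off = regs_query_register_offset("%r11");

	BUG_ON(off != (int)(offsetof(user_pt_regs, gprs) + 8 * 11));
	BUG_ON(regs_query_register_offset("r11") != -EINVAL);	/* no "%r" prefix */
	BUG_ON(regs_query_register_offset("%r16") != -EINVAL);	/* only GPRs 0..15 */
}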
diff --git a/tools/perf/arch/x86/tests/perf-time-to-tsc.c b/tools/perf/arch/x86/tests/perf-time-to-tsc.c
index b59678e8c1e2..06abe8108b33 100644
--- a/tools/perf/arch/x86/tests/perf-time-to-tsc.c
+++ b/tools/perf/arch/x86/tests/perf-time-to-tsc.c
@@ -84,7 +84,7 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
CHECK__(perf_evlist__open(evlist));
- CHECK__(perf_evlist__mmap(evlist, UINT_MAX, false));
+ CHECK__(perf_evlist__mmap(evlist, UINT_MAX));
pc = evlist->mmap[0].base;
ret = perf_read_tsc_conversion(pc, &tc);
diff --git a/tools/perf/arch/x86/util/header.c b/tools/perf/arch/x86/util/header.c
index 33027c5e6f92..fb0d71afee8b 100644
--- a/tools/perf/arch/x86/util/header.c
+++ b/tools/perf/arch/x86/util/header.c
@@ -66,11 +66,11 @@ get_cpuid(char *buffer, size_t sz)
}
char *
-get_cpuid_str(void)
+get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
{
char *buf = malloc(128);
- if (__get_cpuid(buf, 128, "%s-%u-%X$") < 0) {
+ if (buf && __get_cpuid(buf, 128, "%s-%u-%X$") < 0) {
free(buf);
return NULL;
}
diff --git a/tools/perf/arch/x86/util/unwind-libunwind.c b/tools/perf/arch/x86/util/unwind-libunwind.c
index 9c917f80c906..05920e3edf7a 100644
--- a/tools/perf/arch/x86/util/unwind-libunwind.c
+++ b/tools/perf/arch/x86/util/unwind-libunwind.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
-#ifndef REMOTE_UNWIND_LIBUNWIND
#include <errno.h>
+#ifndef REMOTE_UNWIND_LIBUNWIND
#include <libunwind.h>
#include "perf_regs.h"
#include "../../util/unwind.h"
diff --git a/tools/perf/bench/futex-hash.c b/tools/perf/bench/futex-hash.c
index 58ae6ed8f38b..9aa3a674829b 100644
--- a/tools/perf/bench/futex-hash.c
+++ b/tools/perf/bench/futex-hash.c
@@ -24,9 +24,9 @@
#include <subcmd/parse-options.h>
#include "bench.h"
#include "futex.h"
+#include "cpumap.h"
#include <err.h>
-#include <sys/time.h>
static unsigned int nthreads = 0;
static unsigned int nsecs = 10;
@@ -118,11 +118,12 @@ static void print_summary(void)
int bench_futex_hash(int argc, const char **argv)
{
int ret = 0;
- cpu_set_t cpu;
+ cpu_set_t cpuset;
struct sigaction act;
- unsigned int i, ncpus;
+ unsigned int i;
pthread_attr_t thread_attr;
struct worker *worker = NULL;
+ struct cpu_map *cpu;
argc = parse_options(argc, argv, options, bench_futex_hash_usage, 0);
if (argc) {
@@ -130,14 +131,16 @@ int bench_futex_hash(int argc, const char **argv)
exit(EXIT_FAILURE);
}
- ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+ cpu = cpu_map__new(NULL);
+ if (!cpu)
+ goto errmem;
sigfillset(&act.sa_mask);
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
if (!nthreads) /* default to the number of CPUs */
- nthreads = ncpus;
+ nthreads = cpu->nr;
worker = calloc(nthreads, sizeof(*worker));
if (!worker)
@@ -163,10 +166,10 @@ int bench_futex_hash(int argc, const char **argv)
if (!worker[i].futex)
goto errmem;
- CPU_ZERO(&cpu);
- CPU_SET(i % ncpus, &cpu);
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu->map[i % cpu->nr], &cpuset);
- ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu);
+ ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset);
if (ret)
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
@@ -217,6 +220,7 @@ int bench_futex_hash(int argc, const char **argv)
print_summary();
free(worker);
+ free(cpu);
return ret;
errmem:
err(EXIT_FAILURE, "calloc");
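The switch from sysconf(_SC_NPROCESSORS_ONLN) to a perf cpu_map matters on systems where online CPU ids are not contiguous: "i % ncpus" could name an offline or non-existent CPU, while cpu->map[i % cpu->nr] always picks a real online id. A generic sketch of the same pattern with a plain array standing in for the cpu_map (the array contents are an assumed example):

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>

/* Pin worker i to the i-th *online* CPU id instead of assuming ids 0..n-1. */
static int pin_to_online_cpu(pthread_attr_t *attr, const int *online, int nr, int i)
{
	cpu_set_t cpuset;

	CPU_ZERO(&cpuset);
	CPU_SET(online[i % nr], &cpuset);	/* e.g. online = {0, 2, 4, 6} */
	return pthread_attr_setaffinity_np(attr, sizeof(cpuset), &cpuset);
}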
diff --git a/tools/perf/bench/futex-lock-pi.c b/tools/perf/bench/futex-lock-pi.c
index 08653ae8a8c4..8e9c4753e304 100644
--- a/tools/perf/bench/futex-lock-pi.c
+++ b/tools/perf/bench/futex-lock-pi.c
@@ -15,6 +15,7 @@
#include <errno.h>
#include "bench.h"
#include "futex.h"
+#include "cpumap.h"
#include <err.h>
#include <stdlib.h>
@@ -32,7 +33,7 @@ static struct worker *worker;
static unsigned int nsecs = 10;
static bool silent = false, multi = false;
static bool done = false, fshared = false;
-static unsigned int ncpus, nthreads = 0;
+static unsigned int nthreads = 0;
static int futex_flag = 0;
struct timeval start, end, runtime;
static pthread_mutex_t thread_lock;
@@ -113,9 +114,10 @@ static void *workerfn(void *arg)
return NULL;
}
-static void create_threads(struct worker *w, pthread_attr_t thread_attr)
+static void create_threads(struct worker *w, pthread_attr_t thread_attr,
+ struct cpu_map *cpu)
{
- cpu_set_t cpu;
+ cpu_set_t cpuset;
unsigned int i;
threads_starting = nthreads;
@@ -130,10 +132,10 @@ static void create_threads(struct worker *w, pthread_attr_t thread_attr)
} else
worker[i].futex = &global_futex;
- CPU_ZERO(&cpu);
- CPU_SET(i % ncpus, &cpu);
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu->map[i % cpu->nr], &cpuset);
- if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu))
+ if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
if (pthread_create(&w[i].thread, &thread_attr, workerfn, &worker[i]))
@@ -147,19 +149,22 @@ int bench_futex_lock_pi(int argc, const char **argv)
unsigned int i;
struct sigaction act;
pthread_attr_t thread_attr;
+ struct cpu_map *cpu;
argc = parse_options(argc, argv, options, bench_futex_lock_pi_usage, 0);
if (argc)
goto err;
- ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+ cpu = cpu_map__new(NULL);
+ if (!cpu)
+ err(EXIT_FAILURE, "calloc");
sigfillset(&act.sa_mask);
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
if (!nthreads)
- nthreads = ncpus;
+ nthreads = cpu->nr;
worker = calloc(nthreads, sizeof(*worker));
if (!worker)
@@ -180,7 +185,7 @@ int bench_futex_lock_pi(int argc, const char **argv)
pthread_attr_init(&thread_attr);
gettimeofday(&start, NULL);
- create_threads(worker, thread_attr);
+ create_threads(worker, thread_attr, cpu);
pthread_attr_destroy(&thread_attr);
pthread_mutex_lock(&thread_lock);
diff --git a/tools/perf/bench/futex-requeue.c b/tools/perf/bench/futex-requeue.c
index 1058c194608a..fc692efa0c05 100644
--- a/tools/perf/bench/futex-requeue.c
+++ b/tools/perf/bench/futex-requeue.c
@@ -22,6 +22,7 @@
#include <errno.h>
#include "bench.h"
#include "futex.h"
+#include "cpumap.h"
#include <err.h>
#include <stdlib.h>
@@ -40,7 +41,7 @@ static bool done = false, silent = false, fshared = false;
static pthread_mutex_t thread_lock;
static pthread_cond_t thread_parent, thread_worker;
static struct stats requeuetime_stats, requeued_stats;
-static unsigned int ncpus, threads_starting, nthreads = 0;
+static unsigned int threads_starting, nthreads = 0;
static int futex_flag = 0;
static const struct option options[] = {
@@ -83,19 +84,19 @@ static void *workerfn(void *arg __maybe_unused)
}
static void block_threads(pthread_t *w,
- pthread_attr_t thread_attr)
+ pthread_attr_t thread_attr, struct cpu_map *cpu)
{
- cpu_set_t cpu;
+ cpu_set_t cpuset;
unsigned int i;
threads_starting = nthreads;
/* create and block all threads */
for (i = 0; i < nthreads; i++) {
- CPU_ZERO(&cpu);
- CPU_SET(i % ncpus, &cpu);
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu->map[i % cpu->nr], &cpuset);
- if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu))
+ if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
if (pthread_create(&w[i], &thread_attr, workerfn, NULL))
@@ -116,19 +117,22 @@ int bench_futex_requeue(int argc, const char **argv)
unsigned int i, j;
struct sigaction act;
pthread_attr_t thread_attr;
+ struct cpu_map *cpu;
argc = parse_options(argc, argv, options, bench_futex_requeue_usage, 0);
if (argc)
goto err;
- ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+ cpu = cpu_map__new(NULL);
+ if (!cpu)
+ err(EXIT_FAILURE, "cpu_map__new");
sigfillset(&act.sa_mask);
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
if (!nthreads)
- nthreads = ncpus;
+ nthreads = cpu->nr;
worker = calloc(nthreads, sizeof(*worker));
if (!worker)
@@ -156,7 +160,7 @@ int bench_futex_requeue(int argc, const char **argv)
struct timeval start, end, runtime;
/* create, launch & block all threads */
- block_threads(worker, thread_attr);
+ block_threads(worker, thread_attr, cpu);
/* make sure all threads are already blocked */
pthread_mutex_lock(&thread_lock);
diff --git a/tools/perf/bench/futex-wake-parallel.c b/tools/perf/bench/futex-wake-parallel.c
index b4732dad9f89..69d8fdc87315 100644
--- a/tools/perf/bench/futex-wake-parallel.c
+++ b/tools/perf/bench/futex-wake-parallel.c
@@ -7,7 +7,17 @@
* for each individual thread to service its share of work. Ultimately
* it can be used to measure futex_wake() changes.
*/
+#include "bench.h"
+#include <linux/compiler.h>
+#include "../util/debug.h"
+#ifndef HAVE_PTHREAD_BARRIER
+int bench_futex_wake_parallel(int argc __maybe_unused, const char **argv __maybe_unused)
+{
+ pr_err("%s: pthread_barrier_t unavailable, disabling this test...\n", __func__);
+ return 0;
+}
+#else /* HAVE_PTHREAD_BARRIER */
/* For the CLR_() macros */
#include <string.h>
#include <pthread.h>
@@ -15,12 +25,11 @@
#include <signal.h>
#include "../util/stat.h"
#include <subcmd/parse-options.h>
-#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/time64.h>
#include <errno.h>
-#include "bench.h"
#include "futex.h"
+#include "cpumap.h"
#include <err.h>
#include <stdlib.h>
@@ -42,8 +51,9 @@ static bool done = false, silent = false, fshared = false;
static unsigned int nblocked_threads = 0, nwaking_threads = 0;
static pthread_mutex_t thread_lock;
static pthread_cond_t thread_parent, thread_worker;
+static pthread_barrier_t barrier;
static struct stats waketime_stats, wakeup_stats;
-static unsigned int ncpus, threads_starting;
+static unsigned int threads_starting;
static int futex_flag = 0;
static const struct option options[] = {
@@ -64,6 +74,8 @@ static void *waking_workerfn(void *arg)
struct thread_data *waker = (struct thread_data *) arg;
struct timeval start, end;
+ pthread_barrier_wait(&barrier);
+
gettimeofday(&start, NULL);
waker->nwoken = futex_wake(&futex, nwakes, futex_flag);
@@ -84,6 +96,8 @@ static void wakeup_threads(struct thread_data *td, pthread_attr_t thread_attr)
pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
+ pthread_barrier_init(&barrier, NULL, nwaking_threads + 1);
+
/* create and block all threads */
for (i = 0; i < nwaking_threads; i++) {
/*
@@ -96,9 +110,13 @@ static void wakeup_threads(struct thread_data *td, pthread_attr_t thread_attr)
err(EXIT_FAILURE, "pthread_create");
}
+ pthread_barrier_wait(&barrier);
+
for (i = 0; i < nwaking_threads; i++)
if (pthread_join(td[i].worker, NULL))
err(EXIT_FAILURE, "pthread_join");
+
+ pthread_barrier_destroy(&barrier);
}
static void *blocked_workerfn(void *arg __maybe_unused)
@@ -119,19 +137,20 @@ static void *blocked_workerfn(void *arg __maybe_unused)
return NULL;
}
-static void block_threads(pthread_t *w, pthread_attr_t thread_attr)
+static void block_threads(pthread_t *w, pthread_attr_t thread_attr,
+ struct cpu_map *cpu)
{
- cpu_set_t cpu;
+ cpu_set_t cpuset;
unsigned int i;
threads_starting = nblocked_threads;
/* create and block all threads */
for (i = 0; i < nblocked_threads; i++) {
- CPU_ZERO(&cpu);
- CPU_SET(i % ncpus, &cpu);
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu->map[i % cpu->nr], &cpuset);
- if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu))
+ if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
if (pthread_create(&w[i], &thread_attr, blocked_workerfn, NULL))
@@ -205,6 +224,7 @@ int bench_futex_wake_parallel(int argc, const char **argv)
struct sigaction act;
pthread_attr_t thread_attr;
struct thread_data *waking_worker;
+ struct cpu_map *cpu;
argc = parse_options(argc, argv, options,
bench_futex_wake_parallel_usage, 0);
@@ -217,9 +237,12 @@ int bench_futex_wake_parallel(int argc, const char **argv)
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
- ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+ cpu = cpu_map__new(NULL);
+ if (!cpu)
+ err(EXIT_FAILURE, "calloc");
+
if (!nblocked_threads)
- nblocked_threads = ncpus;
+ nblocked_threads = cpu->nr;
/* some sanity checks */
if (nwaking_threads > nblocked_threads || !nwaking_threads)
@@ -259,7 +282,7 @@ int bench_futex_wake_parallel(int argc, const char **argv)
err(EXIT_FAILURE, "calloc");
/* create, launch & block all threads */
- block_threads(blocked_worker, thread_attr);
+ block_threads(blocked_worker, thread_attr, cpu);
/* make sure all threads are already blocked */
pthread_mutex_lock(&thread_lock);
@@ -297,3 +320,4 @@ int bench_futex_wake_parallel(int argc, const char **argv)
free(blocked_worker);
return ret;
}
+#endif /* HAVE_PTHREAD_BARRIER */
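The barrier added above is sized for nwaking_threads + 1 so the parent and every waker thread rendezvous before any timed futex_wake() call starts; without it, early threads begin their measurement while later ones are still being created. A minimal standalone sketch of that pattern (generic names, not the perf code):

#include <pthread.h>
#include <stdio.h>

#define NWORKERS 4

static pthread_barrier_t barrier;

static void *worker(void *arg)
{
	(void)arg;
	pthread_barrier_wait(&barrier);	/* released together with all peers */
	/* timed work would start here */
	return NULL;
}

int main(void)
{
	pthread_t tid[NWORKERS];
	int i;

	pthread_barrier_init(&barrier, NULL, NWORKERS + 1);
	for (i = 0; i < NWORKERS; i++)
		pthread_create(&tid[i], NULL, worker, NULL);

	pthread_barrier_wait(&barrier);	/* parent joins the rendezvous */
	for (i = 0; i < NWORKERS; i++)
		pthread_join(tid[i], NULL);
	pthread_barrier_destroy(&barrier);
	return 0;
}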
diff --git a/tools/perf/bench/futex-wake.c b/tools/perf/bench/futex-wake.c
index 8c5c0b6b5c97..e8181ad7d088 100644
--- a/tools/perf/bench/futex-wake.c
+++ b/tools/perf/bench/futex-wake.c
@@ -22,6 +22,7 @@
#include <errno.h>
#include "bench.h"
#include "futex.h"
+#include "cpumap.h"
#include <err.h>
#include <stdlib.h>
@@ -89,19 +90,19 @@ static void print_summary(void)
}
static void block_threads(pthread_t *w,
- pthread_attr_t thread_attr)
+ pthread_attr_t thread_attr, struct cpu_map *cpu)
{
- cpu_set_t cpu;
+ cpu_set_t cpuset;
unsigned int i;
threads_starting = nthreads;
/* create and block all threads */
for (i = 0; i < nthreads; i++) {
- CPU_ZERO(&cpu);
- CPU_SET(i % ncpus, &cpu);
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu->map[i % cpu->nr], &cpuset);
- if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu))
+ if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
if (pthread_create(&w[i], &thread_attr, workerfn, NULL))
@@ -122,6 +123,7 @@ int bench_futex_wake(int argc, const char **argv)
unsigned int i, j;
struct sigaction act;
pthread_attr_t thread_attr;
+ struct cpu_map *cpu;
argc = parse_options(argc, argv, options, bench_futex_wake_usage, 0);
if (argc) {
@@ -129,7 +131,9 @@ int bench_futex_wake(int argc, const char **argv)
exit(EXIT_FAILURE);
}
- ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+ cpu = cpu_map__new(NULL);
+ if (!cpu)
+ err(EXIT_FAILURE, "calloc");
sigfillset(&act.sa_mask);
act.sa_sigaction = toggle_done;
@@ -161,7 +165,7 @@ int bench_futex_wake(int argc, const char **argv)
struct timeval start, end, runtime;
/* create, launch & block all threads */
- block_threads(worker, thread_attr);
+ block_threads(worker, thread_attr, cpu);
/* make sure all threads are already blocked */
pthread_mutex_lock(&thread_lock);
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index d95fdcc26f4b..944070e98a2c 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -216,6 +216,47 @@ static const char * const numa_usage[] = {
NULL
};
+/*
+ * Return the number of numa nodes that are present.
+ */
+static int nr_numa_nodes(void)
+{
+ int i, nr_nodes = 0;
+
+ for (i = 0; i < g->p.nr_nodes; i++) {
+ if (numa_bitmask_isbitset(numa_nodes_ptr, i))
+ nr_nodes++;
+ }
+
+ return nr_nodes;
+}
+
+/*
+ * Check whether the given numa node is present.
+ */
+static int is_node_present(int node)
+{
+ return numa_bitmask_isbitset(numa_nodes_ptr, node);
+}
+
+/*
+ * Check whether the given numa node has any CPUs.
+ */
+static bool node_has_cpus(int node)
+{
+ struct bitmask *cpu = numa_allocate_cpumask();
+ unsigned int i;
+
+ if (cpu && !numa_node_to_cpus(node, cpu)) {
+ for (i = 0; i < cpu->size; i++) {
+ if (numa_bitmask_isbitset(cpu, i))
+ return true;
+ }
+ }
+
+ return false; /* let's fall back to "no CPUs" safely */
+}
+
static cpu_set_t bind_to_cpu(int target_cpu)
{
cpu_set_t orig_mask, mask;
@@ -244,12 +285,12 @@ static cpu_set_t bind_to_cpu(int target_cpu)
static cpu_set_t bind_to_node(int target_node)
{
- int cpus_per_node = g->p.nr_cpus/g->p.nr_nodes;
+ int cpus_per_node = g->p.nr_cpus / nr_numa_nodes();
cpu_set_t orig_mask, mask;
int cpu;
int ret;
- BUG_ON(cpus_per_node*g->p.nr_nodes != g->p.nr_cpus);
+ BUG_ON(cpus_per_node * nr_numa_nodes() != g->p.nr_cpus);
BUG_ON(!cpus_per_node);
ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
@@ -649,7 +690,7 @@ static int parse_setup_node_list(void)
int i;
for (i = 0; i < mul; i++) {
- if (t >= g->p.nr_tasks) {
+ if (t >= g->p.nr_tasks || !node_has_cpus(bind_node)) {
printf("\n# NOTE: ignoring bind NODEs starting at NODE#%d\n", bind_node);
goto out;
}
@@ -964,6 +1005,8 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
sum = 0;
for (node = 0; node < g->p.nr_nodes; node++) {
+ if (!is_node_present(node))
+ continue;
nr = nodes[node];
nr_min = min(nr, nr_min);
nr_max = max(nr, nr_max);
@@ -984,8 +1027,11 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
process_groups = 0;
for (node = 0; node < g->p.nr_nodes; node++) {
- int processes = count_node_processes(node);
+ int processes;
+ if (!is_node_present(node))
+ continue;
+ processes = count_node_processes(node);
nr = nodes[node];
tprintf(" %2d/%-2d", nr, processes);
@@ -1291,7 +1337,7 @@ static void print_summary(void)
printf("\n ###\n");
printf(" # %d %s will execute (on %d nodes, %d CPUs):\n",
- g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", g->p.nr_nodes, g->p.nr_cpus);
+ g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", nr_numa_nodes(), g->p.nr_cpus);
printf(" # %5dx %5ldMB global shared mem operations\n",
g->p.nr_loops, g->p.bytes_global/1024/1024);
printf(" # %5dx %5ldMB process shared mem operations\n",
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index 3d354ba6e9c5..41db2cba77eb 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -325,8 +325,8 @@ int cmd_buildid_cache(int argc, const char **argv)
"file", "kcore file to add"),
OPT_STRING('r', "remove", &remove_name_list_str, "file list",
"file(s) to remove"),
- OPT_STRING('p', "purge", &purge_name_list_str, "path list",
- "path(s) to remove (remove old caches too)"),
+ OPT_STRING('p', "purge", &purge_name_list_str, "file list",
+ "file(s) to remove (remove old caches too)"),
OPT_STRING('M', "missing", &missing_filename, "file",
"to find missing build ids in the cache"),
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index 17855c4626a0..539c3d460158 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -27,13 +27,10 @@
#include "sort.h"
#include "tool.h"
#include "data.h"
-#include "sort.h"
#include "event.h"
#include "evlist.h"
#include "evsel.h"
-#include <asm/bug.h>
#include "ui/browsers/hists.h"
-#include "evlist.h"
#include "thread.h"
struct c2c_hists {
@@ -2224,9 +2221,9 @@ static int perf_c2c__browse_cacheline(struct hist_entry *he)
struct hist_browser *browser;
int key = -1;
const char help[] =
- " ENTER Togle callchains (if present) \n"
- " n Togle Node details info \n"
- " s Togle full lenght of symbol and source line columns \n"
+ " ENTER Toggle callchains (if present) \n"
+ " n Toggle Node details info \n"
+ " s Toggle full length of symbol and source line columns \n"
" q Return back to cacheline list \n";
/* Display compact version first. */
@@ -2248,7 +2245,7 @@ static int perf_c2c__browse_cacheline(struct hist_entry *he)
c2c_browser__update_nr_entries(browser);
while (1) {
- key = hist_browser__run(browser, "? - help");
+ key = hist_browser__run(browser, "? - help", true);
switch (key) {
case 's':
@@ -2303,7 +2300,7 @@ static int perf_c2c__hists_browse(struct hists *hists)
int key = -1;
const char help[] =
" d Display cacheline details \n"
- " ENTER Togle callchains (if present) \n"
+ " ENTER Toggle callchains (if present) \n"
" q Quit \n";
browser = perf_c2c_browser__new(hists);
@@ -2317,7 +2314,7 @@ static int perf_c2c__hists_browse(struct hists *hists)
c2c_browser__update_nr_entries(browser);
while (1) {
- key = hist_browser__run(browser, "? - help");
+ key = hist_browser__run(browser, "? - help", true);
switch (key) {
case 'q':
@@ -2393,9 +2390,10 @@ static int setup_callchain(struct perf_evlist *evlist)
enum perf_call_graph_mode mode = CALLCHAIN_NONE;
if ((sample_type & PERF_SAMPLE_REGS_USER) &&
- (sample_type & PERF_SAMPLE_STACK_USER))
+ (sample_type & PERF_SAMPLE_STACK_USER)) {
mode = CALLCHAIN_DWARF;
- else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
+ dwarf_callchain_users = true;
+ } else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
mode = CALLCHAIN_LBR;
else if (sample_type & PERF_SAMPLE_CALLCHAIN)
mode = CALLCHAIN_FP;
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index bd1fedef3d1c..4aca13f23b9d 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -284,7 +284,7 @@ static int perf_help_config(const char *var, const char *value, void *cb)
add_man_viewer(value);
return 0;
}
- if (!strstarts(var, "man."))
+ if (strstarts(var, "man."))
return add_man_viewer_info(var, value);
return 0;
@@ -314,7 +314,7 @@ static const char *cmd_to_page(const char *perf_cmd)
if (!perf_cmd)
return "perf";
- else if (!strstarts(perf_cmd, "perf"))
+ else if (strstarts(perf_cmd, "perf"))
return perf_cmd;
return asprintf(&s, "perf-%s", perf_cmd) < 0 ? NULL : s;
@@ -439,7 +439,7 @@ int cmd_help(int argc, const char **argv)
#ifdef HAVE_LIBELF_SUPPORT
"probe",
#endif
-#ifdef HAVE_LIBAUDIT_SUPPORT
+#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE)
"trace",
#endif
NULL };
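The two condition flips above depend on strstarts() being a positive test, i.e. it returns true when the string begins with the prefix, so the old "!strstarts(...)" checks were inverted. A sketch of the helper's assumed semantics (modeled on the tools/include linux/string.h definition) plus the two cases the fix restores:

#include <stdbool.h>
#include <string.h>

static inline bool strstarts(const char *str, const char *prefix)
{
	return strncmp(str, prefix, strlen(prefix)) == 0;
}

/*
 * With the fix: a config key like "man.viewer" reaches add_man_viewer_info(),
 * and cmd_to_page("perf-stat") returns "perf-stat" unchanged.
 */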
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 16a28547ca86..40fe919bbcf3 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -536,8 +536,7 @@ found:
sample_sw.period = sample->period;
sample_sw.time = sample->time;
perf_event__synthesize_sample(event_sw, evsel->attr.sample_type,
- evsel->attr.read_format, &sample_sw,
- false);
+ evsel->attr.read_format, &sample_sw);
build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine);
return perf_event__repipe(tool, event_sw, &sample_sw, machine);
}
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 0c36f2ac6a0e..55d919dc5bc6 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -26,6 +26,9 @@
#include <sys/timerfd.h>
#endif
#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
#include <linux/kernel.h>
#include <linux/time64.h>
@@ -741,20 +744,20 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
u64 *mmap_time)
{
union perf_event *event;
- struct perf_sample sample;
+ u64 timestamp;
s64 n = 0;
int err;
*mmap_time = ULLONG_MAX;
while ((event = perf_evlist__mmap_read(kvm->evlist, idx)) != NULL) {
- err = perf_evlist__parse_sample(kvm->evlist, event, &sample);
+ err = perf_evlist__parse_sample_timestamp(kvm->evlist, event, &timestamp);
if (err) {
perf_evlist__mmap_consume(kvm->evlist, idx);
pr_err("Failed to parse sample\n");
return -1;
}
- err = perf_session__queue_event(kvm->session, event, &sample, 0);
+ err = perf_session__queue_event(kvm->session, event, timestamp, 0);
/*
* FIXME: Here we can't consume the event, as perf_session__queue_event will
* point to it, and it'll get possibly overwritten by the kernel.
@@ -768,7 +771,7 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
/* save time stamp of our first sample for this mmap */
if (n == 0)
- *mmap_time = sample.time;
+ *mmap_time = timestamp;
/* limit events per mmap handled all at once */
n++;
@@ -1044,7 +1047,7 @@ static int kvm_live_open_events(struct perf_kvm_stat *kvm)
goto out;
}
- if (perf_evlist__mmap(evlist, kvm->opts.mmap_pages, false) < 0) {
+ if (perf_evlist__mmap(evlist, kvm->opts.mmap_pages) < 0) {
ui__error("Failed to mmap the events: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
perf_evlist__close(evlist);
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 3d7f33e19df2..bf4ca749d1ac 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -51,7 +51,6 @@
#include <signal.h>
#include <sys/mman.h>
#include <sys/wait.h>
-#include <asm/bug.h>
#include <linux/time64.h>
struct switch_output {
@@ -79,6 +78,7 @@ struct record {
bool no_buildid_cache_set;
bool buildid_all;
bool timestamp_filename;
+ bool timestamp_boundary;
struct switch_output switch_output;
unsigned long long samples;
};
@@ -301,7 +301,7 @@ static int record__mmap_evlist(struct record *rec,
struct record_opts *opts = &rec->opts;
char msg[512];
- if (perf_evlist__mmap_ex(evlist, opts->mmap_pages, false,
+ if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
opts->auxtrace_mmap_pages,
opts->auxtrace_snapshot_mode) < 0) {
if (errno == EPERM) {
@@ -339,6 +339,22 @@ static int record__open(struct record *rec)
struct perf_evsel_config_term *err_term;
int rc = 0;
+ /*
+ * For initial_delay we need to add a dummy event so that we can track
+ * PERF_RECORD_MMAP while we wait for the initial delay to enable the
+ * real events, the ones asked by the user.
+ */
+ if (opts->initial_delay) {
+ if (perf_evlist__add_dummy(evlist))
+ return -ENOMEM;
+
+ pos = perf_evlist__first(evlist);
+ pos->tracking = 0;
+ pos = perf_evlist__last(evlist);
+ pos->tracking = 1;
+ pos->attr.enable_on_exec = 1;
+ }
+
perf_evlist__config(evlist, opts, &callchain_param);
evlist__for_each_entry(evlist, pos) {
@@ -356,6 +372,8 @@ try_again:
ui__error("%s\n", msg);
goto out;
}
+
+ pos->supported = true;
}
if (perf_evlist__apply_filters(evlist, &pos)) {
@@ -392,8 +410,15 @@ static int process_sample_event(struct perf_tool *tool,
{
struct record *rec = container_of(tool, struct record, tool);
- rec->samples++;
+ if (rec->evlist->first_sample_time == 0)
+ rec->evlist->first_sample_time = sample->time;
+
+ rec->evlist->last_sample_time = sample->time;
+ if (rec->buildid_all)
+ return 0;
+
+ rec->samples++;
return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}
@@ -418,9 +443,11 @@ static int process_buildids(struct record *rec)
/*
* If --buildid-all is given, it marks all DSO regardless of hits,
- * so no need to process samples.
+ * so no need to process samples. But if timestamp_boundary is enabled,
+ * it still needs to walk all samples to get the timestamps of
+ * first/last samples.
*/
- if (rec->buildid_all)
+ if (rec->buildid_all && !rec->timestamp_boundary)
rec->tool.sample = NULL;
return perf_session__process_events(session);
@@ -461,7 +488,7 @@ static struct perf_event_header finished_round_event = {
};
static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
- bool backward)
+ bool overwrite)
{
u64 bytes_written = rec->bytes_written;
int i;
@@ -471,18 +498,18 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
if (!evlist)
return 0;
- maps = backward ? evlist->backward_mmap : evlist->mmap;
+ maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
if (!maps)
return 0;
- if (backward && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
+ if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
return 0;
for (i = 0; i < evlist->nr_mmaps; i++) {
struct auxtrace_mmap *mm = &maps[i].auxtrace_mmap;
if (maps[i].base) {
- if (perf_mmap__push(&maps[i], evlist->overwrite, backward, rec, record__pushfn) != 0) {
+ if (perf_mmap__push(&maps[i], overwrite, rec, record__pushfn) != 0) {
rc = -1;
goto out;
}
@@ -502,7 +529,7 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
if (bytes_written != rec->bytes_written)
rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));
- if (backward)
+ if (overwrite)
perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
return rc;
@@ -674,8 +701,8 @@ perf_evlist__pick_pc(struct perf_evlist *evlist)
if (evlist) {
if (evlist->mmap && evlist->mmap[0].base)
return evlist->mmap[0].base;
- if (evlist->backward_mmap && evlist->backward_mmap[0].base)
- return evlist->backward_mmap[0].base;
+ if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].base)
+ return evlist->overwrite_mmap[0].base;
}
return NULL;
}
@@ -749,23 +776,47 @@ static int record__synthesize(struct record *rec, bool tail)
goto out;
}
- err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
- machine);
- WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
- "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
- "Check /proc/kallsyms permission or run as root.\n");
-
- err = perf_event__synthesize_modules(tool, process_synthesized_event,
- machine);
- WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
- "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
- "Check /proc/modules permission or run as root.\n");
+ if (!perf_evlist__exclude_kernel(rec->evlist)) {
+ err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
+ machine);
+ WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
+ "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
+ "Check /proc/kallsyms permission or run as root.\n");
+
+ err = perf_event__synthesize_modules(tool, process_synthesized_event,
+ machine);
+ WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
+ "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
+ "Check /proc/modules permission or run as root.\n");
+ }
if (perf_guest) {
machines__process_guests(&session->machines,
perf_event__synthesize_guest_os, tool);
}
+ err = perf_event__synthesize_extra_attr(&rec->tool,
+ rec->evlist,
+ process_synthesized_event,
+ data->is_pipe);
+ if (err)
+ goto out;
+
+ err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->threads,
+ process_synthesized_event,
+ NULL);
+ if (err < 0) {
+ pr_err("Couldn't synthesize thread map.\n");
+ return err;
+ }
+
+ err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->cpus,
+ process_synthesized_event, NULL);
+ if (err < 0) {
+ pr_err("Couldn't synthesize cpu map.\n");
+ return err;
+ }
+
err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
process_synthesized_event, opts->sample_address,
opts->proc_map_timeout, 1);
@@ -1515,7 +1566,8 @@ static struct option __record_options[] = {
OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
&record.opts.sample_time_set,
"Record the sample timestamps"),
- OPT_BOOLEAN('P', "period", &record.opts.period, "Record the sample period"),
+ OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
+ "Record the sample period"),
OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
"don't sample"),
OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
@@ -1580,6 +1632,8 @@ static struct option __record_options[] = {
"Record build-id of all DSOs regardless of hits"),
OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
"append timestamp to output filename"),
+ OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
+ "Record timestamp boundary (time of first/last samples)"),
OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
&record.switch_output.set, "signal,size,time",
"Switch output when receive SIGUSR2 or cross size,time threshold",
@@ -1693,7 +1747,7 @@ int cmd_record(int argc, const char **argv)
err = -ENOMEM;
- if (symbol_conf.kptr_restrict)
+ if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
@@ -1763,8 +1817,8 @@ int cmd_record(int argc, const char **argv)
goto out;
}
- /* Enable ignoring missing threads when -u option is defined. */
- rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX;
+ /* Enable ignoring missing threads when -u/-p option is defined. */
+ rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
err = -ENOMEM;
if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 1394cd8d96f7..4ad5dc649716 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -15,6 +15,7 @@
#include "util/color.h"
#include <linux/list.h>
#include <linux/rbtree.h>
+#include <linux/err.h>
#include "util/symbol.h"
#include "util/callchain.h"
#include "util/values.h"
@@ -51,6 +52,7 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
+#include <linux/mman.h>
struct report {
struct perf_tool tool;
@@ -60,6 +62,9 @@ struct report {
bool show_threads;
bool inverted_callchain;
bool mem_mode;
+ bool stats_mode;
+ bool tasks_mode;
+ bool mmaps_mode;
bool header;
bool header_only;
bool nonany_branch_mode;
@@ -69,7 +74,9 @@ struct report {
const char *cpu_list;
const char *symbol_filter_str;
const char *time_str;
- struct perf_time_interval ptime;
+ struct perf_time_interval *ptime_range;
+ int range_size;
+ int range_num;
float min_percent;
u64 nr_entries;
u64 queue_size;
@@ -162,12 +169,28 @@ static int hist_iter__branch_callback(struct hist_entry_iter *iter,
struct hist_entry *he = iter->he;
struct report *rep = arg;
struct branch_info *bi;
+ struct perf_sample *sample = iter->sample;
+ struct perf_evsel *evsel = iter->evsel;
+ int err;
+
+ if (!ui__has_annotation())
+ return 0;
+
+ hist__account_cycles(sample->branch_stack, al, sample,
+ rep->nonany_branch_mode);
bi = he->branch_info;
+ err = addr_map_symbol__inc_samples(&bi->from, sample, evsel->idx);
+ if (err)
+ goto out;
+
+ err = addr_map_symbol__inc_samples(&bi->to, sample, evsel->idx);
+
branch_type_count(&rep->brtype_stat, &bi->flags,
bi->from.addr, bi->to.addr);
- return 0;
+out:
+ return err;
}
static int process_sample_event(struct perf_tool *tool,
@@ -186,8 +209,10 @@ static int process_sample_event(struct perf_tool *tool,
};
int ret = 0;
- if (perf_time__skip_sample(&rep->ptime, sample->time))
+ if (perf_time__ranges_skip_sample(rep->ptime_range, rep->range_num,
+ sample->time)) {
return 0;
+ }
if (machine__resolve(machine, &al, sample) < 0) {
pr_debug("problem processing %d event, skipping it.\n",
@@ -312,9 +337,10 @@ static int report__setup_sample_type(struct report *rep)
if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) {
if ((sample_type & PERF_SAMPLE_REGS_USER) &&
- (sample_type & PERF_SAMPLE_STACK_USER))
+ (sample_type & PERF_SAMPLE_STACK_USER)) {
callchain_param.record_mode = CALLCHAIN_DWARF;
- else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
+ dwarf_callchain_users = true;
+ } else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
callchain_param.record_mode = CALLCHAIN_LBR;
else
callchain_param.record_mode = CALLCHAIN_FP;
@@ -377,6 +403,9 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report
if (evname != NULL)
ret += fprintf(fp, " of event '%s'", evname);
+ if (rep->time_str)
+ ret += fprintf(fp, " (time slices: %s)", rep->time_str);
+
if (symbol_conf.show_ref_callgraph &&
strstr(evname, "call-graph=no")) {
ret += fprintf(fp, ", show reference callgraph");
@@ -441,6 +470,9 @@ static void report__warn_kptr_restrict(const struct report *rep)
struct map *kernel_map = machine__kernel_map(&rep->session->machines.host);
struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL;
+ if (perf_evlist__exclude_kernel(rep->session->evlist))
+ return;
+
if (kernel_map == NULL ||
(kernel_map->dso->hit &&
(kernel_kmap->ref_reloc_sym == NULL ||
@@ -498,7 +530,8 @@ static int report__browse_hists(struct report *rep)
case 1:
ret = perf_evlist__tui_browse_hists(evlist, help, NULL,
rep->min_percent,
- &session->header.env);
+ &session->header.env,
+ true);
/*
* Usually "ret" is the last pressed key, and we only
* care if the key notifies us to switch data file.
@@ -564,6 +597,174 @@ static void report__output_resort(struct report *rep)
ui_progress__finish();
}
+static void stats_setup(struct report *rep)
+{
+ memset(&rep->tool, 0, sizeof(rep->tool));
+ rep->tool.no_warn = true;
+}
+
+static int stats_print(struct report *rep)
+{
+ struct perf_session *session = rep->session;
+
+ perf_session__fprintf_nr_events(session, stdout);
+ return 0;
+}
+
+static void tasks_setup(struct report *rep)
+{
+ memset(&rep->tool, 0, sizeof(rep->tool));
+ if (rep->mmaps_mode) {
+ rep->tool.mmap = perf_event__process_mmap;
+ rep->tool.mmap2 = perf_event__process_mmap2;
+ }
+ rep->tool.comm = perf_event__process_comm;
+ rep->tool.exit = perf_event__process_exit;
+ rep->tool.fork = perf_event__process_fork;
+ rep->tool.no_warn = true;
+}
+
+struct task {
+ struct thread *thread;
+ struct list_head list;
+ struct list_head children;
+};
+
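+/*
+ * Link @task into its parent's children list, walking up the parent
+ * chain. Returns the root task that still has to be added to the
+ * top-level list, NULL if this chain was already processed, or an
+ * ERR_PTR() when the parent thread cannot be found.
+ */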
+static struct task *tasks_list(struct task *task, struct machine *machine)
+{
+ struct thread *parent_thread, *thread = task->thread;
+ struct task *parent_task;
+
+ /* Already listed. */
+ if (!list_empty(&task->list))
+ return NULL;
+
+ /* Last one in the chain. */
+ if (thread->ppid == -1)
+ return task;
+
+ parent_thread = machine__find_thread(machine, -1, thread->ppid);
+ if (!parent_thread)
+ return ERR_PTR(-ENOENT);
+
+ parent_task = thread__priv(parent_thread);
+ list_add_tail(&task->list, &parent_task->children);
+ return tasks_list(parent_task, machine);
+}
+
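+/* Print the task's maps, /proc/<pid>/maps style, indented under its comm. */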
+static size_t maps__fprintf_task(struct maps *maps, int indent, FILE *fp)
+{
+ size_t printed = 0;
+ struct rb_node *nd;
+
+ for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
+ struct map *map = rb_entry(nd, struct map, rb_node);
+
+ printed += fprintf(fp, "%*s %" PRIx64 "-%" PRIx64 " %c%c%c%c %08" PRIx64 " %" PRIu64 " %s\n",
+ indent, "", map->start, map->end,
+ map->prot & PROT_READ ? 'r' : '-',
+ map->prot & PROT_WRITE ? 'w' : '-',
+ map->prot & PROT_EXEC ? 'x' : '-',
+ map->flags & MAP_SHARED ? 's' : 'p',
+ map->pgoff,
+ map->ino, map->dso->name);
+ }
+
+ return printed;
+}
+
+static int map_groups__fprintf_task(struct map_groups *mg, int indent, FILE *fp)
+{
+ int printed = 0, i;
+ for (i = 0; i < MAP__NR_TYPES; ++i)
+ printed += maps__fprintf_task(&mg->maps[i], indent, fp);
+ return printed;
+}
+
+static void task__print_level(struct task *task, FILE *fp, int level)
+{
+ struct thread *thread = task->thread;
+ struct task *child;
+ int comm_indent = fprintf(fp, " %8d %8d %8d |%*s",
+ thread->pid_, thread->tid, thread->ppid,
+ level, "");
+
+ fprintf(fp, "%s\n", thread__comm_str(thread));
+
+ map_groups__fprintf_task(thread->mg, comm_indent, fp);
+
+ if (!list_empty(&task->children)) {
+ list_for_each_entry(child, &task->children, list)
+ task__print_level(child, fp, level + 1);
+ }
+}
+
+static int tasks_print(struct report *rep, FILE *fp)
+{
+ struct perf_session *session = rep->session;
+ struct machine *machine = &session->machines.host;
+ struct task *tasks, *task;
+ unsigned int nr = 0, itask = 0, i;
+ struct rb_node *nd;
+ LIST_HEAD(list);
+
+ /*
+ * No locking needed while accessing machine->threads,
+ * because --tasks is a single-threaded command.
+ */
+
+ /* Count all the threads. */
+ for (i = 0; i < THREADS__TABLE_SIZE; i++)
+ nr += machine->threads[i].nr;
+
+ tasks = malloc(sizeof(*tasks) * nr);
+ if (!tasks)
+ return -ENOMEM;
+
+ for (i = 0; i < THREADS__TABLE_SIZE; i++) {
+ struct threads *threads = &machine->threads[i];
+
+ for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
+ task = tasks + itask++;
+
+ task->thread = rb_entry(nd, struct thread, rb_node);
+ INIT_LIST_HEAD(&task->children);
+ INIT_LIST_HEAD(&task->list);
+ thread__set_priv(task->thread, task);
+ }
+ }
+
+ /*
+ * Iterate over every task, walking up to its unprocessed
+ * parent and linking each into its parent's children list.
+ * Tasks with no parent are added to 'list'.
+ */
+ for (itask = 0; itask < nr; itask++) {
+ task = tasks + itask;
+
+ if (!list_empty(&task->list))
+ continue;
+
+ task = tasks_list(task, machine);
+ if (IS_ERR(task)) {
+ pr_err("Error: failed to process tasks\n");
+ free(tasks);
+ return PTR_ERR(task);
+ }
+
+ if (task)
+ list_add_tail(&task->list, &list);
+ }
+
+ fprintf(fp, "# %8s %8s %8s %s\n", "pid", "tid", "ppid", "comm");
+
+ list_for_each_entry(task, &list, list)
+ task__print_level(task, fp, 0);
+
+ free(tasks);
+ return 0;
+}
+
static int __cmd_report(struct report *rep)
{
int ret;
@@ -595,12 +796,24 @@ static int __cmd_report(struct report *rep)
return ret;
}
+ if (rep->stats_mode)
+ stats_setup(rep);
+
+ if (rep->tasks_mode)
+ tasks_setup(rep);
+
ret = perf_session__process_events(session);
if (ret) {
ui__error("failed to process sample\n");
return ret;
}
+ if (rep->stats_mode)
+ return stats_print(rep);
+
+ if (rep->tasks_mode)
+ return tasks_print(rep, stdout);
+
report__warn_kptr_restrict(rep);
evlist__for_each_entry(session->evlist, pos)
@@ -757,6 +970,9 @@ int cmd_report(int argc, const char **argv)
OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any message"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
+ OPT_BOOLEAN(0, "stats", &report.stats_mode, "Display event stats"),
+ OPT_BOOLEAN(0, "tasks", &report.tasks_mode, "Display recorded tasks"),
+ OPT_BOOLEAN(0, "mmaps", &report.mmaps_mode, "Display recorded tasks memory maps"),
OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
"file", "vmlinux pathname"),
OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
@@ -904,6 +1120,9 @@ int cmd_report(int argc, const char **argv)
report.symbol_filter_str = argv[0];
}
+ if (report.mmaps_mode)
+ report.tasks_mode = true;
+
if (quiet)
perf_quiet_option();
@@ -918,13 +1137,6 @@ int cmd_report(int argc, const char **argv)
return -EINVAL;
}
- if (report.use_stdio)
- use_browser = 0;
- else if (report.use_tui)
- use_browser = 1;
- else if (report.use_gtk)
- use_browser = 2;
-
if (report.inverted_callchain)
callchain_param.order = ORDER_CALLER;
if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
@@ -1011,6 +1223,13 @@ repeat:
perf_hpp_list.need_collapse = true;
}
+ if (report.use_stdio)
+ use_browser = 0;
+ else if (report.use_tui)
+ use_browser = 1;
+ else if (report.use_gtk)
+ use_browser = 2;
+
/* Force tty output for header output and per-thread stat. */
if (report.header || report.header_only || report.show_threads)
use_browser = 0;
@@ -1018,6 +1237,12 @@ repeat:
report.tool.show_feat_hdr = SHOW_FEAT_HEADER;
if (report.show_full_info)
report.tool.show_feat_hdr = SHOW_FEAT_HEADER_FULL_INFO;
+ if (report.stats_mode || report.tasks_mode)
+ use_browser = 0;
+ if (report.stats_mode && report.tasks_mode) {
+ pr_err("Error: --tasks and --mmaps can't be used together with --stats\n");
+ goto error;
+ }
if (strcmp(input_name, "-") != 0)
setup_browser(true);
@@ -1040,7 +1265,8 @@ repeat:
ret = 0;
goto error;
}
- } else if (use_browser == 0 && !quiet) {
+ } else if (use_browser == 0 && !quiet &&
+ !report.stats_mode && !report.tasks_mode) {
fputs("# To display the perf.data header info, please use --header/--header-only options.\n#\n",
stdout);
}
@@ -1074,9 +1300,36 @@ repeat:
if (symbol__init(&session->header.env) < 0)
goto error;
- if (perf_time__parse_str(&report.ptime, report.time_str) != 0) {
- pr_err("Invalid time string\n");
- return -EINVAL;
+ report.ptime_range = perf_time__range_alloc(report.time_str,
+ &report.range_size);
+ if (!report.ptime_range) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ if (perf_time__parse_str(report.ptime_range, report.time_str) != 0) {
+ if (session->evlist->first_sample_time == 0 &&
+ session->evlist->last_sample_time == 0) {
+ pr_err("HINT: no first/last sample time found in perf data.\n"
+ "Please use latest perf binary to execute 'perf record'\n"
+ "(if '--buildid-all' is enabled, please set '--timestamp-boundary').\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ report.range_num = perf_time__percent_parse_str(
+ report.ptime_range, report.range_size,
+ report.time_str,
+ session->evlist->first_sample_time,
+ session->evlist->last_sample_time);
+
+ if (report.range_num < 0) {
+ pr_err("Invalid time string\n");
+ ret = -EINVAL;
+ goto error;
+ }
+ } else {
+ report.range_num = 1;
}
sort__setup_elide(stdout);
@@ -1089,6 +1342,8 @@ repeat:
ret = 0;
error:
+ zfree(&report.ptime_range);
+
perf_session__delete(session);
return ret;
}
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 68f36dc0344f..ab19a6ee4093 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -22,9 +22,11 @@
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/stat.h"
+#include "util/color.h"
#include "util/string2.h"
#include "util/thread-stack.h"
#include "util/time-utils.h"
+#include "util/path.h"
#include "print_binary.h"
#include <linux/bitmap.h>
#include <linux/kernel.h>
@@ -40,6 +42,7 @@
#include <sys/param.h>
#include <sys/types.h>
#include <sys/stat.h>
+#include <fcntl.h>
#include <unistd.h>
#include "sane_ctype.h"
@@ -90,6 +93,8 @@ enum perf_output_field {
PERF_OUTPUT_SYNTH = 1U << 25,
PERF_OUTPUT_PHYS_ADDR = 1U << 26,
PERF_OUTPUT_UREGS = 1U << 27,
+ PERF_OUTPUT_METRIC = 1U << 28,
+ PERF_OUTPUT_MISC = 1U << 29,
};
struct output_option {
@@ -124,6 +129,8 @@ struct output_option {
{.str = "brstackoff", .field = PERF_OUTPUT_BRSTACKOFF},
{.str = "synth", .field = PERF_OUTPUT_SYNTH},
{.str = "phys_addr", .field = PERF_OUTPUT_PHYS_ADDR},
+ {.str = "metric", .field = PERF_OUTPUT_METRIC},
+ {.str = "misc", .field = PERF_OUTPUT_MISC},
};
enum {
@@ -215,12 +222,20 @@ struct perf_evsel_script {
char *filename;
FILE *fp;
u64 samples;
+ /* For metric output */
+ u64 val;
+ int gnum;
};
+static inline struct perf_evsel_script *evsel_script(struct perf_evsel *evsel)
+{
+ return (struct perf_evsel_script *)evsel->priv;
+}
+
static struct perf_evsel_script *perf_evsel_script__new(struct perf_evsel *evsel,
struct perf_data *data)
{
- struct perf_evsel_script *es = malloc(sizeof(*es));
+ struct perf_evsel_script *es = zalloc(sizeof(*es));
if (es != NULL) {
if (asprintf(&es->filename, "%s.%s.dump", data->file.path, perf_evsel__name(evsel)) < 0)
@@ -228,7 +243,6 @@ static struct perf_evsel_script *perf_evsel_script__new(struct perf_evsel *evsel
es->fp = fopen(es->filename, "w");
if (es->fp == NULL)
goto out_free_filename;
- es->samples = 0;
}
return es;
@@ -423,11 +437,6 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
PERF_OUTPUT_CPU, allow_user_set))
return -EINVAL;
- if (PRINT_FIELD(PERIOD) &&
- perf_evsel__check_stype(evsel, PERF_SAMPLE_PERIOD, "PERIOD",
- PERF_OUTPUT_PERIOD))
- return -EINVAL;
-
if (PRINT_FIELD(IREGS) &&
perf_evsel__check_stype(evsel, PERF_SAMPLE_REGS_INTR, "IREGS",
PERF_OUTPUT_IREGS))
@@ -588,7 +597,8 @@ static int perf_sample__fprintf_uregs(struct perf_sample *sample,
static int perf_sample__fprintf_start(struct perf_sample *sample,
struct thread *thread,
- struct perf_evsel *evsel, FILE *fp)
+ struct perf_evsel *evsel,
+ u32 type, FILE *fp)
{
struct perf_event_attr *attr = &evsel->attr;
unsigned long secs;
@@ -618,6 +628,47 @@ static int perf_sample__fprintf_start(struct perf_sample *sample,
printed += fprintf(fp, "[%03d] ", sample->cpu);
}
+ if (PRINT_FIELD(MISC)) {
+ int ret = 0;
+
+ #define has(m) \
+ (sample->misc & PERF_RECORD_MISC_##m) == PERF_RECORD_MISC_##m
+
+ if (has(KERNEL))
+ ret += fprintf(fp, "K");
+ if (has(USER))
+ ret += fprintf(fp, "U");
+ if (has(HYPERVISOR))
+ ret += fprintf(fp, "H");
+ if (has(GUEST_KERNEL))
+ ret += fprintf(fp, "G");
+ if (has(GUEST_USER))
+ ret += fprintf(fp, "g");
+
+ switch (type) {
+ case PERF_RECORD_MMAP:
+ case PERF_RECORD_MMAP2:
+ if (has(MMAP_DATA))
+ ret += fprintf(fp, "M");
+ break;
+ case PERF_RECORD_COMM:
+ if (has(COMM_EXEC))
+ ret += fprintf(fp, "E");
+ break;
+ case PERF_RECORD_SWITCH:
+ case PERF_RECORD_SWITCH_CPU_WIDE:
+ if (has(SWITCH_OUT))
+ ret += fprintf(fp, "S");
+ default:
+ break;
+ }
+
+ #undef has
+
+ ret += fprintf(fp, "%*s", 6 - ret, " ");
+ printed += ret;
+ }
+
if (PRINT_FIELD(TIME)) {
nsecs = sample->time;
secs = nsecs / NSEC_PER_SEC;
@@ -1437,13 +1488,16 @@ struct perf_script {
bool show_mmap_events;
bool show_switch_events;
bool show_namespace_events;
+ bool show_lost_events;
bool allocated;
bool per_event_dump;
struct cpu_map *cpus;
struct thread_map *threads;
int name_width;
const char *time_str;
- struct perf_time_interval ptime;
+ struct perf_time_interval *ptime_range;
+ int range_size;
+ int range_num;
};
static int perf_evlist__max_name_len(struct perf_evlist *evlist)
@@ -1477,6 +1531,88 @@ static int data_src__fprintf(u64 data_src, FILE *fp)
return fprintf(fp, "%-*s", maxlen, out);
}
+struct metric_ctx {
+ struct perf_sample *sample;
+ struct thread *thread;
+ struct perf_evsel *evsel;
+ FILE *fp;
+};
+
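+/*
+ * print_metric callback: emit one metric value line, prefixed with the
+ * standard sample start fields via perf_sample__fprintf_start().
+ */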
+static void script_print_metric(void *ctx, const char *color,
+ const char *fmt,
+ const char *unit, double val)
+{
+ struct metric_ctx *mctx = ctx;
+
+ if (!fmt)
+ return;
+ perf_sample__fprintf_start(mctx->sample, mctx->thread, mctx->evsel,
+ PERF_RECORD_SAMPLE, mctx->fp);
+ fputs("\tmetric: ", mctx->fp);
+ if (color)
+ color_fprintf(mctx->fp, color, fmt, val);
+ else
+ printf(fmt, val);
+ fprintf(mctx->fp, " %s\n", unit);
+}
+
+static void script_new_line(void *ctx)
+{
+ struct metric_ctx *mctx = ctx;
+
+ perf_sample__fprintf_start(mctx->sample, mctx->thread, mctx->evsel,
+ PERF_RECORD_SAMPLE, mctx->fp);
+ fputs("\tmetric: ", mctx->fp);
+}
+
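+/*
+ * Feed each group member's sample period into the shadow-stat machinery
+ * and, once the whole event group has been accounted, print the derived
+ * metrics for every member via perf_stat__print_shadow_stats().
+ */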
+static void perf_sample__fprint_metric(struct perf_script *script,
+ struct thread *thread,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ FILE *fp)
+{
+ struct perf_stat_output_ctx ctx = {
+ .print_metric = script_print_metric,
+ .new_line = script_new_line,
+ .ctx = &(struct metric_ctx) {
+ .sample = sample,
+ .thread = thread,
+ .evsel = evsel,
+ .fp = fp,
+ },
+ .force_header = false,
+ };
+ struct perf_evsel *ev2;
+ static bool init;
+ u64 val;
+
+ if (!init) {
+ perf_stat__init_shadow_stats();
+ init = true;
+ }
+ if (!evsel->stats)
+ perf_evlist__alloc_stats(script->session->evlist, false);
+ if (evsel_script(evsel->leader)->gnum++ == 0)
+ perf_stat__reset_shadow_stats();
+ val = sample->period * evsel->scale;
+ perf_stat__update_shadow_stats(evsel,
+ val,
+ sample->cpu,
+ &rt_stat);
+ evsel_script(evsel)->val = val;
+ if (evsel_script(evsel->leader)->gnum == evsel->leader->nr_members) {
+ for_each_group_member (ev2, evsel->leader) {
+ perf_stat__print_shadow_stats(ev2,
+ evsel_script(ev2)->val,
+ sample->cpu,
+ &ctx,
+ NULL,
+ &rt_stat);
+ }
+ evsel_script(evsel->leader)->gnum = 0;
+ }
+}
+
static void process_event(struct perf_script *script,
struct perf_sample *sample, struct perf_evsel *evsel,
struct addr_location *al,
@@ -1493,7 +1629,8 @@ static void process_event(struct perf_script *script,
++es->samples;
- perf_sample__fprintf_start(sample, thread, evsel, fp);
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_SAMPLE, fp);
if (PRINT_FIELD(PERIOD))
fprintf(fp, "%10" PRIu64 " ", sample->period);
@@ -1564,6 +1701,9 @@ static void process_event(struct perf_script *script,
if (PRINT_FIELD(PHYS_ADDR))
fprintf(fp, "%16" PRIx64, sample->phys_addr);
fprintf(fp, "\n");
+
+ if (PRINT_FIELD(METRIC))
+ perf_sample__fprint_metric(script, thread, evsel, sample, fp);
}
static struct scripting_ops *scripting_ops;
@@ -1643,8 +1783,10 @@ static int process_sample_event(struct perf_tool *tool,
struct perf_script *scr = container_of(tool, struct perf_script, tool);
struct addr_location al;
- if (perf_time__skip_sample(&scr->ptime, sample->time))
+ if (perf_time__ranges_skip_sample(scr->ptime_range, scr->range_num,
+ sample->time)) {
return 0;
+ }
if (debug_mode) {
if (sample->time < last_timestamp) {
@@ -1737,7 +1879,8 @@ static int process_comm_event(struct perf_tool *tool,
sample->tid = event->comm.tid;
sample->pid = event->comm.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel, stdout);
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_COMM, stdout);
perf_event__fprintf(event, stdout);
ret = 0;
out:
@@ -1772,7 +1915,8 @@ static int process_namespaces_event(struct perf_tool *tool,
sample->tid = event->namespaces.tid;
sample->pid = event->namespaces.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel, stdout);
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_NAMESPACES, stdout);
perf_event__fprintf(event, stdout);
ret = 0;
out:
@@ -1805,7 +1949,8 @@ static int process_fork_event(struct perf_tool *tool,
sample->tid = event->fork.tid;
sample->pid = event->fork.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel, stdout);
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_FORK, stdout);
perf_event__fprintf(event, stdout);
thread__put(thread);
@@ -1834,7 +1979,8 @@ static int process_exit_event(struct perf_tool *tool,
sample->tid = event->fork.tid;
sample->pid = event->fork.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel, stdout);
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_EXIT, stdout);
perf_event__fprintf(event, stdout);
if (perf_event__process_exit(tool, event, sample, machine) < 0)
@@ -1869,7 +2015,8 @@ static int process_mmap_event(struct perf_tool *tool,
sample->tid = event->mmap.tid;
sample->pid = event->mmap.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel, stdout);
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_MMAP, stdout);
perf_event__fprintf(event, stdout);
thread__put(thread);
return 0;
@@ -1900,7 +2047,8 @@ static int process_mmap2_event(struct perf_tool *tool,
sample->tid = event->mmap2.tid;
sample->pid = event->mmap2.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel, stdout);
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_MMAP2, stdout);
perf_event__fprintf(event, stdout);
thread__put(thread);
return 0;
@@ -1926,7 +2074,31 @@ static int process_switch_event(struct perf_tool *tool,
return -1;
}
- perf_sample__fprintf_start(sample, thread, evsel, stdout);
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_SWITCH, stdout);
+ perf_event__fprintf(event, stdout);
+ thread__put(thread);
+ return 0;
+}
+
+static int
+process_lost_event(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct perf_script *script = container_of(tool, struct perf_script, tool);
+ struct perf_session *session = script->session;
+ struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
+ struct thread *thread;
+
+ thread = machine__findnew_thread(machine, sample->pid,
+ sample->tid);
+ if (thread == NULL)
+ return -1;
+
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_LOST, stdout);
perf_event__fprintf(event, stdout);
thread__put(thread);
return 0;
@@ -1955,6 +2127,16 @@ static int perf_script__fopen_per_event_dump(struct perf_script *script)
struct perf_evsel *evsel;
evlist__for_each_entry(script->session->evlist, evsel) {
+ /*
+ * Already set up? I.e. we may be called twice in cases like
+ * Intel PT, once for the intel_pt// and dummy events, then
+ * for the evsels synthesized from the auxtrace info.
+ *
+ * See perf_script__process_auxtrace_info().
+ */
+ if (evsel->priv != NULL)
+ continue;
+
evsel->priv = perf_evsel_script__new(evsel, script->session->data);
if (evsel->priv == NULL)
goto out_err_fclose;
@@ -2016,6 +2198,8 @@ static int __cmd_script(struct perf_script *script)
script->tool.context_switch = process_switch_event;
if (script->show_namespace_events)
script->tool.namespaces = process_namespaces_event;
+ if (script->show_lost_events)
+ script->tool.lost = process_lost_event;
if (perf_script__setup_per_event_dump(script)) {
pr_err("Couldn't create the per event dump files\n");
@@ -2301,19 +2485,6 @@ out:
return rc;
}
-/* Helper function for filesystems that return a dent->d_type DT_UNKNOWN */
-static int is_directory(const char *base_path, const struct dirent *dent)
-{
- char path[PATH_MAX];
- struct stat st;
-
- sprintf(path, "%s/%s", base_path, dent->d_name);
- if (stat(path, &st))
- return 0;
-
- return S_ISDIR(st.st_mode);
-}
-
#define for_each_lang(scripts_path, scripts_dir, lang_dirent) \
while ((lang_dirent = readdir(scripts_dir)) != NULL) \
if ((lang_dirent->d_type == DT_DIR || \
@@ -2748,9 +2919,10 @@ static void script__setup_sample_type(struct perf_script *script)
if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) {
if ((sample_type & PERF_SAMPLE_REGS_USER) &&
- (sample_type & PERF_SAMPLE_STACK_USER))
+ (sample_type & PERF_SAMPLE_STACK_USER)) {
callchain_param.record_mode = CALLCHAIN_DWARF;
- else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
+ dwarf_callchain_users = true;
+ } else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
callchain_param.record_mode = CALLCHAIN_LBR;
else
callchain_param.record_mode = CALLCHAIN_FP;
@@ -2838,6 +3010,25 @@ int process_cpu_map_event(struct perf_tool *tool __maybe_unused,
return set_maps(script);
}
+#ifdef HAVE_AUXTRACE_SUPPORT
+static int perf_script__process_auxtrace_info(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_session *session)
+{
+ int ret = perf_event__process_auxtrace_info(tool, event, session);
+
+ if (ret == 0) {
+ struct perf_script *script = container_of(tool, struct perf_script, tool);
+
+ ret = perf_script__setup_per_event_dump(script);
+ }
+
+ return ret;
+}
+#else
+#define perf_script__process_auxtrace_info 0
+#endif
+
int cmd_script(int argc, const char **argv)
{
bool show_full_info = false;
@@ -2866,7 +3057,7 @@ int cmd_script(int argc, const char **argv)
.feature = perf_event__process_feature,
.build_id = perf_event__process_build_id,
.id_index = perf_event__process_id_index,
- .auxtrace_info = perf_event__process_auxtrace_info,
+ .auxtrace_info = perf_script__process_auxtrace_info,
.auxtrace = perf_event__process_auxtrace,
.auxtrace_error = perf_event__process_auxtrace_error,
.stat = perf_event__process_stat_event,
@@ -2946,6 +3137,8 @@ int cmd_script(int argc, const char **argv)
"Show context switch events (if recorded)"),
OPT_BOOLEAN('\0', "show-namespace-events", &script.show_namespace_events,
"Show namespace events (if recorded)"),
+ OPT_BOOLEAN('\0', "show-lost-events", &script.show_lost_events,
+ "Show lost events (if recorded)"),
OPT_BOOLEAN('\0', "per-event-dump", &script.per_event_dump,
"Dump trace output to files named by the monitored events"),
OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
@@ -3252,18 +3445,46 @@ int cmd_script(int argc, const char **argv)
if (err < 0)
goto out_delete;
- /* needs to be parsed after looking up reference time */
- if (perf_time__parse_str(&script.ptime, script.time_str) != 0) {
- pr_err("Invalid time string\n");
- err = -EINVAL;
+ script.ptime_range = perf_time__range_alloc(script.time_str,
+ &script.range_size);
+ if (!script.ptime_range) {
+ err = -ENOMEM;
goto out_delete;
}
+ /* needs to be parsed after looking up reference time */
+ if (perf_time__parse_str(script.ptime_range, script.time_str) != 0) {
+ if (session->evlist->first_sample_time == 0 &&
+ session->evlist->last_sample_time == 0) {
+ pr_err("HINT: no first/last sample time found in perf data.\n"
+ "Please use latest perf binary to execute 'perf record'\n"
+ "(if '--buildid-all' is enabled, please set '--timestamp-boundary').\n");
+ err = -EINVAL;
+ goto out_delete;
+ }
+
+ script.range_num = perf_time__percent_parse_str(
+ script.ptime_range, script.range_size,
+ script.time_str,
+ session->evlist->first_sample_time,
+ session->evlist->last_sample_time);
+
+ if (script.range_num < 0) {
+ pr_err("Invalid time string\n");
+ err = -EINVAL;
+ goto out_delete;
+ }
+ } else {
+ script.range_num = 1;
+ }
+
err = __cmd_script(&script);
flush_scripting();
out_delete:
+ zfree(&script.ptime_range);
+
perf_evlist__free_stats(session->evlist);
perf_session__delete(session);
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 59af5a8419e2..98bf9d32f222 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -63,7 +63,6 @@
#include "util/group.h"
#include "util/session.h"
#include "util/tool.h"
-#include "util/group.h"
#include "util/string2.h"
#include "util/metricgroup.h"
#include "asm/bug.h"
@@ -214,8 +213,13 @@ static inline void diff_timespec(struct timespec *r, struct timespec *a,
static void perf_stat__reset_stats(void)
{
+ int i;
+
perf_evlist__reset_stats(evsel_list);
perf_stat__reset_shadow_stats();
+
+ for (i = 0; i < stat_config.stats_num; i++)
+ perf_stat__reset_shadow_per_stat(&stat_config.stats[i]);
}
static int create_perf_stat_counter(struct perf_evsel *evsel)
@@ -272,7 +276,7 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
attr->enable_on_exec = 1;
}
- if (target__has_cpu(&target))
+ if (target__has_cpu(&target) && !target__has_per_thread(&target))
return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));
return perf_evsel__open_per_thread(evsel, evsel_list->threads);
@@ -335,7 +339,7 @@ static int read_counter(struct perf_evsel *counter)
int nthreads = thread_map__nr(evsel_list->threads);
int ncpus, cpu, thread;
- if (target__has_cpu(&target))
+ if (target__has_cpu(&target) && !target__has_per_thread(&target))
ncpus = perf_evsel__nr_cpus(counter);
else
ncpus = 1;
@@ -458,19 +462,8 @@ static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *inf
workload_exec_errno = info->si_value.sival_int;
}
-static bool has_unit(struct perf_evsel *counter)
-{
- return counter->unit && *counter->unit;
-}
-
-static bool has_scale(struct perf_evsel *counter)
-{
- return counter->scale != 1;
-}
-
static int perf_stat_synthesize_config(bool is_pipe)
{
- struct perf_evsel *counter;
int err;
if (is_pipe) {
@@ -482,53 +475,10 @@ static int perf_stat_synthesize_config(bool is_pipe)
}
}
- /*
- * Synthesize other events stuff not carried within
- * attr event - unit, scale, name
- */
- evlist__for_each_entry(evsel_list, counter) {
- if (!counter->supported)
- continue;
-
- /*
- * Synthesize unit and scale only if it's defined.
- */
- if (has_unit(counter)) {
- err = perf_event__synthesize_event_update_unit(NULL, counter, process_synthesized_event);
- if (err < 0) {
- pr_err("Couldn't synthesize evsel unit.\n");
- return err;
- }
- }
-
- if (has_scale(counter)) {
- err = perf_event__synthesize_event_update_scale(NULL, counter, process_synthesized_event);
- if (err < 0) {
- pr_err("Couldn't synthesize evsel scale.\n");
- return err;
- }
- }
-
- if (counter->own_cpus) {
- err = perf_event__synthesize_event_update_cpus(NULL, counter, process_synthesized_event);
- if (err < 0) {
- pr_err("Couldn't synthesize evsel scale.\n");
- return err;
- }
- }
-
- /*
- * Name is needed only for pipe output,
- * perf.data carries event names.
- */
- if (is_pipe) {
- err = perf_event__synthesize_event_update_name(NULL, counter, process_synthesized_event);
- if (err < 0) {
- pr_err("Couldn't synthesize evsel name.\n");
- return err;
- }
- }
- }
+ err = perf_event__synthesize_extra_attr(NULL,
+ evsel_list,
+ process_synthesized_event,
+ is_pipe);
err = perf_event__synthesize_thread_map2(NULL, evsel_list->threads,
process_synthesized_event,
@@ -1151,7 +1101,8 @@ static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
}
static void printout(int id, int nr, struct perf_evsel *counter, double uval,
- char *prefix, u64 run, u64 ena, double noise)
+ char *prefix, u64 run, u64 ena, double noise,
+ struct runtime_stat *st)
{
struct perf_stat_output_ctx out;
struct outstate os = {
@@ -1244,7 +1195,7 @@ static void printout(int id, int nr, struct perf_evsel *counter, double uval,
perf_stat__print_shadow_stats(counter, uval,
first_shadow_cpu(counter, id),
- &out, &metric_events);
+ &out, &metric_events, st);
if (!csv_output && !metric_only) {
print_noise(counter, noise);
print_running(run, ena);
@@ -1268,7 +1219,8 @@ static void aggr_update_shadow(void)
val += perf_counts(counter->counts, cpu, 0)->val;
}
perf_stat__update_shadow_stats(counter, val,
- first_shadow_cpu(counter, id));
+ first_shadow_cpu(counter, id),
+ &rt_stat);
}
}
}
@@ -1388,7 +1340,8 @@ static void print_aggr(char *prefix)
fprintf(output, "%s", prefix);
uval = val * counter->scale;
- printout(id, nr, counter, uval, prefix, run, ena, 1.0);
+ printout(id, nr, counter, uval, prefix, run, ena, 1.0,
+ &rt_stat);
if (!metric_only)
fputc('\n', output);
}
@@ -1397,13 +1350,24 @@ static void print_aggr(char *prefix)
}
}
-static void print_aggr_thread(struct perf_evsel *counter, char *prefix)
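+/* qsort() comparator: sort the per-thread values in descending order. */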
+static int cmp_val(const void *a, const void *b)
{
- FILE *output = stat_config.output;
- int nthreads = thread_map__nr(counter->threads);
- int ncpus = cpu_map__nr(counter->cpus);
- int cpu, thread;
+ return ((struct perf_aggr_thread_value *)b)->val -
+ ((struct perf_aggr_thread_value *)a)->val;
+}
+
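+/*
+ * Collect the aggregated value of @counter for each thread into a
+ * buffer and sort it, so the busiest threads are printed first.
+ */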
+static struct perf_aggr_thread_value *sort_aggr_thread(
+ struct perf_evsel *counter,
+ int nthreads, int ncpus,
+ int *ret)
+{
+ int cpu, thread, i = 0;
double uval;
+ struct perf_aggr_thread_value *buf;
+
+ buf = calloc(nthreads, sizeof(struct perf_aggr_thread_value));
+ if (!buf)
+ return NULL;
for (thread = 0; thread < nthreads; thread++) {
u64 ena = 0, run = 0, val = 0;
@@ -1414,13 +1378,63 @@ static void print_aggr_thread(struct perf_evsel *counter, char *prefix)
run += perf_counts(counter->counts, cpu, thread)->run;
}
+ uval = val * counter->scale;
+
+ /*
+ * Skip value 0 when --per-thread is enabled globally,
+ * otherwise the output has too many 0 lines.
+ */
+ if (uval == 0.0 && target__has_per_thread(&target))
+ continue;
+
+ buf[i].counter = counter;
+ buf[i].id = thread;
+ buf[i].uval = uval;
+ buf[i].val = val;
+ buf[i].run = run;
+ buf[i].ena = ena;
+ i++;
+ }
+
+ qsort(buf, i, sizeof(struct perf_aggr_thread_value), cmp_val);
+
+ if (ret)
+ *ret = i;
+
+ return buf;
+}
+
+static void print_aggr_thread(struct perf_evsel *counter, char *prefix)
+{
+ FILE *output = stat_config.output;
+ int nthreads = thread_map__nr(counter->threads);
+ int ncpus = cpu_map__nr(counter->cpus);
+ int thread, sorted_threads, id;
+ struct perf_aggr_thread_value *buf;
+
+ buf = sort_aggr_thread(counter, nthreads, ncpus, &sorted_threads);
+ if (!buf) {
+ perror("cannot sort aggr thread");
+ return;
+ }
+
+ for (thread = 0; thread < sorted_threads; thread++) {
if (prefix)
fprintf(output, "%s", prefix);
- uval = val * counter->scale;
- printout(thread, 0, counter, uval, prefix, run, ena, 1.0);
+ id = buf[thread].id;
+ if (stat_config.stats)
+ printout(id, 0, buf[thread].counter, buf[thread].uval,
+ prefix, buf[thread].run, buf[thread].ena, 1.0,
+ &stat_config.stats[id]);
+ else
+ printout(id, 0, buf[thread].counter, buf[thread].uval,
+ prefix, buf[thread].run, buf[thread].ena, 1.0,
+ &rt_stat);
fputc('\n', output);
}
+
+ free(buf);
}
struct caggr_data {
@@ -1455,7 +1469,8 @@ static void print_counter_aggr(struct perf_evsel *counter, char *prefix)
fprintf(output, "%s", prefix);
uval = cd.avg * counter->scale;
- printout(-1, 0, counter, uval, prefix, cd.avg_running, cd.avg_enabled, cd.avg);
+ printout(-1, 0, counter, uval, prefix, cd.avg_running, cd.avg_enabled,
+ cd.avg, &rt_stat);
if (!metric_only)
fprintf(output, "\n");
}
@@ -1494,7 +1509,8 @@ static void print_counter(struct perf_evsel *counter, char *prefix)
fprintf(output, "%s", prefix);
uval = val * counter->scale;
- printout(cpu, 0, counter, uval, prefix, run, ena, 1.0);
+ printout(cpu, 0, counter, uval, prefix, run, ena, 1.0,
+ &rt_stat);
fputc('\n', output);
}
@@ -1526,7 +1542,8 @@ static void print_no_aggr_metric(char *prefix)
run = perf_counts(counter->counts, cpu, 0)->run;
uval = val * counter->scale;
- printout(cpu, 0, counter, uval, prefix, run, ena, 1.0);
+ printout(cpu, 0, counter, uval, prefix, run, ena, 1.0,
+ &rt_stat);
}
fputc('\n', stat_config.output);
}
@@ -1582,7 +1599,8 @@ static void print_metric_headers(const char *prefix, bool no_indent)
perf_stat__print_shadow_stats(counter, 0,
0,
&out,
- &metric_events);
+ &metric_events,
+ &rt_stat);
}
fputc('\n', stat_config.output);
}
@@ -2541,6 +2559,35 @@ int process_cpu_map_event(struct perf_tool *tool,
return set_maps(st);
}
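+/* Allocate one runtime_stat per thread, for per-thread shadow metrics. */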
+static int runtime_stat_new(struct perf_stat_config *config, int nthreads)
+{
+ int i;
+
+ config->stats = calloc(nthreads, sizeof(struct runtime_stat));
+ if (!config->stats)
+ return -1;
+
+ config->stats_num = nthreads;
+
+ for (i = 0; i < nthreads; i++)
+ runtime_stat__init(&config->stats[i]);
+
+ return 0;
+}
+
+static void runtime_stat_delete(struct perf_stat_config *config)
+{
+ int i;
+
+ if (!config->stats)
+ return;
+
+ for (i = 0; i < config->stats_num; i++)
+ runtime_stat__exit(&config->stats[i]);
+
+ free(config->stats);
+}
+
static const char * const stat_report_usage[] = {
"perf stat report [<options>]",
NULL,
@@ -2750,12 +2797,16 @@ int cmd_stat(int argc, const char **argv)
run_count = 1;
}
- if ((stat_config.aggr_mode == AGGR_THREAD) && !target__has_task(&target)) {
- fprintf(stderr, "The --per-thread option is only available "
- "when monitoring via -p -t options.\n");
- parse_options_usage(NULL, stat_options, "p", 1);
- parse_options_usage(NULL, stat_options, "t", 1);
- goto out;
+ if ((stat_config.aggr_mode == AGGR_THREAD) &&
+ !target__has_task(&target)) {
+ if (!target.system_wide || target.cpu_list) {
+ fprintf(stderr, "The --per-thread option is only "
+ "available when monitoring via -p -t -a "
+ "options or only --per-thread.\n");
+ parse_options_usage(NULL, stat_options, "p", 1);
+ parse_options_usage(NULL, stat_options, "t", 1);
+ goto out;
+ }
}
/*
@@ -2779,6 +2830,9 @@ int cmd_stat(int argc, const char **argv)
target__validate(&target);
+ if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide))
+ target.per_thread = true;
+
if (perf_evlist__create_maps(evsel_list, &target) < 0) {
if (target__has_task(&target)) {
pr_err("Problems finding threads of monitor\n");
@@ -2796,8 +2850,15 @@ int cmd_stat(int argc, const char **argv)
* Initialize thread_map with comm names,
* so we could print it out on output.
*/
- if (stat_config.aggr_mode == AGGR_THREAD)
+ if (stat_config.aggr_mode == AGGR_THREAD) {
thread_map__read_comms(evsel_list->threads);
+ if (target.system_wide) {
+ if (runtime_stat_new(&stat_config,
+ thread_map__nr(evsel_list->threads))) {
+ goto out;
+ }
+ }
+ }
if (interval && interval < 100) {
if (interval < 10) {
@@ -2887,5 +2948,8 @@ out:
sysfs__write_int(FREEZE_ON_SMI_PATH, 0);
perf_evlist__delete(evsel_list);
+
+ runtime_stat_delete(&stat_config);
+
return status;
}
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 477a8699f0b5..b7c823ba8374 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -77,6 +77,7 @@
#include "sane_ctype.h"
static volatile int done;
+static volatile int resize;
#define HEADER_LINE_NR 5
@@ -85,17 +86,20 @@ static void perf_top__update_print_entries(struct perf_top *top)
top->print_entries = top->winsize.ws_row - HEADER_LINE_NR;
}
-static void perf_top__sig_winch(int sig __maybe_unused,
- siginfo_t *info __maybe_unused, void *arg)
+static void winch_sig(int sig __maybe_unused)
{
- struct perf_top *top = arg;
+ resize = 1;
+}
+static void perf_top__resize(struct perf_top *top)
+{
get_term_dimensions(&top->winsize);
perf_top__update_print_entries(top);
}
static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
{
+ struct perf_evsel *evsel = hists_to_evsel(he->hists);
struct symbol *sym;
struct annotation *notes;
struct map *map;
@@ -134,7 +138,7 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
return err;
}
- err = symbol__disassemble(sym, map, NULL, 0, NULL, NULL);
+ err = symbol__annotate(sym, map, evsel, 0, NULL);
if (err == 0) {
out_assign:
top->sym_filter_entry = he;
@@ -226,6 +230,7 @@ static void perf_top__record_precise_ip(struct perf_top *top,
static void perf_top__show_details(struct perf_top *top)
{
struct hist_entry *he = top->sym_filter_entry;
+ struct perf_evsel *evsel = hists_to_evsel(he->hists);
struct annotation *notes;
struct symbol *symbol;
int more;
@@ -238,6 +243,8 @@ static void perf_top__show_details(struct perf_top *top)
pthread_mutex_lock(&notes->lock);
+ symbol__calc_percent(symbol, evsel);
+
if (notes->src == NULL)
goto out_unlock;
@@ -276,8 +283,9 @@ static void perf_top__print_sym_table(struct perf_top *top)
printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
- if (hists->stats.nr_lost_warned !=
- hists->stats.nr_events[PERF_RECORD_LOST]) {
+ if (!top->record_opts.overwrite &&
+ (hists->stats.nr_lost_warned !=
+ hists->stats.nr_events[PERF_RECORD_LOST])) {
hists->stats.nr_lost_warned =
hists->stats.nr_events[PERF_RECORD_LOST];
color_fprintf(stdout, PERF_COLOR_RED,
@@ -409,7 +417,7 @@ static void perf_top__print_mapped_keys(struct perf_top *top)
fprintf(stdout, "\t[S] stop annotation.\n");
fprintf(stdout,
- "\t[K] hide kernel_symbols symbols. \t(%s)\n",
+ "\t[K] hide kernel symbols. \t(%s)\n",
top->hide_kernel_symbols ? "yes" : "no");
fprintf(stdout,
"\t[U] hide user symbols. \t(%s)\n",
@@ -473,12 +481,8 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
case 'e':
prompt_integer(&top->print_entries, "Enter display entries (lines)");
if (top->print_entries == 0) {
- struct sigaction act = {
- .sa_sigaction = perf_top__sig_winch,
- .sa_flags = SA_SIGINFO,
- };
- perf_top__sig_winch(SIGWINCH, NULL, top);
- sigaction(SIGWINCH, &act, NULL);
+ perf_top__resize(top);
+ signal(SIGWINCH, winch_sig);
} else {
signal(SIGWINCH, SIG_DFL);
}
@@ -608,7 +612,8 @@ static void *display_thread_tui(void *arg)
perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
top->min_percent,
- &top->session->header.env);
+ &top->session->header.env,
+ !top->record_opts.overwrite);
done = 1;
return NULL;
@@ -732,14 +737,16 @@ static void perf_event__process_sample(struct perf_tool *tool,
if (!machine->kptr_restrict_warned &&
symbol_conf.kptr_restrict &&
al.cpumode == PERF_RECORD_MISC_KERNEL) {
- ui__warning(
+ if (!perf_evlist__exclude_kernel(top->session->evlist)) {
+ ui__warning(
"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
"Check /proc/sys/kernel/kptr_restrict.\n\n"
"Kernel%s samples will not be resolved.\n",
al.map && !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ?
" modules" : "");
- if (use_browser <= 0)
- sleep(5);
+ if (use_browser <= 0)
+ sleep(5);
+ }
machine->kptr_restrict_warned = true;
}
@@ -802,15 +809,23 @@ static void perf_event__process_sample(struct perf_tool *tool,
static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
{
+ struct record_opts *opts = &top->record_opts;
+ struct perf_evlist *evlist = top->evlist;
struct perf_sample sample;
struct perf_evsel *evsel;
+ struct perf_mmap *md;
struct perf_session *session = top->session;
union perf_event *event;
struct machine *machine;
+ u64 end, start;
int ret;
- while ((event = perf_evlist__mmap_read(top->evlist, idx)) != NULL) {
- ret = perf_evlist__parse_sample(top->evlist, event, &sample);
+ md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx];
+ if (perf_mmap__read_init(md, opts->overwrite, &start, &end) < 0)
+ return;
+
+ while ((event = perf_mmap__read_event(md, opts->overwrite, &start, end)) != NULL) {
+ ret = perf_evlist__parse_sample(evlist, event, &sample);
if (ret) {
pr_err("Can't parse sample, err = %d\n", ret);
goto next_event;
@@ -864,16 +879,120 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
} else
++session->evlist->stats.nr_unknown_events;
next_event:
- perf_evlist__mmap_consume(top->evlist, idx);
+ perf_mmap__consume(md, opts->overwrite);
}
+
+ perf_mmap__read_done(md);
}
static void perf_top__mmap_read(struct perf_top *top)
{
+ bool overwrite = top->record_opts.overwrite;
+ struct perf_evlist *evlist = top->evlist;
+ unsigned long long start, end;
int i;
+ start = rdclock();
+ if (overwrite)
+ perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);
+
for (i = 0; i < top->evlist->nr_mmaps; i++)
perf_top__mmap_read_idx(top, i);
+
+ if (overwrite) {
+ perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
+ perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
+ }
+ end = rdclock();
+
+ if ((end - start) > (unsigned long long)top->delay_secs * NSEC_PER_SEC)
+ ui__warning("Too slow to read ring buffer.\n"
+ "Please try increasing the period (-c) or\n"
+ "decreasing the freq (-F) or\n"
+ "limiting the number of CPUs (-C)\n");
+}
+
+/*
+ * Check per-event overwrite term.
+ * perf top should support a consistent term for all events.
+ * - No event has a per-event term
+ *   E.g. "cpu/cpu-cycles/,cpu/instructions/"
+ *   Nothing changes, return 0.
+ * - All events have the same per-event term
+ *   E.g. "cpu/cpu-cycles,no-overwrite/,cpu/instructions,no-overwrite/"
+ *   Use the per-event setting to replace opts->overwrite if
+ *   they differ, then return 0.
+ * - Events have different per-event terms
+ *   E.g. "cpu/cpu-cycles,overwrite/,cpu/instructions,no-overwrite/"
+ *   Return -1.
+ * - Some events set a per-event term, but others do not.
+ *   E.g. "cpu/cpu-cycles/,cpu/instructions,no-overwrite/"
+ *   Return -1.
+ */
+static int perf_top__overwrite_check(struct perf_top *top)
+{
+ struct record_opts *opts = &top->record_opts;
+ struct perf_evlist *evlist = top->evlist;
+ struct perf_evsel_config_term *term;
+ struct list_head *config_terms;
+ struct perf_evsel *evsel;
+ int set, overwrite = -1;
+
+ evlist__for_each_entry(evlist, evsel) {
+ set = -1;
+ config_terms = &evsel->config_terms;
+ list_for_each_entry(term, config_terms, list) {
+ if (term->type == PERF_EVSEL__CONFIG_TERM_OVERWRITE)
+ set = term->val.overwrite ? 1 : 0;
+ }
+
+ /* no term for current and previous event (likely) */
+ if ((overwrite < 0) && (set < 0))
+ continue;
+
+ /* has term for both current and previous event, compare */
+ if ((overwrite >= 0) && (set >= 0) && (overwrite != set))
+ return -1;
+
+ /* no term for current event but has term for previous one */
+ if ((overwrite >= 0) && (set < 0))
+ return -1;
+
+ /* has term for current event */
+ if ((overwrite < 0) && (set >= 0)) {
+ /* if it's first event, set overwrite */
+ if (evsel == perf_evlist__first(evlist))
+ overwrite = set;
+ else
+ return -1;
+ }
+ }
+
+ if ((overwrite >= 0) && (opts->overwrite != overwrite))
+ opts->overwrite = overwrite;
+
+ return 0;
+}
+
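+/*
+ * If opening the first event fails while in overwrite mode, clear
+ * write_backward on all events and retry in non-overwrite mode.
+ */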
+static int perf_top_overwrite_fallback(struct perf_top *top,
+ struct perf_evsel *evsel)
+{
+ struct record_opts *opts = &top->record_opts;
+ struct perf_evlist *evlist = top->evlist;
+ struct perf_evsel *counter;
+
+ if (!opts->overwrite)
+ return 0;
+
+ /* only fall back when first event fails */
+ if (evsel != perf_evlist__first(evlist))
+ return 0;
+
+ evlist__for_each_entry(evlist, counter)
+ counter->attr.write_backward = false;
+ opts->overwrite = false;
+ ui__warning("fall back to non-overwrite mode\n");
+ return 1;
}
static int perf_top__start_counters(struct perf_top *top)
@@ -883,12 +1002,33 @@ static int perf_top__start_counters(struct perf_top *top)
struct perf_evlist *evlist = top->evlist;
struct record_opts *opts = &top->record_opts;
+ if (perf_top__overwrite_check(top)) {
+ ui__error("perf top only support consistent per-event "
+ "overwrite setting for all events\n");
+ goto out_err;
+ }
+
perf_evlist__config(evlist, opts, &callchain_param);
evlist__for_each_entry(evlist, counter) {
try_again:
if (perf_evsel__open(counter, top->evlist->cpus,
top->evlist->threads) < 0) {
+
+ /*
+ * Handle the overwrite fallback specially.
+ * perf top is the only tool which has overwrite
+ * mode by default and supports both overwrite and
+ * non-overwrite mode, and it requires a consistent
+ * mode for all events.
+ *
+ * This may move to generic code once more tools
+ * have a similar attribute.
+ */
+ if (perf_missing_features.write_backward &&
+ perf_top_overwrite_fallback(top, counter))
+ goto try_again;
+
if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
if (verbose > 0)
ui__warning("%s\n", msg);
@@ -902,7 +1042,7 @@ try_again:
}
}
- if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
+ if (perf_evlist__mmap(evlist, opts->mmap_pages) < 0) {
ui__error("Failed to mmap with %d (%s)\n",
errno, str_error_r(errno, msg, sizeof(msg)));
goto out_err;
@@ -1028,8 +1168,13 @@ static int __cmd_top(struct perf_top *top)
perf_top__mmap_read(top);
- if (hits == top->samples)
+ if (opts->overwrite || (hits == top->samples))
ret = perf_evlist__poll(top->evlist, 100);
+
+ if (resize) {
+ perf_top__resize(top);
+ resize = 0;
+ }
}
ret = 0;
@@ -1117,6 +1262,7 @@ int cmd_top(int argc, const char **argv)
.uses_mmap = true,
},
.proc_map_timeout = 500,
+ .overwrite = 1,
},
.max_stack = sysctl_perf_event_max_stack,
.sym_pcnt_filter = 5,
@@ -1352,12 +1498,8 @@ int cmd_top(int argc, const char **argv)
get_term_dimensions(&top.winsize);
if (top.print_entries == 0) {
- struct sigaction act = {
- .sa_sigaction = perf_top__sig_winch,
- .sa_flags = SA_SIGINFO,
- };
perf_top__update_print_entries(&top);
- sigaction(SIGWINCH, &act, NULL);
+ signal(SIGWINCH, winch_sig);
}
status = __cmd_top(&top);
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index f2757d38c7d7..e7f1b182fc15 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -21,6 +21,7 @@
#include "builtin.h"
#include "util/color.h"
#include "util/debug.h"
+#include "util/env.h"
#include "util/event.h"
#include "util/evlist.h"
#include <subcmd/exec-cmd.h>
@@ -45,18 +46,17 @@
#include <errno.h>
#include <inttypes.h>
-#include <libaudit.h> /* FIXME: Still needed for audit_errno_to_name */
#include <poll.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <linux/err.h>
#include <linux/filter.h>
-#include <linux/audit.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/stringify.h>
#include <linux/time64.h>
+#include <fcntl.h>
#include "sane_ctype.h"
@@ -111,6 +111,7 @@ struct trace {
bool summary;
bool summary_only;
bool show_comm;
+ bool print_sample;
bool show_tool_stats;
bool trace_syscalls;
bool kernel_syscallchains;
@@ -545,9 +546,10 @@ static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
{ .scnprintf = SCA_STRARRAY, \
.parm = &strarray__##array, }
+#include "trace/beauty/arch_errno_names.c"
#include "trace/beauty/eventfd.c"
-#include "trace/beauty/flock.c"
#include "trace/beauty/futex_op.c"
+#include "trace/beauty/futex_val3.c"
#include "trace/beauty/mmap.c"
#include "trace/beauty/mode_t.c"
#include "trace/beauty/msg_flags.c"
@@ -610,7 +612,8 @@ static struct syscall_fmt {
{ .name = "fstat", .alias = "newfstat", },
{ .name = "fstatat", .alias = "newfstatat", },
{ .name = "futex",
- .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ }, }, },
+ .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ },
+ [5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, },
{ .name = "futimesat",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
{ .name = "getitimer",
@@ -622,6 +625,7 @@ static struct syscall_fmt {
.arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, },
{ .name = "getrlimit",
.arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
+ { .name = "gettid", .errpid = true, },
{ .name = "ioctl",
.arg = {
#if defined(__i386__) || defined(__x86_64__)
@@ -819,7 +823,7 @@ static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
size_t printed = fprintf(fp, "(");
if (!calculated)
- printed += fprintf(fp, " ? ");
+ printed += fprintf(fp, " ");
else if (duration >= 1.0)
printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
else if (duration >= 0.01)
@@ -1152,12 +1156,14 @@ static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
if (trace->host == NULL)
return -ENOMEM;
- if (trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr) < 0)
- return -errno;
+ err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
+ if (err < 0)
+ goto out;
err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
evlist->threads, trace__tool_process, false,
trace->opts.proc_map_timeout, 1);
+out:
if (err)
symbol__exit();
@@ -1552,10 +1558,9 @@ static void thread__update_stats(struct thread_trace *ttrace,
update_stats(stats, duration);
}
-static int trace__printf_interrupted_entry(struct trace *trace, struct perf_sample *sample)
+static int trace__printf_interrupted_entry(struct trace *trace)
{
struct thread_trace *ttrace;
- u64 duration;
size_t printed;
if (trace->current == NULL)
@@ -1566,15 +1571,30 @@ static int trace__printf_interrupted_entry(struct trace *trace, struct perf_samp
if (!ttrace->entry_pending)
return 0;
- duration = sample->time - ttrace->entry_time;
-
- printed = trace__fprintf_entry_head(trace, trace->current, duration, true, ttrace->entry_time, trace->output);
+ printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
printed += fprintf(trace->output, "%-70s) ...\n", ttrace->entry_str);
ttrace->entry_pending = false;
return printed;
}
+static int trace__fprintf_sample(struct trace *trace, struct perf_evsel *evsel,
+ struct perf_sample *sample, struct thread *thread)
+{
+ int printed = 0;
+
+ if (trace->print_sample) {
+ double ts = (double)sample->time / NSEC_PER_MSEC;
+
+ printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
+ perf_evsel__name(evsel), ts,
+ thread__comm_str(thread),
+ sample->pid, sample->tid, sample->cpu);
+ }
+
+ return printed;
+}
+
static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
union perf_event *event __maybe_unused,
struct perf_sample *sample)
@@ -1595,6 +1615,8 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
if (ttrace == NULL)
goto out_put;
+ trace__fprintf_sample(trace, evsel, sample, thread);
+
args = perf_evsel__sc_tp_ptr(evsel, args, sample);
if (ttrace->entry_str == NULL) {
@@ -1604,7 +1626,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
}
if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
- trace__printf_interrupted_entry(trace, sample);
+ trace__printf_interrupted_entry(trace);
ttrace->entry_time = sample->time;
msg = ttrace->entry_str;
@@ -1639,9 +1661,12 @@ static int trace__resolve_callchain(struct trace *trace, struct perf_evsel *evse
struct callchain_cursor *cursor)
{
struct addr_location al;
+ int max_stack = evsel->attr.sample_max_stack ?
+ evsel->attr.sample_max_stack :
+ trace->max_stack;
if (machine__resolve(trace->host, &al, sample) < 0 ||
- thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, trace->max_stack))
+ thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack))
return -1;
return 0;
@@ -1657,6 +1682,14 @@ static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sam
return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, trace->output);
}
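+
+/* Resolve an errno value to its name for the architecture the data was recorded on. */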
+static const char *errno_to_name(struct perf_evsel *evsel, int err)
+{
+ struct perf_env *env = perf_evsel__env(evsel);
+ const char *arch_name = perf_env__arch(env);
+
+ return arch_syscalls__strerrno(arch_name, err);
+}
+
static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
union perf_event *event __maybe_unused,
struct perf_sample *sample)
@@ -1677,6 +1710,8 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
if (ttrace == NULL)
goto out_put;
+ trace__fprintf_sample(trace, evsel, sample, thread);
+
if (trace->summary)
thread__update_stats(ttrace, id, sample);
@@ -1727,7 +1762,7 @@ signed_print:
errno_print: {
char bf[STRERR_BUFSIZE];
const char *emsg = str_error_r(-ret, bf, sizeof(bf)),
- *e = audit_errno_to_name(-ret);
+ *e = errno_to_name(evsel, -ret);
fprintf(trace->output, ") = -1 %s %s", e, emsg);
}
@@ -1908,7 +1943,7 @@ static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
}
}
- trace__printf_interrupted_entry(trace, sample);
+ trace__printf_interrupted_entry(trace);
trace__fprintf_tstamp(trace, sample->time, trace->output);
if (trace->trace_syscalls)
@@ -2219,6 +2254,9 @@ static int trace__add_syscall_newtp(struct trace *trace)
if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
goto out_delete_sys_exit;
+ perf_evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
+ perf_evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);
+
perf_evlist__add(evlist, sys_enter);
perf_evlist__add(evlist, sys_exit);
@@ -2315,6 +2353,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
pgfault_maj = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
if (pgfault_maj == NULL)
goto out_error_mem;
+ perf_evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
perf_evlist__add(evlist, pgfault_maj);
}
@@ -2322,6 +2361,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
pgfault_min = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
if (pgfault_min == NULL)
goto out_error_mem;
+ perf_evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
perf_evlist__add(evlist, pgfault_min);
}
@@ -2342,45 +2382,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
goto out_delete_evlist;
}
- perf_evlist__config(evlist, &trace->opts, NULL);
-
- if (callchain_param.enabled) {
- bool use_identifier = false;
-
- if (trace->syscalls.events.sys_exit) {
- perf_evsel__config_callchain(trace->syscalls.events.sys_exit,
- &trace->opts, &callchain_param);
- use_identifier = true;
- }
-
- if (pgfault_maj) {
- perf_evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
- use_identifier = true;
- }
-
- if (pgfault_min) {
- perf_evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
- use_identifier = true;
- }
-
- if (use_identifier) {
- /*
- * Now we have evsels with different sample_ids, use
- * PERF_SAMPLE_IDENTIFIER to map from sample to evsel
- * from a fixed position in each ring buffer record.
- *
- * As of this the changeset introducing this comment, this
- * isn't strictly needed, as the fields that can come before
- * PERF_SAMPLE_ID are all used, but we'll probably disable
- * some of those for things like copying the payload of
- * pointer syscall arguments, and for vfs_getname we don't
- * need PERF_SAMPLE_ADDR and PERF_SAMPLE_IP, so do this
- * here as a warning we need to use PERF_SAMPLE_IDENTIFIER.
- */
- perf_evlist__set_sample_bit(evlist, IDENTIFIER);
- perf_evlist__reset_sample_bit(evlist, ID);
- }
- }
+ perf_evlist__config(evlist, &trace->opts, &callchain_param);
signal(SIGCHLD, sig_handler);
signal(SIGINT, sig_handler);
@@ -2435,7 +2437,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
if (err < 0)
goto out_error_apply_filters;
- err = perf_evlist__mmap(evlist, trace->opts.mmap_pages, false);
+ err = perf_evlist__mmap(evlist, trace->opts.mmap_pages);
if (err < 0)
goto out_error_mmap;
@@ -2453,6 +2455,18 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
trace->multiple_threads = thread_map__pid(evlist->threads, 0) == -1 ||
evlist->threads->nr > 1 ||
perf_evlist__first(evlist)->attr.inherit;
+
+ /*
+ * Now that we already used evsel->attr to ask the kernel to set up the
+ * events, let's reuse evsel->attr.sample_max_stack as the limit in
+ * trace__resolve_callchain(), allowing per-event max-stack settings
+ * to override an explicitly set --max-stack global setting.
+ */
+ evlist__for_each_entry(evlist, evsel) {
+ if ((evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) &&
+ evsel->attr.sample_max_stack == 0)
+ evsel->attr.sample_max_stack = trace->max_stack;
+ }
again:
before = trace->nr_events;
@@ -3044,6 +3058,8 @@ int cmd_trace(int argc, const char **argv)
"Set the maximum stack depth when parsing the callchain, "
"anything beyond the specified depth will be ignored. "
"Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
+ OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
+ "print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"),
OPT_UINTEGER(0, "proc-map-timeout", &trace.opts.proc_map_timeout,
"per thread proc mmap processing timeout in ms"),
OPT_UINTEGER('D', "delay", &trace.opts.initial_delay,
@@ -3095,8 +3111,9 @@ int cmd_trace(int argc, const char **argv)
}
#ifdef HAVE_DWARF_UNWIND_SUPPORT
- if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled && trace.trace_syscalls)
+ if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
+ }
#endif
if (callchain_param.enabled) {
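
For context, a minimal usage sketch of the builtin-trace options touched above; the event glob and workload are illustrative placeholders, not taken from the patch:

  # --print-sample (added above) dumps the raw PERF_RECORD_SAMPLE fields,
  # while --call-graph/--max-stack exercise the callchain paths reworked here
  perf trace --call-graph dwarf --max-stack 8 --print-sample -e 'open*' -- ls
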
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index 77406d25e521..790ec25919a0 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -21,6 +21,7 @@ arch/x86/include/asm/cpufeatures.h
arch/arm/include/uapi/asm/perf_regs.h
arch/arm64/include/uapi/asm/perf_regs.h
arch/powerpc/include/uapi/asm/perf_regs.h
+arch/s390/include/uapi/asm/perf_regs.h
arch/x86/include/uapi/asm/perf_regs.h
arch/x86/include/uapi/asm/kvm.h
arch/x86/include/uapi/asm/kvm_perf.h
@@ -30,22 +31,31 @@ arch/x86/include/uapi/asm/vmx.h
arch/powerpc/include/uapi/asm/kvm.h
arch/s390/include/uapi/asm/kvm.h
arch/s390/include/uapi/asm/kvm_perf.h
+arch/s390/include/uapi/asm/ptrace.h
arch/s390/include/uapi/asm/sie.h
arch/arm/include/uapi/asm/kvm.h
arch/arm64/include/uapi/asm/kvm.h
+arch/alpha/include/uapi/asm/errno.h
+arch/mips/include/asm/errno.h
+arch/mips/include/uapi/asm/errno.h
+arch/parisc/include/uapi/asm/errno.h
+arch/powerpc/include/uapi/asm/errno.h
+arch/sparc/include/uapi/asm/errno.h
+arch/x86/include/uapi/asm/errno.h
include/asm-generic/bitops/arch_hweight.h
include/asm-generic/bitops/const_hweight.h
include/asm-generic/bitops/__fls.h
include/asm-generic/bitops/fls.h
include/asm-generic/bitops/fls64.h
include/linux/coresight-pmu.h
+include/uapi/asm-generic/errno.h
+include/uapi/asm-generic/errno-base.h
include/uapi/asm-generic/ioctls.h
include/uapi/asm-generic/mman-common.h
'
check () {
file=$1
- opts="--ignore-blank-lines --ignore-space-change"
shift
while [ -n "$*" ]; do
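
A rough sketch of the per-header comparison the check() helper performs; the exact diff invocation and warning text are assumed, only the listed header paths come from this patch:

  # compare the tools/ copy of one ABI header against the kernel original,
  # run from tools/perf so ../../ points back at the kernel tree
  file=include/uapi/asm-generic/errno.h
  diff -u "../../$file" "$file" >/dev/null 2>&1 ||
          echo "Warning: tools copy of $file differs from the kernel source" >&2
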
diff --git a/tools/perf/jvmti/jvmti_agent.c b/tools/perf/jvmti/jvmti_agent.c
index cf36de7ea255..0c6d1002b524 100644
--- a/tools/perf/jvmti/jvmti_agent.c
+++ b/tools/perf/jvmti/jvmti_agent.c
@@ -384,13 +384,13 @@ jvmti_write_code(void *agent, char const *sym,
}
int
-jvmti_write_debug_info(void *agent, uint64_t code, const char *file,
- jvmti_line_info_t *li, int nr_lines)
+jvmti_write_debug_info(void *agent, uint64_t code,
+ int nr_lines, jvmti_line_info_t *li,
+ const char * const * file_names)
{
struct jr_code_debug_info rec;
- size_t sret, len, size, flen;
+ size_t sret, len, size, flen = 0;
uint64_t addr;
- const char *fn = file;
FILE *fp = agent;
int i;
@@ -405,7 +405,9 @@ jvmti_write_debug_info(void *agent, uint64_t code, const char *file,
return -1;
}
- flen = strlen(file) + 1;
+ for (i = 0; i < nr_lines; ++i) {
+ flen += strlen(file_names[i]) + 1;
+ }
rec.p.id = JIT_CODE_DEBUG_INFO;
size = sizeof(rec);
@@ -421,7 +423,7 @@ jvmti_write_debug_info(void *agent, uint64_t code, const char *file,
* file[] : source file name
*/
size += nr_lines * sizeof(struct debug_entry);
- size += flen * nr_lines;
+ size += flen;
rec.p.total_size = size;
/*
@@ -452,7 +454,7 @@ jvmti_write_debug_info(void *agent, uint64_t code, const char *file,
if (sret != 1)
goto error;
- sret = fwrite_unlocked(fn, flen, 1, fp);
+ sret = fwrite_unlocked(file_names[i], strlen(file_names[i]) + 1, 1, fp);
if (sret != 1)
goto error;
}
diff --git a/tools/perf/jvmti/jvmti_agent.h b/tools/perf/jvmti/jvmti_agent.h
index fe32d8344a82..6ed82f6c06dd 100644
--- a/tools/perf/jvmti/jvmti_agent.h
+++ b/tools/perf/jvmti/jvmti_agent.h
@@ -14,6 +14,7 @@ typedef struct {
unsigned long pc;
int line_number;
int discrim; /* discriminator -- 0 for now */
+ jmethodID methodID;
} jvmti_line_info_t;
void *jvmti_open(void);
@@ -22,11 +23,9 @@ int jvmti_write_code(void *agent, char const *symbol_name,
uint64_t vma, void const *code,
const unsigned int code_size);
-int jvmti_write_debug_info(void *agent,
- uint64_t code,
- const char *file,
+int jvmti_write_debug_info(void *agent, uint64_t code, int nr_lines,
jvmti_line_info_t *li,
- int nr_lines);
+ const char * const * file_names);
#if defined(__cplusplus)
}
diff --git a/tools/perf/jvmti/libjvmti.c b/tools/perf/jvmti/libjvmti.c
index c62c9fc9a525..6add3e982614 100644
--- a/tools/perf/jvmti/libjvmti.c
+++ b/tools/perf/jvmti/libjvmti.c
@@ -47,6 +47,7 @@ do_get_line_numbers(jvmtiEnv *jvmti, void *pc, jmethodID m, jint bci,
tab[lines].pc = (unsigned long)pc;
tab[lines].line_number = loc_tab[i].line_number;
tab[lines].discrim = 0; /* not yet used */
+ tab[lines].methodID = m;
lines++;
} else {
break;
@@ -125,6 +126,99 @@ get_line_numbers(jvmtiEnv *jvmti, const void *compile_info, jvmti_line_info_t **
return JVMTI_ERROR_NONE;
}
+static void
+copy_class_filename(const char * class_sign, const char * file_name, char * result, size_t max_length)
+{
+ /*
+ * Assume path name is class hierarchy, this is a common practice with Java programs
+ */
+ if (*class_sign == 'L') {
+ int j, i = 0;
+ char *p = strrchr(class_sign, '/');
+ if (p) {
+ /* drop the 'L' prefix and copy up to the final '/' */
+ for (i = 0; i < (p - class_sign); i++)
+ result[i] = class_sign[i+1];
+ }
+ /*
+ * append file name, we use loops and not string ops to avoid modifying
+ * class_sign which is used later for the symbol name
+ */
+ for (j = 0; i < (max_length - 1) && file_name && j < strlen(file_name); j++, i++)
+ result[i] = file_name[j];
+
+ result[i] = '\0';
+ } else {
+ /* fallback case */
+ size_t file_name_len = strlen(file_name);
+ strncpy(result, file_name, file_name_len < max_length ? file_name_len : max_length);
+ }
+}
+
+static jvmtiError
+get_source_filename(jvmtiEnv *jvmti, jmethodID methodID, char ** buffer)
+{
+ jvmtiError ret;
+ jclass decl_class;
+ char *file_name = NULL;
+ char *class_sign = NULL;
+ char fn[PATH_MAX];
+ size_t len;
+
+ ret = (*jvmti)->GetMethodDeclaringClass(jvmti, methodID, &decl_class);
+ if (ret != JVMTI_ERROR_NONE) {
+ print_error(jvmti, "GetMethodDeclaringClass", ret);
+ return ret;
+ }
+
+ ret = (*jvmti)->GetSourceFileName(jvmti, decl_class, &file_name);
+ if (ret != JVMTI_ERROR_NONE) {
+ print_error(jvmti, "GetSourceFileName", ret);
+ return ret;
+ }
+
+ ret = (*jvmti)->GetClassSignature(jvmti, decl_class, &class_sign, NULL);
+ if (ret != JVMTI_ERROR_NONE) {
+ print_error(jvmti, "GetClassSignature", ret);
+ goto free_file_name_error;
+ }
+
+ copy_class_filename(class_sign, file_name, fn, PATH_MAX);
+ len = strlen(fn);
+ *buffer = malloc((len + 1) * sizeof(char));
+ if (!*buffer) {
+ print_error(jvmti, "GetClassSignature", ret);
+ ret = JVMTI_ERROR_OUT_OF_MEMORY;
+ goto free_class_sign_error;
+ }
+ strcpy(*buffer, fn);
+ ret = JVMTI_ERROR_NONE;
+
+free_class_sign_error:
+ (*jvmti)->Deallocate(jvmti, (unsigned char *)class_sign);
+free_file_name_error:
+ (*jvmti)->Deallocate(jvmti, (unsigned char *)file_name);
+
+ return ret;
+}
+
+static jvmtiError
+fill_source_filenames(jvmtiEnv *jvmti, int nr_lines,
+ const jvmti_line_info_t * line_tab,
+ char ** file_names)
+{
+ int index;
+ jvmtiError ret;
+
+ for (index = 0; index < nr_lines; ++index) {
+ ret = get_source_filename(jvmti, line_tab[index].methodID, &(file_names[index]));
+ if (ret != JVMTI_ERROR_NONE)
+ return ret;
+ }
+
+ return JVMTI_ERROR_NONE;
+}
+
static void JNICALL
compiled_method_load_cb(jvmtiEnv *jvmti,
jmethodID method,
@@ -135,16 +229,18 @@ compiled_method_load_cb(jvmtiEnv *jvmti,
const void *compile_info)
{
jvmti_line_info_t *line_tab = NULL;
+ char ** line_file_names = NULL;
jclass decl_class;
char *class_sign = NULL;
char *func_name = NULL;
char *func_sign = NULL;
- char *file_name= NULL;
+ char *file_name = NULL;
char fn[PATH_MAX];
uint64_t addr = (uint64_t)(uintptr_t)code_addr;
jvmtiError ret;
int nr_lines = 0; /* in line_tab[] */
size_t len;
+ int output_debug_info = 0;
ret = (*jvmti)->GetMethodDeclaringClass(jvmti, method,
&decl_class);
@@ -158,6 +254,19 @@ compiled_method_load_cb(jvmtiEnv *jvmti,
if (ret != JVMTI_ERROR_NONE) {
warnx("jvmti: cannot get line table for method");
nr_lines = 0;
+ } else if (nr_lines > 0) {
+ line_file_names = malloc(sizeof(char*) * nr_lines);
+ if (!line_file_names) {
+ warnx("jvmti: cannot allocate space for line table method names");
+ } else {
+ memset(line_file_names, 0, sizeof(char*) * nr_lines);
+ ret = fill_source_filenames(jvmti, nr_lines, line_tab, line_file_names);
+ if (ret != JVMTI_ERROR_NONE) {
+ warnx("jvmti: fill_source_filenames failed");
+ } else {
+ output_debug_info = 1;
+ }
+ }
}
}
@@ -181,33 +290,14 @@ compiled_method_load_cb(jvmtiEnv *jvmti,
goto error;
}
- /*
- * Assume path name is class hierarchy, this is a common practice with Java programs
- */
- if (*class_sign == 'L') {
- int j, i = 0;
- char *p = strrchr(class_sign, '/');
- if (p) {
- /* drop the 'L' prefix and copy up to the final '/' */
- for (i = 0; i < (p - class_sign); i++)
- fn[i] = class_sign[i+1];
- }
- /*
- * append file name, we use loops and not string ops to avoid modifying
- * class_sign which is used later for the symbol name
- */
- for (j = 0; i < (PATH_MAX - 1) && file_name && j < strlen(file_name); j++, i++)
- fn[i] = file_name[j];
- fn[i] = '\0';
- } else {
- /* fallback case */
- strcpy(fn, file_name);
- }
+ copy_class_filename(class_sign, file_name, fn, PATH_MAX);
+
/*
* write source line info record if we have it
*/
- if (jvmti_write_debug_info(jvmti_agent, addr, fn, line_tab, nr_lines))
- warnx("jvmti: write_debug_info() failed");
+ if (output_debug_info)
+ if (jvmti_write_debug_info(jvmti_agent, addr, nr_lines, line_tab, (const char * const *) line_file_names))
+ warnx("jvmti: write_debug_info() failed");
len = strlen(func_name) + strlen(class_sign) + strlen(func_sign) + 2;
{
@@ -223,6 +313,13 @@ error:
(*jvmti)->Deallocate(jvmti, (unsigned char *)class_sign);
(*jvmti)->Deallocate(jvmti, (unsigned char *)file_name);
free(line_tab);
+ while (line_file_names && (nr_lines > 0)) {
+ if (line_file_names[nr_lines - 1]) {
+ free(line_file_names[nr_lines - 1]);
+ }
+ nr_lines -= 1;
+ }
+ free(line_file_names);
}
static void JNICALL
diff --git a/tools/perf/perf-completion.sh b/tools/perf/perf-completion.sh
index 345f5d6e9ed5..fdf75d45efff 100644
--- a/tools/perf/perf-completion.sh
+++ b/tools/perf/perf-completion.sh
@@ -162,8 +162,37 @@ __perf_main ()
# List possible events for -e option
elif [[ $prev == @("-e"|"--event") &&
$prev_skip_opts == @(record|stat|top) ]]; then
- evts=$($cmd list --raw-dump)
- __perfcomp_colon "$evts" "$cur"
+
+ local cur1=${COMP_WORDS[COMP_CWORD]}
+ local raw_evts=$($cmd list --raw-dump)
+ local arr s tmp result
+
+ if [[ "$cur1" == */* && ${cur1#*/} =~ ^[A-Z] ]]; then
+ OLD_IFS="$IFS"
+ IFS=" "
+ arr=($raw_evts)
+ IFS="$OLD_IFS"
+
+ for s in ${arr[@]}
+ do
+ if [[ "$s" == *cpu/* ]]; then
+ tmp=${s#*cpu/}
+ result=$result" ""cpu/"${tmp^^}
+ else
+ result=$result" "$s
+ fi
+ done
+
+ evts=${result}" "$(ls /sys/bus/event_source/devices/cpu/events)
+ else
+ evts=${raw_evts}" "$(ls /sys/bus/event_source/devices/cpu/events)
+ fi
+
+ if [[ "$cur1" == , ]]; then
+ __perfcomp_colon "$evts" ""
+ else
+ __perfcomp_colon "$evts" "$cur1"
+ fi
else
# List subcommands for perf commands
if [[ $prev_skip_opts == @(kvm|kmem|mem|lock|sched|
@@ -246,11 +275,21 @@ fi
type perf &>/dev/null &&
_perf()
{
+ if [[ "$COMP_WORDBREAKS" != *,* ]]; then
+ COMP_WORDBREAKS="${COMP_WORDBREAKS},"
+ export COMP_WORDBREAKS
+ fi
+
+ if [[ "$COMP_WORDBREAKS" == *:* ]]; then
+ COMP_WORDBREAKS="${COMP_WORDBREAKS/:/}"
+ export COMP_WORDBREAKS
+ fi
+
local cur words cword prev
if [ $preload_get_comp_words_by_ref = "true" ]; then
- _get_comp_words_by_ref -n =: cur words cword prev
+ _get_comp_words_by_ref -n =:, cur words cword prev
else
- __perf_get_comp_words_by_ref -n =: cur words cword prev
+ __perf_get_comp_words_by_ref -n =:, cur words cword prev
fi
__perf_main
} &&
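
The completion change above leans on bash case-modification; a standalone sketch of the fold done for "cpu/" events (the event string is an assumed example):

  # strip the "cpu/" prefix, upper-case the remainder, then re-add the prefix,
  # mirroring the ${tmp^^} expansion used in __perf_main (needs bash 4+)
  s="cpu/cache-misses/"
  tmp=${s#*cpu/}
  echo "cpu/${tmp^^}"    # -> cpu/CACHE-MISSES/
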
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 62b13518bc6e..1b3fc8ec0fa2 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -73,7 +73,7 @@ static struct cmd_struct commands[] = {
{ "lock", cmd_lock, 0 },
{ "kvm", cmd_kvm, 0 },
{ "test", cmd_test, 0 },
-#ifdef HAVE_LIBAUDIT_SUPPORT
+#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE)
{ "trace", cmd_trace, 0 },
#endif
{ "inject", cmd_inject, 0 },
@@ -485,7 +485,7 @@ int main(int argc, const char **argv)
argv[0] = cmd;
}
if (strstarts(cmd, "trace")) {
-#ifdef HAVE_LIBAUDIT_SUPPORT
+#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE)
setup_path();
argv[0] = "trace";
return cmd_trace(argc, argv);
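
With a generated syscall table the trace subcommand no longer hard-requires audit-libs; a hedged build-and-run sketch (the NO_LIBAUDIT knob is assumed from the perf build system, not part of this patch):

  # build perf without libaudit and confirm 'trace' is still registered
  make -C tools/perf NO_LIBAUDIT=1
  ./tools/perf/perf trace -- sleep 1
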
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 2357f4ccc9c7..cfe46236a5e5 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -50,6 +50,7 @@ struct record_opts {
bool sample_time_set;
bool sample_cpu;
bool period;
+ bool period_set;
bool running_time;
bool full_auxtrace;
bool auxtrace_snapshot_mode;
diff --git a/tools/perf/pmu-events/arch/arm64/cavium/thunderx2-imp-def.json b/tools/perf/pmu-events/arch/arm64/cavium/thunderx2-imp-def.json
new file mode 100644
index 000000000000..2db45c40ebc7
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/cavium/thunderx2-imp-def.json
@@ -0,0 +1,62 @@
+[
+ {
+ "PublicDescription": "Attributable Level 1 data cache access, read",
+ "EventCode": "0x40",
+ "EventName": "l1d_cache_rd",
+ "BriefDescription": "L1D cache read",
+ },
+ {
+ "PublicDescription": "Attributable Level 1 data cache access, write ",
+ "EventCode": "0x41",
+ "EventName": "l1d_cache_wr",
+ "BriefDescription": "L1D cache write",
+ },
+ {
+ "PublicDescription": "Attributable Level 1 data cache refill, read",
+ "EventCode": "0x42",
+ "EventName": "l1d_cache_refill_rd",
+ "BriefDescription": "L1D cache refill read",
+ },
+ {
+ "PublicDescription": "Attributable Level 1 data cache refill, write",
+ "EventCode": "0x43",
+ "EventName": "l1d_cache_refill_wr",
+ "BriefDescription": "L1D refill write",
+ },
+ {
+ "PublicDescription": "Attributable Level 1 data TLB refill, read",
+ "EventCode": "0x4C",
+ "EventName": "l1d_tlb_refill_rd",
+ "BriefDescription": "L1D tlb refill read",
+ },
+ {
+ "PublicDescription": "Attributable Level 1 data TLB refill, write",
+ "EventCode": "0x4D",
+ "EventName": "l1d_tlb_refill_wr",
+ "BriefDescription": "L1D tlb refill write",
+ },
+ {
+ "PublicDescription": "Attributable Level 1 data or unified TLB access, read",
+ "EventCode": "0x4E",
+ "EventName": "l1d_tlb_rd",
+ "BriefDescription": "L1D tlb read",
+ },
+ {
+ "PublicDescription": "Attributable Level 1 data or unified TLB access, write",
+ "EventCode": "0x4F",
+ "EventName": "l1d_tlb_wr",
+ "BriefDescription": "L1D tlb write",
+ },
+ {
+ "PublicDescription": "Bus access read",
+ "EventCode": "0x60",
+ "EventName": "bus_access_rd",
+ "BriefDescription": "Bus access read",
+ },
+ {
+ "PublicDescription": "Bus access write",
+ "EventCode": "0x61",
+ "EventName": "bus_access_wr",
+ "BriefDescription": "Bus access write",
+ }
+]
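
Once jevents compiles the JSON above into the pmu-events tables, the new names should be usable directly by perf; a sketch with an arbitrary placeholder workload:

  # count the new ThunderX2 implementation-defined cache events by name
  perf stat -e l1d_cache_rd,l1d_cache_wr -- ./some_workload
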
diff --git a/tools/perf/pmu-events/arch/arm64/cortex-a53/branch.json b/tools/perf/pmu-events/arch/arm64/cortex-a53/branch.json
new file mode 100644
index 000000000000..3b6208763e50
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/cortex-a53/branch.json
@@ -0,0 +1,27 @@
+[
+ {,
+ "EventCode": "0x7A",
+ "EventName": "BR_INDIRECT_SPEC",
+ "BriefDescription": "Branch speculatively executed - Indirect branch"
+ },
+ {,
+ "EventCode": "0xC9",
+ "EventName": "BR_COND",
+ "BriefDescription": "Conditional branch executed"
+ },
+ {,
+ "EventCode": "0xCA",
+ "EventName": "BR_INDIRECT_MISPRED",
+ "BriefDescription": "Indirect branch mispredicted"
+ },
+ {,
+ "EventCode": "0xCB",
+ "EventName": "BR_INDIRECT_MISPRED_ADDR",
+ "BriefDescription": "Indirect branch mispredicted because of address miscompare"
+ },
+ {,
+ "EventCode": "0xCC",
+ "EventName": "BR_COND_MISPRED",
+ "BriefDescription": "Conditional branch mispredicted"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/cortex-a53/bus.json b/tools/perf/pmu-events/arch/arm64/cortex-a53/bus.json
new file mode 100644
index 000000000000..480d9f7460ab
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/cortex-a53/bus.json
@@ -0,0 +1,22 @@
+[
+ {,
+ "EventCode": "0x60",
+ "EventName": "BUS_ACCESS_LD",
+ "BriefDescription": "Bus access - Read"
+ },
+ {,
+ "EventCode": "0x61",
+ "EventName": "BUS_ACCESS_ST",
+ "BriefDescription": "Bus access - Write"
+ },
+ {,
+ "EventCode": "0xC0",
+ "EventName": "EXT_MEM_REQ",
+ "BriefDescription": "External memory request"
+ },
+ {,
+ "EventCode": "0xC1",
+ "EventName": "EXT_MEM_REQ_NC",
+ "BriefDescription": "Non-cacheable external memory request"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/cortex-a53/cache.json b/tools/perf/pmu-events/arch/arm64/cortex-a53/cache.json
new file mode 100644
index 000000000000..11baad6344b9
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/cortex-a53/cache.json
@@ -0,0 +1,27 @@
+[
+ {,
+ "EventCode": "0xC2",
+ "EventName": "PREFETCH_LINEFILL",
+ "BriefDescription": "Linefill because of prefetch"
+ },
+ {,
+ "EventCode": "0xC3",
+ "EventName": "PREFETCH_LINEFILL_DROP",
+ "BriefDescription": "Instruction Cache Throttle occurred"
+ },
+ {,
+ "EventCode": "0xC4",
+ "EventName": "READ_ALLOC_ENTER",
+ "BriefDescription": "Entering read allocate mode"
+ },
+ {,
+ "EventCode": "0xC5",
+ "EventName": "READ_ALLOC",
+ "BriefDescription": "Read allocate mode"
+ },
+ {,
+ "EventCode": "0xC8",
+ "EventName": "EXT_SNOOP",
+ "BriefDescription": "SCU Snooped data from another CPU for this CPU"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/cortex-a53/memory.json b/tools/perf/pmu-events/arch/arm64/cortex-a53/memory.json
new file mode 100644
index 000000000000..480d9f7460ab
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/cortex-a53/memory.json
@@ -0,0 +1,22 @@
+[
+ {,
+ "EventCode": "0x60",
+ "EventName": "BUS_ACCESS_LD",
+ "BriefDescription": "Bus access - Read"
+ },
+ {,
+ "EventCode": "0x61",
+ "EventName": "BUS_ACCESS_ST",
+ "BriefDescription": "Bus access - Write"
+ },
+ {,
+ "EventCode": "0xC0",
+ "EventName": "EXT_MEM_REQ",
+ "BriefDescription": "External memory request"
+ },
+ {,
+ "EventCode": "0xC1",
+ "EventName": "EXT_MEM_REQ_NC",
+ "BriefDescription": "Non-cacheable external memory request"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/cortex-a53/other.json b/tools/perf/pmu-events/arch/arm64/cortex-a53/other.json
new file mode 100644
index 000000000000..73a22402d003
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/cortex-a53/other.json
@@ -0,0 +1,32 @@
+[
+ {,
+ "EventCode": "0x86",
+ "EventName": "EXC_IRQ",
+ "BriefDescription": "Exception taken, IRQ"
+ },
+ {,
+ "EventCode": "0x87",
+ "EventName": "EXC_FIQ",
+ "BriefDescription": "Exception taken, FIQ"
+ },
+ {,
+ "EventCode": "0xC6",
+ "EventName": "PRE_DECODE_ERR",
+ "BriefDescription": "Pre-decode error"
+ },
+ {,
+ "EventCode": "0xD0",
+ "EventName": "L1I_CACHE_ERR",
+ "BriefDescription": "L1 Instruction Cache (data or tag) memory error"
+ },
+ {,
+ "EventCode": "0xD1",
+ "EventName": "L1D_CACHE_ERR",
+ "BriefDescription": "L1 Data Cache (data, tag or dirty) memory error, correctable or non-correctable"
+ },
+ {,
+ "EventCode": "0xD2",
+ "EventName": "TLB_ERR",
+ "BriefDescription": "TLB memory error"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/cortex-a53/pipeline.json b/tools/perf/pmu-events/arch/arm64/cortex-a53/pipeline.json
new file mode 100644
index 000000000000..3149fb90555a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/cortex-a53/pipeline.json
@@ -0,0 +1,52 @@
+[
+ {,
+ "EventCode": "0xC7",
+ "EventName": "STALL_SB_FULL",
+ "BriefDescription": "Data Write operation that stalls the pipeline because the store buffer is full"
+ },
+ {,
+ "EventCode": "0xE0",
+ "EventName": "OTHER_IQ_DEP_STALL",
+ "BriefDescription": "Cycles that the DPU IQ is empty and that is not because of a recent micro-TLB miss, instruction cache miss or pre-decode error"
+ },
+ {,
+ "EventCode": "0xE1",
+ "EventName": "IC_DEP_STALL",
+ "BriefDescription": "Cycles the DPU IQ is empty and there is an instruction cache miss being processed"
+ },
+ {,
+ "EventCode": "0xE2",
+ "EventName": "IUTLB_DEP_STALL",
+ "BriefDescription": "Cycles the DPU IQ is empty and there is an instruction micro-TLB miss being processed"
+ },
+ {,
+ "EventCode": "0xE3",
+ "EventName": "DECODE_DEP_STALL",
+ "BriefDescription": "Cycles the DPU IQ is empty and there is a pre-decode error being processed"
+ },
+ {,
+ "EventCode": "0xE4",
+ "EventName": "OTHER_INTERLOCK_STALL",
+ "BriefDescription": "Cycles there is an interlock other than Advanced SIMD/Floating-point instructions or load/store instruction"
+ },
+ {,
+ "EventCode": "0xE5",
+ "EventName": "AGU_DEP_STALL",
+ "BriefDescription": "Cycles there is an interlock for a load/store instruction waiting for data to calculate the address in the AGU"
+ },
+ {,
+ "EventCode": "0xE6",
+ "EventName": "SIMD_DEP_STALL",
+ "BriefDescription": "Cycles there is an interlock for an Advanced SIMD/Floating-point operation."
+ },
+ {,
+ "EventCode": "0xE7",
+ "EventName": "LD_DEP_STALL",
+ "BriefDescription": "Cycles there is a stall in the Wr stage because of a load miss"
+ },
+ {,
+ "EventCode": "0xE8",
+ "EventName": "ST_DEP_STALL",
+ "BriefDescription": "Cycles there is a stall in the Wr stage because of a store"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/mapfile.csv b/tools/perf/pmu-events/arch/arm64/mapfile.csv
new file mode 100644
index 000000000000..e61c9ca6cf9e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/mapfile.csv
@@ -0,0 +1,16 @@
+# Format:
+# MIDR,Version,JSON/file/pathname,Type
+#
+# where
+# MIDR Processor version
+# Variant[23:20] and Revision [3:0] should be zero.
+# Version could be used to track the version of the JSON file
+# but currently unused.
+# JSON/file/pathname is the path to JSON file, relative
+# to tools/perf/pmu-events/arch/arm64/.
+# Type is core, uncore etc
+#
+#
+#Family-model,Version,Filename,EventType
+0x00000000420f5160,v1,cavium,core
+0x00000000410fd03[[:xdigit:]],v1,cortex-a53,core
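
The MIDR column is matched as an extended regex, so the single cortex-a53 line covers every revision nibble; a quick shell check of that pattern (the MIDR value is an assumed example):

  midr=0x00000000410fd034
  echo "$midr" | grep -Eq '0x00000000410fd03[[:xdigit:]]' && echo "maps to cortex-a53"
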
diff --git a/tools/perf/pmu-events/arch/powerpc/mapfile.csv b/tools/perf/pmu-events/arch/powerpc/mapfile.csv
index a0f3a11ca19f..229150e7ab7d 100644
--- a/tools/perf/pmu-events/arch/powerpc/mapfile.csv
+++ b/tools/perf/pmu-events/arch/powerpc/mapfile.csv
@@ -13,13 +13,5 @@
#
# Power8 entries
-004b0000,1,power8,core
-004b0201,1,power8,core
-004c0000,1,power8,core
-004d0000,1,power8,core
-004d0100,1,power8,core
-004d0200,1,power8,core
-004c0100,1,power8,core
-004e0100,1,power9,core
-004e0200,1,power9,core
-004e1200,1,power9,core
+004[bcd][[:xdigit:]]{4},1,power8,core
+004e[[:xdigit:]]{4},1,power9,core
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/cache.json b/tools/perf/pmu-events/arch/powerpc/power9/cache.json
index 18f6645f2897..7945c5196c43 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/cache.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/cache.json
@@ -125,11 +125,6 @@
"BriefDescription": "Finish stall because the NTF instruction was a larx waiting to be satisfied"
},
{,
- "EventCode": "0x3006C",
- "EventName": "PM_RUN_CYC_SMT2_MODE",
- "BriefDescription": "Cycles in which this thread's run latch is set and the core is in SMT2 mode"
- },
- {,
"EventCode": "0x1C058",
"EventName": "PM_DTLB_MISS_16G",
"BriefDescription": "Data TLB Miss page size 16G"
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/frontend.json b/tools/perf/pmu-events/arch/powerpc/power9/frontend.json
index c63a919eda98..bd8361b5fd6a 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/frontend.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/frontend.json
@@ -1,10 +1,5 @@
[
{,
- "EventCode": "0x3E15C",
- "EventName": "PM_MRK_L2_TM_ST_ABORT_SISTER",
- "BriefDescription": "TM marked store abort for this thread"
- },
- {,
"EventCode": "0x25044",
"EventName": "PM_IPTEG_FROM_L31_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's L3 on the same chip due to a instruction side request"
@@ -369,4 +364,4 @@
"EventName": "PM_IPTEG_FROM_L31_ECO_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's ECO L3 on the same chip due to a instruction side request"
}
-]
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/marked.json b/tools/perf/pmu-events/arch/powerpc/power9/marked.json
index b9df54fb37e3..22f9f32060a8 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/marked.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/marked.json
@@ -1,10 +1,5 @@
[
{,
- "EventCode": "0x3C052",
- "EventName": "PM_DATA_SYS_PUMP_MPRED",
- "BriefDescription": "Final Pump Scope (system) mispredicted. Either the original scope was too small (Chip/Group) or the original scope was System and it should have been smaller. Counts for a demand load"
- },
- {,
"EventCode": "0x3013E",
"EventName": "PM_MRK_STALL_CMPLU_CYC",
"BriefDescription": "Number of cycles the marked instruction is experiencing a stall while it is next to complete (NTC)"
@@ -255,6 +250,11 @@
"BriefDescription": "A Page Directory Entry was reloaded to a level 1 page walk cache from the core's L3 data cache"
},
{,
+ "EventCode": "0x3C052",
+ "EventName": "PM_DATA_SYS_PUMP_MPRED",
+ "BriefDescription": "Final Pump Scope (system) mispredicted. Either the original scope was too small (Chip/Group) or the original scope was System and it should have been smaller. Counts for a demand load"
+ },
+ {,
"EventCode": "0x4D142",
"EventName": "PM_MRK_DATA_FROM_L3",
"BriefDescription": "The processor's data cache was reloaded from local core's L3 due to a marked load"
@@ -435,21 +435,6 @@
"BriefDescription": "ITLB Reloaded. Counts 1 per ITLB miss for HPT but multiple for radix depending on number of levels traveresed"
},
{,
- "EventCode": "0x2D024",
- "EventName": "PM_RADIX_PWC_L2_HIT",
- "BriefDescription": "A radix translation attempt missed in the TLB but hit on both the first and second levels of page walk cache."
- },
- {,
- "EventCode": "0x3F056",
- "EventName": "PM_RADIX_PWC_L3_HIT",
- "BriefDescription": "A radix translation attempt missed in the TLB but hit on the first, second, and third levels of page walk cache."
- },
- {,
- "EventCode": "0x4E014",
- "EventName": "PM_TM_TX_PASS_RUN_INST",
- "BriefDescription": "Run instructions spent in successful transactions"
- },
- {,
"EventCode": "0x1E044",
"EventName": "PM_DPTEG_FROM_L3_NO_CONFLICT",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 without conflict due to a data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
@@ -644,4 +629,4 @@
"EventName": "PM_MRK_BR_MPRED_CMPL",
"BriefDescription": "Marked Branch Mispredicted"
}
-]
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/other.json b/tools/perf/pmu-events/arch/powerpc/power9/other.json
index 54cc3be00fc2..5ce312973f1e 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/other.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/other.json
@@ -80,6 +80,11 @@
"BriefDescription": "A radix translation attempt missed in the TLB and all levels of page walk cache."
},
{,
+ "EventCode": "0x26882",
+ "EventName": "PM_L2_DC_INV",
+ "BriefDescription": "D-cache invalidates sent over the reload bus to the core"
+ },
+ {,
"EventCode": "0x24048",
"EventName": "PM_INST_FROM_LMEM",
"BriefDescription": "The processor's Instruction cache was reloaded from the local chip's Memory due to an instruction fetch (not prefetch)"
@@ -95,11 +100,6 @@
"BriefDescription": "Number of TM transactions that passed"
},
{,
- "EventCode": "0xD1A0",
- "EventName": "PM_MRK_LSU_FLUSH_LHS",
- "BriefDescription": "Effective Address alias flush : no EA match but Real Address match. If the data has not yet been returned for this load, the instruction will just be rejected, but if it has returned data, it will be flushed"
- },
- {,
"EventCode": "0xF088",
"EventName": "PM_LSU0_STORE_REJECT",
"BriefDescription": "All internal store rejects cause the instruction to go back to the SRQ and go to sleep until woken up to try again after the condition has been met"
@@ -127,7 +127,7 @@
{,
"EventCode": "0xD08C",
"EventName": "PM_LSU2_LDMX_FIN",
- "BriefDescription": "New P9 instruction LDMX. The definition of this new PMU event is (from the ldmx RFC02491): The thread has executed an ldmx instruction that accessed a doubleword that contains an effective address within an enabled section of the Load Monitored region. This event, therefore, should not occur if the FSCR has disabled the load monitored facility (FSCR[52]) or disabled the EBB facility (FSCR[56])"
+ "BriefDescription": "New P9 instruction LDMX. The definition of this new PMU event is (from the ldmx RFC02491): The thread has executed an ldmx instruction that accessed a doubleword that contains an effective address within an enabled section of the Load Monitored region. This event, therefore, should not occur if the FSCR has disabled the load monitored facility (FSCR[52]) or disabled the EBB facility (FSCR[56])."
},
{,
"EventCode": "0x300F8",
@@ -205,11 +205,6 @@
"BriefDescription": "Duration in cycles to reload with Modified (M) data from another core's ECO L3 on the same chip due to a marked load"
},
{,
- "EventCode": "0xF0B4",
- "EventName": "PM_DC_PREF_CONS_ALLOC",
- "BriefDescription": "Prefetch stream allocated in the conservative phase by either the hardware prefetch mechanism or software prefetch"
- },
- {,
"EventCode": "0xF894",
"EventName": "PM_LSU3_L1_CAM_CANCEL",
"BriefDescription": "ls3 l1 tm cam cancel"
@@ -220,21 +215,11 @@
"BriefDescription": "Dispatch Flush: TLBIE"
},
{,
- "EventCode": "0xD1A4",
- "EventName": "PM_MRK_LSU_FLUSH_SAO",
- "BriefDescription": "A load-hit-load condition with Strong Address Ordering will have address compare disabled and flush"
- },
- {,
"EventCode": "0x4E11E",
"EventName": "PM_MRK_DATA_FROM_DMEM_CYC",
"BriefDescription": "Duration in cycles to reload from another chip's memory on the same Node or Group (Distant) due to a marked load"
},
{,
- "EventCode": "0x5894",
- "EventName": "PM_LWSYNC",
- "BriefDescription": "Lwsync instruction decoded and transferred"
- },
- {,
"EventCode": "0x14156",
"EventName": "PM_MRK_DATA_FROM_L2_CYC",
"BriefDescription": "Duration in cycles to reload from local core's L2 due to a marked load"
@@ -245,11 +230,6 @@
"BriefDescription": "Read clearing SC"
},
{,
- "EventCode": "0x50A0",
- "EventName": "PM_HWSYNC",
- "BriefDescription": "Hwsync instruction decoded and transferred"
- },
- {,
"EventCode": "0x168B0",
"EventName": "PM_L3_P1_NODE_PUMP",
"BriefDescription": "L3 PF sent with nodal scope port 1, counts even retried requests"
@@ -265,6 +245,11 @@
"BriefDescription": "The processor's data cache was reloaded from local core's L2 with load hit store conflict due to a marked load"
},
{,
+ "EventCode": "0x468AE",
+ "EventName": "PM_L3_P3_CO_RTY",
+ "BriefDescription": "L3 CO received retry port 3 (memory only), every retry counted"
+ },
+ {,
"EventCode": "0x460A8",
"EventName": "PM_SN_HIT",
"BriefDescription": "Any port snooper hit L3. Up to 4 can happen in a cycle but we only count 1"
@@ -280,11 +265,6 @@
"BriefDescription": "Prefetch stream allocated by the hardware prefetch mechanism"
},
{,
- "EventCode": "0xF0BC",
- "EventName": "PM_LS2_UNALIGNED_ST",
- "BriefDescription": "Store instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the Store of that size. If the Store wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
- },
- {,
"EventCode": "0xD0AC",
"EventName": "PM_SRQ_SYNC_CYC",
"BriefDescription": "A sync is in the S2Q (edge detect to count)"
@@ -380,26 +360,11 @@
"BriefDescription": "Cycles in which this thread's run latch is set and the core is in SMT4 mode"
},
{,
- "EventCode": "0x5088",
- "EventName": "PM_DECODE_FUSION_OP_PRESERV",
- "BriefDescription": "Destructive op operand preservation"
- },
- {,
"EventCode": "0x1D14E",
"EventName": "PM_MRK_DATA_FROM_OFF_CHIP_CACHE_CYC",
"BriefDescription": "Duration in cycles to reload either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to a marked load"
},
{,
- "EventCode": "0x509C",
- "EventName": "PM_FORCED_NOP",
- "BriefDescription": "Instruction was forced to execute as a nop because it was found to behave like a nop (have no effect) at decode time"
- },
- {,
- "EventCode": "0xC098",
- "EventName": "PM_LS2_UNALIGNED_LD",
- "BriefDescription": "Load instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the load of that size. If the load wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
- },
- {,
"EventCode": "0x20058",
"EventName": "PM_DARQ1_10_12_ENTRIES",
"BriefDescription": "Cycles in which 10 or more DARQ1 entries (out of 12) are in use"
@@ -435,11 +400,6 @@
"BriefDescription": "All internal store rejects cause the instruction to go back to the SRQ and go to sleep until woken up to try again after the condition has been met"
},
{,
- "EventCode": "0x4505E",
- "EventName": "PM_FLOP_CMPL",
- "BriefDescription": "Floating Point Operation Finished"
- },
- {,
"EventCode": "0x1D144",
"EventName": "PM_MRK_DATA_FROM_L3_DISP_CONFLICT",
"BriefDescription": "The processor's data cache was reloaded from local core's L3 with dispatch conflict due to a marked load"
@@ -480,14 +440,9 @@
"BriefDescription": "XL-form branch was mispredicted due to the predicted target address missing from EAT. The EAT forces a mispredict in this case since there is no predicated target to validate. This is a rare case that may occur when the EAT is full and a branch is issued"
},
{,
- "EventCode": "0xC094",
- "EventName": "PM_LS0_UNALIGNED_LD",
- "BriefDescription": "Load instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the load of that size. If the load wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
- },
- {,
- "EventCode": "0xF8BC",
- "EventName": "PM_LS3_UNALIGNED_ST",
- "BriefDescription": "Store instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the Store of that size. If the Store wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
+ "EventCode": "0x460AE",
+ "EventName": "PM_L3_P2_CO_RTY",
+ "BriefDescription": "L3 CO received retry port 2 (memory only), every retry counted"
},
{,
"EventCode": "0x58B0",
@@ -505,11 +460,6 @@
"BriefDescription": "TM Store (fav or non-fav) ran into conflict (failed)"
},
{,
- "EventCode": "0xD998",
- "EventName": "PM_MRK_LSU_FLUSH_EMSH",
- "BriefDescription": "An ERAT miss was detected after a set-p hit. Erat tracker indicates fail due to tlbmiss and the instruction gets flushed because the instruction was working on the wrong address"
- },
- {,
"EventCode": "0xF8A0",
"EventName": "PM_NON_DATA_STORE",
"BriefDescription": "All ops that drain from s2q to L2 and contain no data"
@@ -525,11 +475,6 @@
"BriefDescription": "Unconditional Branch Completed. HW branch prediction was not used for this branch. This can be an I-form branch, a B-form branch with BO-field set to branch always, or a B-form branch which was covenrted to a Resolve."
},
{,
- "EventCode": "0x1F056",
- "EventName": "PM_RADIX_PWC_L1_HIT",
- "BriefDescription": "A radix translation attempt missed in the TLB and only the first level page walk cache was a hit."
- },
- {,
"EventCode": "0xF8A8",
"EventName": "PM_DC_PREF_FUZZY_CONF",
"BriefDescription": "A demand load referenced a line in an active fuzzy prefetch stream. The stream could have been allocated through the hardware prefetch mechanism or through software.Fuzzy stream confirm (out of order effects, or pf cant keep up)"
@@ -545,6 +490,11 @@
"BriefDescription": "Load tm L1 miss"
},
{,
+ "EventCode": "0xC880",
+ "EventName": "PM_LS1_LD_VECTOR_FIN",
+ "BriefDescription": ""
+ },
+ {,
"EventCode": "0x2894",
"EventName": "PM_TM_OUTER_TEND",
"BriefDescription": "Completion time outer tend"
@@ -565,21 +515,11 @@
"BriefDescription": "Marked derat reload (miss) for any page size"
},
{,
- "EventCode": "0x160A0",
- "EventName": "PM_L3_PF_MISS_L3",
- "BriefDescription": "L3 PF missed in L3"
- },
- {,
"EventCode": "0x1C04A",
"EventName": "PM_DATA_FROM_RL2L3_SHR",
"BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a demand load"
},
{,
- "EventCode": "0xD99C",
- "EventName": "PM_MRK_LSU_FLUSH_UE",
- "BriefDescription": "Correctable ECC error on reload data, reported at critical data forward time"
- },
- {,
"EventCode": "0x268B0",
"EventName": "PM_L3_P1_GRP_PUMP",
"BriefDescription": "L3 PF sent with grp scope port 1, counts even retried requests"
@@ -630,11 +570,6 @@
"BriefDescription": "addrs only req to L2 only on the first one,Indication that Load footprint is not expanding"
},
{,
- "EventCode": "0x5884",
- "EventName": "PM_DECODE_LANES_NOT_AVAIL",
- "BriefDescription": "Decode has something to transmit but dispatch lanes are not available"
- },
- {,
"EventCode": "0x3C042",
"EventName": "PM_DATA_FROM_L3_DISP_CONFLICT",
"BriefDescription": "The processor's data cache was reloaded from local core's L3 with dispatch conflict due to a demand load"
@@ -690,9 +625,9 @@
"BriefDescription": "False LHS match detected"
},
{,
- "EventCode": "0xD9A4",
- "EventName": "PM_MRK_LSU_FLUSH_LARX_STCX",
- "BriefDescription": "A larx is flushed because an older larx has an LMQ reservation for the same thread. A stcx is flushed because an older stcx is in the LMQ. The flush happens when the older larx/stcx relaunches"
+ "EventCode": "0xF0B0",
+ "EventName": "PM_L3_LD_PREF",
+ "BriefDescription": "L3 load prefetch, sourced from a hardware or software stream, was sent to the nest"
},
{,
"EventCode": "0x4D012",
@@ -715,9 +650,9 @@
"BriefDescription": "All successful Ld/St dispatches for this thread that were an L2 miss (excludes i_l2mru_tch_reqs)"
},
{,
- "EventCode": "0xF8B8",
- "EventName": "PM_LS1_UNALIGNED_ST",
- "BriefDescription": "Store instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the Store of that size. If the Store wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
+ "EventCode": "0x160A0",
+ "EventName": "PM_L3_PF_MISS_L3",
+ "BriefDescription": "L3 PF missed in L3"
},
{,
"EventCode": "0x408C",
@@ -765,11 +700,6 @@
"BriefDescription": "Completion time nested tend"
},
{,
- "EventCode": "0x36084",
- "EventName": "PM_L2_RCST_DISP",
- "BriefDescription": "All D-side store dispatch attempts for this thread"
- },
- {,
"EventCode": "0x368A0",
"EventName": "PM_L3_PF_OFF_CHIP_CACHE",
"BriefDescription": "L3 PF from Off chip cache"
@@ -830,11 +760,6 @@
"BriefDescription": "Rotating sample of 16 snoop valids"
},
{,
- "EventCode": "0x16084",
- "EventName": "PM_L2_RCLD_DISP",
- "BriefDescription": "All I-or-D side load dispatch attempts for this thread (excludes i_l2mru_tch_reqs)"
- },
- {,
"EventCode": "0x1608C",
"EventName": "PM_RC0_BUSY",
"BriefDescription": "RC mach 0 Busy. Used by PMU to sample ave RC lifetime (mach0 used as sample point)"
@@ -842,7 +767,7 @@
{,
"EventCode": "0x36082",
"EventName": "PM_L2_LD_DISP",
- "BriefDescription": "All successful I-or-D side load dispatches for this thread (excludes i_l2mru_tch_reqs)."
+ "BriefDescription": "All successful I-or-D side load dispatches for this thread (excludes i_l2mru_tch_reqs)"
},
{,
"EventCode": "0xF8B0",
@@ -905,11 +830,6 @@
"BriefDescription": "Instruction prefetch requests"
},
{,
- "EventCode": "0xC898",
- "EventName": "PM_LS3_UNALIGNED_LD",
- "BriefDescription": "Load instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the load of that size. If the load wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
- },
- {,
"EventCode": "0x488C",
"EventName": "PM_IC_PREF_WRITE",
"BriefDescription": "Instruction prefetch written into IL1"
@@ -1017,7 +937,7 @@
{,
"EventCode": "0x3E05E",
"EventName": "PM_L3_CO_MEPF",
- "BriefDescription": "L3 castouts in Mepf state for this thread"
+ "BriefDescription": "L3 CO of line in Mep state (includes casthrough to memory). The Mepf state indicates that a line was brought in to satisfy an L3 prefetch request"
},
{,
"EventCode": "0x460A2",
@@ -1205,11 +1125,6 @@
"BriefDescription": "Non transactional conflict from LSU, gets reported to TEXASR"
},
{,
- "EventCode": "0xD198",
- "EventName": "PM_MRK_LSU_FLUSH_ATOMIC",
- "BriefDescription": "Quad-word loads (lq) are considered atomic because they always span at least 2 slices. If a snoop or store from another thread changes the data the load is accessing between the 2 or 3 pieces of the lq instruction, the lq will be flushed"
- },
- {,
"EventCode": "0x201E0",
"EventName": "PM_MRK_DATA_FROM_MEMORY",
"BriefDescription": "The processor's data cache was reloaded from a memory location including L4 from local remote or distant due to a marked load"
@@ -1295,11 +1210,6 @@
"BriefDescription": "Ict empty for this thread due to dispatch holds because the History Buffer was full. Could be GPR/VSR/VMR/FPR/CR/XVF; CR; XVF (XER/VSCR/FPSCR)"
},
{,
- "EventCode": "0xC894",
- "EventName": "PM_LS1_UNALIGNED_LD",
- "BriefDescription": "Load instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the load of that size. If the load wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
- },
- {,
"EventCode": "0x360A2",
"EventName": "PM_L3_L2_CO_HIT",
"BriefDescription": "L2 CO hits"
@@ -1325,11 +1235,6 @@
"BriefDescription": "L2 Castouts - Shared (Tx,Sx)"
},
{,
- "EventCode": "0xD884",
- "EventName": "PM_LSU3_SET_MPRED",
- "BriefDescription": "Set prediction(set-p) miss. The entry was not found in the Set prediction table"
- },
- {,
"EventCode": "0x26092",
"EventName": "PM_L2_LD_MISS_64B",
"BriefDescription": "All successful D-side load dispatches that were an L2 miss (NOT Sx,Tx,Mx) for this thread and the RC calculated the request should be for 64B(i.e., M=1)"
@@ -1362,12 +1267,12 @@
{,
"EventCode": "0xD8A8",
"EventName": "PM_ISLB_MISS",
- "BriefDescription": "Instruction SLB miss - Total of all segment sizes"
+ "BriefDescription": "Instruction SLB Miss - Total of all segment sizes"
},
{,
- "EventCode": "0xD19C",
- "EventName": "PM_MRK_LSU_FLUSH_RELAUNCH_MISS",
- "BriefDescription": "If a load that has already returned data and has to relaunch for any reason then gets a miss (erat, setp, data cache), it will often be flushed at relaunch time because the data might be inconsistent"
+ "EventCode": "0x368AE",
+ "EventName": "PM_L3_P1_CO_RTY",
+ "BriefDescription": "L3 CO received retry port 1 (memory only), every retry counted"
},
{,
"EventCode": "0x260A2",
@@ -1385,6 +1290,11 @@
"BriefDescription": "Completion stall because the ISU is updating the TEXASR to keep track of the nested tbegin. This is a short delay, and it includes ROT"
},
{,
+ "EventCode": "0xC084",
+ "EventName": "PM_LS2_LD_VECTOR_FIN",
+ "BriefDescription": ""
+ },
+ {,
"EventCode": "0x1608E",
"EventName": "PM_ST_CAUSED_FAIL",
"BriefDescription": "Non-TM Store caused any thread to fail"
@@ -1410,11 +1320,6 @@
"BriefDescription": "Continuous 16 cycle (2to1) window where this signals rotates thru sampling each CO machine busy. PMU uses this wave to then do 16 cyc count to sample total number of machs running"
},
{,
- "EventCode": "0xD084",
- "EventName": "PM_LSU2_SET_MPRED",
- "BriefDescription": "Set prediction(set-p) miss. The entry was not found in the Set prediction table"
- },
- {,
"EventCode": "0x48B8",
"EventName": "PM_BR_MPRED_TAKEN_TA",
"BriefDescription": "Conditional Branch Completed that was Mispredicted due to the Target Address Prediction from the Count Cache or Link Stack. Only XL-form branches that resolved Taken set this event."
@@ -1450,29 +1355,24 @@
"BriefDescription": "A demand load referenced a line in an active strided prefetch stream. The stream could have been allocated through the hardware prefetch mechanism or through software."
},
{,
+ "EventCode": "0x36084",
+ "EventName": "PM_L2_RCST_DISP",
+ "BriefDescription": "All D-side store dispatch attempts for this thread"
+ },
+ {,
"EventCode": "0x45054",
"EventName": "PM_FMA_CMPL",
"BriefDescription": "two flops operation completed (fmadd, fnmadd, fmsub, fnmsub) Scalar instructions only. "
},
{,
- "EventCode": "0x5090",
- "EventName": "PM_SHL_ST_DISABLE",
- "BriefDescription": "Store-Hit-Load Table Read Hit with entry Disabled (entry was disabled due to the entry shown to not prevent the flush)"
- },
- {,
"EventCode": "0x201E8",
"EventName": "PM_THRESH_EXC_512",
"BriefDescription": "Threshold counter exceeded a value of 512"
},
{,
- "EventCode": "0x5084",
- "EventName": "PM_DECODE_FUSION_EXT_ADD",
- "BriefDescription": "32-bit extended addition"
- },
- {,
"EventCode": "0x36080",
"EventName": "PM_L2_INST",
- "BriefDescription": "All successful I-side dispatches for this thread (excludes i_l2mru_tch reqs)."
+ "BriefDescription": "All successful I-side dispatches for this thread (excludes i_l2mru_tch reqs)"
},
{,
"EventCode": "0x3504C",
@@ -1555,21 +1455,11 @@
"BriefDescription": "Memory Read With Intent to Modify for this thread"
},
{,
- "EventCode": "0x26882",
- "EventName": "PM_L2_DC_INV",
- "BriefDescription": "D-cache invalidates sent over the reload bus to the core"
- },
- {,
"EventCode": "0xC090",
"EventName": "PM_LSU_STCX",
"BriefDescription": "STCX sent to nest, i.e. total"
},
{,
- "EventCode": "0xD080",
- "EventName": "PM_LSU0_SET_MPRED",
- "BriefDescription": "Set prediction(set-p) miss. The entry was not found in the Set prediction table"
- },
- {,
"EventCode": "0x2C120",
"EventName": "PM_MRK_DATA_FROM_L2_NO_CONFLICT",
"BriefDescription": "The processor's data cache was reloaded from local core's L2 without conflict due to a marked load"
@@ -1610,11 +1500,6 @@
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 without conflict due to a instruction side request"
},
{,
- "EventCode": "0xD9A0",
- "EventName": "PM_MRK_LSU_FLUSH_LHL_SHL",
- "BriefDescription": "The instruction was flushed because of a sequential load/store consistency. If a load or store hits on an older load that has either been snooped (for loads) or has stale data (for stores)."
- },
- {,
"EventCode": "0x35042",
"EventName": "PM_IPTEG_FROM_L3_DISP_CONFLICT",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 with dispatch conflict due to a instruction side request"
@@ -1692,7 +1577,7 @@
{,
"EventCode": "0x2001A",
"EventName": "PM_NTC_ALL_FIN",
- "BriefDescription": "Cycles after all instructions have finished to group completed"
+ "BriefDescription": "Cycles after instruction finished to instruction completed."
},
{,
"EventCode": "0x3005A",
@@ -1710,6 +1595,11 @@
"BriefDescription": "ls1 l1 tm cam cancel"
},
{,
+ "EventCode": "0x268AE",
+ "EventName": "PM_L3_P3_PF_RTY",
+ "BriefDescription": "L3 PF received retry port 3, every retry counted"
+ },
+ {,
"EventCode": "0xE884",
"EventName": "PM_LS1_ERAT_MISS_PREF",
"BriefDescription": "LS1 Erat miss due to prefetch"
@@ -1742,7 +1632,7 @@
{,
"EventCode": "0x160B6",
"EventName": "PM_L3_WI0_BUSY",
- "BriefDescription": "Rotating sample of 8 WI valid"
+ "BriefDescription": "Rotating sample of 8 WI valid (duplicate)"
},
{,
"EventCode": "0x368AC",
@@ -1790,9 +1680,9 @@
"BriefDescription": "L2 guess system (VGS or RNS) and guess was correct (ie data beyond-group)"
},
{,
- "EventCode": "0x589C",
- "EventName": "PM_PTESYNC",
- "BriefDescription": "ptesync instruction counted when the instruction is decoded and transmitted"
+ "EventCode": "0x260AE",
+ "EventName": "PM_L3_P2_PF_RTY",
+ "BriefDescription": "L3 PF received retry port 2, every retry counted"
},
{,
"EventCode": "0x26086",
@@ -1825,6 +1715,11 @@
"BriefDescription": "Store-Hit-Load Table Read Hit with entry Enabled"
},
{,
+ "EventCode": "0x46882",
+ "EventName": "PM_L2_ST_HIT",
+ "BriefDescription": "All successful D-side store dispatches for this thread that were L2 hits"
+ },
+ {,
"EventCode": "0x360AC",
"EventName": "PM_L3_SN0_BUSY",
"BriefDescription": "Lifetime, sample of snooper machine 0 valid"
@@ -1845,11 +1740,6 @@
"BriefDescription": "All successful D-Side Store dispatches that were an L2 miss for this thread"
},
{,
- "EventCode": "0xF8B4",
- "EventName": "PM_DC_PREF_XCONS_ALLOC",
- "BriefDescription": "Prefetch stream allocated in the Ultra conservative phase by either the hardware prefetch mechanism or software prefetch"
- },
- {,
"EventCode": "0x35048",
"EventName": "PM_IPTEG_FROM_DL2L3_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a instruction side request"
@@ -1970,11 +1860,6 @@
"BriefDescription": "Cycles thread running at priority level 2 or 3"
},
{,
- "EventCode": "0x10134",
- "EventName": "PM_MRK_ST_DONE_L2",
- "BriefDescription": "marked store completed in L2 ( RC machine done)"
- },
- {,
"EventCode": "0x368B2",
"EventName": "PM_L3_GRP_GUESS_WRONG_HIGH",
"BriefDescription": "Initial scope=group (GS or NNS) but data from local node. Prediction too high"
@@ -2005,11 +1890,6 @@
"BriefDescription": "L2 guess grp (GS or NNS) and guess was not correct (ie data on-chip OR beyond-group)"
},
{,
- "EventCode": "0x368AE",
- "EventName": "PM_L3_P1_CO_RTY",
- "BriefDescription": "L3 CO received retry port 1 (memory only), every retry counted"
- },
- {,
"EventCode": "0xC0AC",
"EventName": "PM_LSU_FLUSH_EMSH",
"BriefDescription": "An ERAT miss was detected after a set-p hit. Erat tracker indicates fail due to tlbmiss and the instruction gets flushed because the instruction was working on the wrong address"
@@ -2035,11 +1915,6 @@
"BriefDescription": "RC requests that were on group (aka nodel) pump attempts"
},
{,
- "EventCode": "0xF0B0",
- "EventName": "PM_L3_LD_PREF",
- "BriefDescription": "L3 load prefetch, sourced from a hardware or software stream, was sent to the nest"
- },
- {,
"EventCode": "0x16080",
"EventName": "PM_L2_LD",
"BriefDescription": "All successful D-side Load dispatches for this thread (L2 miss + L2 hits)"
@@ -2050,6 +1925,11 @@
"BriefDescription": "Math flop instruction completed"
},
{,
+ "EventCode": "0xC080",
+ "EventName": "PM_LS0_LD_VECTOR_FIN",
+ "BriefDescription": ""
+ },
+ {,
"EventCode": "0x368B0",
"EventName": "PM_L3_P1_SYS_PUMP",
"BriefDescription": "L3 PF sent with sys scope port 1, counts even retried requests"
@@ -2120,11 +2000,6 @@
"BriefDescription": "Conditional Branch Completed in which the HW correctly predicted the direction as taken. Counted at completion time"
},
{,
- "EventCode": "0xF0B8",
- "EventName": "PM_LS0_UNALIGNED_ST",
- "BriefDescription": "Store instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the Store of that size. If the Store wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
- },
- {,
"EventCode": "0x20132",
"EventName": "PM_MRK_DFU_FIN",
"BriefDescription": "Decimal Unit marked Instruction Finish"
@@ -2140,6 +2015,11 @@
"BriefDescription": "Effective Address alias flush : no EA match but Real Address match. If the data has not yet been returned for this load, the instruction will just be rejected, but if it has returned data, it will be flushed"
},
{,
+ "EventCode": "0x16084",
+ "EventName": "PM_L2_RCLD_DISP",
+ "BriefDescription": "All I-or-D side load dispatch attempts for this thread (excludes i_l2mru_tch_reqs)"
+ },
+ {,
"EventCode": "0x3F150",
"EventName": "PM_MRK_ST_DRAIN_TO_L2DISP_CYC",
"BriefDescription": "cycles to drain st from core to L2"
@@ -2225,11 +2105,6 @@
"BriefDescription": "Prefetch Canceled due to page boundary"
},
{,
- "EventCode": "0xF09C",
- "EventName": "PM_SLB_TABLEWALK_CYC",
- "BriefDescription": "Cycles when a tablewalk is pending on this thread on the SLB table"
- },
- {,
"EventCode": "0x460AA",
"EventName": "PM_L3_P0_CO_L31",
"BriefDescription": "L3 CO to L3.1 (LCO) port 0 with or without data"
@@ -2247,10 +2122,10 @@
{,
"EventCode": "0x46082",
"EventName": "PM_L2_ST_DISP",
- "BriefDescription": "All successful D-side store dispatches for this thread "
+ "BriefDescription": "All successful D-side store dispatches for this thread (L2 miss + L2 hits)"
},
{,
- "EventCode": "0x4609E",
+ "EventCode": "0x36880",
"EventName": "PM_L2_INST_MISS",
"BriefDescription": "All successful I-side dispatches that were an L2 miss for this thread (excludes i_l2mru_tch reqs)"
},
@@ -2340,9 +2215,9 @@
"BriefDescription": "All ISU rejects"
},
{,
- "EventCode": "0x46882",
- "EventName": "PM_L2_ST_HIT",
- "BriefDescription": "All successful D-side store dispatches for this thread that were L2 hits"
+ "EventCode": "0xC884",
+ "EventName": "PM_LS3_LD_VECTOR_FIN",
+ "BriefDescription": ""
},
{,
"EventCode": "0x360A8",
@@ -2360,11 +2235,6 @@
"BriefDescription": "Asserts when a i=1 store op is sent to the nest. No record of issue pipe (LS0/LS1) is maintained so this is for both pipes. Probably don't need separate LS0 and LS1"
},
{,
- "EventCode": "0xD880",
- "EventName": "PM_LSU1_SET_MPRED",
- "BriefDescription": "Set prediction(set-p) miss. The entry was not found in the Set prediction table"
- },
- {,
"EventCode": "0xD0B8",
"EventName": "PM_LSU_LMQ_FULL_CYC",
"BriefDescription": "Counts the number of cycles the LMQ is full"
@@ -2389,4 +2259,4 @@
"EventName": "PM_L3_PF_USAGE",
"BriefDescription": "Rotating sample of 32 PF actives"
}
-]
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json b/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json
index bc2db636dabf..5af1abbe82c4 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json
@@ -125,6 +125,11 @@
"BriefDescription": "Overflow from counter 5"
},
{,
+ "EventCode": "0x4505E",
+ "EventName": "PM_FLOP_CMPL",
+ "BriefDescription": "Floating Point Operation Finished"
+ },
+ {,
"EventCode": "0x2C018",
"EventName": "PM_CMPLU_STALL_DMISS_L21_L31",
"BriefDescription": "Completion stall by Dcache miss which resolved on chip ( excluding local L2/L3)"
@@ -390,11 +395,6 @@
"BriefDescription": "Ict empty for this thread due to branch mispred"
},
{,
- "EventCode": "0x3405E",
- "EventName": "PM_IFETCH_THROTTLE",
- "BriefDescription": "Cycles in which Instruction fetch throttle was active."
- },
- {,
"EventCode": "0x1F148",
"EventName": "PM_MRK_DPTEG_FROM_ON_CHIP_CACHE",
"BriefDescription": "A Page Table Entry was loaded into the TLB either shared or modified data from another core's L2/L3 on the same chip due to a marked data side request.. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
@@ -422,7 +422,7 @@
{,
"EventCode": "0xD0A8",
"EventName": "PM_DSLB_MISS",
- "BriefDescription": "Data SLB Miss - Total of all segment sizes"
+ "BriefDescription": "gate_and(sd_pc_c0_comp_valid AND sd_pc_c0_comp_thread(0:1)=tid,sd_pc_c0_comp_ppc_count(0:3)) + gate_and(sd_pc_c1_comp_valid AND sd_pc_c1_comp_thread(0:1)=tid,sd_pc_c1_comp_ppc_count(0:3))"
},
{,
"EventCode": "0x4C058",
@@ -549,4 +549,4 @@
"EventName": "PM_MRK_DATA_FROM_L21_SHR_CYC",
"BriefDescription": "Duration in cycles to reload with Shared (S) data from another core's L2 on the same chip due to a marked load"
}
-]
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/pmc.json b/tools/perf/pmu-events/arch/powerpc/power9/pmc.json
index 3ef8a10aac86..d0b89f930567 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/pmc.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/pmc.json
@@ -119,4 +119,4 @@
"EventName": "PM_1FLOP_CMPL",
"BriefDescription": "one flop (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg) operation completed"
}
-]
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/translation.json b/tools/perf/pmu-events/arch/powerpc/power9/translation.json
index 8c0f12024afa..bc8e03d7a6b0 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/translation.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/translation.json
@@ -90,11 +90,6 @@
"BriefDescription": "stcx failed"
},
{,
- "EventCode": "0x20112",
- "EventName": "PM_MRK_NTF_FIN",
- "BriefDescription": "Marked next to finish instruction finished"
- },
- {,
"EventCode": "0x300F0",
"EventName": "PM_ST_MISS_L1",
"BriefDescription": "Store Missed L1"
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/cache.json b/tools/perf/pmu-events/arch/x86/broadwell/cache.json
index 73688a9dab2a..bba3152ec54a 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/cache.json
@@ -10,13 +10,30 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "UMask": "0x22",
+ "EventName": "L2_RQSTS.RFO_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "BriefDescription": "RFO requests that miss L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache misses when fetching instructions.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x27",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand requests that miss L2 cache.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -30,6 +47,43 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3f",
+ "EventName": "L2_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "All requests that miss L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x42",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that hit L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x44",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts the number of requests from the L2 hardware prefetchers that hit L2 cache. L3 prefetch new types.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -70,6 +124,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe7",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand requests to L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts the total number of requests from the L2 hardware prefetchers.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -80,6 +143,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xff",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "All L2 requests.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts the number of WB requests that hit L2 cache.",
"EventCode": "0x27",
"Counter": "0,1,2,3",
@@ -131,6 +203,27 @@
"CounterHTOff": "2"
},
{
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "CounterMask": "1",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "SampleAfterValue": "2000003",
+        "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers unavailability.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
"EventCode": "0x51",
"Counter": "0,1,2,3",
@@ -152,7 +245,30 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The \"Offcore outstanding\" state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "PublicDescription": "This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "BDM76",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "BDM76",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The Offcore outstanding state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -174,26 +290,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "PublicDescription": "This event counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The Offcore outstanding state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x8",
+ "UMask": "0x4",
"Errata": "BDM76",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "PublicDescription": "This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "UMask": "0x8",
"Errata": "BDM76",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
- "CounterMask": "1",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -209,18 +325,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The \"Offcore outstanding\" state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "Errata": "BDM76",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PublicDescription": "This event counts the number of cycles when the L1D is locked. It is a superset of the 0x1 mask (BUS_LOCK_CLOCKS.BUS_LOCK_DURATION).",
"EventCode": "0x63",
"Counter": "0,1,2,3",
@@ -261,7 +365,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable \"Demands\" and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable Demands and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
"EventCode": "0xB0",
"Counter": "0,1,2,3",
"UMask": "0x8",
@@ -281,152 +385,161 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xB7, 0xBB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x11",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that miss the STLB.",
+ "BriefDescription": "Retired load uops that miss the STLB. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts store uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts store uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x12",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that miss the STLB.",
+ "BriefDescription": "Retired store uops that miss the STLB. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1",
"L1_Hit_Indication": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts load uops with locked access retired to the architected path.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops with locked access retired to the architected path.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x21",
"Errata": "BDM35",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired load uops with locked access.",
+ "BriefDescription": "Retired load uops with locked access. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x41",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary.(Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x42",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1",
"L1_Hit_Indication": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This event counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.",
+        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x81",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"SampleAfterValue": "2000003",
- "BriefDescription": "All retired load uops.",
+ "BriefDescription": "All retired load uops. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This event counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement.",
+        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x82",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"SampleAfterValue": "2000003",
- "BriefDescription": "All retired store uops.",
+        "BriefDescription": "All retired store uops. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1",
"L1_Hit_Indication": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
+        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
"SampleAfterValue": "2000003",
- "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the mid-level (L2) cache.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were hits in the mid-level (L2) cache.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x2",
"Errata": "BDM35",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x4",
"Errata": "BDM100",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
"SampleAfterValue": "50021",
- "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
+ "BriefDescription": "Hit in last-level (L3) cache. Excludes Unknown data-source. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops misses in L1 cache as data sources.",
+ "BriefDescription": "Retired load uops misses in L1 cache as data sources. Uses PEBS.",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x10",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
"SampleAfterValue": "50021",
- "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Retired load uops with L2 cache misses as data sources. Uses PEBS.",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
@@ -438,84 +551,83 @@
"Errata": "BDM100, BDE70",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_MISS",
"SampleAfterValue": "100007",
- "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source. (Precise Event - PEBS).",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x40",
"EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x1",
"Errata": "BDM100",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x2",
"Errata": "BDM100",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x4",
"Errata": "BDM100",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x8",
"Errata": "BDM100",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_NONE",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required.",
+ "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI).",
+ "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches. This is a precise event.",
"EventCode": "0xD3",
"Counter": "0,1,2,3",
"UMask": "0x1",
"Errata": "BDE70, BDM100",
"EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
"SampleAfterValue": "100007",
- "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
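The remaining hunks drop the duplicated L2_RQSTS/OFFCORE entries that now live earlier in the file and attach PublicDescription text to the OFFCORE_RESPONSE variants. Each of those variants pairs EventCode 0xB7/0xBB and UMask 0x1 with an MSRValue that selects the request/response/snoop bits. As a hedged sketch of what that pairing amounts to at the syscall level, assuming the usual Intel convention where perf passes the offcore response MSR value through attr.config1, the demand-data-read any-response entry with MSRValue 0x0000010001 could be opened roughly like this:

/* Hedged sketch: open one OFFCORE_RESPONSE event from the hunks below.
 * Assumes event select/umask go in attr.config and the JSON MSRValue
 * (offcore response MSR contents) goes in attr.config1; no error handling. */
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int open_offcore_demand_data_rd_any(pid_t pid)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x01B7;           /* EventCode 0xB7, UMask 0x1 */
	attr.config1 = 0x0000010001ULL; /* MSRValue: demand data reads, any response */
	attr.disabled = 1;

	return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
}

int main(void)
{
	int fd = open_offcore_demand_data_rd_any(0);	/* measure this process */

	return fd < 0 ? 1 : 0;
}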
@@ -659,119 +771,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x42",
- "EventName": "L2_RQSTS.RFO_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that hit L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x22",
- "EventName": "L2_RQSTS.RFO_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that miss L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x44",
- "EventName": "L2_RQSTS.CODE_RD_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 cache misses when fetching instructions.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x27",
- "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand requests that miss L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xe7",
- "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand requests to L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x3f",
- "EventName": "L2_RQSTS.MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "All requests that miss L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xff",
- "EventName": "L2_RQSTS.REFERENCES",
- "SampleAfterValue": "200003",
- "BriefDescription": "All L2 requests.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "BDM76",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "CounterMask": "1",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
+ "PublicDescription": "Counts demand data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010001 ",
"Counter": "0,1,2,3",
@@ -784,6 +784,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020001 ",
"Counter": "0,1,2,3",
@@ -796,6 +797,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020001 ",
"Counter": "0,1,2,3",
@@ -808,6 +810,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020001 ",
"Counter": "0,1,2,3",
@@ -820,6 +823,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020001 ",
"Counter": "0,1,2,3",
@@ -832,6 +836,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020001 ",
"Counter": "0,1,2,3",
@@ -844,6 +849,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020001 ",
"Counter": "0,1,2,3",
@@ -856,6 +862,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0001 ",
"Counter": "0,1,2,3",
@@ -868,6 +875,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0001 ",
"Counter": "0,1,2,3",
@@ -880,6 +888,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0001 ",
"Counter": "0,1,2,3",
@@ -892,6 +901,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0001 ",
"Counter": "0,1,2,3",
@@ -904,6 +914,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0001 ",
"Counter": "0,1,2,3",
@@ -916,6 +927,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0001 ",
"Counter": "0,1,2,3",
@@ -928,6 +940,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010002 ",
"Counter": "0,1,2,3",
@@ -940,6 +953,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0002 ",
"Counter": "0,1,2,3",
@@ -952,6 +966,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0002 ",
"Counter": "0,1,2,3",
@@ -964,6 +979,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0002 ",
"Counter": "0,1,2,3",
@@ -976,6 +992,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0002 ",
"Counter": "0,1,2,3",
@@ -988,6 +1005,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0002 ",
"Counter": "0,1,2,3",
@@ -1000,6 +1018,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0002 ",
"Counter": "0,1,2,3",
@@ -1012,6 +1031,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010004 ",
"Counter": "0,1,2,3",
@@ -1024,6 +1044,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020004 ",
"Counter": "0,1,2,3",
@@ -1036,6 +1057,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020004 ",
"Counter": "0,1,2,3",
@@ -1048,6 +1070,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020004 ",
"Counter": "0,1,2,3",
@@ -1060,6 +1083,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020004 ",
"Counter": "0,1,2,3",
@@ -1072,6 +1096,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020004 ",
"Counter": "0,1,2,3",
@@ -1084,6 +1109,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020004 ",
"Counter": "0,1,2,3",
@@ -1096,6 +1122,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0004 ",
"Counter": "0,1,2,3",
@@ -1108,6 +1135,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0004 ",
"Counter": "0,1,2,3",
@@ -1120,6 +1148,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0004 ",
"Counter": "0,1,2,3",
@@ -1132,6 +1161,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0004 ",
"Counter": "0,1,2,3",
@@ -1144,6 +1174,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0004 ",
"Counter": "0,1,2,3",
@@ -1156,6 +1187,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0004 ",
"Counter": "0,1,2,3",
@@ -1168,6 +1200,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts writebacks (modified to exclusive) that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010008 ",
"Counter": "0,1,2,3",
@@ -1180,6 +1213,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020008 ",
"Counter": "0,1,2,3",
@@ -1192,6 +1226,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020008 ",
"Counter": "0,1,2,3",
@@ -1204,6 +1239,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020008 ",
"Counter": "0,1,2,3",
@@ -1216,6 +1252,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020008 ",
"Counter": "0,1,2,3",
@@ -1228,6 +1265,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020008 ",
"Counter": "0,1,2,3",
@@ -1240,6 +1278,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020008 ",
"Counter": "0,1,2,3",
@@ -1252,6 +1291,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0008 ",
"Counter": "0,1,2,3",
@@ -1264,6 +1304,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0008 ",
"Counter": "0,1,2,3",
@@ -1276,6 +1317,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0008 ",
"Counter": "0,1,2,3",
@@ -1288,6 +1330,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0008 ",
"Counter": "0,1,2,3",
@@ -1300,6 +1343,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0008 ",
"Counter": "0,1,2,3",
@@ -1312,6 +1356,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0008 ",
"Counter": "0,1,2,3",
@@ -1324,6 +1369,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010010 ",
"Counter": "0,1,2,3",
@@ -1336,6 +1382,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020010 ",
"Counter": "0,1,2,3",
@@ -1348,6 +1395,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020010 ",
"Counter": "0,1,2,3",
@@ -1360,6 +1408,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020010 ",
"Counter": "0,1,2,3",
@@ -1372,6 +1421,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020010 ",
"Counter": "0,1,2,3",
@@ -1384,6 +1434,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020010 ",
"Counter": "0,1,2,3",
@@ -1396,6 +1447,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020010 ",
"Counter": "0,1,2,3",
@@ -1408,6 +1460,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0010 ",
"Counter": "0,1,2,3",
@@ -1420,6 +1473,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0010 ",
"Counter": "0,1,2,3",
@@ -1432,6 +1486,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0010 ",
"Counter": "0,1,2,3",
@@ -1444,6 +1499,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0010 ",
"Counter": "0,1,2,3",
@@ -1456,6 +1512,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0010 ",
"Counter": "0,1,2,3",
@@ -1468,6 +1525,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0010 ",
"Counter": "0,1,2,3",
@@ -1480,6 +1538,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010020 ",
"Counter": "0,1,2,3",
@@ -1492,6 +1551,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020020 ",
"Counter": "0,1,2,3",
@@ -1504,6 +1564,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020020 ",
"Counter": "0,1,2,3",
@@ -1516,6 +1577,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020020 ",
"Counter": "0,1,2,3",
@@ -1528,6 +1590,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020020 ",
"Counter": "0,1,2,3",
@@ -1540,6 +1603,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020020 ",
"Counter": "0,1,2,3",
@@ -1552,6 +1616,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020020 ",
"Counter": "0,1,2,3",
@@ -1564,6 +1629,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0020 ",
"Counter": "0,1,2,3",
@@ -1576,6 +1642,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0020 ",
"Counter": "0,1,2,3",
@@ -1588,6 +1655,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0020 ",
"Counter": "0,1,2,3",
@@ -1600,6 +1668,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0020 ",
"Counter": "0,1,2,3",
@@ -1612,6 +1681,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0020 ",
"Counter": "0,1,2,3",
@@ -1624,6 +1694,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0020 ",
"Counter": "0,1,2,3",
@@ -1636,6 +1707,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010040 ",
"Counter": "0,1,2,3",
@@ -1648,6 +1720,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020040 ",
"Counter": "0,1,2,3",
@@ -1660,6 +1733,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020040 ",
"Counter": "0,1,2,3",
@@ -1672,6 +1746,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020040 ",
"Counter": "0,1,2,3",
@@ -1684,6 +1759,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020040 ",
"Counter": "0,1,2,3",
@@ -1696,6 +1772,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020040 ",
"Counter": "0,1,2,3",
@@ -1708,6 +1785,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020040 ",
"Counter": "0,1,2,3",
@@ -1720,6 +1798,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0040 ",
"Counter": "0,1,2,3",
@@ -1732,6 +1811,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0040 ",
"Counter": "0,1,2,3",
@@ -1744,6 +1824,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0040 ",
"Counter": "0,1,2,3",
@@ -1756,6 +1837,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0040 ",
"Counter": "0,1,2,3",
@@ -1768,6 +1850,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0040 ",
"Counter": "0,1,2,3",
@@ -1780,6 +1863,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0040 ",
"Counter": "0,1,2,3",
@@ -1792,6 +1876,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010080 ",
"Counter": "0,1,2,3",
@@ -1804,6 +1889,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020080 ",
"Counter": "0,1,2,3",
@@ -1816,6 +1902,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020080 ",
"Counter": "0,1,2,3",
@@ -1828,6 +1915,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020080 ",
"Counter": "0,1,2,3",
@@ -1840,6 +1928,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020080 ",
"Counter": "0,1,2,3",
@@ -1852,6 +1941,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020080 ",
"Counter": "0,1,2,3",
@@ -1864,6 +1954,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020080 ",
"Counter": "0,1,2,3",
@@ -1876,6 +1967,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0080 ",
"Counter": "0,1,2,3",
@@ -1888,6 +1980,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0080 ",
"Counter": "0,1,2,3",
@@ -1900,6 +1993,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0080 ",
"Counter": "0,1,2,3",
@@ -1912,6 +2006,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0080 ",
"Counter": "0,1,2,3",
@@ -1924,6 +2019,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0080 ",
"Counter": "0,1,2,3",
@@ -1936,6 +2032,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0080 ",
"Counter": "0,1,2,3",
@@ -1948,6 +2045,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010100 ",
"Counter": "0,1,2,3",
@@ -1960,6 +2058,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020100 ",
"Counter": "0,1,2,3",
@@ -1972,6 +2071,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020100 ",
"Counter": "0,1,2,3",
@@ -1984,6 +2084,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020100 ",
"Counter": "0,1,2,3",
@@ -1996,6 +2097,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020100 ",
"Counter": "0,1,2,3",
@@ -2008,6 +2110,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020100 ",
"Counter": "0,1,2,3",
@@ -2020,6 +2123,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020100 ",
"Counter": "0,1,2,3",
@@ -2032,6 +2136,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0100 ",
"Counter": "0,1,2,3",
@@ -2044,6 +2149,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0100 ",
"Counter": "0,1,2,3",
@@ -2056,6 +2162,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0100 ",
"Counter": "0,1,2,3",
@@ -2068,6 +2175,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0100 ",
"Counter": "0,1,2,3",
@@ -2080,6 +2188,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0100 ",
"Counter": "0,1,2,3",
@@ -2092,6 +2201,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0100 ",
"Counter": "0,1,2,3",
@@ -2104,6 +2214,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010200 ",
"Counter": "0,1,2,3",
@@ -2116,6 +2227,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020200 ",
"Counter": "0,1,2,3",
@@ -2128,6 +2240,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020200 ",
"Counter": "0,1,2,3",
@@ -2140,6 +2253,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020200 ",
"Counter": "0,1,2,3",
@@ -2152,6 +2266,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020200 ",
"Counter": "0,1,2,3",
@@ -2164,6 +2279,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020200 ",
"Counter": "0,1,2,3",
@@ -2176,6 +2292,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020200 ",
"Counter": "0,1,2,3",
@@ -2188,6 +2305,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0200 ",
"Counter": "0,1,2,3",
@@ -2200,6 +2318,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0200 ",
"Counter": "0,1,2,3",
@@ -2212,6 +2331,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0200 ",
"Counter": "0,1,2,3",
@@ -2224,6 +2344,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0200 ",
"Counter": "0,1,2,3",
@@ -2236,6 +2357,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0200 ",
"Counter": "0,1,2,3",
@@ -2248,6 +2370,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0200 ",
"Counter": "0,1,2,3",
@@ -2260,6 +2383,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts any other requests that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000018000 ",
"Counter": "0,1,2,3",
@@ -2272,6 +2396,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080028000 ",
"Counter": "0,1,2,3",
@@ -2284,6 +2409,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100028000 ",
"Counter": "0,1,2,3",
@@ -2296,6 +2422,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200028000 ",
"Counter": "0,1,2,3",
@@ -2308,6 +2435,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400028000 ",
"Counter": "0,1,2,3",
@@ -2320,6 +2448,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000028000 ",
"Counter": "0,1,2,3",
@@ -2332,6 +2461,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80028000 ",
"Counter": "0,1,2,3",
@@ -2344,6 +2474,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts any other requests that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c8000 ",
"Counter": "0,1,2,3",
@@ -2356,6 +2487,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts any other requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c8000 ",
"Counter": "0,1,2,3",
@@ -2368,6 +2500,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts any other requests that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c8000 ",
"Counter": "0,1,2,3",
@@ -2380,6 +2513,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts any other requests that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c8000 ",
"Counter": "0,1,2,3",
@@ -2392,6 +2526,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c8000 ",
"Counter": "0,1,2,3",
@@ -2404,6 +2539,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts any other requests that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c8000 ",
"Counter": "0,1,2,3",
@@ -2416,6 +2552,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010090 ",
"Counter": "0,1,2,3",
@@ -2428,6 +2565,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020090 ",
"Counter": "0,1,2,3",
@@ -2440,6 +2578,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020090 ",
"Counter": "0,1,2,3",
@@ -2452,6 +2591,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020090 ",
"Counter": "0,1,2,3",
@@ -2464,6 +2604,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020090 ",
"Counter": "0,1,2,3",
@@ -2476,6 +2617,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020090 ",
"Counter": "0,1,2,3",
@@ -2488,6 +2630,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020090 ",
"Counter": "0,1,2,3",
@@ -2500,6 +2643,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0090 ",
"Counter": "0,1,2,3",
@@ -2512,6 +2656,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0090 ",
"Counter": "0,1,2,3",
@@ -2524,6 +2669,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0090 ",
"Counter": "0,1,2,3",
@@ -2536,6 +2682,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0090 ",
"Counter": "0,1,2,3",
@@ -2548,6 +2695,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0090 ",
"Counter": "0,1,2,3",
@@ -2560,6 +2708,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0090 ",
"Counter": "0,1,2,3",
@@ -2572,6 +2721,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010120 ",
"Counter": "0,1,2,3",
@@ -2584,6 +2734,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020120 ",
"Counter": "0,1,2,3",
@@ -2596,6 +2747,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020120 ",
"Counter": "0,1,2,3",
@@ -2608,6 +2760,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020120 ",
"Counter": "0,1,2,3",
@@ -2620,6 +2773,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020120 ",
"Counter": "0,1,2,3",
@@ -2632,6 +2786,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020120 ",
"Counter": "0,1,2,3",
@@ -2644,6 +2799,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020120 ",
"Counter": "0,1,2,3",
@@ -2656,6 +2812,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch RFOs that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0120 ",
"Counter": "0,1,2,3",
@@ -2668,6 +2825,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0120 ",
"Counter": "0,1,2,3",
@@ -2680,6 +2838,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch RFOs that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0120 ",
"Counter": "0,1,2,3",
@@ -2692,6 +2851,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0120 ",
"Counter": "0,1,2,3",
@@ -2704,6 +2864,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0120 ",
"Counter": "0,1,2,3",
@@ -2716,6 +2877,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0120 ",
"Counter": "0,1,2,3",
@@ -2728,6 +2890,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010240 ",
"Counter": "0,1,2,3",
@@ -2740,6 +2903,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020240 ",
"Counter": "0,1,2,3",
@@ -2752,6 +2916,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020240 ",
"Counter": "0,1,2,3",
@@ -2764,6 +2929,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020240 ",
"Counter": "0,1,2,3",
@@ -2776,6 +2942,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020240 ",
"Counter": "0,1,2,3",
@@ -2788,6 +2955,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020240 ",
"Counter": "0,1,2,3",
@@ -2800,6 +2968,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020240 ",
"Counter": "0,1,2,3",
@@ -2812,6 +2981,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0240 ",
"Counter": "0,1,2,3",
@@ -2824,6 +2994,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0240 ",
"Counter": "0,1,2,3",
@@ -2836,6 +3007,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0240 ",
"Counter": "0,1,2,3",
@@ -2848,6 +3020,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0240 ",
"Counter": "0,1,2,3",
@@ -2860,6 +3033,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0240 ",
"Counter": "0,1,2,3",
@@ -2872,6 +3046,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0240 ",
"Counter": "0,1,2,3",
@@ -2884,6 +3059,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010091 ",
"Counter": "0,1,2,3",
@@ -2896,6 +3072,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020091 ",
"Counter": "0,1,2,3",
@@ -2908,6 +3085,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020091 ",
"Counter": "0,1,2,3",
@@ -2920,6 +3098,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020091 ",
"Counter": "0,1,2,3",
@@ -2932,6 +3111,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020091 ",
"Counter": "0,1,2,3",
@@ -2944,6 +3124,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020091 ",
"Counter": "0,1,2,3",
@@ -2956,6 +3137,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020091 ",
"Counter": "0,1,2,3",
@@ -2968,6 +3150,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0091 ",
"Counter": "0,1,2,3",
@@ -2980,6 +3163,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0091 ",
"Counter": "0,1,2,3",
@@ -2992,6 +3176,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0091 ",
"Counter": "0,1,2,3",
@@ -3004,6 +3189,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0091 ",
"Counter": "0,1,2,3",
@@ -3016,6 +3202,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0091 ",
"Counter": "0,1,2,3",
@@ -3028,6 +3215,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0091 ",
"Counter": "0,1,2,3",
@@ -3040,6 +3228,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010122 ",
"Counter": "0,1,2,3",
@@ -3052,6 +3241,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020122 ",
"Counter": "0,1,2,3",
@@ -3064,6 +3254,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020122 ",
"Counter": "0,1,2,3",
@@ -3076,6 +3267,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020122 ",
"Counter": "0,1,2,3",
@@ -3088,6 +3280,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020122 ",
"Counter": "0,1,2,3",
@@ -3100,6 +3293,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020122 ",
"Counter": "0,1,2,3",
@@ -3112,6 +3306,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020122 ",
"Counter": "0,1,2,3",
@@ -3124,6 +3319,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0122 ",
"Counter": "0,1,2,3",
@@ -3136,6 +3332,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0122 ",
"Counter": "0,1,2,3",
@@ -3148,6 +3345,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0122 ",
"Counter": "0,1,2,3",
@@ -3160,6 +3358,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0122 ",
"Counter": "0,1,2,3",
@@ -3172,6 +3371,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0122 ",
"Counter": "0,1,2,3",
@@ -3184,6 +3384,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0122 ",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json b/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json
index 102bfb808199..689d478dae93 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json
@@ -1,6 +1,6 @@
[
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
+ "PublicDescription": "This event counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
"EventCode": "0xC1",
"Counter": "0,1,2,3",
"UMask": "0x8",
@@ -11,7 +11,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
+ "PublicDescription": "This event counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
"EventCode": "0xC1",
"Counter": "0,1,2,3",
"UMask": "0x10",
@@ -22,7 +22,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
"EventCode": "0xC7",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -32,7 +31,6 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PEBS": "1",
"EventCode": "0xC7",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -42,7 +40,15 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0xC7",
"Counter": "0,1,2,3",
"UMask": "0x4",
@@ -52,7 +58,6 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PEBS": "1",
"EventCode": "0xC7",
"Counter": "0,1,2,3",
"UMask": "0x8",
@@ -62,7 +67,6 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PEBS": "1",
"EventCode": "0xC7",
"Counter": "0,1,2,3",
"UMask": "0x10",
@@ -72,7 +76,43 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x15",
+ "EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
+ "SampleAfterValue": "2000006",
+      "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2a",
+ "EventName": "FP_ARITH_INST_RETIRED.SINGLE",
+ "SampleAfterValue": "2000005",
+      "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3c",
+ "EventName": "FP_ARITH_INST_RETIRED.PACKED",
+ "SampleAfterValue": "2000004",
+ "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "This event counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -82,7 +122,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
+ "PublicDescription": "This event counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x4",
@@ -92,7 +132,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
+ "PublicDescription": "This event counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x8",
@@ -102,7 +142,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
+ "PublicDescription": "This event counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x10",
@@ -121,51 +161,5 @@
"BriefDescription": "Cycles with any input/output SSE or FP assist",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3"
- },
- {
- "PEBS": "1",
- "EventCode": "0xc7",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x3c",
- "EventName": "FP_ARITH_INST_RETIRED.PACKED",
- "SampleAfterValue": "2000004",
- "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x2a",
- "EventName": "FP_ARITH_INST_RETIRED.SINGLE",
- "SampleAfterValue": "2000005",
- "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x15",
- "EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
- "SampleAfterValue": "2000006",
- "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
- "CounterHTOff": "0,1,2,3"
}
] \ No newline at end of file
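Note on the new aggregate FP_ARITH_INST_RETIRED events added above: their UMask values (SCALAR = 0x3, DOUBLE = 0x15, SINGLE = 0x2a, PACKED = 0x3c) are bitwise ORs of the single-width UMask bits of event 0xC7 that already appear in this file (0x1, 0x2, 0x4, 0x8, 0x10, 0x20). The sketch below is only a check of that composition; the per-bit labels are assumed from the usual Broadwell event list (only the 256-bit packed single name is spelled out in the hunks above), so treat them as illustrative rather than authoritative.

# Minimal sketch (assumptions noted inline): decompose the aggregate
# FP_ARITH_INST_RETIRED UMasks added in this patch into single-width bits.
COMPONENT_BITS = {
    0x01: "SCALAR_DOUBLE",        # assumed label
    0x02: "SCALAR_SINGLE",        # assumed label
    0x04: "128B_PACKED_DOUBLE",   # assumed label
    0x08: "128B_PACKED_SINGLE",   # assumed label
    0x10: "256B_PACKED_DOUBLE",   # assumed label
    0x20: "256B_PACKED_SINGLE",   # per the BriefDescription above: 8 computations per count
}

AGGREGATES = {"SCALAR": 0x03, "DOUBLE": 0x15, "SINGLE": 0x2a, "PACKED": 0x3c}

for name, umask in AGGREGATES.items():
    parts = [label for bit, label in COMPONENT_BITS.items() if umask & bit]
    print(f"FP_ARITH_INST_RETIRED.{name} (UMask 0x{umask:02x}) = " + " | ".join(parts))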
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/frontend.json b/tools/perf/pmu-events/arch/x86/broadwell/frontend.json
index b0cdf1f097a0..7142c76d7f11 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/frontend.json
@@ -10,7 +10,7 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x4",
@@ -20,80 +20,49 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "IDQ.MS_MITE_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_UOPS",
+ "UMask": "0x4",
+ "EventName": "IDQ.MITE_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_CYCLES",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterMask": "1",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_CYCLES",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_CYCLES",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
- "CounterMask": "1",
+      "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x10",
@@ -104,7 +73,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts the number of deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x10",
@@ -116,7 +85,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x18",
@@ -127,7 +96,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x18",
@@ -138,7 +107,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+      "PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "SampleAfterValue": "2000003",
+      "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x24",
@@ -149,7 +128,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x24",
@@ -160,7 +139,39 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+      "PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ. Uops may be initiated by Decode Stream Buffer (DSB) or MITE.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_UOPS",
+ "SampleAfterValue": "2000003",
+      "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+      "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ. Uops may be initiated by Decode Stream Buffer (DSB) or MITE.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_CYCLES",
+ "SampleAfterValue": "2000003",
+      "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_SWITCHES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x3c",
@@ -200,7 +211,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding ?4 ? x? when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
+      "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding (4 - x) when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
"EventCode": "0x9C",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -263,7 +274,7 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0?2 cycles.",
+      "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. DSB-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0-2 cycles.",
"EventCode": "0xAB",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -271,16 +282,5 @@
"SampleAfterValue": "2000003",
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_SWITCHES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
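The OFFCORE_RESPONSE entries in the cache.json hunks above and the memory.json hunks below all carry the same programming note: the event must use event select 0xB7 or 0xBB together with a mask written to the dedicated offcore-response MSR, and the MSRValue field of each JSON entry is that mask, so entries sharing EventCode "0xB7, 0xBB" differ only in MSRValue. A small sketch under that assumption, using the JSON layout shown in these hunks (the file path is illustrative, not prescribed by the patch):

import json
from collections import defaultdict

# Illustrative path; any of the Broadwell event files touched by this patch works.
PATH = "tools/perf/pmu-events/arch/x86/broadwell/memory.json"

with open(PATH) as f:
    events = json.load(f)

# Group OFFCORE_RESPONSE entries by the MSR mask that distinguishes them.
by_msr = defaultdict(list)
for ev in events:
    if ev.get("EventCode") == "0xB7, 0xBB":
        by_msr[ev.get("MSRValue", "").strip()].append(ev.get("EventName", "<unnamed>"))

for msr_value, names in sorted(by_msr.items()):
    print(msr_value, "->", ", ".join(names))

Each MSRValue should map to exactly one event name; a duplicate mask would indicate a copy-and-paste error in the event list.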
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/memory.json b/tools/perf/pmu-events/arch/x86/broadwell/memory.json
index ff5416d29d0d..c9154cebbdf0 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/memory.json
@@ -90,7 +90,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Unfriendly TSX abort triggered by a flowmarker.",
"EventCode": "0x5d",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -170,13 +169,13 @@
},
{
"PEBS": "1",
- "PublicDescription": "Number of times HLE abort was triggered.",
+ "PublicDescription": "Number of times HLE abort was triggered (PEBS).",
"EventCode": "0xc8",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "HLE_RETIRED.ABORTED",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times HLE abort was triggered",
+ "BriefDescription": "Number of times HLE abort was triggered (PEBS)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -251,13 +250,13 @@
},
{
"PEBS": "1",
- "PublicDescription": "Number of times RTM abort was triggered .",
+ "PublicDescription": "Number of times RTM abort was triggered (PEBS).",
"EventCode": "0xc9",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "RTM_RETIRED.ABORTED",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times RTM abort was triggered",
+ "BriefDescription": "Number of times RTM abort was triggered (PEBS)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -431,6 +430,7 @@
"CounterHTOff": "3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020001 ",
"Counter": "0,1,2,3",
@@ -443,6 +443,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0001 ",
"Counter": "0,1,2,3",
@@ -455,6 +456,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000001 ",
"Counter": "0,1,2,3",
@@ -467,6 +469,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000001 ",
"Counter": "0,1,2,3",
@@ -479,6 +482,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000001 ",
"Counter": "0,1,2,3",
@@ -491,6 +495,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000001 ",
"Counter": "0,1,2,3",
@@ -503,6 +508,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000001 ",
"Counter": "0,1,2,3",
@@ -515,6 +521,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000001 ",
"Counter": "0,1,2,3",
@@ -527,6 +534,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000001 ",
"Counter": "0,1,2,3",
@@ -539,6 +547,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000001 ",
"Counter": "0,1,2,3",
@@ -551,6 +560,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000001 ",
"Counter": "0,1,2,3",
@@ -563,6 +573,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000001 ",
"Counter": "0,1,2,3",
@@ -575,6 +586,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000001 ",
"Counter": "0,1,2,3",
@@ -587,6 +599,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0002 ",
"Counter": "0,1,2,3",
@@ -599,6 +612,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000002 ",
"Counter": "0,1,2,3",
@@ -611,6 +625,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000002 ",
"Counter": "0,1,2,3",
@@ -623,6 +638,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000002 ",
"Counter": "0,1,2,3",
@@ -635,6 +651,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000002 ",
"Counter": "0,1,2,3",
@@ -647,6 +664,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000002 ",
"Counter": "0,1,2,3",
@@ -659,6 +677,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020004 ",
"Counter": "0,1,2,3",
@@ -671,6 +690,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0004 ",
"Counter": "0,1,2,3",
@@ -683,6 +703,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000004 ",
"Counter": "0,1,2,3",
@@ -695,6 +716,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000004 ",
"Counter": "0,1,2,3",
@@ -707,6 +729,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000004 ",
"Counter": "0,1,2,3",
@@ -719,6 +742,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000004 ",
"Counter": "0,1,2,3",
@@ -731,6 +755,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000004 ",
"Counter": "0,1,2,3",
@@ -743,6 +768,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000004 ",
"Counter": "0,1,2,3",
@@ -755,6 +781,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000004 ",
"Counter": "0,1,2,3",
@@ -767,6 +794,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000004 ",
"Counter": "0,1,2,3",
@@ -779,6 +807,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000004 ",
"Counter": "0,1,2,3",
@@ -791,6 +820,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000004 ",
"Counter": "0,1,2,3",
@@ -803,6 +833,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000004 ",
"Counter": "0,1,2,3",
@@ -815,6 +846,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020008 ",
"Counter": "0,1,2,3",
@@ -827,6 +859,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0008 ",
"Counter": "0,1,2,3",
@@ -839,6 +872,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000008 ",
"Counter": "0,1,2,3",
@@ -851,6 +885,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000008 ",
"Counter": "0,1,2,3",
@@ -863,6 +898,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000008 ",
"Counter": "0,1,2,3",
@@ -875,6 +911,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000008 ",
"Counter": "0,1,2,3",
@@ -887,6 +924,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000008 ",
"Counter": "0,1,2,3",
@@ -899,6 +937,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000008 ",
"Counter": "0,1,2,3",
@@ -911,6 +950,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000008 ",
"Counter": "0,1,2,3",
@@ -923,6 +963,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts writebacks (modified to exclusive) that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000008 ",
"Counter": "0,1,2,3",
@@ -935,6 +976,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000008 ",
"Counter": "0,1,2,3",
@@ -947,6 +989,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts writebacks (modified to exclusive) that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000008 ",
"Counter": "0,1,2,3",
@@ -959,6 +1002,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000008 ",
"Counter": "0,1,2,3",
@@ -971,6 +1015,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020010 ",
"Counter": "0,1,2,3",
@@ -983,6 +1028,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0010 ",
"Counter": "0,1,2,3",
@@ -995,6 +1041,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000010 ",
"Counter": "0,1,2,3",
@@ -1007,6 +1054,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000010 ",
"Counter": "0,1,2,3",
@@ -1019,6 +1067,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000010 ",
"Counter": "0,1,2,3",
@@ -1031,6 +1080,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000010 ",
"Counter": "0,1,2,3",
@@ -1043,6 +1093,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000010 ",
"Counter": "0,1,2,3",
@@ -1055,6 +1106,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000010 ",
"Counter": "0,1,2,3",
@@ -1067,6 +1119,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000010 ",
"Counter": "0,1,2,3",
@@ -1079,6 +1132,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000010 ",
"Counter": "0,1,2,3",
@@ -1091,6 +1145,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000010 ",
"Counter": "0,1,2,3",
@@ -1103,6 +1158,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000010 ",
"Counter": "0,1,2,3",
@@ -1115,6 +1171,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000010 ",
"Counter": "0,1,2,3",
@@ -1127,6 +1184,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020020 ",
"Counter": "0,1,2,3",
@@ -1139,6 +1197,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0020 ",
"Counter": "0,1,2,3",
@@ -1151,6 +1210,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000020 ",
"Counter": "0,1,2,3",
@@ -1163,6 +1223,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000020 ",
"Counter": "0,1,2,3",
@@ -1175,6 +1236,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000020 ",
"Counter": "0,1,2,3",
@@ -1187,6 +1249,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000020 ",
"Counter": "0,1,2,3",
@@ -1199,6 +1262,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000020 ",
"Counter": "0,1,2,3",
@@ -1211,6 +1275,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000020 ",
"Counter": "0,1,2,3",
@@ -1223,6 +1288,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000020 ",
"Counter": "0,1,2,3",
@@ -1235,6 +1301,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000020 ",
"Counter": "0,1,2,3",
@@ -1247,6 +1314,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000020 ",
"Counter": "0,1,2,3",
@@ -1259,6 +1327,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000020 ",
"Counter": "0,1,2,3",
@@ -1271,6 +1340,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000020 ",
"Counter": "0,1,2,3",
@@ -1283,6 +1353,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020040 ",
"Counter": "0,1,2,3",
@@ -1295,6 +1366,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0040 ",
"Counter": "0,1,2,3",
@@ -1307,6 +1379,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000040 ",
"Counter": "0,1,2,3",
@@ -1319,6 +1392,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000040 ",
"Counter": "0,1,2,3",
@@ -1331,6 +1405,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000040 ",
"Counter": "0,1,2,3",
@@ -1343,6 +1418,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000040 ",
"Counter": "0,1,2,3",
@@ -1355,6 +1431,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000040 ",
"Counter": "0,1,2,3",
@@ -1367,6 +1444,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000040 ",
"Counter": "0,1,2,3",
@@ -1379,6 +1457,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000040 ",
"Counter": "0,1,2,3",
@@ -1391,6 +1470,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000040 ",
"Counter": "0,1,2,3",
@@ -1403,6 +1483,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000040 ",
"Counter": "0,1,2,3",
@@ -1415,6 +1496,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000040 ",
"Counter": "0,1,2,3",
@@ -1427,6 +1509,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000040 ",
"Counter": "0,1,2,3",
@@ -1439,6 +1522,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020080 ",
"Counter": "0,1,2,3",
@@ -1451,6 +1535,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0080 ",
"Counter": "0,1,2,3",
@@ -1463,6 +1548,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000080 ",
"Counter": "0,1,2,3",
@@ -1475,6 +1561,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000080 ",
"Counter": "0,1,2,3",
@@ -1487,6 +1574,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000080 ",
"Counter": "0,1,2,3",
@@ -1499,6 +1587,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000080 ",
"Counter": "0,1,2,3",
@@ -1511,6 +1600,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000080 ",
"Counter": "0,1,2,3",
@@ -1523,6 +1613,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000080 ",
"Counter": "0,1,2,3",
@@ -1535,6 +1626,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000080 ",
"Counter": "0,1,2,3",
@@ -1547,6 +1639,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000080 ",
"Counter": "0,1,2,3",
@@ -1559,6 +1652,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000080 ",
"Counter": "0,1,2,3",
@@ -1571,6 +1665,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000080 ",
"Counter": "0,1,2,3",
@@ -1583,6 +1678,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000080 ",
"Counter": "0,1,2,3",
@@ -1595,6 +1691,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020100 ",
"Counter": "0,1,2,3",
@@ -1607,6 +1704,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0100 ",
"Counter": "0,1,2,3",
@@ -1619,6 +1717,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000100 ",
"Counter": "0,1,2,3",
@@ -1631,6 +1730,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000100 ",
"Counter": "0,1,2,3",
@@ -1643,6 +1743,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000100 ",
"Counter": "0,1,2,3",
@@ -1655,6 +1756,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000100 ",
"Counter": "0,1,2,3",
@@ -1667,6 +1769,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000100 ",
"Counter": "0,1,2,3",
@@ -1679,6 +1782,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000100 ",
"Counter": "0,1,2,3",
@@ -1691,6 +1795,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000100 ",
"Counter": "0,1,2,3",
@@ -1703,6 +1808,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000100 ",
"Counter": "0,1,2,3",
@@ -1715,6 +1821,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000100 ",
"Counter": "0,1,2,3",
@@ -1727,6 +1834,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000100 ",
"Counter": "0,1,2,3",
@@ -1739,6 +1847,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000100 ",
"Counter": "0,1,2,3",
@@ -1751,6 +1860,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020200 ",
"Counter": "0,1,2,3",
@@ -1763,6 +1873,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0200 ",
"Counter": "0,1,2,3",
@@ -1775,6 +1886,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000200 ",
"Counter": "0,1,2,3",
@@ -1787,6 +1899,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000200 ",
"Counter": "0,1,2,3",
@@ -1799,6 +1912,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000200 ",
"Counter": "0,1,2,3",
@@ -1811,6 +1925,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000200 ",
"Counter": "0,1,2,3",
@@ -1823,6 +1938,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000200 ",
"Counter": "0,1,2,3",
@@ -1835,6 +1951,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000200 ",
"Counter": "0,1,2,3",
@@ -1847,6 +1964,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000200 ",
"Counter": "0,1,2,3",
@@ -1859,6 +1977,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000200 ",
"Counter": "0,1,2,3",
@@ -1871,6 +1990,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000200 ",
"Counter": "0,1,2,3",
@@ -1883,6 +2003,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000200 ",
"Counter": "0,1,2,3",
@@ -1895,6 +2016,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000200 ",
"Counter": "0,1,2,3",
@@ -1907,6 +2029,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000028000 ",
"Counter": "0,1,2,3",
@@ -1919,6 +2042,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts any other requests that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c8000 ",
"Counter": "0,1,2,3",
@@ -1931,6 +2055,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084008000 ",
"Counter": "0,1,2,3",
@@ -1943,6 +2068,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104008000 ",
"Counter": "0,1,2,3",
@@ -1955,6 +2081,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204008000 ",
"Counter": "0,1,2,3",
@@ -1967,6 +2094,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404008000 ",
"Counter": "0,1,2,3",
@@ -1979,6 +2107,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004008000 ",
"Counter": "0,1,2,3",
@@ -1991,6 +2120,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004008000 ",
"Counter": "0,1,2,3",
@@ -2003,6 +2133,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84008000 ",
"Counter": "0,1,2,3",
@@ -2015,6 +2146,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts any other requests that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc008000 ",
"Counter": "0,1,2,3",
@@ -2027,6 +2159,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c008000 ",
"Counter": "0,1,2,3",
@@ -2039,6 +2172,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts any other requests that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c008000 ",
"Counter": "0,1,2,3",
@@ -2051,6 +2185,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c008000 ",
"Counter": "0,1,2,3",
@@ -2063,6 +2198,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020090 ",
"Counter": "0,1,2,3",
@@ -2075,6 +2211,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0090 ",
"Counter": "0,1,2,3",
@@ -2087,6 +2224,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000090 ",
"Counter": "0,1,2,3",
@@ -2099,6 +2237,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000090 ",
"Counter": "0,1,2,3",
@@ -2111,6 +2250,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000090 ",
"Counter": "0,1,2,3",
@@ -2123,6 +2263,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000090 ",
"Counter": "0,1,2,3",
@@ -2135,6 +2276,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000090 ",
"Counter": "0,1,2,3",
@@ -2147,6 +2289,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000090 ",
"Counter": "0,1,2,3",
@@ -2159,6 +2302,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000090 ",
"Counter": "0,1,2,3",
@@ -2171,6 +2315,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000090 ",
"Counter": "0,1,2,3",
@@ -2183,6 +2328,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000090 ",
"Counter": "0,1,2,3",
@@ -2195,6 +2341,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000090 ",
"Counter": "0,1,2,3",
@@ -2207,6 +2354,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000090 ",
"Counter": "0,1,2,3",
@@ -2219,6 +2367,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020120 ",
"Counter": "0,1,2,3",
@@ -2231,6 +2380,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0120 ",
"Counter": "0,1,2,3",
@@ -2243,6 +2393,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000120 ",
"Counter": "0,1,2,3",
@@ -2255,6 +2406,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000120 ",
"Counter": "0,1,2,3",
@@ -2267,6 +2419,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000120 ",
"Counter": "0,1,2,3",
@@ -2279,6 +2432,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000120 ",
"Counter": "0,1,2,3",
@@ -2291,6 +2445,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000120 ",
"Counter": "0,1,2,3",
@@ -2303,6 +2458,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000120 ",
"Counter": "0,1,2,3",
@@ -2315,6 +2471,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000120 ",
"Counter": "0,1,2,3",
@@ -2327,6 +2484,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch RFOs that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000120 ",
"Counter": "0,1,2,3",
@@ -2339,6 +2497,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000120 ",
"Counter": "0,1,2,3",
@@ -2351,6 +2510,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch RFOs that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000120 ",
"Counter": "0,1,2,3",
@@ -2363,6 +2523,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000120 ",
"Counter": "0,1,2,3",
@@ -2375,6 +2536,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020240 ",
"Counter": "0,1,2,3",
@@ -2387,6 +2549,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0240 ",
"Counter": "0,1,2,3",
@@ -2399,6 +2562,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000240 ",
"Counter": "0,1,2,3",
@@ -2411,6 +2575,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000240 ",
"Counter": "0,1,2,3",
@@ -2423,6 +2588,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000240 ",
"Counter": "0,1,2,3",
@@ -2435,6 +2601,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000240 ",
"Counter": "0,1,2,3",
@@ -2447,6 +2614,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000240 ",
"Counter": "0,1,2,3",
@@ -2459,6 +2627,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000240 ",
"Counter": "0,1,2,3",
@@ -2471,6 +2640,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000240 ",
"Counter": "0,1,2,3",
@@ -2483,6 +2653,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000240 ",
"Counter": "0,1,2,3",
@@ -2495,6 +2666,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000240 ",
"Counter": "0,1,2,3",
@@ -2507,6 +2679,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000240 ",
"Counter": "0,1,2,3",
@@ -2519,6 +2692,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000240 ",
"Counter": "0,1,2,3",
@@ -2531,6 +2705,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020091 ",
"Counter": "0,1,2,3",
@@ -2543,6 +2718,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0091 ",
"Counter": "0,1,2,3",
@@ -2555,6 +2731,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000091 ",
"Counter": "0,1,2,3",
@@ -2567,6 +2744,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000091 ",
"Counter": "0,1,2,3",
@@ -2579,6 +2757,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000091 ",
"Counter": "0,1,2,3",
@@ -2591,6 +2770,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000091 ",
"Counter": "0,1,2,3",
@@ -2603,6 +2783,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000091 ",
"Counter": "0,1,2,3",
@@ -2615,6 +2796,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000091 ",
"Counter": "0,1,2,3",
@@ -2627,6 +2809,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000091 ",
"Counter": "0,1,2,3",
@@ -2639,6 +2822,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000091 ",
"Counter": "0,1,2,3",
@@ -2651,6 +2835,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000091 ",
"Counter": "0,1,2,3",
@@ -2663,6 +2848,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000091 ",
"Counter": "0,1,2,3",
@@ -2675,6 +2861,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000091 ",
"Counter": "0,1,2,3",
@@ -2687,6 +2874,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020122 ",
"Counter": "0,1,2,3",
@@ -2699,6 +2887,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0122 ",
"Counter": "0,1,2,3",
@@ -2711,6 +2900,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000122 ",
"Counter": "0,1,2,3",
@@ -2723,6 +2913,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000122 ",
"Counter": "0,1,2,3",
@@ -2735,6 +2926,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000122 ",
"Counter": "0,1,2,3",
@@ -2747,6 +2939,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000122 ",
"Counter": "0,1,2,3",
@@ -2759,6 +2952,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000122 ",
"Counter": "0,1,2,3",
@@ -2771,6 +2965,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000122 ",
"Counter": "0,1,2,3",
@@ -2783,6 +2978,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000122 ",
"Counter": "0,1,2,3",
@@ -2795,6 +2991,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000122 ",
"Counter": "0,1,2,3",
@@ -2807,6 +3004,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000122 ",
"Counter": "0,1,2,3",
@@ -2819,6 +3017,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000122 ",
"Counter": "0,1,2,3",
@@ -2831,6 +3030,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000122 ",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/other.json b/tools/perf/pmu-events/arch/x86/broadwell/other.json
index edf14f0d0eaf..4f829c5febbe 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/other.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/other.json
@@ -10,16 +10,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.",
- "EventCode": "0x5C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPL_CYCLES.RING123",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PublicDescription": "This event counts when there is a transition from ring 1,2 or 3 to ring0.",
"EventCode": "0x5C",
"Counter": "0,1,2,3",
@@ -32,6 +22,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.",
+ "EventCode": "0x5C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPL_CYCLES.RING123",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts cycles in which the L1 and L2 are locked due to a UC lock or split lock. A lock is asserted in case of locked memory access, due to noncacheable memory, locked operation that spans two cache lines, or a page walk from the noncacheable page table. L1D and L2 locks have a very high performance penalty and it is highly recommended to avoid such access.",
"EventCode": "0x63",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
index 78913ae87703..97c5d0784c6c 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
@@ -2,32 +2,42 @@
{
"PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. \nNotes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. \nCounting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"EventCode": "0x00",
- "Counter": "Fixed counter 1",
+ "Counter": "Fixed counter 0",
"UMask": "0x1",
"EventName": "INST_RETIRED.ANY",
"SampleAfterValue": "2000003",
"BriefDescription": "Instructions retired from execution.",
- "CounterHTOff": "Fixed counter 1"
+ "CounterHTOff": "Fixed counter 0"
},
{
"PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"EventCode": "0x00",
- "Counter": "Fixed counter 2",
+ "Counter": "Fixed counter 1",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000003",
"BriefDescription": "Core cycles when the thread is not in halt state",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "CounterHTOff": "Fixed counter 1"
},
{
"PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. \nNote: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. This event is clocked by base clock (100 Mhz) on Sandy Bridge. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"EventCode": "0x00",
- "Counter": "Fixed counter 3",
+ "Counter": "Fixed counter 2",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"SampleAfterValue": "2000003",
"BriefDescription": "Reference cycles when the core is not in halt state.",
- "CounterHTOff": "Fixed counter 3"
+ "CounterHTOff": "Fixed counter 2"
},
{
"PublicDescription": "This event counts how many times the load operation got the true Block-on-Store blocking code preventing store forwarding. This includes cases when:\n - preceding store conflicts with the load (incomplete overlap);\n - store forwarding is impossible due to u-arch limitations;\n - preceding lock RMW operations are not forwarded;\n - store has the no-forward bit set (uncacheable/page-split/masked stores);\n - all-blocking stores are used (mostly, fences and port I/O);\nand others.\nThe most common case is a load blocked due to its address range overlapping with a preceding smaller uncompleted store. Note: This event does not take into account cases of out-of-SW-control (for example, SbTailHit), unknown physical STA, and cases of blocking loads on store due to being non-WB memory type or a lock. These cases are covered by other events.\nSee the table of not supported store forwards in the Optimization Guide.",
@@ -59,27 +69,38 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
+ "PublicDescription": "Cycles checkpoints in Resource Allocation Table (RAT) are recovering from JEClear or machine clear.",
"EventCode": "0x0D",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "INT_MISC.RAT_STALL_CYCLES",
+ "UMask": "0x3",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles checkpoints in Resource Allocation Table (RAT) are recovering from JEClear or machine clear.",
"EventCode": "0x0D",
"Counter": "0,1,2,3",
"UMask": "0x3",
- "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "AnyThread": "1",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...)",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "INT_MISC.RAT_STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts the number of Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS).",
"EventCode": "0x0E",
"Counter": "0,1,2,3",
@@ -90,6 +111,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "EventCode": "0x0E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"PublicDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive\n added by GSR u-arch.",
"EventCode": "0x0E",
"Counter": "0,1,2,3",
@@ -118,18 +151,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
- "EventCode": "0x0E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
"PublicDescription": "This event counts the number of the divide operations executed. Uses edge-detect and a cmask value of 1 on ARITH.FPU_DIV_ACTIVE to get the number of the divide operations executed.",
"EventCode": "0x14",
"Counter": "0,1,2,3",
@@ -140,6 +161,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This is a fixed-frequency event programmed to general counters. It counts when the core is unhalted at 100 Mhz.",
"EventCode": "0x3C",
"Counter": "0,1,2,3",
@@ -150,6 +191,36 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x3c",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -159,6 +230,15 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by asm inspection of the nearby instructions.",
"EventCode": "0x4c",
"Counter": "0,1,2,3",
@@ -225,6 +305,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x5E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts stalls occured due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
"EventCode": "0x87",
"Counter": "0,1,2,3",
@@ -405,6 +497,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0xa0",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted macro conditional branch instructions.",
"EventCode": "0x89",
"Counter": "0,1,2,3",
@@ -435,6 +536,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "This event counts the number of micro-operations cancelled after they were dispatched from the scheduler to the execution units when the total number of physical register read ports across all dispatch ports exceeds the read bandwidth of the physical register file. The SIMD_PRF subevent applies to the following instructions: VDPPS, DPPS, VPCMPESTRI, PCMPESTRI, VPCMPESTRM, PCMPESTRM, VFMADD*, VFMADDSUB*, VFMSUB*, VMSUBADD*, VFNMADD*, VFNMSUB*. See the Broadwell Optimization Guide for more information.",
+ "EventCode": "0xA0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "UOP_DISPATCHES_CANCELLED.SIMD_PRF",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Micro-op dispatches cancelled due to insufficient SIMD physical register file read ports",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -445,6 +556,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 0.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -455,6 +586,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 1.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -465,6 +616,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -475,6 +646,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -485,6 +676,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -495,6 +706,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -505,6 +736,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -515,6 +766,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts resource-related stall cycles. Reasons for stalls can be as follows:\n - *any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots)\n - *any* u-arch structure got empty (like INT/SIMD FreeLists)\n - FPU control word (FPCW), MXCSR\nand others. This counts cycles that the pipeline backend blocked uop delivery from the front end.",
"EventCode": "0xA2",
"Counter": "0,1,2,3",
@@ -566,15 +837,14 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.",
"EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "CounterMask": "8",
- "CounterHTOff": "2"
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request (that is cycles with non-completed load waiting for its data from memory subsystem).",
@@ -588,17 +858,37 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"PublicDescription": "Counts number of cycles nothing is executed on any execution port.",
"EventCode": "0xA3",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Total execution stalls",
+ "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
"CounterMask": "4",
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Total execution stalls.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand* load request missing the L2 cache.(as a footprint) * includes also L1 HW prefetch requests that may or may not be required by demands.",
"EventCode": "0xA3",
"Counter": "0,1,2,3",
@@ -610,6 +900,16 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x5",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "CounterMask": "5",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request.",
"EventCode": "0xA3",
"Counter": "0,1,2,3",
@@ -621,6 +921,37 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x6",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.",
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "CounterHTOff": "2"
+ },
+ {
"PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request missing the L1 data cache.",
"EventCode": "0xA3",
"Counter": "2",
@@ -632,7 +963,16 @@
"CounterHTOff": "2"
},
{
- "PublicDescription": "Number of Uops delivered by the LSD. ",
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0xc",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
+ "CounterHTOff": "2"
+ },
+ {
"EventCode": "0xA8",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -642,6 +982,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Number of uops to be executed per-thread each cycle.",
"EventCode": "0xB1",
"Counter": "0,1,2,3",
@@ -652,6 +1012,58 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"PublicDescription": "Number of uops executed from any thread.",
"EventCode": "0xB1",
"Counter": "0,1,2,3",
@@ -662,36 +1074,64 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
- "EventCode": "0xB1",
- "Invert": "1",
+ "EventCode": "0xb1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
- "EventCode": "0xC0",
+ "EventCode": "0xb1",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "Errata": "BDM61",
- "EventName": "INST_RETIRED.ANY_P",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CounterMask": "2",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
- "EventCode": "0xC0",
+ "EventCode": "0xb1",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "INST_RETIRED.X87",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
"SampleAfterValue": "2000003",
- "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions:",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "Errata": "BDM61",
+ "EventName": "INST_RETIRED.ANY_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -707,6 +1147,16 @@
"CounterHTOff": "1"
},
{
+ "PublicDescription": "This event counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "INST_RETIRED.X87",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions:",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC1",
"Counter": "0,1,2,3",
"UMask": "0x40",
@@ -717,29 +1167,18 @@
},
{
"PEBS": "1",
- "PublicDescription": "This event counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
"EventCode": "0xC2",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "UOPS_RETIRED.ALL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Actually retired uops.",
+ "BriefDescription": "Actually retired uops. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"Data_LA": "1"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of retirement slots used.",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Retirement slots used.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
"EventCode": "0xC2",
"Invert": "1",
"Counter": "0,1,2,3",
@@ -763,6 +1202,17 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of retirement slots used.",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Retirement slots used. (Precise Event - PEBS)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts both thread-specific (TS) and all-thread (AT) nukes.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
@@ -773,6 +1223,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts self-modifying code (SMC) detected, which causes a machine clear.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
@@ -793,50 +1254,73 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "This event counts all (macro) branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts conditional branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts conditional branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
"SampleAfterValue": "400009",
- "BriefDescription": "Conditional branch instructions retired.",
+ "BriefDescription": "Conditional branch instructions retired. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts both direct and indirect near call instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect near call instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect near call instructions retired.",
+ "BriefDescription": "Direct and indirect near call instructions retired. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts all (macro) branch instructions retired.",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect macro near call instructions retired (captured in ring 3).",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired.",
+ "UMask": "0x2",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3). (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "Errata": "BDW98",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS)",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts return instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts return instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
"SampleAfterValue": "100007",
- "BriefDescription": "Return instructions retired.",
+ "BriefDescription": "Return instructions retired. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts not taken branch instructions retired.",
+ "PublicDescription": "This event counts not taken branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x10",
@@ -847,17 +1331,17 @@
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts taken branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts taken branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x20",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"SampleAfterValue": "400009",
- "BriefDescription": "Taken branch instructions retired.",
+ "BriefDescription": "Taken branch instructions retired. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts far branch instructions retired.",
+ "PublicDescription": "This event counts far branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x40",
@@ -868,29 +1352,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "2",
- "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "Errata": "BDW98",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS)",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted conditional branch instructions retired.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted conditional branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PublicDescription": "This event counts all mispredicted macro branch instructions retired.",
"EventCode": "0xC5",
"Counter": "0,1,2,3",
@@ -902,13 +1363,13 @@
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted conditional branch instructions retired.",
"EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "BR_MISP_RETIRED.RET",
- "SampleAfterValue": "100007",
- "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "UMask": "0x1",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted conditional branch instructions retired. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -923,164 +1384,36 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "This event counts cases of saving new LBR records by hardware. This assumes proper enabling of LBRs and takes into account LBR filtering done by the LBR_SELECT register.",
- "EventCode": "0xCC",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Count cases of saving new LBR",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Thread cycles when thread is not in halt state",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xa0",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 0.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 1.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted return instructions retired.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0x8",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "EventName": "BR_MISP_RETIRED.RET",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired.(Precise Event)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PEBS": "1",
- "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken. (Precise Event - PEBS).",
"EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0x20",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
"SampleAfterValue": "400009",
- "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "BriefDescription": "number of near branch instructions retired that were mispredicted and taken. (Precise Event - PEBS).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread.",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
+ "PublicDescription": "This event counts cases of saving new LBR records by hardware. This assumes proper enabling of LBRs and takes into account LBR filtering done by the LBR_SELECT register.",
+ "EventCode": "0xCC",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "UMask": "0x20",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Count cases of saving new LBR",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xe6",
@@ -1090,328 +1423,5 @@
"SampleAfterValue": "100003",
"BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "CounterMask": "8",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Total execution stalls.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0xc",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "CounterMask": "12",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x5",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
- "CounterMask": "5",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x6",
- "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "MACHINE_CLEARS.COUNT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_4_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "RS_EVENTS.EMPTY_END",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 0",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 5",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 7",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of micro-operations cancelled after they were dispatched from the scheduler to the execution units when the total number of physical register read ports across all dispatch ports exceeds the read bandwidth of the physical register file. The SIMD_PRF subevent applies to the following instructions: VDPPS, DPPS, VPCMPESTRI, PCMPESTRI, VPCMPESTRM, PCMPESTRM, VFMADD*, VFMADDSUB*, VFMSUB*, VMSUBADD*, VFNMADD*, VFNMSUB*. See the Broadwell Optimization Guide for more information.",
- "EventCode": "0xA0",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "UOP_DISPATCHES_CANCELLED.SIMD_PRF",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Micro-op dispatches cancelled due to insufficient SIMD physical register file read ports",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x00",
- "Counter": "Fixed counter 2",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "AnyThread": "1",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json b/tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json
index 4301e6fbc5eb..2a015e4c7e21 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json
@@ -44,6 +44,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "Errata": "BDM69",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
@@ -73,6 +83,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x60",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts store misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
"EventCode": "0x49",
"Counter": "0,1,2,3",
@@ -117,6 +136,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "Errata": "BDM69",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
@@ -146,6 +175,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x60",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts cycles for an extended page table walk. The Extended Page directory cache differs from standard TLB caches by the operating system that use it. Virtual machine operating systems use the extended page directory cache, while guest operating systems use the standard TLB caches.",
"EventCode": "0x4F",
"Counter": "0,1,2,3",
@@ -200,6 +238,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "Errata": "BDM69",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
"EventCode": "0x85",
"Counter": "0,1,2,3",
@@ -229,6 +277,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x60",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
"EventCode": "0xAE",
"Counter": "0,1,2,3",
@@ -251,61 +308,61 @@
{
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x21",
+ "UMask": "0x12",
"Errata": "BDM69, BDM98",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of ITLB page walker hits in the L1+FB.",
+ "BriefDescription": "Number of DTLB page walker hits in the L2.",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x12",
+ "UMask": "0x14",
"Errata": "BDM69, BDM98",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of DTLB page walker hits in the L2.",
+ "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP.",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x22",
+ "UMask": "0x18",
"Errata": "BDM69, BDM98",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L2",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of ITLB page walker hits in the L2.",
+ "BriefDescription": "Number of DTLB page walker hits in Memory.",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x14",
+ "UMask": "0x21",
"Errata": "BDM69, BDM98",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP.",
+ "BriefDescription": "Number of ITLB page walker hits in the L1+FB.",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x24",
+ "UMask": "0x22",
"Errata": "BDM69, BDM98",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP.",
+ "BriefDescription": "Number of ITLB page walker hits in the L2.",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x18",
+ "UMask": "0x24",
"Errata": "BDM69, BDM98",
- "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of DTLB page walker hits in Memory.",
+ "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP.",
"CounterHTOff": "0,1,2,3"
},
{
@@ -327,62 +384,5 @@
"SampleAfterValue": "100007",
"BriefDescription": "STLB flush attempts",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
- "Errata": "BDM69",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x60",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
- "Errata": "BDM69",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
- "SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x60",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
- "Errata": "BDM69",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
- "SampleAfterValue": "100003",
- "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x60",
- "EventName": "ITLB_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/cache.json b/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
index 36fe398029b9..bf243fe2a0ec 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
@@ -11,11 +11,28 @@
},
{
"EventCode": "0x24",
- "UMask": "0x41",
- "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "UMask": "0x22",
+ "BriefDescription": "RFO requests that miss L2 cache.",
"Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
- "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x24",
+ "BriefDescription": "L2 cache misses when fetching instructions.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x27",
+ "BriefDescription": "Demand requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"SampleAfterValue": "200003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -31,6 +48,43 @@
},
{
"EventCode": "0x24",
+ "UMask": "0x3f",
+ "BriefDescription": "All requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x41",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x42",
+ "BriefDescription": "RFO requests that hit L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x44",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
"UMask": "0x50",
"BriefDescription": "L2 prefetch requests that hit L2 cache",
"Counter": "0,1,2,3",
@@ -71,6 +125,15 @@
},
{
"EventCode": "0x24",
+ "UMask": "0xe7",
+ "BriefDescription": "Demand requests to L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
"UMask": "0xf8",
"BriefDescription": "Requests from L2 hardware prefetchers",
"Counter": "0,1,2,3",
@@ -80,6 +143,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x24",
+ "UMask": "0xff",
+ "BriefDescription": "All L2 requests.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x27",
"UMask": "0x50",
"BriefDescription": "Not rejected writebacks that hit L2 cache",
@@ -131,6 +203,27 @@
"CounterHTOff": "2"
},
{
+ "EventCode": "0x48",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "Counter": "2",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "AnyThread": "1",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
+ "Counter": "0,1,2,3",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x51",
"UMask": "0x1",
"BriefDescription": "L1D data line replacements",
@@ -153,12 +246,35 @@
},
{
"EventCode": "0x60",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "CounterMask": "1",
+ "Errata": "BDM76",
+ "PublicDescription": "This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "CounterMask": "6",
+ "Errata": "BDM76",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
"UMask": "0x2",
"BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
"Errata": "BDM76",
- "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The \"Offcore outstanding\" state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The Offcore outstanding state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -175,24 +291,24 @@
},
{
"EventCode": "0x60",
- "UMask": "0x8",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "UMask": "0x4",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "CounterMask": "1",
"Errata": "BDM76",
- "PublicDescription": "This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "PublicDescription": "This event counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The Offcore outstanding state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "UMask": "0x8",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
- "CounterMask": "1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"Errata": "BDM76",
- "PublicDescription": "This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "PublicDescription": "This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -209,18 +325,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x60",
- "UMask": "0x4",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
- "CounterMask": "1",
- "Errata": "BDM76",
- "PublicDescription": "This event counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The \"Offcore outstanding\" state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0x63",
"UMask": "0x2",
"BriefDescription": "Cycles when L1D is locked",
@@ -266,7 +370,7 @@
"BriefDescription": "Demand and prefetch data reads",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
- "PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable \"Demands\" and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable Demands and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -281,26 +385,35 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0xD0",
"UMask": "0x11",
- "BriefDescription": "Retired load uops that miss the STLB.",
+ "BriefDescription": "Retired load uops that miss the STLB. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x12",
- "BriefDescription": "Retired store uops that miss the STLB.",
+ "BriefDescription": "Retired store uops that miss the STLB. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts store uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts store uops true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
"SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -308,37 +421,37 @@
{
"EventCode": "0xD0",
"UMask": "0x21",
- "BriefDescription": "Retired load uops with locked access.",
+ "BriefDescription": "Retired load uops with locked access. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"Errata": "BDM35",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts load uops with locked access retired to the architected path.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops with locked access retired to the architected path.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x41",
- "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary.(Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x42",
- "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -346,24 +459,24 @@
{
"EventCode": "0xD0",
"UMask": "0x81",
- "BriefDescription": "All retired load uops.",
+ "BriefDescription": "All retired load uops. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
- "PublicDescription": "This event counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event ?ounts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x82",
- "BriefDescription": "All retired store uops.",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
- "PublicDescription": "This event counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event ?ounts AVX-256bit load/store double-pump memory uops as a single uop at retirement.",
"SampleAfterValue": "2000003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -371,69 +484,69 @@
{
"EventCode": "0xD1",
"UMask": "0x1",
- "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data source were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x2",
- "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"Errata": "BDM35",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the mid-level (L2) cache.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were hits in the mid-level (L2) cache.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x4",
- "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
+ "BriefDescription": "Hit in last-level (L3) cache. Excludes Unknown data-source. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
"Errata": "BDM100",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
"SampleAfterValue": "50021",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x8",
- "BriefDescription": "Retired load uops misses in L1 cache as data sources.",
+ "BriefDescription": "Retired load uops misses in L1 cache as data sources. Uses PEBS.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x10",
- "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Retired load uops with L2 cache misses as data sources. Uses PEBS.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
"SampleAfterValue": "50021",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x20",
- "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source. (Precise Event - PEBS).",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -445,77 +558,112 @@
{
"EventCode": "0xD1",
"UMask": "0x40",
- "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x1",
- "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS",
"Errata": "BDM100",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x2",
- "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
"Errata": "BDM100",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x4",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
"Errata": "BDM100",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x8",
- "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required.",
+ "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_NONE",
"Errata": "BDM100",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD3",
"UMask": "0x1",
- "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
"Errata": "BDE70, BDM100",
- "PublicDescription": "Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI).",
+ "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches. This is a precise event.",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD3",
+ "UMask": "0x4",
+ "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI) (Precise Event)",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM",
+ "Errata": "BDE70",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD3",
+ "UMask": "0x10",
+ "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM (Precise Event)",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM",
+ "Errata": "BDE70",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD3",
+ "UMask": "0x20",
+ "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache (Precise Event)",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD",
+ "Errata": "BDE70",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
@@ -657,118 +805,5 @@
"PublicDescription": "This event counts the number of split locks in the super queue.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x42",
- "BriefDescription": "RFO requests that hit L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_HIT",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x22",
- "BriefDescription": "RFO requests that miss L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x44",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.CODE_RD_HIT",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x24",
- "BriefDescription": "L2 cache misses when fetching instructions.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x27",
- "BriefDescription": "Demand requests that miss L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0xe7",
- "BriefDescription": "Demand requests to L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x3f",
- "BriefDescription": "All requests that miss L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0xff",
- "BriefDescription": "All L2 requests.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.REFERENCES",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "CounterMask": "6",
- "Errata": "BDM76",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x48",
- "UMask": "0x1",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "Counter": "2",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "AnyThread": "1",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0x48",
- "UMask": "0x2",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "Counter": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json b/tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json
index 4ae1ea24f22f..d7b9d9c9c518 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json
@@ -6,7 +6,7 @@
"Counter": "0,1,2,3",
"EventName": "OTHER_ASSISTS.AVX_TO_SSE",
"Errata": "BDM30",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
+ "PublicDescription": "This event counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -17,7 +17,7 @@
"Counter": "0,1,2,3",
"EventName": "OTHER_ASSISTS.SSE_TO_AVX",
"Errata": "BDM30",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
+ "PublicDescription": "This event counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -25,7 +25,6 @@
"EventCode": "0xC7",
"UMask": "0x1",
"BriefDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired. Each count represents 1 computation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
"SampleAfterValue": "2000003",
@@ -35,7 +34,6 @@
"EventCode": "0xC7",
"UMask": "0x2",
"BriefDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired. Each count represents 1 computation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
"SampleAfterValue": "2000003",
@@ -43,9 +41,17 @@
},
{
"EventCode": "0xC7",
+ "UMask": "0x3",
+ "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
"UMask": "0x4",
"BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired. Each count represents 2 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
"SampleAfterValue": "2000003",
@@ -55,7 +61,6 @@
"EventCode": "0xC7",
"UMask": "0x8",
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
"SampleAfterValue": "2000003",
@@ -65,19 +70,54 @@
"EventCode": "0xC7",
"UMask": "0x10",
"BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xC7",
+ "UMask": "0x15",
+ "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
+ "SampleAfterValue": "2000006",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc7",
+ "UMask": "0x20",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x2a",
+ "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.SINGLE",
+ "SampleAfterValue": "2000005",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x3c",
+ "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.PACKED",
+ "SampleAfterValue": "2000004",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0xCA",
"UMask": "0x2",
"BriefDescription": "Number of X87 assists due to output value.",
"Counter": "0,1,2,3",
"EventName": "FP_ASSIST.X87_OUTPUT",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
+ "PublicDescription": "This event counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -87,7 +127,7 @@
"BriefDescription": "Number of X87 assists due to input value.",
"Counter": "0,1,2,3",
"EventName": "FP_ASSIST.X87_INPUT",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
+ "PublicDescription": "This event counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -97,7 +137,7 @@
"BriefDescription": "Number of SIMD FP assists due to Output values",
"Counter": "0,1,2,3",
"EventName": "FP_ASSIST.SIMD_OUTPUT",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
+ "PublicDescription": "This event counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -107,7 +147,7 @@
"BriefDescription": "Number of SIMD FP assists due to input values",
"Counter": "0,1,2,3",
"EventName": "FP_ASSIST.SIMD_INPUT",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
+ "PublicDescription": "This event counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -121,51 +161,5 @@
"PublicDescription": "This event counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xc7",
- "UMask": "0x20",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "UMask": "0x3",
- "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "UMask": "0x3c",
- "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.PACKED",
- "SampleAfterValue": "2000004",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "UMask": "0x2a",
- "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.SINGLE",
- "SampleAfterValue": "2000005",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "UMask": "0x15",
- "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
- "SampleAfterValue": "2000006",
- "CounterHTOff": "0,1,2,3"
}
] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/frontend.json b/tools/perf/pmu-events/arch/x86/broadwellde/frontend.json
index 06bf0a40e568..72781e1e3362 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/frontend.json
@@ -15,80 +15,49 @@
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"Counter": "0,1,2,3",
"EventName": "IDQ.MITE_UOPS",
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x8",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.DSB_UOPS",
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x10",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_DSB_UOPS",
- "PublicDescription": "This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x20",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_MITE_UOPS",
- "PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MS_UOPS",
- "PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "EventName": "IDQ.MITE_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "UMask": "0x8",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MS_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x4",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MITE_CYCLES",
+ "EventName": "IDQ.DSB_CYCLES",
"CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x8",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "UMask": "0x10",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"Counter": "0,1,2,3",
- "EventName": "IDQ.DSB_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "EventName": "IDQ.MS_DSB_UOPS",
+ "PublicDescription": "This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -99,7 +68,7 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.MS_DSB_CYCLES",
"CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -111,7 +80,7 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.MS_DSB_OCCUR",
"CounterMask": "1",
- "PublicDescription": "This event counts the number of deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts the number of deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -122,7 +91,7 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
"CounterMask": "4",
- "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -133,7 +102,17 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
"CounterMask": "1",
- "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x20",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -144,7 +123,7 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
"CounterMask": "4",
- "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -155,7 +134,39 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
"CounterMask": "1",
- "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_SWITCHES",
+ "CounterMask": "1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -165,7 +176,7 @@
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"Counter": "0,1,2,3",
"EventName": "IDQ.MITE_ALL_UOPS",
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -205,7 +216,7 @@
"BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
"Counter": "0,1,2,3",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
- "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding ?4 ? x? when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
+ "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -268,18 +279,7 @@
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
"Counter": "0,1,2,3",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
- "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0?2 cycles.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EdgeDetect": "1",
- "EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_SWITCHES",
- "CounterMask": "1",
+ "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
}
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/memory.json b/tools/perf/pmu-events/arch/x86/broadwellde/memory.json
index cfa1e5876ec3..e44f73c24ac8 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/memory.json
@@ -95,7 +95,6 @@
"BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
"Counter": "0,1,2,3",
"EventName": "TX_EXEC.MISC1",
- "PublicDescription": "Unfriendly TSX abort triggered by a flowmarker.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -171,11 +170,11 @@
{
"EventCode": "0xc8",
"UMask": "0x4",
- "BriefDescription": "Number of times HLE abort was triggered",
+ "BriefDescription": "Number of times HLE abort was triggered (PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "HLE_RETIRED.ABORTED",
- "PublicDescription": "Number of times HLE abort was triggered.",
+ "PublicDescription": "Number of times HLE abort was triggered (PEBS).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -252,11 +251,11 @@
{
"EventCode": "0xc9",
"UMask": "0x4",
- "BriefDescription": "Number of times RTM abort was triggered",
+ "BriefDescription": "Number of times RTM abort was triggered (PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "RTM_RETIRED.ABORTED",
- "PublicDescription": "Number of times RTM abort was triggered .",
+ "PublicDescription": "Number of times RTM abort was triggered (PEBS).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/other.json b/tools/perf/pmu-events/arch/x86/broadwellde/other.json
index 718fcb1db2ee..4475249ea9da 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/other.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/other.json
@@ -10,16 +10,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x5C",
- "UMask": "0x2",
- "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
- "Counter": "0,1,2,3",
- "EventName": "CPL_CYCLES.RING123",
- "PublicDescription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EdgeDetect": "1",
"EventCode": "0x5C",
"UMask": "0x1",
@@ -32,6 +22,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x5C",
+ "UMask": "0x2",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "Counter": "0,1,2,3",
+ "EventName": "CPL_CYCLES.RING123",
+ "PublicDescription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x63",
"UMask": "0x1",
"BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
index 02b4e1035f2d..920c89da9111 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
@@ -3,31 +3,41 @@
"EventCode": "0x00",
"UMask": "0x1",
"BriefDescription": "Instructions retired from execution.",
- "Counter": "Fixed counter 1",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. \nNotes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. \nCounting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 1"
+ "CounterHTOff": "Fixed counter 0"
},
{
"EventCode": "0x00",
"UMask": "0x2",
"BriefDescription": "Core cycles when the thread is not in halt state",
- "Counter": "Fixed counter 2",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "EventCode": "0x00",
+ "UMask": "0x2",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "Fixed counter 1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "Fixed counter 1"
},
{
"EventCode": "0x00",
"UMask": "0x3",
"BriefDescription": "Reference cycles when the core is not in halt state.",
- "Counter": "Fixed counter 3",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. \nNote: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. This event is clocked by base clock (100 Mhz) on Sandy Bridge. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 3"
+ "CounterHTOff": "Fixed counter 2"
},
{
"EventCode": "0x03",
@@ -60,22 +70,33 @@
},
{
"EventCode": "0x0D",
- "UMask": "0x8",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
+ "UMask": "0x3",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
"Counter": "0,1,2,3",
- "EventName": "INT_MISC.RAT_STALL_CYCLES",
- "PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "Cycles checkpoints in Resource Allocation Table (RAT) are recovering from JEClear or machine clear.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x0D",
"UMask": "0x3",
- "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...)",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
"Counter": "0,1,2,3",
- "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "AnyThread": "1",
"CounterMask": "1",
- "PublicDescription": "Cycles checkpoints in Resource Allocation Table (RAT) are recovering from JEClear or machine clear.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
+ "Counter": "0,1,2,3",
+ "EventName": "INT_MISC.RAT_STALL_CYCLES",
+ "PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -90,6 +111,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "Invert": "1",
+ "EventCode": "0x0E",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0x0E",
"UMask": "0x10",
"BriefDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.",
@@ -118,18 +151,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "Invert": "1",
- "EventCode": "0x0E",
- "UMask": "0x1",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
"EventCode": "0x14",
"UMask": "0x1",
"BriefDescription": "Cycles when divider is busy executing divide operations",
@@ -141,6 +162,26 @@
},
{
"EventCode": "0x3C",
+ "UMask": "0x0",
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x0",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
"UMask": "0x1",
"BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
"Counter": "0,1,2,3",
@@ -150,6 +191,36 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x3c",
"UMask": "0x2",
"BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
@@ -159,6 +230,15 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0x3C",
+ "UMask": "0x2",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x4c",
"UMask": "0x1",
"BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
@@ -225,6 +305,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EdgeDetect": "1",
+ "Invert": "1",
+ "EventCode": "0x5E",
+ "UMask": "0x1",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "Counter": "0,1,2,3",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "CounterMask": "1",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x87",
"UMask": "0x1",
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
@@ -406,6 +498,15 @@
},
{
"EventCode": "0x89",
+ "UMask": "0xa0",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
"UMask": "0xc1",
"BriefDescription": "Speculative and retired mispredicted macro conditional branches",
"Counter": "0,1,2,3",
@@ -435,6 +536,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA0",
+ "UMask": "0x3",
+ "BriefDescription": "Micro-op dispatches cancelled due to insufficient SIMD physical register file read ports",
+ "Counter": "0,1,2,3",
+ "EventName": "UOP_DISPATCHES_CANCELLED.SIMD_PRF",
+ "PublicDescription": "This event counts the number of micro-operations cancelled after they were dispatched from the scheduler to the execution units when the total number of physical register read ports across all dispatch ports exceeds the read bandwidth of the physical register file. The SIMD_PRF subevent applies to the following instructions: VDPPS, DPPS, VPCMPESTRI, PCMPESTRI, VPCMPESTRM, PCMPESTRM, VFMADD*, VFMADDSUB*, VFMSUB*, VMSUBADD*, VFNMADD*, VFNMSUB*. See the Broadwell Optimization Guide for more information.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0xA1",
"UMask": "0x1",
"BriefDescription": "Cycles per thread when uops are executed in port 0",
@@ -446,6 +557,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 0.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x2",
"BriefDescription": "Cycles per thread when uops are executed in port 1",
"Counter": "0,1,2,3",
@@ -456,6 +587,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 1.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x4",
"BriefDescription": "Cycles per thread when uops are executed in port 2",
"Counter": "0,1,2,3",
@@ -466,6 +617,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x8",
"BriefDescription": "Cycles per thread when uops are executed in port 3",
"Counter": "0,1,2,3",
@@ -476,6 +647,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x10",
"BriefDescription": "Cycles per thread when uops are executed in port 4",
"Counter": "0,1,2,3",
@@ -486,6 +677,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x20",
"BriefDescription": "Cycles per thread when uops are executed in port 5",
"Counter": "0,1,2,3",
@@ -496,6 +707,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x20",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x20",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x40",
"BriefDescription": "Cycles per thread when uops are executed in port 6",
"Counter": "0,1,2,3",
@@ -506,6 +737,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x40",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x40",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x80",
"BriefDescription": "Cycles per thread when uops are executed in port 7",
"Counter": "0,1,2,3",
@@ -515,6 +766,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "UMask": "0x80",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x80",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xA2",
"UMask": "0x1",
"BriefDescription": "Resource-related stall cycles",
@@ -567,14 +838,13 @@
},
{
"EventCode": "0xA3",
- "UMask": "0x8",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "Counter": "2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
- "CounterMask": "8",
- "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "CounterMask": "1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "2"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xA3",
@@ -589,8 +859,18 @@
},
{
"EventCode": "0xA3",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "CounterMask": "2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
"UMask": "0x4",
- "BriefDescription": "Total execution stalls",
+ "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
"Counter": "0,1,2,3",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
"CounterMask": "4",
@@ -600,6 +880,16 @@
},
{
"EventCode": "0xA3",
+ "UMask": "0x4",
+ "BriefDescription": "Total execution stalls.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
"UMask": "0x5",
"BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
"Counter": "0,1,2,3",
@@ -611,6 +901,16 @@
},
{
"EventCode": "0xA3",
+ "UMask": "0x5",
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "CounterMask": "5",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
"UMask": "0x6",
"BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
"Counter": "0,1,2,3",
@@ -622,6 +922,37 @@
},
{
"EventCode": "0xA3",
+ "UMask": "0x6",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "CounterMask": "6",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "CounterMask": "8",
+ "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "CounterMask": "8",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
"UMask": "0xc",
"BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
"Counter": "2",
@@ -632,12 +963,41 @@
"CounterHTOff": "2"
},
{
+ "EventCode": "0xA3",
+ "UMask": "0xc",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "CounterMask": "12",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
"EventCode": "0xA8",
"UMask": "0x1",
"BriefDescription": "Number of Uops delivered by the LSD.",
"Counter": "0,1,2,3",
"EventName": "LSD.UOPS",
- "PublicDescription": "Number of Uops delivered by the LSD. ",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "CounterMask": "1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -652,6 +1012,58 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "Invert": "1",
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "CounterMask": "2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "CounterMask": "3",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0xB1",
"UMask": "0x2",
"BriefDescription": "Number of uops executed on the core.",
@@ -662,35 +1074,63 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "Invert": "1",
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"CounterMask": "1",
- "PublicDescription": "This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC0",
- "UMask": "0x0",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "INST_RETIRED.ANY_P",
- "Errata": "BDM61",
- "PublicDescription": "This event counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "CounterMask": "2",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC0",
+ "EventCode": "0xb1",
"UMask": "0x2",
- "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions:",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "INST_RETIRED.X87",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "CounterMask": "3",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC0",
+ "UMask": "0x0",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3",
+ "EventName": "INST_RETIRED.ANY_P",
+ "Errata": "BDM61",
+ "PublicDescription": "This event counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -707,6 +1147,16 @@
"CounterHTOff": "1"
},
{
+ "EventCode": "0xC0",
+ "UMask": "0x2",
+ "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions:",
+ "Counter": "0,1,2,3",
+ "EventName": "INST_RETIRED.X87",
+ "PublicDescription": "This event counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC1",
"UMask": "0x40",
"BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
@@ -718,23 +1168,12 @@
{
"EventCode": "0xC2",
"UMask": "0x1",
- "BriefDescription": "Actually retired uops.",
+ "BriefDescription": "Actually retired uops. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "UOPS_RETIRED.ALL",
- "PublicDescription": "This event counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC2",
- "UMask": "0x2",
- "BriefDescription": "Retirement slots used.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of retirement slots used.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -746,7 +1185,7 @@
"Counter": "0,1,2,3",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
"CounterMask": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -763,6 +1202,17 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xC2",
+ "UMask": "0x2",
+ "BriefDescription": "Retirement slots used. (Precise Event - PEBS)",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of retirement slots used.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC3",
"UMask": "0x1",
"BriefDescription": "Cycles there was a Nuke. Account for both thread-specific and All Thread Nukes.",
@@ -773,6 +1223,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EdgeDetect": "1",
+ "EventCode": "0xC3",
+ "UMask": "0x1",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "CounterMask": "1",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC3",
"UMask": "0x4",
"BriefDescription": "Self-modifying code (SMC) detected.",
@@ -794,44 +1255,67 @@
},
{
"EventCode": "0xC4",
+ "UMask": "0x0",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "This event counts all (macro) branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
"UMask": "0x1",
- "BriefDescription": "Conditional branch instructions retired.",
+ "BriefDescription": "Conditional branch instructions retired. (Precise Event - PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts conditional branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts conditional branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
"UMask": "0x2",
- "BriefDescription": "Direct and indirect near call instructions retired.",
+ "BriefDescription": "Direct and indirect near call instructions retired. (Precise Event - PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts both direct and indirect near call instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect near call instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
- "UMask": "0x0",
- "BriefDescription": "All (macro) branch instructions retired.",
+ "UMask": "0x2",
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3). (Precise Event - PEBS)",
+ "PEBS": "1",
"Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "PublicDescription": "This event counts all (macro) branch instructions retired.",
- "SampleAfterValue": "400009",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect macro near call instructions retired (captured in ring 3).",
+ "SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
+ "UMask": "0x4",
+ "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS)",
+ "PEBS": "2",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "Errata": "BDW98",
+ "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC4",
"UMask": "0x8",
- "BriefDescription": "Return instructions retired.",
+ "BriefDescription": "Return instructions retired. (Precise Event - PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts return instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts return instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -841,18 +1325,18 @@
"BriefDescription": "Not taken branch instructions retired.",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts not taken branch instructions retired.",
+ "PublicDescription": "This event counts not taken branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
"UMask": "0x20",
- "BriefDescription": "Taken branch instructions retired.",
+ "BriefDescription": "Taken branch instructions retired. (Precise Event - PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts taken branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts taken branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -863,34 +1347,11 @@
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"Errata": "BDW98",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts far branch instructions retired.",
+ "PublicDescription": "This event counts far branch instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC4",
- "UMask": "0x4",
- "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS)",
- "PEBS": "2",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "Errata": "BDW98",
- "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC5",
- "UMask": "0x1",
- "BriefDescription": "Mispredicted conditional branch instructions retired.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted conditional branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0xC5",
"UMask": "0x0",
"BriefDescription": "All mispredicted macro branch instructions retired.",
@@ -902,13 +1363,13 @@
},
{
"EventCode": "0xC5",
- "UMask": "0x8",
- "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "UMask": "0x1",
+ "BriefDescription": "Mispredicted conditional branch instructions retired. (Precise Event - PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.RET",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
- "SampleAfterValue": "100007",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -923,164 +1384,36 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xCC",
- "UMask": "0x20",
- "BriefDescription": "Count cases of saving new LBR",
- "Counter": "0,1,2,3",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
- "PublicDescription": "This event counts cases of saving new LBR records by hardware. This assumes proper enabling of LBRs and takes into account LBR filtering done by the LBR_SELECT register.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x0",
- "BriefDescription": "Thread cycles when thread is not in halt state",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x89",
- "UMask": "0xa0",
- "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x1",
- "BriefDescription": "Cycles per core when uops are exectuted in port 0.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x2",
- "BriefDescription": "Cycles per core when uops are exectuted in port 1.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x4",
- "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
+ "EventCode": "0xC5",
"UMask": "0x8",
- "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x10",
- "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x20",
- "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x40",
- "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x80",
- "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired.(Precise Event)",
+ "PEBS": "1",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
+ "EventName": "BR_MISP_RETIRED.RET",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted return instructions retired.",
+ "SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC5",
"UMask": "0x20",
- "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "BriefDescription": "number of near branch instructions retired that were mispredicted and taken. (Precise Event - PEBS).",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken. (Precise Event - PEBS).",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
- "CounterMask": "2",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
- "CounterMask": "3",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "EventCode": "0xCC",
+ "UMask": "0x20",
+ "BriefDescription": "Count cases of saving new LBR",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
- "CounterMask": "4",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "PublicDescription": "This event counts cases of saving new LBR records by hardware. This assumes proper enabling of LBRs and takes into account LBR filtering done by the LBR_SELECT register.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xe6",
@@ -1090,328 +1423,5 @@
"EventName": "BACLEARS.ANY",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x8",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "Counter": "2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
- "CounterMask": "8",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x1",
- "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x2",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
- "CounterMask": "2",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x4",
- "BriefDescription": "Total execution stalls.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
- "CounterMask": "4",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0xc",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "Counter": "2",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
- "CounterMask": "12",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x5",
- "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
- "CounterMask": "5",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x6",
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
- "CounterMask": "6",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EdgeDetect": "1",
- "EventCode": "0xC3",
- "UMask": "0x1",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "Counter": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.COUNT",
- "CounterMask": "1",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA8",
- "UMask": "0x1",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "Counter": "0,1,2,3",
- "EventName": "LSD.CYCLES_4_UOPS",
- "CounterMask": "4",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EdgeDetect": "1",
- "Invert": "1",
- "EventCode": "0x5E",
- "UMask": "0x1",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "Counter": "0,1,2,3",
- "EventName": "RS_EVENTS.EMPTY_END",
- "CounterMask": "1",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA8",
- "UMask": "0x1",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "Counter": "0,1,2,3",
- "EventName": "LSD.CYCLES_ACTIVE",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x1",
- "BriefDescription": "Cycles per thread when uops are executed in port 0",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x2",
- "BriefDescription": "Cycles per thread when uops are executed in port 1",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x4",
- "BriefDescription": "Cycles per thread when uops are executed in port 2",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x8",
- "BriefDescription": "Cycles per thread when uops are executed in port 3",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x10",
- "BriefDescription": "Cycles per thread when uops are executed in port 4",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x20",
- "BriefDescription": "Cycles per thread when uops are executed in port 5",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x40",
- "BriefDescription": "Cycles per thread when uops are executed in port 6",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x80",
- "BriefDescription": "Cycles per thread when uops are executed in port 7",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA0",
- "UMask": "0x3",
- "BriefDescription": "Micro-op dispatches cancelled due to insufficient SIMD physical register file read ports",
- "Counter": "0,1,2,3",
- "EventName": "UOP_DISPATCHES_CANCELLED.SIMD_PRF",
- "PublicDescription": "This event counts the number of micro-operations cancelled after they were dispatched from the scheduler to the execution units when the total number of physical register read ports across all dispatch ports exceeds the read bandwidth of the physical register file. The SIMD_PRF subevent applies to the following instructions: VDPPS, DPPS, VPCMPESTRI, PCMPESTRI, VPCMPESTRM, PCMPESTRM, VFMADD*, VFMADDSUB*, VFMSUB*, VMSUBADD*, VFNMADD*, VFNMSUB*. See the Broadwell Optimization Guide for more information.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x00",
- "UMask": "0x2",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "Fixed counter 2",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 2"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x0",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "UMask": "0x3",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "Counter": "0,1,2,3",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "AnyThread": "1",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "CounterMask": "2",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
- "CounterMask": "3",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "CounterMask": "4",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "Invert": "1",
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x2",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json b/tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json
index 5ce8b67ba076..7d79c707c6d1 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json
@@ -45,6 +45,16 @@
},
{
"EventCode": "0x08",
+ "UMask": "0xe",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "Errata": "BDM69",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
"UMask": "0x10",
"BriefDescription": "Cycles when PMH is busy with page walks",
"Counter": "0,1,2,3",
@@ -73,6 +83,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x08",
+ "UMask": "0x60",
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x49",
"UMask": "0x1",
"BriefDescription": "Store misses in all DTLB levels that cause page walks",
@@ -118,6 +137,16 @@
},
{
"EventCode": "0x49",
+ "UMask": "0xe",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "Errata": "BDM69",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
"UMask": "0x10",
"BriefDescription": "Cycles when PMH is busy with page walks",
"Counter": "0,1,2,3",
@@ -146,6 +175,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x49",
+ "UMask": "0x60",
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x4F",
"UMask": "0x10",
"BriefDescription": "Cycle count for an Extended Page table walk.",
@@ -201,6 +239,16 @@
},
{
"EventCode": "0x85",
+ "UMask": "0xe",
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "Errata": "BDM69",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
"UMask": "0x10",
"BriefDescription": "Cycles when PMH is busy with page walks",
"Counter": "0,1,2,3",
@@ -229,6 +277,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x85",
+ "UMask": "0x60",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xAE",
"UMask": "0x1",
"BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
@@ -250,60 +307,60 @@
},
{
"EventCode": "0xBC",
- "UMask": "0x21",
- "BriefDescription": "Number of ITLB page walker hits in the L1+FB.",
+ "UMask": "0x12",
+ "BriefDescription": "Number of DTLB page walker hits in the L2.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x12",
- "BriefDescription": "Number of DTLB page walker hits in the L2.",
+ "UMask": "0x14",
+ "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x22",
- "BriefDescription": "Number of ITLB page walker hits in the L2.",
+ "UMask": "0x18",
+ "BriefDescription": "Number of DTLB page walker hits in Memory.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L2",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x14",
- "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP.",
+ "UMask": "0x21",
+ "BriefDescription": "Number of ITLB page walker hits in the L1+FB.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x24",
- "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP.",
+ "UMask": "0x22",
+ "BriefDescription": "Number of ITLB page walker hits in the L2.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L2",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x18",
- "BriefDescription": "Number of DTLB page walker hits in Memory.",
+ "UMask": "0x24",
+ "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
@@ -327,62 +384,5 @@
"PublicDescription": "This event counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, and so on).",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x08",
- "UMask": "0xe",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
- "Errata": "BDM69",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x08",
- "UMask": "0x60",
- "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x49",
- "UMask": "0xe",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
- "Errata": "BDM69",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x49",
- "UMask": "0x60",
- "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "UMask": "0xe",
- "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
- "Errata": "BDM69",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "UMask": "0x60",
- "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/cache.json b/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
index d1d043829b95..bf0c51272068 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
@@ -11,11 +11,28 @@
},
{
"EventCode": "0x24",
- "UMask": "0x41",
- "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "UMask": "0x22",
+ "BriefDescription": "RFO requests that miss L2 cache.",
"Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
- "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x24",
+ "BriefDescription": "L2 cache misses when fetching instructions.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x27",
+ "BriefDescription": "Demand requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"SampleAfterValue": "200003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -31,6 +48,43 @@
},
{
"EventCode": "0x24",
+ "UMask": "0x3f",
+ "BriefDescription": "All requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x41",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x42",
+ "BriefDescription": "RFO requests that hit L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x44",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
"UMask": "0x50",
"BriefDescription": "L2 prefetch requests that hit L2 cache",
"Counter": "0,1,2,3",
@@ -71,6 +125,15 @@
},
{
"EventCode": "0x24",
+ "UMask": "0xe7",
+ "BriefDescription": "Demand requests to L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
"UMask": "0xf8",
"BriefDescription": "Requests from L2 hardware prefetchers",
"Counter": "0,1,2,3",
@@ -80,6 +143,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x24",
+ "UMask": "0xff",
+ "BriefDescription": "All L2 requests.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x27",
"UMask": "0x50",
"BriefDescription": "Not rejected writebacks that hit L2 cache",
@@ -131,6 +203,27 @@
"CounterHTOff": "2"
},
{
+ "EventCode": "0x48",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "Counter": "2",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "AnyThread": "1",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
+ "Counter": "0,1,2,3",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x51",
"UMask": "0x1",
"BriefDescription": "L1D data line replacements",
@@ -153,12 +246,35 @@
},
{
"EventCode": "0x60",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "CounterMask": "1",
+ "Errata": "BDM76",
+ "PublicDescription": "This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "CounterMask": "6",
+ "Errata": "BDM76",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
"UMask": "0x2",
"BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
"Errata": "BDM76",
- "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The \"Offcore outstanding\" state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The Offcore outstanding state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -175,24 +291,24 @@
},
{
"EventCode": "0x60",
- "UMask": "0x8",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "UMask": "0x4",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "CounterMask": "1",
"Errata": "BDM76",
- "PublicDescription": "This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "PublicDescription": "This event counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The Offcore outstanding state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "UMask": "0x8",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
- "CounterMask": "1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"Errata": "BDM76",
- "PublicDescription": "This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "PublicDescription": "This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -209,18 +325,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x60",
- "UMask": "0x4",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
- "CounterMask": "1",
- "Errata": "BDM76",
- "PublicDescription": "This event counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The \"Offcore outstanding\" state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0x63",
"UMask": "0x2",
"BriefDescription": "Cycles when L1D is locked",
@@ -266,7 +370,7 @@
"BriefDescription": "Demand and prefetch data reads",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
- "PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable \"Demands\" and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable Demands and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -281,26 +385,35 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0xD0",
"UMask": "0x11",
- "BriefDescription": "Retired load uops that miss the STLB.",
+ "BriefDescription": "Retired load uops that miss the STLB. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x12",
- "BriefDescription": "Retired store uops that miss the STLB.",
+ "BriefDescription": "Retired store uops that miss the STLB. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts store uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts store uops true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
"SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -308,37 +421,37 @@
{
"EventCode": "0xD0",
"UMask": "0x21",
- "BriefDescription": "Retired load uops with locked access.",
+ "BriefDescription": "Retired load uops with locked access. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"Errata": "BDM35",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts load uops with locked access retired to the architected path.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops with locked access retired to the architected path.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x41",
- "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary.(Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x42",
- "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -346,24 +459,24 @@
{
"EventCode": "0xD0",
"UMask": "0x81",
- "BriefDescription": "All retired load uops.",
+ "BriefDescription": "All retired load uops. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
- "PublicDescription": "This event counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event ?ounts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x82",
- "BriefDescription": "All retired store uops.",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
- "PublicDescription": "This event counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event ?ounts AVX-256bit load/store double-pump memory uops as a single uop at retirement.",
"SampleAfterValue": "2000003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -371,69 +484,69 @@
{
"EventCode": "0xD1",
"UMask": "0x1",
- "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data source were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x2",
- "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"Errata": "BDM35",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the mid-level (L2) cache.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were hits in the mid-level (L2) cache.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x4",
- "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
+ "BriefDescription": "Hit in last-level (L3) cache. Excludes Unknown data-source. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
"Errata": "BDM100",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
"SampleAfterValue": "50021",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x8",
- "BriefDescription": "Retired load uops misses in L1 cache as data sources.",
+ "BriefDescription": "Retired load uops misses in L1 cache as data sources. Uses PEBS.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x10",
- "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Retired load uops with L2 cache misses as data sources. Uses PEBS.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
"SampleAfterValue": "50021",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x20",
- "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source. (Precise Event - PEBS).",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -445,84 +558,83 @@
{
"EventCode": "0xD1",
"UMask": "0x40",
- "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x1",
- "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS",
"Errata": "BDM100",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x2",
- "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
"Errata": "BDM100",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x4",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
"Errata": "BDM100",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x8",
- "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required.",
+ "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_NONE",
"Errata": "BDM100",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD3",
"UMask": "0x1",
- "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
"Errata": "BDE70, BDM100",
- "PublicDescription": "Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI).",
+ "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches. This is a precise event.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD3",
"UMask": "0x4",
- "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI) (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -534,7 +646,7 @@
{
"EventCode": "0xD3",
"UMask": "0x10",
- "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM",
+ "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -546,7 +658,7 @@
{
"EventCode": "0xD3",
"UMask": "0x20",
- "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache",
+ "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -695,119 +807,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x24",
- "UMask": "0x42",
- "BriefDescription": "RFO requests that hit L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_HIT",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x22",
- "BriefDescription": "RFO requests that miss L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x44",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.CODE_RD_HIT",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x24",
- "BriefDescription": "L2 cache misses when fetching instructions.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x27",
- "BriefDescription": "Demand requests that miss L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0xe7",
- "BriefDescription": "Demand requests to L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x3f",
- "BriefDescription": "All requests that miss L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0xff",
- "BriefDescription": "All L2 requests.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.REFERENCES",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "CounterMask": "6",
- "Errata": "BDM76",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x48",
- "UMask": "0x1",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "Counter": "2",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "AnyThread": "1",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0x48",
- "UMask": "0x2",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "Counter": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
@@ -816,6 +815,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all requests that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -828,6 +828,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -840,6 +841,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -852,6 +854,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -864,6 +867,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -876,6 +880,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -888,6 +893,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -900,6 +906,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -912,6 +919,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -924,6 +932,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -936,6 +945,20 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3",
+ "MSRValue": "0x3f803c0002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
}
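The OFFCORE_RESPONSE.* entries above pair EventCode 0xB7/0xBB with a 64-bit match value (MSRValue) programmed through MSRIndex 0x1a6/0x1a7. As a rough illustration only, under the usual perf_event_open(2) raw encoding for Intel cores (attr.config = event | umask << 8, and attr.config1 carrying the offcore response match value; that mapping is an assumption about the kernel's offcore_rsp format, not something stated in this patch), the sketch below counts the new OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.ANY_RESPONSE event using its MSRValue 0x3f803c0002:

/* Hedged sketch: count one offcore-response event for the calling thread.
 * Assumes attr.config packs EventCode and UMask (event | umask << 8) and
 * attr.config1 carries the offcore response match value (MSRValue above). */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0xb7 | (0x1 << 8);	/* EventCode 0xB7, UMask 0x1 */
	attr.config1 = 0x3f803c0002ULL;		/* MSRValue from the entry above */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... run the workload of interest here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.ANY_RESPONSE: %lld\n", count);
	close(fd);
	return 0;
}

Once these JSON files are installed, the same event can of course be requested through the perf tool by its symbolic name instead of the raw encoding.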
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json b/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json
index 4ae1ea24f22f..d7b9d9c9c518 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json
@@ -6,7 +6,7 @@
"Counter": "0,1,2,3",
"EventName": "OTHER_ASSISTS.AVX_TO_SSE",
"Errata": "BDM30",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
+ "PublicDescription": "This event counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -17,7 +17,7 @@
"Counter": "0,1,2,3",
"EventName": "OTHER_ASSISTS.SSE_TO_AVX",
"Errata": "BDM30",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
+ "PublicDescription": "This event counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -25,7 +25,6 @@
"EventCode": "0xC7",
"UMask": "0x1",
"BriefDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired. Each count represents 1 computation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
"SampleAfterValue": "2000003",
@@ -35,7 +34,6 @@
"EventCode": "0xC7",
"UMask": "0x2",
"BriefDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired. Each count represents 1 computation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
"SampleAfterValue": "2000003",
@@ -43,9 +41,17 @@
},
{
"EventCode": "0xC7",
+ "UMask": "0x3",
+ "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
"UMask": "0x4",
"BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired. Each count represents 2 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
"SampleAfterValue": "2000003",
@@ -55,7 +61,6 @@
"EventCode": "0xC7",
"UMask": "0x8",
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
"SampleAfterValue": "2000003",
@@ -65,19 +70,54 @@
"EventCode": "0xC7",
"UMask": "0x10",
"BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xC7",
+ "UMask": "0x15",
+ "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
+ "SampleAfterValue": "2000006",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc7",
+ "UMask": "0x20",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x2a",
+ "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.SINGLE",
+ "SampleAfterValue": "2000005",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x3c",
+ "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.PACKED",
+ "SampleAfterValue": "2000004",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0xCA",
"UMask": "0x2",
"BriefDescription": "Number of X87 assists due to output value.",
"Counter": "0,1,2,3",
"EventName": "FP_ASSIST.X87_OUTPUT",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
+ "PublicDescription": "This event counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -87,7 +127,7 @@
"BriefDescription": "Number of X87 assists due to input value.",
"Counter": "0,1,2,3",
"EventName": "FP_ASSIST.X87_INPUT",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
+ "PublicDescription": "This event counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -97,7 +137,7 @@
"BriefDescription": "Number of SIMD FP assists due to Output values",
"Counter": "0,1,2,3",
"EventName": "FP_ASSIST.SIMD_OUTPUT",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
+ "PublicDescription": "This event counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -107,7 +147,7 @@
"BriefDescription": "Number of SIMD FP assists due to input values",
"Counter": "0,1,2,3",
"EventName": "FP_ASSIST.SIMD_INPUT",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
+ "PublicDescription": "This event counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -121,51 +161,5 @@
"PublicDescription": "This event counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xc7",
- "UMask": "0x20",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "UMask": "0x3",
- "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "UMask": "0x3c",
- "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.PACKED",
- "SampleAfterValue": "2000004",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "UMask": "0x2a",
- "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.SINGLE",
- "SampleAfterValue": "2000005",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "UMask": "0x15",
- "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
- "SampleAfterValue": "2000006",
- "CounterHTOff": "0,1,2,3"
}
] \ No newline at end of file
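The FP_ARITH_INST_RETIRED umasks above each state a fixed number of computations per count: 1 for the scalar flavors, 2 for 128-bit packed double, 4 for 128-bit packed single and for 256-bit packed double, and 8 for 256-bit packed single. As a small illustration (not part of the patch), raw counts of the five base umasks can be combined into a floating-point operation total like this:

/* Illustrative helper: total FP operations from FP_ARITH_INST_RETIRED
 * counts, using the computations-per-count factors quoted in the event
 * descriptions above. The counter values would come from perf or a
 * similar tool; nothing here is defined by the JSON itself. */
static unsigned long long fp_arith_flop_total(unsigned long long scalar_double,
					      unsigned long long scalar_single,
					      unsigned long long pk128_double,
					      unsigned long long pk128_single,
					      unsigned long long pk256_double,
					      unsigned long long pk256_single)
{
	return (scalar_double + scalar_single) +
	       2ULL * pk128_double +
	       4ULL * pk128_single +
	       4ULL * pk256_double +
	       8ULL * pk256_single;
}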
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/frontend.json b/tools/perf/pmu-events/arch/x86/broadwellx/frontend.json
index 06bf0a40e568..72781e1e3362 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/frontend.json
@@ -15,80 +15,49 @@
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"Counter": "0,1,2,3",
"EventName": "IDQ.MITE_UOPS",
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x8",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.DSB_UOPS",
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x10",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_DSB_UOPS",
- "PublicDescription": "This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x20",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_MITE_UOPS",
- "PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MS_UOPS",
- "PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "EventName": "IDQ.MITE_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "UMask": "0x8",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MS_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x4",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MITE_CYCLES",
+ "EventName": "IDQ.DSB_CYCLES",
"CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x8",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "UMask": "0x10",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"Counter": "0,1,2,3",
- "EventName": "IDQ.DSB_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "EventName": "IDQ.MS_DSB_UOPS",
+ "PublicDescription": "This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -99,7 +68,7 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.MS_DSB_CYCLES",
"CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -111,7 +80,7 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.MS_DSB_OCCUR",
"CounterMask": "1",
- "PublicDescription": "This event counts the number of deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts the number of deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -122,7 +91,7 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
"CounterMask": "4",
- "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -133,7 +102,17 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
"CounterMask": "1",
- "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x20",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -144,7 +123,7 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
"CounterMask": "4",
- "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -155,7 +134,39 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
"CounterMask": "1",
- "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_SWITCHES",
+ "CounterMask": "1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -165,7 +176,7 @@
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"Counter": "0,1,2,3",
"EventName": "IDQ.MITE_ALL_UOPS",
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -205,7 +216,7 @@
"BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
"Counter": "0,1,2,3",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
- "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding ?4 ? x? when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
+ "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -268,18 +279,7 @@
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
"Counter": "0,1,2,3",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
- "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0?2 cycles.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EdgeDetect": "1",
- "EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_SWITCHES",
- "CounterMask": "1",
+ "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
}
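Per the IDQ_UOPS_NOT_DELIVERED.CORE description above, the event adds 4 - x on each cycle in which the RAT is not stalled but the IDQ delivers only x of the 4 possible uops. One commonly derived ratio (an illustration, not a metric defined by this file) is therefore the share of front-end issue slots that went unused:

/* Illustrative derived metric: fraction of issue slots left empty by the
 * front end, assuming 4 slots per unstalled cycle as described above.
 * Inputs are raw values of IDQ_UOPS_NOT_DELIVERED.CORE and
 * CPU_CLK_UNHALTED.THREAD. */
static double frontend_idle_slot_ratio(unsigned long long uops_not_delivered,
				       unsigned long long unhalted_cycles)
{
	if (unhalted_cycles == 0)
		return 0.0;
	return (double)uops_not_delivered / (4.0 * (double)unhalted_cycles);
}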
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/memory.json b/tools/perf/pmu-events/arch/x86/broadwellx/memory.json
index 1204ea8ff30d..d79a5cfea44b 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/memory.json
@@ -95,7 +95,6 @@
"BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
"Counter": "0,1,2,3",
"EventName": "TX_EXEC.MISC1",
- "PublicDescription": "Unfriendly TSX abort triggered by a flowmarker.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -171,11 +170,11 @@
{
"EventCode": "0xc8",
"UMask": "0x4",
- "BriefDescription": "Number of times HLE abort was triggered",
+ "BriefDescription": "Number of times HLE abort was triggered (PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "HLE_RETIRED.ABORTED",
- "PublicDescription": "Number of times HLE abort was triggered.",
+ "PublicDescription": "Number of times HLE abort was triggered (PEBS).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -252,11 +251,11 @@
{
"EventCode": "0xc9",
"UMask": "0x4",
- "BriefDescription": "Number of times RTM abort was triggered",
+ "BriefDescription": "Number of times RTM abort was triggered (PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "RTM_RETIRED.ABORTED",
- "PublicDescription": "Number of times RTM abort was triggered .",
+ "PublicDescription": "Number of times RTM abort was triggered (PEBS).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -439,6 +438,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all requests that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -451,6 +451,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and clean or shared data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -463,6 +464,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -475,6 +477,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from remote dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -487,6 +490,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -499,6 +503,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -511,6 +516,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -523,6 +529,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -535,6 +542,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -547,6 +555,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -559,6 +568,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -571,6 +581,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -583,6 +594,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -595,6 +607,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -607,6 +620,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -619,6 +633,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -631,6 +646,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -643,6 +659,20 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) that miss in the L3",
+ "MSRValue": "0x3fbfc00002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
}
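Several entries above, for example HLE_RETIRED.ABORTED (EventCode 0xc8, UMask 0x4) and RTM_RETIRED.ABORTED, carry "PEBS": "1", i.e. they support precise sampling. A minimal sketch of opening such an event for precise sampling through perf_event_open(2) could look as follows; the precise_ip setting and the reuse of SampleAfterValue as the sampling period are conventional choices, not something this patch prescribes:

/* Hedged sketch: set up precise (PEBS) sampling of HLE_RETIRED.ABORTED.
 * attr.config packs EventCode 0xc8 and UMask 0x4; precise_ip = 2 requests
 * zero-skid samples; the period mirrors the event's SampleAfterValue.
 * Mapping and reading the sample ring buffer is omitted. */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int open_hle_abort_sampler(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0xc8 | (0x4 << 8);	/* HLE_RETIRED.ABORTED */
	attr.sample_period = 2000003;		/* SampleAfterValue above */
	attr.sample_type = PERF_SAMPLE_IP;
	attr.precise_ip = 2;			/* ask for PEBS-quality IP */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}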
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/other.json b/tools/perf/pmu-events/arch/x86/broadwellx/other.json
index 718fcb1db2ee..4475249ea9da 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/other.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/other.json
@@ -10,16 +10,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x5C",
- "UMask": "0x2",
- "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
- "Counter": "0,1,2,3",
- "EventName": "CPL_CYCLES.RING123",
- "PublicDescription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EdgeDetect": "1",
"EventCode": "0x5C",
"UMask": "0x1",
@@ -32,6 +22,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x5C",
+ "UMask": "0x2",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "Counter": "0,1,2,3",
+ "EventName": "CPL_CYCLES.RING123",
+ "PublicDescription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x63",
"UMask": "0x1",
"BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
index 02b4e1035f2d..920c89da9111 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
@@ -3,31 +3,41 @@
"EventCode": "0x00",
"UMask": "0x1",
"BriefDescription": "Instructions retired from execution.",
- "Counter": "Fixed counter 1",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. \nNotes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. \nCounting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 1"
+ "CounterHTOff": "Fixed counter 0"
},
{
"EventCode": "0x00",
"UMask": "0x2",
"BriefDescription": "Core cycles when the thread is not in halt state",
- "Counter": "Fixed counter 2",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "EventCode": "0x00",
+ "UMask": "0x2",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "Fixed counter 1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "Fixed counter 1"
},
{
"EventCode": "0x00",
"UMask": "0x3",
"BriefDescription": "Reference cycles when the core is not in halt state.",
- "Counter": "Fixed counter 3",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. \nNote: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. This event is clocked by base clock (100 Mhz) on Sandy Bridge. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 3"
+ "CounterHTOff": "Fixed counter 2"
},
{
"EventCode": "0x03",
@@ -60,22 +70,33 @@
},
{
"EventCode": "0x0D",
- "UMask": "0x8",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
+ "UMask": "0x3",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
"Counter": "0,1,2,3",
- "EventName": "INT_MISC.RAT_STALL_CYCLES",
- "PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "Cycles checkpoints in Resource Allocation Table (RAT) are recovering from JEClear or machine clear.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x0D",
"UMask": "0x3",
- "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...)",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
"Counter": "0,1,2,3",
- "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "AnyThread": "1",
"CounterMask": "1",
- "PublicDescription": "Cycles checkpoints in Resource Allocation Table (RAT) are recovering from JEClear or machine clear.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
+ "Counter": "0,1,2,3",
+ "EventName": "INT_MISC.RAT_STALL_CYCLES",
+ "PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -90,6 +111,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "Invert": "1",
+ "EventCode": "0x0E",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0x0E",
"UMask": "0x10",
"BriefDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.",
@@ -118,18 +151,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "Invert": "1",
- "EventCode": "0x0E",
- "UMask": "0x1",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
"EventCode": "0x14",
"UMask": "0x1",
"BriefDescription": "Cycles when divider is busy executing divide operations",
@@ -141,6 +162,26 @@
},
{
"EventCode": "0x3C",
+ "UMask": "0x0",
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x0",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
"UMask": "0x1",
"BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
"Counter": "0,1,2,3",
@@ -150,6 +191,36 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x3c",
"UMask": "0x2",
"BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
@@ -159,6 +230,15 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0x3C",
+ "UMask": "0x2",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x4c",
"UMask": "0x1",
"BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
@@ -225,6 +305,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EdgeDetect": "1",
+ "Invert": "1",
+ "EventCode": "0x5E",
+ "UMask": "0x1",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "Counter": "0,1,2,3",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "CounterMask": "1",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x87",
"UMask": "0x1",
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
@@ -406,6 +498,15 @@
},
{
"EventCode": "0x89",
+ "UMask": "0xa0",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
"UMask": "0xc1",
"BriefDescription": "Speculative and retired mispredicted macro conditional branches",
"Counter": "0,1,2,3",
@@ -435,6 +536,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA0",
+ "UMask": "0x3",
+ "BriefDescription": "Micro-op dispatches cancelled due to insufficient SIMD physical register file read ports",
+ "Counter": "0,1,2,3",
+ "EventName": "UOP_DISPATCHES_CANCELLED.SIMD_PRF",
+ "PublicDescription": "This event counts the number of micro-operations cancelled after they were dispatched from the scheduler to the execution units when the total number of physical register read ports across all dispatch ports exceeds the read bandwidth of the physical register file. The SIMD_PRF subevent applies to the following instructions: VDPPS, DPPS, VPCMPESTRI, PCMPESTRI, VPCMPESTRM, PCMPESTRM, VFMADD*, VFMADDSUB*, VFMSUB*, VMSUBADD*, VFNMADD*, VFNMSUB*. See the Broadwell Optimization Guide for more information.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0xA1",
"UMask": "0x1",
"BriefDescription": "Cycles per thread when uops are executed in port 0",
@@ -446,6 +557,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 0.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x2",
"BriefDescription": "Cycles per thread when uops are executed in port 1",
"Counter": "0,1,2,3",
@@ -456,6 +587,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 1.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x4",
"BriefDescription": "Cycles per thread when uops are executed in port 2",
"Counter": "0,1,2,3",
@@ -466,6 +617,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x8",
"BriefDescription": "Cycles per thread when uops are executed in port 3",
"Counter": "0,1,2,3",
@@ -476,6 +647,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x10",
"BriefDescription": "Cycles per thread when uops are executed in port 4",
"Counter": "0,1,2,3",
@@ -486,6 +677,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x20",
"BriefDescription": "Cycles per thread when uops are executed in port 5",
"Counter": "0,1,2,3",
@@ -496,6 +707,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x20",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x20",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x40",
"BriefDescription": "Cycles per thread when uops are executed in port 6",
"Counter": "0,1,2,3",
@@ -506,6 +737,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x40",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x40",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x80",
"BriefDescription": "Cycles per thread when uops are executed in port 7",
"Counter": "0,1,2,3",
@@ -515,6 +766,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "UMask": "0x80",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x80",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xA2",
"UMask": "0x1",
"BriefDescription": "Resource-related stall cycles",
@@ -567,14 +838,13 @@
},
{
"EventCode": "0xA3",
- "UMask": "0x8",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "Counter": "2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
- "CounterMask": "8",
- "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "CounterMask": "1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "2"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xA3",
@@ -589,8 +859,18 @@
},
{
"EventCode": "0xA3",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "CounterMask": "2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
"UMask": "0x4",
- "BriefDescription": "Total execution stalls",
+ "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
"Counter": "0,1,2,3",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
"CounterMask": "4",
@@ -600,6 +880,16 @@
},
{
"EventCode": "0xA3",
+ "UMask": "0x4",
+ "BriefDescription": "Total execution stalls.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
"UMask": "0x5",
"BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
"Counter": "0,1,2,3",
@@ -611,6 +901,16 @@
},
{
"EventCode": "0xA3",
+ "UMask": "0x5",
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "CounterMask": "5",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
"UMask": "0x6",
"BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
"Counter": "0,1,2,3",
@@ -622,6 +922,37 @@
},
{
"EventCode": "0xA3",
+ "UMask": "0x6",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "CounterMask": "6",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "CounterMask": "8",
+ "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "CounterMask": "8",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
"UMask": "0xc",
"BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
"Counter": "2",
@@ -632,12 +963,41 @@
"CounterHTOff": "2"
},
{
+ "EventCode": "0xA3",
+ "UMask": "0xc",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "CounterMask": "12",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
"EventCode": "0xA8",
"UMask": "0x1",
"BriefDescription": "Number of Uops delivered by the LSD.",
"Counter": "0,1,2,3",
"EventName": "LSD.UOPS",
- "PublicDescription": "Number of Uops delivered by the LSD. ",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "CounterMask": "1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -652,6 +1012,58 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "Invert": "1",
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "CounterMask": "2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "CounterMask": "3",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0xB1",
"UMask": "0x2",
"BriefDescription": "Number of uops executed on the core.",
@@ -662,35 +1074,63 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "Invert": "1",
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"CounterMask": "1",
- "PublicDescription": "This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC0",
- "UMask": "0x0",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "INST_RETIRED.ANY_P",
- "Errata": "BDM61",
- "PublicDescription": "This event counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "CounterMask": "2",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC0",
+ "EventCode": "0xb1",
"UMask": "0x2",
- "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions:",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "INST_RETIRED.X87",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "CounterMask": "3",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC0",
+ "UMask": "0x0",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3",
+ "EventName": "INST_RETIRED.ANY_P",
+ "Errata": "BDM61",
+ "PublicDescription": "This event counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -707,6 +1147,16 @@
"CounterHTOff": "1"
},
{
+ "EventCode": "0xC0",
+ "UMask": "0x2",
+ "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions:",
+ "Counter": "0,1,2,3",
+ "EventName": "INST_RETIRED.X87",
+ "PublicDescription": "This event counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC1",
"UMask": "0x40",
"BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
@@ -718,23 +1168,12 @@
{
"EventCode": "0xC2",
"UMask": "0x1",
- "BriefDescription": "Actually retired uops.",
+ "BriefDescription": "Actually retired uops. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "UOPS_RETIRED.ALL",
- "PublicDescription": "This event counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC2",
- "UMask": "0x2",
- "BriefDescription": "Retirement slots used.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of retirement slots used.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -746,7 +1185,7 @@
"Counter": "0,1,2,3",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
"CounterMask": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -763,6 +1202,17 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xC2",
+ "UMask": "0x2",
+ "BriefDescription": "Retirement slots used. (Precise Event - PEBS)",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of retirement slots used.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC3",
"UMask": "0x1",
"BriefDescription": "Cycles there was a Nuke. Account for both thread-specific and All Thread Nukes.",
@@ -773,6 +1223,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EdgeDetect": "1",
+ "EventCode": "0xC3",
+ "UMask": "0x1",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "CounterMask": "1",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC3",
"UMask": "0x4",
"BriefDescription": "Self-modifying code (SMC) detected.",
@@ -794,44 +1255,67 @@
},
{
"EventCode": "0xC4",
+ "UMask": "0x0",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "This event counts all (macro) branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
"UMask": "0x1",
- "BriefDescription": "Conditional branch instructions retired.",
+ "BriefDescription": "Conditional branch instructions retired. (Precise Event - PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts conditional branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts conditional branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
"UMask": "0x2",
- "BriefDescription": "Direct and indirect near call instructions retired.",
+ "BriefDescription": "Direct and indirect near call instructions retired. (Precise Event - PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts both direct and indirect near call instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect near call instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
- "UMask": "0x0",
- "BriefDescription": "All (macro) branch instructions retired.",
+ "UMask": "0x2",
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3). (Precise Event - PEBS)",
+ "PEBS": "1",
"Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "PublicDescription": "This event counts all (macro) branch instructions retired.",
- "SampleAfterValue": "400009",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect macro near call instructions retired (captured in ring 3).",
+ "SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
+ "UMask": "0x4",
+ "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS)",
+ "PEBS": "2",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "Errata": "BDW98",
+ "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC4",
"UMask": "0x8",
- "BriefDescription": "Return instructions retired.",
+ "BriefDescription": "Return instructions retired. (Precise Event - PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts return instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts return instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -841,18 +1325,18 @@
"BriefDescription": "Not taken branch instructions retired.",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts not taken branch instructions retired.",
+ "PublicDescription": "This event counts not taken branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
"UMask": "0x20",
- "BriefDescription": "Taken branch instructions retired.",
+ "BriefDescription": "Taken branch instructions retired. (Precise Event - PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts taken branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts taken branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -863,34 +1347,11 @@
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"Errata": "BDW98",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts far branch instructions retired.",
+ "PublicDescription": "This event counts far branch instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC4",
- "UMask": "0x4",
- "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS)",
- "PEBS": "2",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "Errata": "BDW98",
- "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC5",
- "UMask": "0x1",
- "BriefDescription": "Mispredicted conditional branch instructions retired.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted conditional branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0xC5",
"UMask": "0x0",
"BriefDescription": "All mispredicted macro branch instructions retired.",
@@ -902,13 +1363,13 @@
},
{
"EventCode": "0xC5",
- "UMask": "0x8",
- "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "UMask": "0x1",
+ "BriefDescription": "Mispredicted conditional branch instructions retired. (Precise Event - PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.RET",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
- "SampleAfterValue": "100007",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -923,164 +1384,36 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xCC",
- "UMask": "0x20",
- "BriefDescription": "Count cases of saving new LBR",
- "Counter": "0,1,2,3",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
- "PublicDescription": "This event counts cases of saving new LBR records by hardware. This assumes proper enabling of LBRs and takes into account LBR filtering done by the LBR_SELECT register.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x0",
- "BriefDescription": "Thread cycles when thread is not in halt state",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x89",
- "UMask": "0xa0",
- "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x1",
- "BriefDescription": "Cycles per core when uops are exectuted in port 0.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x2",
- "BriefDescription": "Cycles per core when uops are exectuted in port 1.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x4",
- "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
+ "EventCode": "0xC5",
"UMask": "0x8",
- "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x10",
- "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x20",
- "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x40",
- "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x80",
- "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired.(Precise Event)",
+ "PEBS": "1",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
+ "EventName": "BR_MISP_RETIRED.RET",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted return instructions retired.",
+ "SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC5",
"UMask": "0x20",
- "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "BriefDescription": "number of near branch instructions retired that were mispredicted and taken. (Precise Event - PEBS).",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken. (Precise Event - PEBS).",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
- "CounterMask": "2",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
- "CounterMask": "3",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "EventCode": "0xCC",
+ "UMask": "0x20",
+ "BriefDescription": "Count cases of saving new LBR",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
- "CounterMask": "4",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "PublicDescription": "This event counts cases of saving new LBR records by hardware. This assumes proper enabling of LBRs and takes into account LBR filtering done by the LBR_SELECT register.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xe6",
@@ -1090,328 +1423,5 @@
"EventName": "BACLEARS.ANY",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x8",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "Counter": "2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
- "CounterMask": "8",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x1",
- "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x2",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
- "CounterMask": "2",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x4",
- "BriefDescription": "Total execution stalls.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
- "CounterMask": "4",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0xc",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "Counter": "2",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
- "CounterMask": "12",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x5",
- "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
- "CounterMask": "5",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x6",
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
- "CounterMask": "6",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EdgeDetect": "1",
- "EventCode": "0xC3",
- "UMask": "0x1",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "Counter": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.COUNT",
- "CounterMask": "1",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA8",
- "UMask": "0x1",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "Counter": "0,1,2,3",
- "EventName": "LSD.CYCLES_4_UOPS",
- "CounterMask": "4",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EdgeDetect": "1",
- "Invert": "1",
- "EventCode": "0x5E",
- "UMask": "0x1",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "Counter": "0,1,2,3",
- "EventName": "RS_EVENTS.EMPTY_END",
- "CounterMask": "1",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA8",
- "UMask": "0x1",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "Counter": "0,1,2,3",
- "EventName": "LSD.CYCLES_ACTIVE",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x1",
- "BriefDescription": "Cycles per thread when uops are executed in port 0",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x2",
- "BriefDescription": "Cycles per thread when uops are executed in port 1",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x4",
- "BriefDescription": "Cycles per thread when uops are executed in port 2",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x8",
- "BriefDescription": "Cycles per thread when uops are executed in port 3",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x10",
- "BriefDescription": "Cycles per thread when uops are executed in port 4",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x20",
- "BriefDescription": "Cycles per thread when uops are executed in port 5",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x40",
- "BriefDescription": "Cycles per thread when uops are executed in port 6",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x80",
- "BriefDescription": "Cycles per thread when uops are executed in port 7",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA0",
- "UMask": "0x3",
- "BriefDescription": "Micro-op dispatches cancelled due to insufficient SIMD physical register file read ports",
- "Counter": "0,1,2,3",
- "EventName": "UOP_DISPATCHES_CANCELLED.SIMD_PRF",
- "PublicDescription": "This event counts the number of micro-operations cancelled after they were dispatched from the scheduler to the execution units when the total number of physical register read ports across all dispatch ports exceeds the read bandwidth of the physical register file. The SIMD_PRF subevent applies to the following instructions: VDPPS, DPPS, VPCMPESTRI, PCMPESTRI, VPCMPESTRM, PCMPESTRM, VFMADD*, VFMADDSUB*, VFMSUB*, VMSUBADD*, VFNMADD*, VFNMSUB*. See the Broadwell Optimization Guide for more information.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x00",
- "UMask": "0x2",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "Fixed counter 2",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 2"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x0",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "UMask": "0x3",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "Counter": "0,1,2,3",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "AnyThread": "1",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "CounterMask": "2",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
- "CounterMask": "3",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "CounterMask": "4",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "Invert": "1",
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x2",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
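The entries being reshuffled above are consumed by perf's pmu-events/jevents machinery, which turns each JSON object into a named event alias; the "EventCode" and "UMask" fields correspond to the raw x86 event select and unit mask. As a minimal sketch (not part of the patch), the snippet below builds the equivalent raw perf event string from one entry copied out of the pipeline.json hunk above; the helper name `to_perf_event` and the exact `perf stat` invocation are illustrative assumptions, while the event/umask/cmask/inv/any format terms are the standard core-PMU sysfs format terms.

```python
# Minimal sketch, assuming the standard x86 core-PMU format terms
# (event, umask, cmask, inv, any). The sample entry is copied from the
# Broadwell pipeline.json hunk above; to_perf_event() is a hypothetical
# helper, not part of the kernel tooling.
entry = {
    "EventCode": "0x3C",
    "UMask": "0x2",
    "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
    "SampleAfterValue": "2000003",
}

def to_perf_event(e):
    # EventCode -> event=, UMask -> umask=; optional fields map to their
    # matching format terms when present.
    terms = ["event=%s" % e["EventCode"].lower(),
             "umask=%s" % e["UMask"].lower()]
    if e.get("AnyThread") == "1":
        terms.append("any=1")
    if e.get("CounterMask"):
        terms.append("cmask=%s" % e["CounterMask"])
    if e.get("Invert") == "1":
        terms.append("inv=1")
    return "cpu/%s/" % ",".join(terms)

print("perf stat -e %s -a sleep 1" % to_perf_event(entry))
# -> perf stat -e cpu/event=0x3c,umask=0x2/ -a sleep 1
```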
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json b/tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json
index 5ce8b67ba076..7d79c707c6d1 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json
@@ -45,6 +45,16 @@
},
{
"EventCode": "0x08",
+ "UMask": "0xe",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "Errata": "BDM69",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
"UMask": "0x10",
"BriefDescription": "Cycles when PMH is busy with page walks",
"Counter": "0,1,2,3",
@@ -73,6 +83,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x08",
+ "UMask": "0x60",
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x49",
"UMask": "0x1",
"BriefDescription": "Store misses in all DTLB levels that cause page walks",
@@ -118,6 +137,16 @@
},
{
"EventCode": "0x49",
+ "UMask": "0xe",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "Errata": "BDM69",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
"UMask": "0x10",
"BriefDescription": "Cycles when PMH is busy with page walks",
"Counter": "0,1,2,3",
@@ -146,6 +175,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x49",
+ "UMask": "0x60",
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x4F",
"UMask": "0x10",
"BriefDescription": "Cycle count for an Extended Page table walk.",
@@ -201,6 +239,16 @@
},
{
"EventCode": "0x85",
+ "UMask": "0xe",
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "Errata": "BDM69",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
"UMask": "0x10",
"BriefDescription": "Cycles when PMH is busy with page walks",
"Counter": "0,1,2,3",
@@ -229,6 +277,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x85",
+ "UMask": "0x60",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xAE",
"UMask": "0x1",
"BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
@@ -250,60 +307,60 @@
},
{
"EventCode": "0xBC",
- "UMask": "0x21",
- "BriefDescription": "Number of ITLB page walker hits in the L1+FB.",
+ "UMask": "0x12",
+ "BriefDescription": "Number of DTLB page walker hits in the L2.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x12",
- "BriefDescription": "Number of DTLB page walker hits in the L2.",
+ "UMask": "0x14",
+ "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x22",
- "BriefDescription": "Number of ITLB page walker hits in the L2.",
+ "UMask": "0x18",
+ "BriefDescription": "Number of DTLB page walker hits in Memory.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L2",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x14",
- "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP.",
+ "UMask": "0x21",
+ "BriefDescription": "Number of ITLB page walker hits in the L1+FB.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x24",
- "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP.",
+ "UMask": "0x22",
+ "BriefDescription": "Number of ITLB page walker hits in the L2.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L2",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x18",
- "BriefDescription": "Number of DTLB page walker hits in Memory.",
+ "UMask": "0x24",
+ "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
@@ -327,62 +384,5 @@
"PublicDescription": "This event counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, and so on).",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x08",
- "UMask": "0xe",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
- "Errata": "BDM69",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x08",
- "UMask": "0x60",
- "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x49",
- "UMask": "0xe",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
- "Errata": "BDM69",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x49",
- "UMask": "0x60",
- "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "UMask": "0xe",
- "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
- "Errata": "BDM69",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "UMask": "0x60",
- "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
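The goldmont cache.json hunks below add a large set of OFFCORE_RESPONSE.* entries whose request/response selection lives in an auxiliary MSR rather than in the event select: "MSRValue" is the value programmed into MSR_OFFCORE_RSP_0/1 (the "MSRIndex" 0x1a6/0x1a7 pair). A minimal sketch of that mapping, assuming the standard `offcore_rsp` format term exposed by the core PMU; the sample entry is taken from one of the added hunks, and the helper and command line are illustrative only.

```python
# Minimal sketch, assuming the event/umask/offcore_rsp format terms.
# The entry is one of the OFFCORE_RESPONSE.* objects added in the goldmont
# hunks below; to_perf_event() is a hypothetical helper.
entry = {
    "EventCode": "0xB7",
    "UMask": "0x1",
    "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.ANY_RESPONSE",
    "MSRValue": "0x0000013091",
    "MSRIndex": "0x1a6,0x1a7",
}

def to_perf_event(e):
    terms = [
        "event=%s" % e["EventCode"].lower(),
        "umask=%s" % e["UMask"],
        # Some MSRValue strings in these files carry a stray trailing space,
        # so strip defensively before emitting the term.
        "offcore_rsp=%s" % e["MSRValue"].strip(),
    ]
    return "cpu/%s/" % ",".join(terms)

print("perf stat -e %s -a sleep 1" % to_perf_event(entry))
# -> perf stat -e cpu/event=0xb7,umask=0x1,offcore_rsp=0x0000013091/ -a sleep 1
```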
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/cache.json b/tools/perf/pmu-events/arch/x86/goldmont/cache.json
index 4e02e1e5e70d..f8bbe087b0f8 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/cache.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/cache.json
@@ -1,6 +1,26 @@
[
{
"CollectPEBSRecord": "1",
+ "PublicDescription": "Counts memory requests originating from the core that miss in the L2 cache.",
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache request misses"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts memory requests originating from the core that reference a cache line in the L2 cache.",
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4f",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache requests"
+ },
+ {
+ "CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of demand and prefetch transactions that the L2 XQ rejects due to a full or near full condition which likely indicates back pressure from the intra-die interconnect (IDI) fabric. The XQ may reject transactions from the L2Q (non-cacheable requests), L2 misses and L2 write-back victims.",
"EventCode": "0x30",
"Counter": "0,1,2,3",
@@ -11,120 +31,119 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of demand and L1 prefetcher requests rejected by the L2Q due to a full or nearly full condition which likely indicates back pressure from L2Q. It also counts requests that would have gone directly to the XQ, but are rejected due to a full or nearly full condition, indicating back pressure from the IDI link. The L2Q may also reject transactions from a core to insure fairness between cores, or to delay a core's dirty eviction when the address conflicts with incoming external snoops.",
+ "PublicDescription": "Counts the number of demand and L1 prefetcher requests rejected by the L2Q due to a full or nearly full condition which likely indicates back pressure from L2Q. It also counts requests that would have gone directly to the XQ, but are rejected due to a full or nearly full condition, indicating back pressure from the IDI link. The L2Q may also reject transactions from a core to ensure fairness between cores, or to delay a core's dirty eviction when the address conflicts with incoming external snoops.",
"EventCode": "0x31",
"Counter": "0,1,2,3",
"UMask": "0x0",
"EventName": "CORE_REJECT_L2Q.ALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Requests rejected by the L2Q "
+ "BriefDescription": "Requests rejected by the L2Q"
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts memory requests originating from the core that reference a cache line in the L2 cache.",
- "EventCode": "0x2E",
+ "PublicDescription": "Counts when a modified (dirty) cache line is evicted from the data L1 cache and needs to be written back to memory. No count will occur if the evicted line is clean, and hence does not require a writeback.",
+ "EventCode": "0x51",
"Counter": "0,1,2,3",
- "UMask": "0x4f",
- "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "UMask": "0x1",
+ "EventName": "DL1.DIRTY_EVICTION",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache requests"
+ "BriefDescription": "L1 Cache evictions for dirty data"
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts memory requests originating from the core that miss in the L2 cache.",
- "EventCode": "0x2E",
+ "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ICache miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ICache miss. Note: this event is not the same as the total number of cycles spent retrieving instruction cache lines from the memory hierarchy.",
+ "EventCode": "0x86",
"Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "LONGEST_LAT_CACHE.MISS",
+ "UMask": "0x2",
+ "EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache request misses"
+ "BriefDescription": "Cycles code-fetch stalled due to an outstanding ICache miss."
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts cycles that an ICache miss is outstanding, and instruction fetch is stalled. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes, while an Icache miss outstanding. Note this event is not the same as cycles to retrieve an instruction due to an Icache miss. Rather, it is the part of the Instruction Cache (ICache) miss time where no bytes are available for the decoder.",
- "EventCode": "0x86",
+ "EventCode": "0xB7",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Cycles where code-fetch is stalled and an ICache miss is outstanding. This is not the same as an ICache Miss."
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of load uops retired.",
+ "PublicDescription": "Counts locked memory uops retired. This includes regular locks and bus locks. (To specifically count bus locks only, see the Offcore response event.) A locked access is one with a lock prefix, or an exchange to memory. See the SDM for a complete description of which memory load accesses are locks.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "UMask": "0x21",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired (Precise event capable)"
+ "BriefDescription": "Locked load uops retired (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of store uops retired.",
+ "PublicDescription": "Counts load uops retired where the data requested spans a 64 byte cache line boundary.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "UMask": "0x41",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"SampleAfterValue": "200003",
- "BriefDescription": "Store uops retired (Precise event capable)"
+ "BriefDescription": "Load uops retired that split a cache-line (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of memory uops retired that is either a loads or a store or both.",
+ "PublicDescription": "Counts store uops retired where the data requested spans a 64 byte cache line boundary.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x83",
- "EventName": "MEM_UOPS_RETIRED.ALL",
+ "UMask": "0x42",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"SampleAfterValue": "200003",
- "BriefDescription": "Memory uops retired (Precise event capable)"
+ "BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts locked memory uops retired. This includes \"regular\" locks and bus locks. (To specifically count bus locks only, see the Offcore response event.) A locked access is one with a lock prefix, or an exchange to memory. See the SDM for a complete description of which memory load accesses are locks.",
+ "PublicDescription": "Counts memory uops retired where the data requested spans a 64 byte cache line boundary.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x21",
- "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
+ "UMask": "0x43",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Locked load uops retired (Precise event capable)"
+ "BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts load uops retired where the data requested spans a 64 byte cache line boundary.",
+ "PublicDescription": "Counts the number of load uops retired.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "UMask": "0x81",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that split a cache-line (Precise event capable)"
+ "BriefDescription": "Load uops retired (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts store uops retired where the data requested spans a 64 byte cache line boundary.",
+ "PublicDescription": "Counts the number of store uops retired.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x42",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
+ "UMask": "0x82",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"SampleAfterValue": "200003",
- "BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)"
+ "BriefDescription": "Store uops retired (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts memory uops retired where the data requested spans a 64 byte cache line boundary.",
+ "PublicDescription": "Counts the number of memory uops retired that is either a loads or a store or both.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x43",
- "EventName": "MEM_UOPS_RETIRED.SPLIT",
+ "UMask": "0x83",
+ "EventName": "MEM_UOPS_RETIRED.ALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)"
+ "BriefDescription": "Memory uops retired (Precise event capable)"
},
{
"PEBS": "2",
@@ -140,24 +159,24 @@
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts load uops retired that miss the L1 data cache.",
+ "PublicDescription": "Counts load uops retired that hit in the L2 cache.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
+ "UMask": "0x2",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)"
+ "BriefDescription": "Load uops retired that hit L2 (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts load uops retired that hit in the L2 cache.",
+ "PublicDescription": "Counts load uops retired that miss the L1 data cache.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "UMask": "0x8",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that hit L2 (Precise event capable)"
+ "BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)"
},
{
"PEBS": "2",
@@ -205,24 +224,20 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts when a modified (dirty) cache line is evicted from the data L1 cache and needs to be written back to memory. No count will occur if the evicted line is clean, and hence does not require a writeback.",
- "EventCode": "0x51",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "DL1.DIRTY_EVICTION",
- "SampleAfterValue": "200003",
- "BriefDescription": "L1 Cache evictions for dirty data"
- },
- {
- "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
+ "MSRValue": "0x40000032b7 ",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.OUTSTANDING",
+ "MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)"
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x36000032b7 ",
"Counter": "0,1,2,3",
@@ -234,6 +249,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x10000032b7 ",
"Counter": "0,1,2,3",
@@ -245,6 +262,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x04000032b7 ",
"Counter": "0,1,2,3",
@@ -256,6 +275,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x02000032b7 ",
"Counter": "0,1,2,3",
@@ -267,6 +288,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x00000432b7 ",
"Counter": "0,1,2,3",
@@ -278,6 +301,34 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x00000132b7 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000022 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600000022 ",
"Counter": "0,1,2,3",
@@ -289,6 +340,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000000022 ",
"Counter": "0,1,2,3",
@@ -300,6 +353,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400000022 ",
"Counter": "0,1,2,3",
@@ -311,6 +366,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000022 ",
"Counter": "0,1,2,3",
@@ -322,6 +379,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040022 ",
"Counter": "0,1,2,3",
@@ -333,6 +392,34 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010022 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000003091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600003091",
"Counter": "0,1,2,3",
@@ -344,6 +431,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000003091",
"Counter": "0,1,2,3",
@@ -355,6 +444,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400003091",
"Counter": "0,1,2,3",
@@ -366,6 +457,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200003091",
"Counter": "0,1,2,3",
@@ -377,6 +470,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads (demand & prefetch) that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000043091",
"Counter": "0,1,2,3",
@@ -388,6 +483,34 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads (demand & prefetch) that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000013091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads (demand & prefetch) that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000003010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600003010 ",
"Counter": "0,1,2,3",
@@ -399,6 +522,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000003010 ",
"Counter": "0,1,2,3",
@@ -410,6 +535,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400003010 ",
"Counter": "0,1,2,3",
@@ -421,6 +548,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200003010 ",
"Counter": "0,1,2,3",
@@ -432,6 +561,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000043010 ",
"Counter": "0,1,2,3",
@@ -443,6 +574,47 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000013010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the uncore subsystem that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts requests to the uncore subsystem that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x3600008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts requests to the uncore subsystem that miss the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000008000 ",
"Counter": "0,1,2,3",
@@ -454,6 +626,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400008000 ",
"Counter": "0,1,2,3",
@@ -465,6 +639,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the uncore subsystem that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200008000 ",
"Counter": "0,1,2,3",
@@ -476,6 +652,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the uncore subsystem that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000048000 ",
"Counter": "0,1,2,3",
@@ -487,6 +665,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the uncore subsystem that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000018000 ",
"Counter": "0,1,2,3",
@@ -498,6 +678,21 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000004800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600004800 ",
"Counter": "0,1,2,3",
@@ -509,6 +704,47 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000004800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400004800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200004800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000044800 ",
"Counter": "0,1,2,3",
@@ -520,6 +756,34 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000014800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000004000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600004000 ",
"Counter": "0,1,2,3",
@@ -531,6 +795,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000004000 ",
"Counter": "0,1,2,3",
@@ -542,6 +808,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400004000 ",
"Counter": "0,1,2,3",
@@ -553,6 +821,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200004000 ",
"Counter": "0,1,2,3",
@@ -564,6 +834,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000044000 ",
"Counter": "0,1,2,3",
@@ -575,6 +847,34 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000014000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000002000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600002000 ",
"Counter": "0,1,2,3",
@@ -586,6 +886,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000002000 ",
"Counter": "0,1,2,3",
@@ -597,6 +899,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400002000 ",
"Counter": "0,1,2,3",
@@ -608,6 +912,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200002000 ",
"Counter": "0,1,2,3",
@@ -619,6 +925,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000042000 ",
"Counter": "0,1,2,3",
@@ -630,6 +938,34 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000012000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000001000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600001000 ",
"Counter": "0,1,2,3",
@@ -641,6 +977,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000001000 ",
"Counter": "0,1,2,3",
@@ -652,6 +990,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400001000 ",
"Counter": "0,1,2,3",
@@ -663,6 +1003,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200001000 ",
"Counter": "0,1,2,3",
@@ -674,6 +1016,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000041000 ",
"Counter": "0,1,2,3",
@@ -685,6 +1029,34 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000011000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600000800 ",
"Counter": "0,1,2,3",
@@ -696,6 +1068,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000000800 ",
"Counter": "0,1,2,3",
@@ -707,6 +1081,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400000800 ",
"Counter": "0,1,2,3",
@@ -718,6 +1094,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000800 ",
"Counter": "0,1,2,3",
@@ -729,6 +1107,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040800 ",
"Counter": "0,1,2,3",
@@ -740,6 +1120,99 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts bus lock and split lock requests that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000400 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts bus lock and split lock requests that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts bus lock and split lock requests that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x3600000400 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts bus lock and split lock requests that miss the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts bus lock and split lock requests that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000400 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts bus lock and split lock requests that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts bus lock and split lock requests that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400000400 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts bus lock and split lock requests that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts bus lock and split lock requests that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000400 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts bus lock and split lock requests that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts bus lock and split lock requests that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000040400 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts bus lock and split lock requests that hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts bus lock and split lock requests that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000010400 ",
"Counter": "0,1,2,3",
@@ -751,6 +1224,112 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts code reads in uncacheable (UC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts code reads in uncacheable (UC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x3600000200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400000200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts code reads in uncacheable (UC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts code reads in uncacheable (UC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts code reads in uncacheable (UC) memory region that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000040200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts code reads in uncacheable (UC) memory region that hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts code reads in uncacheable (UC) memory region that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts code reads in uncacheable (UC) memory region that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600000100 ",
"Counter": "0,1,2,3",
@@ -762,6 +1341,86 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000040100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600000080 ",
"Counter": "0,1,2,3",
@@ -773,6 +1432,86 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000040080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600000020 ",
"Counter": "0,1,2,3",
@@ -784,6 +1523,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000000020 ",
"Counter": "0,1,2,3",
@@ -795,6 +1536,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400000020 ",
"Counter": "0,1,2,3",
@@ -806,6 +1549,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000020 ",
"Counter": "0,1,2,3",
@@ -817,6 +1562,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040020 ",
"Counter": "0,1,2,3",
@@ -828,6 +1575,34 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600000010 ",
"Counter": "0,1,2,3",
@@ -839,6 +1614,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000000010 ",
"Counter": "0,1,2,3",
@@ -850,6 +1627,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400000010 ",
"Counter": "0,1,2,3",
@@ -861,6 +1640,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000010 ",
"Counter": "0,1,2,3",
@@ -872,6 +1653,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040010 ",
"Counter": "0,1,2,3",
@@ -883,6 +1666,34 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600000008 ",
"Counter": "0,1,2,3",
@@ -894,6 +1705,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000000008 ",
"Counter": "0,1,2,3",
@@ -905,6 +1718,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400000008 ",
"Counter": "0,1,2,3",
@@ -916,6 +1731,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000008 ",
"Counter": "0,1,2,3",
@@ -927,6 +1744,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040008 ",
"Counter": "0,1,2,3",
@@ -938,6 +1757,21 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x4000000004 ",
"Counter": "0,1,2,3",
@@ -949,6 +1783,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600000004 ",
"Counter": "0,1,2,3",
@@ -960,6 +1796,21 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400000004 ",
"Counter": "0,1,2,3",
@@ -971,6 +1822,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000004 ",
"Counter": "0,1,2,3",
@@ -982,6 +1835,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040004 ",
"Counter": "0,1,2,3",
@@ -993,6 +1848,21 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x4000000002 ",
"Counter": "0,1,2,3",
@@ -1004,6 +1874,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600000002 ",
"Counter": "0,1,2,3",
@@ -1015,6 +1887,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000000002 ",
"Counter": "0,1,2,3",
@@ -1026,6 +1900,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400000002 ",
"Counter": "0,1,2,3",
@@ -1037,6 +1913,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000002 ",
"Counter": "0,1,2,3",
@@ -1048,6 +1926,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040002 ",
"Counter": "0,1,2,3",
@@ -1059,6 +1939,21 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x4000000001 ",
"Counter": "0,1,2,3",
@@ -1070,6 +1965,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600000001 ",
"Counter": "0,1,2,3",
@@ -1081,6 +1978,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000000001 ",
"Counter": "0,1,2,3",
@@ -1092,6 +1991,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400000001 ",
"Counter": "0,1,2,3",
@@ -1103,6 +2004,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000001 ",
"Counter": "0,1,2,3",
@@ -1114,6 +2017,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040001 ",
"Counter": "0,1,2,3",
@@ -1123,5 +2028,18 @@
"SampleAfterValue": "100007",
"BriefDescription": "Counts demand cacheable data reads of full cache lines that hit the L2 cache.",
"Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data reads of full cache lines that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
}
] \ No newline at end of file
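Each OFFCORE_RESPONSE entry above pairs EventCode 0xB7 and UMask 0x1 with an MSRValue that is written into MSR_OFFCORE_RESP0 (0x1a6) or MSR_OFFCORE_RESP1 (0x1a7), as listed in the MSRIndex field. As a rough illustration of what that means at the perf_event_open(2) level, here is a sketch only (not part of this change), assuming the usual Intel raw encoding where EventCode/UMask land in attr.config and the offcore response payload is passed through attr.config1 (the cpu PMU's "offcore_rsp" format); the constants are taken from the OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE entry above:

/* Sketch only: programming one of the above OFFCORE_RESPONSE entries
 * directly via perf_event_open(2). Assumes EventCode in config[7:0],
 * UMask in config[15:8], and the MSRValue passed via config1. */
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0xB7 | (0x1 << 8);	/* EventCode 0xB7, UMask 0x1 */
	attr.config1 = 0x0000010001ULL;		/* MSRValue of DEMAND_DATA_RD.ANY_RESPONSE */
	attr.disabled = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... run the workload of interest ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("offcore responses: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}

In practice perf itself performs this translation: once these JSON files are built into perf's pmu-events tables, the event is requested by name (for example with perf stat) and the config/config1 values are filled in from exactly these fields.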
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/memory.json b/tools/perf/pmu-events/arch/x86/goldmont/memory.json
index ac8b0d365a19..690cebd12a94 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/memory.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/memory.json
@@ -1,15 +1,5 @@
[
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts machine clears due to memory ordering issues. This occurs when a snoop request happens and the machine is uncertain if memory ordering will be preserved - as another core is in the process of modifying the data.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
- "SampleAfterValue": "200003",
- "BriefDescription": "Machine clears due to memory ordering issue"
- },
- {
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts when a memory load of a uop spans a page boundary (a split) is retired.",
@@ -30,5 +20,275 @@
"EventName": "MISALIGN_MEM_REF.STORE_PAGE_SPLIT",
"SampleAfterValue": "200003",
"BriefDescription": "Store uops that split a page (Precise event capable)"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts machine clears due to memory ordering issues. This occurs when a snoop request happens and the machine is uncertain if memory ordering will be preserved as another core is in the process of modifying the data.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Machine clears due to memory ordering issue"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x20000032b7 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000022 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000003091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads (demand & prefetch) that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000003010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts requests to the uncore subsystem that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000004800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000004000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000002000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000001000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts bus lock and split lock requests that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000400 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts bus lock and split lock requests that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
}
] \ No newline at end of file
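A pattern worth noting in the MSRValue fields of this file and the previous one: the low bits name the request type (0x0001 DEMAND_DATA_RD, 0x0002 DEMAND_RFO, 0x0004 DEMAND_CODE_RD, 0x0008 COREWB, 0x0010 PF_L2_DATA_RD, 0x0020 PF_L2_RFO, and so on), while the high bits name the response filter (0x0000010000 ANY_RESPONSE, 0x0000040000 L2_HIT, 0x2000000000 L2_MISS.NON_DRAM, 0x4000000000 OUTSTANDING). The following minimal sketch composes a value from those observed bit assignments; the positions are read off from the MSRValue/EventName pairs above and should be treated as an inference, not an authoritative bit layout:

/* Sketch only: rebuilding an OFFCORE_RESPONSE MSRValue from the request and
 * response bits that the entries in these JSON files appear to use. */
#include <stdint.h>
#include <stdio.h>

/* Request-type bits (low word), as seen in the entries above. */
#define REQ_DEMAND_DATA_RD	0x0001ULL
#define REQ_DEMAND_RFO		0x0002ULL
#define REQ_DEMAND_CODE_RD	0x0004ULL
#define REQ_COREWB		0x0008ULL
#define REQ_PF_L2_DATA_RD	0x0010ULL
#define REQ_PF_L2_RFO		0x0020ULL

/* Response-type bits (upper bits), as seen in the entries above. */
#define RSP_ANY_RESPONSE	0x0000010000ULL
#define RSP_L2_HIT		0x0000040000ULL
#define RSP_L2_MISS_NON_DRAM	0x2000000000ULL
#define RSP_OUTSTANDING		0x4000000000ULL

int main(void)
{
	/* Reproduces the MSRValue of OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.NON_DRAM. */
	uint64_t msrval = REQ_DEMAND_DATA_RD | RSP_L2_MISS_NON_DRAM;

	printf("MSRValue = 0x%010llx\n", (unsigned long long)msrval);
	return 0;
}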
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/other.json b/tools/perf/pmu-events/arch/x86/goldmont/other.json
index df25ca9542f1..959cadd7cb0e 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/other.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/other.json
@@ -1,23 +1,23 @@
[
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed because of a full resource in the backend. Including but not limited to resources such as the Re-order Buffer (ROB), reservation stations (RS), load/store buffers, physical registers, or any other needed machine resource that is currently unavailable. Note that uops must be available for consumption in order for this event to fire. If a uop is not available (Instruction Queue is empty), this event will not count.",
- "EventCode": "0xCA",
+ "PublicDescription": "Counts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This will include cycles due to an ITLB miss, ICache miss and other events.",
+ "EventCode": "0x86",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RESOURCE_FULL",
+ "UMask": "0x0",
+ "EventName": "FETCH_STALL.ALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Unfilled issue slots per cycle because of a full resource in the backend"
+ "BriefDescription": "Cycles code-fetch stalled due to any reason."
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend because allocation is stalled waiting for a mispredicted jump to retire or other branch-like conditions (e.g. the event is relevant during certain microcode flows). Counts all issue slots blocked while within this window including slots where uops were not available in the Instruction Queue.",
- "EventCode": "0xCA",
+ "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ITLB miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ITLB miss. Note: this event is not the same as page walk cycles to retrieve an instruction translation.",
+ "EventCode": "0x86",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RECOVERY",
+ "UMask": "0x1",
+ "EventName": "FETCH_STALL.ITLB_FILL_PENDING_CYCLES",
"SampleAfterValue": "200003",
- "BriefDescription": "Unfilled issue slots per cycle to recover"
+ "BriefDescription": "Cycles code-fetch stalled due to an outstanding ITLB miss."
},
{
"CollectPEBSRecord": "1",
@@ -30,14 +30,44 @@
"BriefDescription": "Unfilled issue slots per cycle"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed because of a full resource in the backend. Including but not limited to resources such as the Re-order Buffer (ROB), reservation stations (RS), load/store buffers, physical registers, or any other needed machine resource that is currently unavailable. Note that uops must be available for consumption in order for this event to fire. If a uop is not available (Instruction Queue is empty), this event will not count.",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RESOURCE_FULL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Unfilled issue slots per cycle because of a full resource in the backend"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend because allocation is stalled waiting for a mispredicted jump to retire or other branch-like conditions (e.g. the event is relevant during certain microcode flows). Counts all issue slots blocked while within this window including slots where uops were not available in the Instruction Queue.",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RECOVERY",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Unfilled issue slots per cycle to recover"
+ },
+ {
"CollectPEBSRecord": "2",
"PublicDescription": "Counts hardware interrupts received by the processor.",
"EventCode": "0xCB",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "HW_INTERRUPTS.RECEIVED",
+ "SampleAfterValue": "203",
+ "BriefDescription": "Hardware interrupts received"
+ },
+ {
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts the number of core cycles during which interrupts are masked (disabled). Increments by 1 each core cycle that EFLAGS.IF is 0, regardless of whether interrupts are pending or not.",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "HW_INTERRUPTS.MASKED",
"SampleAfterValue": "200003",
- "BriefDescription": "Hardware interrupts received (Precise event capable)"
+ "BriefDescription": "Cycles hardware interrupts are masked"
},
{
"CollectPEBSRecord": "2",
@@ -47,6 +77,6 @@
"UMask": "0x4",
"EventName": "HW_INTERRUPTS.PENDING_AND_MASKED",
"SampleAfterValue": "200003",
- "BriefDescription": "Cycles pending interrupts are masked (Precise event capable)"
+ "BriefDescription": "Cycles pending interrupts are masked"
}
] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json b/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json
index 07f00041f56f..254788af8ab6 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json
@@ -1,168 +1,136 @@
[
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts branch instructions retired for all branch types. This is an architectural performance event.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired branch instructions (Precise event capable)"
- },
- {
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was taken and when it was not taken.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x7e",
- "EventName": "BR_INST_RETIRED.JCC",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired conditional branch instructions (Precise event capable)"
+ "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0. You cannot collect a PEBs record for this event.",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 0",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Instructions retired (Fixed event)"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were taken and does not count when the Jcc branch instruction were not taken.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0xfe",
- "EventName": "BR_INST_RETIRED.TAKEN_JCC",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired conditional branch instructions that were taken (Precise event capable)"
+ "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1. You cannot collect a PEBs record for this event.",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when core is not halted (Fixed event)"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts near CALL branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0xf9",
- "EventName": "BR_INST_RETIRED.CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired near call instructions (Precise event capable)"
+ "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. This event uses fixed counter 2. You cannot collect a PEBs record for this event.",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 2",
+ "UMask": "0x3",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when core is not halted (Fixed event)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts near relative CALL branch instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts a load blocked from using a store forward, but did not occur because the store data was not available at the right time. The forward might occur subsequently when the data is available.",
+ "EventCode": "0x03",
"Counter": "0,1,2,3",
- "UMask": "0xfd",
- "EventName": "BR_INST_RETIRED.REL_CALL",
+ "UMask": "0x1",
+ "EventName": "LD_BLOCKS.DATA_UNKNOWN",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired near relative call instructions (Precise event capable)"
+ "BriefDescription": "Loads blocked due to store data not ready (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts near indirect CALL branch instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts a load blocked from using a store forward because of an address/size mismatch, only one of the loads blocked from each store will be counted.",
+ "EventCode": "0x03",
"Counter": "0,1,2,3",
- "UMask": "0xfb",
- "EventName": "BR_INST_RETIRED.IND_CALL",
+ "UMask": "0x2",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired near indirect call instructions (Precise event capable)"
+ "BriefDescription": "Loads blocked due to store forward restriction (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts near return branch instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts loads that block because their address modulo 4K matches a pending store.",
+ "EventCode": "0x03",
"Counter": "0,1,2,3",
- "UMask": "0xf7",
- "EventName": "BR_INST_RETIRED.RETURN",
+ "UMask": "0x4",
+ "EventName": "LD_BLOCKS.4K_ALIAS",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired near return instructions (Precise event capable)"
+ "BriefDescription": "Loads blocked because address has 4k partial address false dependence (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts near indirect call or near indirect jmp branch instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts loads blocked because they are unable to find their physical address in the micro TLB (UTLB).",
+ "EventCode": "0x03",
"Counter": "0,1,2,3",
- "UMask": "0xeb",
- "EventName": "BR_INST_RETIRED.NON_RETURN_IND",
+ "UMask": "0x8",
+ "EventName": "LD_BLOCKS.UTLB_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired instructions of near indirect Jmp or call (Precise event capable)"
+ "BriefDescription": "Loads blocked because address in not in the UTLB (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts far branch instructions retired. This includes far jump, far call and return, and Interrupt call and return.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts anytime a load that retires is blocked for any reason.",
+ "EventCode": "0x03",
"Counter": "0,1,2,3",
- "UMask": "0xbf",
- "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "UMask": "0x10",
+ "EventName": "LD_BLOCKS.ALL_BLOCK",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired far branch instructions (Precise event capable)"
+ "BriefDescription": "Loads blocked (Precise event capable)"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted branch instructions retired including all branch types.",
- "EventCode": "0xC5",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts uops issued by the front end and allocated into the back end of the machine. This event counts uops that retire as well as uops that were speculatively executed but didn't retire. The sort of speculative uops that might be counted includes, but is not limited to those uops issued in the shadow of a miss-predicted branch, those uops that are inserted during an assist (such as for a denormal floating point result), and (previously allocated) uops that might be canceled during a machine clear.",
+ "EventCode": "0x0E",
"Counter": "0,1,2,3",
"UMask": "0x0",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired mispredicted branch instructions (Precise event capable)"
- },
- {
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was supposed to be taken and when it was not supposed to be taken (but the processor predicted the opposite condition).",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x7e",
- "EventName": "BR_MISP_RETIRED.JCC",
+ "EventName": "UOPS_ISSUED.ANY",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired mispredicted conditional branch instructions (Precise event capable)"
+ "BriefDescription": "Uops issued to the back end per cycle"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were supposed to be taken but the processor predicted that it would not be taken.",
- "EventCode": "0xC5",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Core cycles when core is not halted. This event uses a (_P)rogrammable general purpose performance counter.",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0xfe",
- "EventName": "BR_MISP_RETIRED.TAKEN_JCC",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired mispredicted conditional branch instructions that were taken (Precise event capable)"
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.CORE_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when core is not halted"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted near indirect CALL branch instructions retired, where the target address taken was not what the processor predicted.",
- "EventCode": "0xC5",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Reference cycles when core is not halted. This event uses a programmable general purpose performance counter.",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0xfb",
- "EventName": "BR_MISP_RETIRED.IND_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired mispredicted near indirect call instructions (Precise event capable)"
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when core is not halted"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted near RET branch instructions retired, where the return address taken was not what the processor predicted.",
- "EventCode": "0xC5",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "This event used to measure front-end inefficiencies. I.e. when front-end of the machine is not delivering uops to the back-end and the back-end has is not stalled. This event can be used to identify if the machine is truly front-end bound. When this event occurs, it is an indication that the front-end of the machine is operating at less than its theoretical peak performance. Background: We can think of the processor pipeline as being divided into 2 broader parts: Front-end and Back-end. Front-end is responsible for fetching the instruction, decoding into uops in machine understandable format and putting them into a uop queue to be consumed by back end. The back-end then takes these uops, allocates the required resources. When all resources are ready, uops are executed. If the back-end is not ready to accept uops from the front-end, then we do not want to count these as front-end bottlenecks. However, whenever we have bottlenecks in the back-end, we will have allocation unit stalls and eventually forcing the front-end to wait until the back-end is ready to receive more uops. This event counts only when back-end is requesting more uops and front-end is not able to provide them. When 3 uops are requested and no uops are delivered, the event counts 3. When 3 are requested, and only 1 is delivered, the event counts 2. When only 2 are delivered, the event counts 1. Alternatively stated, the event will not count if 3 uops are delivered, or if the back end is stalled and not requesting any uops at all. Counts indicate missed opportunities for the front-end to deliver a uop to the back end. Some examples of conditions that cause front-end efficiencies are: ICache misses, ITLB misses, and decoder restrictions that limit the front-end bandwidth. Known Issues: Some uops require multiple allocation slots. These uops will not be charged as a front end 'not delivered' opportunity, and will be regarded as a back end problem. For example, the INC instruction has one uop that requires 2 issue slots. A stream of INC instructions will not count as UOPS_NOT_DELIVERED, even though only one instruction can be issued per clock. The low uop issue rate for a stream of INC instructions is considered to be a back end issue.",
+ "EventCode": "0x9C",
"Counter": "0,1,2,3",
- "UMask": "0xf7",
- "EventName": "BR_MISP_RETIRED.RETURN",
+ "UMask": "0x0",
+ "EventName": "UOPS_NOT_DELIVERED.ANY",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired mispredicted near return instructions (Precise event capable)"
+ "BriefDescription": "Uops requested but not-delivered to the back-end per cycle"
},
{
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted branch instructions retired that were near indirect call or near indirect jmp, where the target address taken was not what the processor predicted.",
- "EventCode": "0xC5",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The event continues counting during hardware interrupts, traps, and inside interrupt handlers. This is an architectural performance event. This event uses a (_P)rogrammable general purpose performance counter. *This event is Precise Event capable: The EventingRIP field in the PEBS record is precise to the address of the instruction which caused the event. Note: Because PEBS records can be collected only on IA32_PMC0, only one event can use the PEBS facility at a time.",
+ "EventCode": "0xC0",
"Counter": "0,1,2,3",
- "UMask": "0xeb",
- "EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired mispredicted instructions of near indirect Jmp or near indirect call. (Precise event capable)"
+ "UMask": "0x0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Instructions retired (Precise event capable)"
},
{
"PEBS": "2",
@@ -187,8 +155,40 @@
"BriefDescription": "MS uops retired (Precise event capable)"
},
{
+ "PEBS": "2",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of floating point divide uops retired.",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "UOPS_RETIRED.FPDIV",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Floating point divide uops retired. (Precise Event Capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of integer divide uops retired.",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "UOPS_RETIRED.IDIV",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Integer divide uops retired. (Precise Event Capable)"
+ },
+ {
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel? architecture processors.",
+ "PublicDescription": "Counts machine clears for any reason.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "MACHINE_CLEARS.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "All machine clears"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel architecture processors.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -217,217 +217,239 @@
"BriefDescription": "Machine clears due to memory disambiguation"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts machine clears for any reason.",
- "EventCode": "0xC3",
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts branch instructions retired for all branch types. This is an architectural performance event.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x0",
- "EventName": "MACHINE_CLEARS.ALL",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"SampleAfterValue": "200003",
- "BriefDescription": "All machine clears"
+ "BriefDescription": "Retired branch instructions (Precise event capable)"
},
{
"PEBS": "2",
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The event continues counting during hardware interrupts, traps, and inside interrupt handlers. This is an architectural performance event. This event uses a (_P)rogrammable general purpose performance counter. *This event is Precise Event capable: The EventingRIP field in the PEBS record is precise to the address of the instruction which caused the event. Note: Because PEBS records can be collected only on IA32_PMC0, only one event can use the PEBS facility at a time.",
- "EventCode": "0xC0",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was taken and when it was not taken.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "INST_RETIRED.ANY_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Instructions retired (Precise event capable)"
+ "UMask": "0x7e",
+ "EventName": "BR_INST_RETIRED.JCC",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired conditional branch instructions (Precise event capable)"
},
{
+ "PEBS": "2",
"CollectPEBSRecord": "1",
- "PublicDescription": "This event used to measure front-end inefficiencies. I.e. when front-end of the machine is not delivering uops to the back-end and the back-end has is not stalled. This event can be used to identify if the machine is truly front-end bound. When this event occurs, it is an indication that the front-end of the machine is operating at less than its theoretical peak performance. Background: We can think of the processor pipeline as being divided into 2 broader parts: Front-end and Back-end. Front-end is responsible for fetching the instruction, decoding into uops in machine understandable format and putting them into a uop queue to be consumed by back end. The back-end then takes these uops, allocates the required resources. When all resources are ready, uops are executed. If the back-end is not ready to accept uops from the front-end, then we do not want to count these as front-end bottlenecks. However, whenever we have bottlenecks in the back-end, we will have allocation unit stalls and eventually forcing the front-end to wait until the back-end is ready to receive more uops. This event counts only when back-end is requesting more uops and front-end is not able to provide them. When 3 uops are requested and no uops are delivered, the event counts 3. When 3 are requested, and only 1 is delivered, the event counts 2. When only 2 are delivered, the event counts 1. Alternatively stated, the event will not count if 3 uops are delivered, or if the back end is stalled and not requesting any uops at all. Counts indicate missed opportunities for the front-end to deliver a uop to the back end. Some examples of conditions that cause front-end efficiencies are: ICache misses, ITLB misses, and decoder restrictions that limit the front-end bandwidth. Known Issues: Some uops require multiple allocation slots. These uops will not be charged as a front end 'not delivered' opportunity, and will be regarded as a back end problem. For example, the INC instruction has one uop that requires 2 issue slots. A stream of INC instructions will not count as UOPS_NOT_DELIVERED, even though only one instruction can be issued per clock. The low uop issue rate for a stream of INC instructions is considered to be a back end issue.",
- "EventCode": "0x9C",
+ "PublicDescription": "Counts the number of taken branch instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "UOPS_NOT_DELIVERED.ANY",
+ "UMask": "0x80",
+ "EventName": "BR_INST_RETIRED.ALL_TAKEN_BRANCHES",
"SampleAfterValue": "200003",
- "BriefDescription": "Uops requested but not-delivered to the back-end per cycle"
+ "BriefDescription": "Retired taken branch instructions (Precise event capable)"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts uops issued by the front end and allocated into the back end of the machine. This event counts uops that retire as well as uops that were speculatively executed but didn't retire. The sort of speculative uops that might be counted includes, but is not limited to those uops issued in the shadow of a miss-predicted branch, those uops that are inserted during an assist (such as for a denormal floating point result), and (previously allocated) uops that might be canceled during a machine clear.",
- "EventCode": "0x0E",
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts far branch instructions retired. This includes far jump, far call and return, and Interrupt call and return.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "UOPS_ISSUED.ANY",
+ "UMask": "0xbf",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
"SampleAfterValue": "200003",
- "BriefDescription": "Uops issued to the back end per cycle"
+ "BriefDescription": "Retired far branch instructions (Precise event capable)"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts core cycles if either divide unit is busy.",
- "EventCode": "0xCD",
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts near indirect call or near indirect jmp branch instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "CYCLES_DIV_BUSY.ALL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles a divider is busy"
+ "UMask": "0xeb",
+ "EventName": "BR_INST_RETIRED.NON_RETURN_IND",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired instructions of near indirect Jmp or call (Precise event capable)"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts core cycles the integer divide unit is busy.",
- "EventCode": "0xCD",
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts near return branch instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CYCLES_DIV_BUSY.IDIV",
+ "UMask": "0xf7",
+ "EventName": "BR_INST_RETIRED.RETURN",
"SampleAfterValue": "200003",
- "BriefDescription": "Cycles the integer divide unit is busy"
+ "BriefDescription": "Retired near return instructions (Precise event capable)"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts core cycles the floating point divide unit is busy.",
- "EventCode": "0xCD",
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts near CALL branch instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CYCLES_DIV_BUSY.FPDIV",
+ "UMask": "0xf9",
+ "EventName": "BR_INST_RETIRED.CALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Cycles the FP divide unit is busy"
+ "BriefDescription": "Retired near call instructions (Precise event capable)"
},
{
- "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0. You cannot collect a PEBs record for this event.",
- "EventCode": "0x00",
- "Counter": "Fixed counter 1",
- "UMask": "0x1",
- "EventName": "INST_RETIRED.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Instructions retired (Fixed event)"
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts near indirect CALL branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0xfb",
+ "EventName": "BR_INST_RETIRED.IND_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired near indirect call instructions (Precise event capable)"
},
{
- "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1. You cannot collect a PEBs record for this event.",
- "EventCode": "0x00",
- "Counter": "Fixed counter 2",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when core is not halted (Fixed event)"
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts near relative CALL branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0xfd",
+ "EventName": "BR_INST_RETIRED.REL_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired near relative call instructions (Precise event capable)"
},
{
- "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. This event uses fixed counter 2. You cannot collect a PEBs record for this event.",
- "EventCode": "0x00",
- "Counter": "Fixed counter 3",
- "UMask": "0x3",
- "EventName": "CPU_CLK_UNHALTED.REF_TSC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when core is not halted (Fixed event)"
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were taken and does not count when the Jcc branch instruction were not taken.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0xfe",
+ "EventName": "BR_INST_RETIRED.TAKEN_JCC",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired conditional branch instructions that were taken (Precise event capable)"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Core cycles when core is not halted. This event uses a (_P)rogrammable general purpose performance counter.",
- "EventCode": "0x3C",
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts mispredicted branch instructions retired including all branch types.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0x0",
- "EventName": "CPU_CLK_UNHALTED.CORE_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when core is not halted"
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired mispredicted branch instructions (Precise event capable)"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Reference cycles when core is not halted. This event uses a (_P)rogrammable general purpose performance counter.",
- "EventCode": "0x3C",
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts mispredicted retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was supposed to be taken and when it was not supposed to be taken (but the processor predicted the opposite condition).",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.REF",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when core is not halted"
+ "UMask": "0x7e",
+ "EventName": "BR_MISP_RETIRED.JCC",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired mispredicted conditional branch instructions (Precise event capable)"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of times a BACLEAR is signaled for any reason, including, but not limited to indirect branch/call, Jcc (Jump on Conditional Code/Jump if Condition is Met) branch, unconditional branch/call, and returns.",
- "EventCode": "0xE6",
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts mispredicted branch instructions retired that were near indirect call or near indirect jmp, where the target address taken was not what the processor predicted.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BACLEARS.ALL",
+ "UMask": "0xeb",
+ "EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
"SampleAfterValue": "200003",
- "BriefDescription": "BACLEARs asserted for any branch type"
+ "BriefDescription": "Retired mispredicted instructions of near indirect Jmp or near indirect call. (Precise event capable)"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts BACLEARS on return instructions.",
- "EventCode": "0xE6",
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts mispredicted near RET branch instructions retired, where the return address taken was not what the processor predicted.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "BACLEARS.RETURN",
+ "UMask": "0xf7",
+ "EventName": "BR_MISP_RETIRED.RETURN",
"SampleAfterValue": "200003",
- "BriefDescription": "BACLEARs asserted for return branch"
+ "BriefDescription": "Retired mispredicted near return instructions (Precise event capable)"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts BACLEARS on Jcc (Jump on Conditional Code/Jump if Condition is Met) branches.",
- "EventCode": "0xE6",
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts mispredicted near indirect CALL branch instructions retired, where the target address taken was not what the processor predicted.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "BACLEARS.COND",
+ "UMask": "0xfb",
+ "EventName": "BR_MISP_RETIRED.IND_CALL",
"SampleAfterValue": "200003",
- "BriefDescription": "BACLEARs asserted for conditional branch"
+ "BriefDescription": "Retired mispredicted near indirect call instructions (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts anytime a load that retires is blocked for any reason.",
- "EventCode": "0x03",
+ "PublicDescription": "Counts mispredicted retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were supposed to be taken but the processor predicted that it would not be taken.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "LD_BLOCKS.ALL_BLOCK",
+ "UMask": "0xfe",
+ "EventName": "BR_MISP_RETIRED.TAKEN_JCC",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads blocked (Precise event capable)"
+ "BriefDescription": "Retired mispredicted conditional branch instructions that were taken (Precise event capable)"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts loads blocked because they are unable to find their physical address in the micro TLB (UTLB).",
- "EventCode": "0x03",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts core cycles if either divide unit is busy.",
+ "EventCode": "0xCD",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "LD_BLOCKS.UTLB_MISS",
+ "UMask": "0x0",
+ "EventName": "CYCLES_DIV_BUSY.ALL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles a divider is busy"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts core cycles the integer divide unit is busy.",
+ "EventCode": "0xCD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CYCLES_DIV_BUSY.IDIV",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads blocked because address in not in the UTLB (Precise event capable)"
+ "BriefDescription": "Cycles the integer divide unit is busy"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts a load blocked from using a store forward because of an address/size mismatch, only one of the loads blocked from each store will be counted.",
- "EventCode": "0x03",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts core cycles the floating point divide unit is busy.",
+ "EventCode": "0xCD",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "EventName": "CYCLES_DIV_BUSY.FPDIV",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads blocked due to store forward restriction (Precise event capable)"
+ "BriefDescription": "Cycles the FP divide unit is busy"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts a load blocked from using a store forward, but did not occur because the store data was not available at the right time. The forward might occur subsequently when the data is available.",
- "EventCode": "0x03",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of times a BACLEAR is signaled for any reason, including, but not limited to indirect branch/call, Jcc (Jump on Conditional Code/Jump if Condition is Met) branch, unconditional branch/call, and returns.",
+ "EventCode": "0xE6",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "LD_BLOCKS.DATA_UNKNOWN",
+ "EventName": "BACLEARS.ALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads blocked due to store data not ready (Precise event capable)"
+ "BriefDescription": "BACLEARs asserted for any branch type"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts loads that block because their address modulo 4K matches a pending store.",
- "EventCode": "0x03",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts BACLEARS on return instructions.",
+ "EventCode": "0xE6",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "LD_BLOCKS.4K_ALIAS",
+ "UMask": "0x8",
+ "EventName": "BACLEARS.RETURN",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads blocked because address has 4k partial address false dependence (Precise event capable)"
+ "BriefDescription": "BACLEARs asserted for return branch"
},
{
- "PEBS": "2",
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of taken branch instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts BACLEARS on Jcc (Jump on Conditional Code/Jump if Condition is Met) branches.",
+ "EventCode": "0xE6",
"Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "BR_INST_RETIRED.ALL_TAKEN_BRANCHES",
+ "UMask": "0x10",
+ "EventName": "BACLEARS.COND",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired taken branch instructions (Precise event capable)"
+ "BriefDescription": "BACLEARs asserted for conditional branch"
}
] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json b/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json
index 3202c4478836..9805198d3f5f 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json
@@ -1,6 +1,36 @@
[
{
"CollectPEBSRecord": "1",
+ "PublicDescription": "Counts every core cycle when a Data-side (walks due to a data operation) page walk is in progress.",
+ "EventCode": "0x05",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "PAGE_WALKS.D_SIDE_CYCLES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Duration of D-side page-walks in cycles"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts every core cycle when a Instruction-side (walks due to an instruction fetch) page walk is in progress.",
+ "EventCode": "0x05",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "PAGE_WALKS.I_SIDE_CYCLES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Duration of I-side pagewalks in cycles"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts every core cycle a page-walk is in progress due to either a data memory operation or an instruction fetch.",
+ "EventCode": "0x05",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "PAGE_WALKS.CYCLES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Duration of page-walks in cycles"
+ },
+ {
+ "CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of times the machine was unable to find a translation in the Instruction Translation Lookaside Buffer (ITLB) for a linear address of an instruction fetch. It counts when new translation are filled into the ITLB. The event is speculative in nature, but will not count translations (page walks) that are begun and not finished, or translations that are finished but not filled into the ITLB.",
"EventCode": "0x81",
"Counter": "0,1,2,3",
@@ -41,35 +71,5 @@
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS",
"SampleAfterValue": "200003",
"BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts every core cycle when a Data-side (walks due to a data operation) page walk is in progress.",
- "EventCode": "0x05",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "PAGE_WALKS.D_SIDE_CYCLES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Duration of D-side page-walks in cycles"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts every core cycle when a Instruction-side (walks due to an instruction fetch) page walk is in progress.",
- "EventCode": "0x05",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "PAGE_WALKS.I_SIDE_CYCLES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Duration of I-side pagewalks in cycles"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts every core cycle a page-walk is in progress due to either a data memory operation or an instruction fetch.",
- "EventCode": "0x05",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "PAGE_WALKS.CYCLES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Duration of page-walks in cycles"
}
] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/haswell/cache.json b/tools/perf/pmu-events/arch/x86/haswell/cache.json
index bfb5ebf48c54..da4d6ddd4f92 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/cache.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/cache.json
@@ -11,14 +11,34 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Demand data read requests that hit L2 cache.",
+ "PublicDescription": "Counts the number of store RFO requests that miss the L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x41",
+ "UMask": "0x22",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that miss L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of instruction fetches that missed the L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache misses when fetching instructions",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Demand requests that miss L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x27",
"Errata": "HSD78",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "BriefDescription": "Demand requests that miss L2 cache",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -32,6 +52,48 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "All requests that missed L2.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3f",
+ "Errata": "HSD78",
+ "EventName": "L2_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "All requests that miss L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Demand data read requests that hit L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "Errata": "HSD78",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of store RFO requests that hit the L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x42",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that hit L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of instruction fetches that hit the L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x44",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts all L2 HW prefetcher requests that hit L2.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -73,6 +135,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Demand requests to L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe7",
+ "Errata": "HSD78",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand requests to L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts all L2 HW prefetcher requests.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -83,6 +156,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "All requests to L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xff",
+ "Errata": "HSD78",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "All L2 requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Not rejected writebacks that hit L2 cache.",
"EventCode": "0x27",
"Counter": "0,1,2,3",
@@ -124,6 +208,27 @@
},
{
"EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CounterMask": "1",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "CounterMask": "1",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "L1D_PEND_MISS.REQUEST_FB_FULL",
@@ -133,13 +238,13 @@
},
{
"EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
"CounterMask": "1",
- "CounterHTOff": "2"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PublicDescription": "This event counts when new data lines are brought into the L1 Data cache, which cause other lines to be evicted from the cache.",
@@ -163,6 +268,28 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD78, HSD62, HSD61",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD78, HSD62, HSD61",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Offcore outstanding Demand code Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
@@ -185,46 +312,35 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x8",
+ "UMask": "0x4",
"Errata": "HSD62, HSD61",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD78, HSD62, HSD61",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
"UMask": "0x8",
"Errata": "HSD62, HSD61",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
- "CounterMask": "1",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "UMask": "0x8",
"Errata": "HSD62, HSD61",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -289,6 +405,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xB7, 0xBB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"PEBS": "1",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
@@ -296,7 +421,7 @@
"Errata": "HSD29, HSM30",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that miss the STLB.",
+ "BriefDescription": "Retired load uops that miss the STLB. (precise Event)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
@@ -308,7 +433,7 @@
"Errata": "HSD29, HSM30",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that miss the STLB.",
+ "BriefDescription": "Retired store uops that miss the STLB. (precise Event)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1",
"L1_Hit_Indication": "1"
@@ -321,31 +446,33 @@
"Errata": "HSD76, HSD29, HSM30",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops with locked access.",
+ "BriefDescription": "Retired load uops with locked access. (precise Event)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
+ "PublicDescription": "This event counts load uops retired which had memory addresses spilt across 2 cache lines. A line split is across 64B cache-lines which may include a page split (4K). This is a precise event.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x41",
"Errata": "HSD29, HSM30",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary. (precise Event)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
+ "PublicDescription": "This event counts store uops retired which had memory addresses spilt across 2 cache lines. A line split is across 64B cache-lines which may include a page split (4K). This is a precise event.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x42",
"Errata": "HSD29, HSM30",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (precise Event)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1",
"L1_Hit_Indication": "1"
@@ -358,19 +485,20 @@
"Errata": "HSD29, HSM30",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"SampleAfterValue": "2000003",
- "BriefDescription": "All retired load uops.",
+ "BriefDescription": "All retired load uops. (precise Event)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
+ "PublicDescription": "This event counts all store uops retired. This is a precise event.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x82",
"Errata": "HSD29, HSM30",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"SampleAfterValue": "2000003",
- "BriefDescription": "All retired store uops.",
+ "BriefDescription": "All retired store uops. (precise Event)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1",
"L1_Hit_Indication": "1"
@@ -401,20 +529,20 @@
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops with L3 cache hits as data sources.",
+ "PublicDescription": "This event counts retired load uops in which data sources were data hits in the L3 cache without snoops required. This does not include hardware prefetches. This is a precise event.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x4",
"Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
"SampleAfterValue": "50021",
- "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops missed L1 cache as data sources.",
+ "PublicDescription": "This event counts retired load uops in which data sources missed in the L1 cache. This does not include hardware prefetches. This is a precise event.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x8",
@@ -427,20 +555,18 @@
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops missed L2. Unknown data source excluded.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x10",
"Errata": "HSD29, HSM30",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
"SampleAfterValue": "50021",
- "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Retired load uops with L2 cache misses as data sources.",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops missed L3. Excludes unknown data source .",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x20",
@@ -477,25 +603,27 @@
},
{
"PEBS": "1",
+ "PublicDescription": "This event counts retired load uops that hit in the L3 cache, but required a cross-core snoop which resulted in a HIT in an on-pkg core cache. This does not include hardware prefetches. This is a precise event.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x2",
"Errata": "HSD29, HSD25, HSM26, HSM30",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache. ",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
+ "PublicDescription": "This event counts retired load uops that hit in the L3 cache, but required a cross-core snoop which resulted in a HITM (hit modified) in an on-pkg core cache. This does not include hardware prefetches. This is a precise event.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x4",
"Errata": "HSD29, HSD25, HSM26, HSM30",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3. ",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
@@ -513,14 +641,13 @@
},
{
"PEBS": "1",
- "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches.",
+ "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches. This is a precise event.",
"EventCode": "0xD3",
"Counter": "0,1,2,3",
"UMask": "0x1",
"Errata": "HSD74, HSD29, HSD25, HSM30",
"EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
"SampleAfterValue": "100003",
- "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
@@ -665,6 +792,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "",
"EventCode": "0xf4",
"Counter": "0,1,2,3",
"UMask": "0x10",
@@ -674,131 +802,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts the number of store RFO requests that hit the L2 cache.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x42",
- "EventName": "L2_RQSTS.RFO_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that hit L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts the number of store RFO requests that miss the L2 cache.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x22",
- "EventName": "L2_RQSTS.RFO_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that miss L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of instruction fetches that hit the L2 cache.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x44",
- "EventName": "L2_RQSTS.CODE_RD_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of instruction fetches that missed the L2 cache.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 cache misses when fetching instructions",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Demand requests that miss L2 cache.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x27",
- "Errata": "HSD78",
- "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand requests that miss L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Demand requests to L2 cache.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xe7",
- "Errata": "HSD78",
- "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand requests to L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "All requests that missed L2.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x3f",
- "Errata": "HSD78",
- "EventName": "L2_RQSTS.MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "All requests that miss L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "All requests to L2 cache.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xff",
- "Errata": "HSD78",
- "EventName": "L2_RQSTS.REFERENCES",
- "SampleAfterValue": "200003",
- "BriefDescription": "All L2 requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD78, HSD62, HSD61",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "CounterMask": "1",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
+ "PublicDescription": "Counts all requests that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c8fff",
"Counter": "0,1,2,3",
@@ -811,6 +815,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c07f7",
"Counter": "0,1,2,3",
@@ -823,6 +828,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c07f7",
"Counter": "0,1,2,3",
@@ -835,6 +841,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0244",
"Counter": "0,1,2,3",
@@ -847,6 +854,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0122",
"Counter": "0,1,2,3",
@@ -859,6 +867,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0122",
"Counter": "0,1,2,3",
@@ -871,6 +880,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0091",
"Counter": "0,1,2,3",
@@ -883,6 +893,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0091",
"Counter": "0,1,2,3",
@@ -895,6 +906,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0200",
"Counter": "0,1,2,3",
@@ -907,6 +919,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0100",
"Counter": "0,1,2,3",
@@ -919,6 +932,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0080",
"Counter": "0,1,2,3",
@@ -931,6 +945,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0040",
"Counter": "0,1,2,3",
@@ -943,6 +958,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0020",
"Counter": "0,1,2,3",
@@ -955,6 +971,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0010",
"Counter": "0,1,2,3",
@@ -967,6 +984,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0004",
"Counter": "0,1,2,3",
@@ -979,6 +997,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0004",
"Counter": "0,1,2,3",
@@ -991,6 +1010,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0002",
"Counter": "0,1,2,3",
@@ -1003,6 +1023,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0002",
"Counter": "0,1,2,3",
@@ -1015,6 +1036,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0001",
"Counter": "0,1,2,3",
@@ -1027,6 +1049,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0001",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/haswell/floating-point.json b/tools/perf/pmu-events/arch/x86/haswell/floating-point.json
index 1732fa49c6d2..f9843e5a9b42 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/floating-point.json
@@ -20,6 +20,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Note that a whole rep string only counts AVX_INST.ALL once.",
+ "EventCode": "0xC6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "AVX_INSTS.ALL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Approximate counts of AVX & AVX2 256-bit instructions, including non-arithmetic instructions, loads, and stores. May count non-AVX instructions that employ 256-bit operations, including (but not necessarily limited to) rep string instructions that use 256-bit loads and stores for optimized performance, XSAVE* and XRSTOR*, and operations that transition the x87 FPU data registers between x87 and MMX.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Number of X87 FP assists due to output values.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
@@ -69,15 +79,5 @@
"BriefDescription": "Cycles with any input/output SSE or FP assist",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "Note that a whole rep string only counts AVX_INST.ALL once.",
- "EventCode": "0xC6",
- "Counter": "0,1,2,3",
- "UMask": "0x7",
- "EventName": "AVX_INSTS.ALL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Approximate counts of AVX & AVX2 256-bit instructions, including non-arithmetic instructions, loads, and stores. May count non-AVX instructions that employ 256-bit operations, including (but not necessarily limited to) rep string instructions that use 256-bit loads and stores for optimized performance, XSAVE* and XRSTOR*, and operations that transition the x87 FPU data registers between x87 and MMX.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
]
\ No newline at end of file
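The floating-point.json hunk above relocates the AVX_INSTS.ALL entry towards the top of the file and drops the old copy at the bottom, leaving the event set unchanged. A small, hypothetical check like the following (not part of the perf build) can confirm that a reshuffled file still defines each EventName exactly once:

# Hypothetical helper: report EventName values that appear more than once in
# a pmu-events JSON file, e.g. after entries have been reordered by hand.
import collections
import json
import sys

def duplicate_names(path):
    with open(path) as f:
        names = [e["EventName"] for e in json.load(f) if "EventName" in e]
    return [n for n, c in collections.Counter(names).items() if c > 1]

if __name__ == "__main__":
    for path in sys.argv[1:]:
        dups = duplicate_names(path)
        print(path, "OK" if not dups else "duplicates: " + ", ".join(dups))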
diff --git a/tools/perf/pmu-events/arch/x86/haswell/frontend.json b/tools/perf/pmu-events/arch/x86/haswell/frontend.json
index 57a1ce46971f..c0a5bedcc15c 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/frontend.json
@@ -21,74 +21,43 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "IDQ.MS_MITE_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts uops delivered by the Front-end with the assistance of the microcode sequencer. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_UOPS",
+ "UMask": "0x4",
+ "EventName": "IDQ.MITE_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the Front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
+ "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_CYCLES",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterMask": "1",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_CYCLES",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_CYCLES",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
- "CounterMask": "1",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -135,6 +104,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts cycles MITE is delivered four uops. Set Cmask = 4.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
@@ -157,6 +136,38 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "This event counts uops delivered by the Front-end with the assistance of the microcode sequencer. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the Front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_SWITCHES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Number of uops delivered to IDQ from any path.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
@@ -195,6 +206,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ICACHE.IFDATA_STALL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event count the number of undelivered (unallocated) uops from the Front-end to the Resource Allocation Table (RAT) while the Back-end of the processor is not stalled. The Front-end can allocate up to 4 uops per cycle so this event can increment 0-4 times per cycle depending on the number of unallocated uops. This event is counted on a per-core basis.",
"EventCode": "0x9C",
"Counter": "0,1,2,3",
@@ -270,25 +290,5 @@
"SampleAfterValue": "2000003",
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_SWITCHES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ICACHE.IFDATA_STALL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
]
\ No newline at end of file
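Several of the frontend.json entries above come in *_UOPS/*_CYCLES pairs (for example IDQ.DSB_UOPS and IDQ.DSB_CYCLES) that share an EventCode and UMask and differ only in CounterMask, while IDQ.MS_SWITCHES additionally sets EdgeDetect to count transitions rather than cycles. A worked example, again assuming the PERFEVTSEL layout sketched earlier, shows how those qualifier fields change the raw encoding:

# Worked example under the same assumed bit layout as the earlier sketch:
# the *_UOPS/*_CYCLES pairs share event code and umask and differ only in
# the cmask qualifier; IDQ.MS_SWITCHES adds edge detection on top.
def encode(event, umask, cmask=0, inv=0, edge=0):
    return event | (umask << 8) | (cmask << 24) | (inv << 23) | (edge << 18)

print(hex(encode(0x79, 0x08)))                   # IDQ.DSB_UOPS    -> 0x879
print(hex(encode(0x79, 0x08, cmask=1)))          # IDQ.DSB_CYCLES  -> 0x1000879
print(hex(encode(0x79, 0x30, cmask=1, edge=1)))  # IDQ.MS_SWITCHES -> 0x1043079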
diff --git a/tools/perf/pmu-events/arch/x86/haswell/memory.json b/tools/perf/pmu-events/arch/x86/haswell/memory.json
index aab981b42339..e5f9fa6655b3 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/memory.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/memory.json
@@ -401,6 +401,7 @@
"CounterHTOff": "3"
},
{
+ "PublicDescription": "Counts all requests that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc08fff",
"Counter": "0,1,2,3",
@@ -413,6 +414,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01004007f7",
"Counter": "0,1,2,3",
@@ -425,6 +427,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc007f7",
"Counter": "0,1,2,3",
@@ -437,6 +440,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400244",
"Counter": "0,1,2,3",
@@ -449,6 +453,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00244",
"Counter": "0,1,2,3",
@@ -461,6 +466,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400122",
"Counter": "0,1,2,3",
@@ -473,6 +479,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00122",
"Counter": "0,1,2,3",
@@ -485,6 +492,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400091",
"Counter": "0,1,2,3",
@@ -497,6 +505,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00091",
"Counter": "0,1,2,3",
@@ -509,6 +518,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00200",
"Counter": "0,1,2,3",
@@ -521,6 +531,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00100",
"Counter": "0,1,2,3",
@@ -533,6 +544,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00080",
"Counter": "0,1,2,3",
@@ -545,6 +557,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00040",
"Counter": "0,1,2,3",
@@ -557,6 +570,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00020",
"Counter": "0,1,2,3",
@@ -569,6 +583,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00010",
"Counter": "0,1,2,3",
@@ -581,6 +596,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400004",
"Counter": "0,1,2,3",
@@ -593,6 +609,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00004",
"Counter": "0,1,2,3",
@@ -605,6 +622,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400002",
"Counter": "0,1,2,3",
@@ -617,6 +635,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00002",
"Counter": "0,1,2,3",
@@ -629,6 +648,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400001",
"Counter": "0,1,2,3",
@@ -641,6 +661,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00001",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/haswell/other.json b/tools/perf/pmu-events/arch/x86/haswell/other.json
index 85d6a14baf9d..8a4d898d76c1 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/other.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/other.json
@@ -10,16 +10,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
- "EventCode": "0x5C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPL_CYCLES.RING123",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0x5C",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -31,6 +21,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
+ "EventCode": "0x5C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPL_CYCLES.RING123",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles in which the L1D and L2 are locked, due to a UC lock or split lock.",
"EventCode": "0x63",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/haswell/pipeline.json b/tools/perf/pmu-events/arch/x86/haswell/pipeline.json
index 0099848607ad..a4dcfce4a512 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/pipeline.json
@@ -2,33 +2,43 @@
{
"PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. INST_RETIRED.ANY is counted by a designated fixed counter, leaving the programmable counters available for other events. Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"EventCode": "0x00",
- "Counter": "Fixed counter 1",
+ "Counter": "Fixed counter 0",
"UMask": "0x1",
"Errata": "HSD140, HSD143",
"EventName": "INST_RETIRED.ANY",
"SampleAfterValue": "2000003",
"BriefDescription": "Instructions retired from execution.",
- "CounterHTOff": "Fixed counter 1"
+ "CounterHTOff": "Fixed counter 0"
},
{
"PublicDescription": "This event counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
"EventCode": "0x00",
- "Counter": "Fixed counter 2",
+ "Counter": "Fixed counter 1",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000003",
"BriefDescription": "Core cycles when the thread is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "CounterHTOff": "Fixed counter 1"
},
{
"PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state.",
"EventCode": "0x00",
- "Counter": "Fixed counter 3",
+ "Counter": "Fixed counter 2",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"SampleAfterValue": "2000003",
"BriefDescription": "Reference cycles when the core is not in halt state.",
- "CounterHTOff": "Fixed counter 3"
+ "CounterHTOff": "Fixed counter 2"
},
{
"PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceding smaller uncompleted store. The penalty for blocked store forwarding is that the load must wait for the store to write its value to the cache before it can be issued.",
@@ -67,7 +77,19 @@
"UMask": "0x3",
"EventName": "INT_MISC.RECOVERY_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...)",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "AnyThread": "1",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke)",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -82,6 +104,29 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x0E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x0E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"PublicDescription": "Number of flags-merge uops allocated. Such uops add delay.",
"EventCode": "0x0E",
"Counter": "0,1,2,3",
@@ -112,35 +157,32 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x0E",
- "Invert": "1",
+ "EventCode": "0x14",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "UMask": "0x2",
+ "EventName": "ARITH.DIVIDER_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Any uop executed by the Divider. (This includes all divide uops, sqrt, ...)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x0E",
- "Invert": "1",
+ "PublicDescription": "Counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x14",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ARITH.DIVIDER_UOPS",
+ "UMask": "0x0",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Any uop executed by the Divider. (This includes all divide uops, sqrt, ...)",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -154,6 +196,38 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x3c",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -163,6 +237,15 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for S/W prefetch.",
"EventCode": "0x4c",
"Counter": "0,1,2,3",
@@ -233,6 +316,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x5E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts cycles where the decoder is stalled on an instruction with a length changing prefix (LCP).",
"EventCode": "0x87",
"Counter": "0,1,2,3",
@@ -409,6 +504,15 @@
{
"EventCode": "0x89",
"Counter": "0,1,2,3",
+ "UMask": "0xa0",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
"UMask": "0xc1",
"EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
"SampleAfterValue": "200003",
@@ -445,6 +549,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles per core when uops are exectuted in port 0.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are executed in port 0.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles which a uop is dispatched on port 1 in this thread.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -455,6 +579,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles per core when uops are exectuted in port 1.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are executed in port 1.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles which a uop is dispatched on port 2 in this thread.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -465,6 +609,25 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles which a uop is dispatched on port 3 in this thread.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -475,6 +638,25 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles which a uop is dispatched on port 4 in this thread.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -485,6 +667,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles per core when uops are exectuted in port 4.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are executed in port 4.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles which a uop is dispatched on port 5 in this thread.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -495,6 +697,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles per core when uops are exectuted in port 5.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are executed in port 5.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles which a uop is dispatched on port 6 in this thread.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -505,6 +727,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles per core when uops are exectuted in port 6.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are executed in port 6.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles which a uop is dispatched on port 7 in this thread.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -515,6 +757,25 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles allocation is stalled due to resource related reason.",
"EventCode": "0xA2",
"Counter": "0,1,2,3",
@@ -566,17 +827,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles with pending L1 data cache miss loads. Set Cmask=8 to count cycle.",
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with pending L1 cache miss loads.",
- "CounterMask": "8",
- "CounterHTOff": "2"
- },
- {
"PublicDescription": "Cycles with pending memory loads. Set Cmask=2 to count cycle.",
"EventCode": "0xA3",
"Counter": "0,1,2,3",
@@ -594,7 +844,7 @@
"UMask": "0x4",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Total execution stalls",
+ "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
"CounterMask": "4",
"CounterHTOff": "0,1,2,3"
},
@@ -621,6 +871,17 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Cycles with pending L1 data cache miss loads. Set Cmask=8 to count cycle.",
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with pending L1 cache miss loads.",
+ "CounterMask": "8",
+ "CounterHTOff": "2"
+ },
+ {
"PublicDescription": "Execution stalls due to L1 data cache miss loads. Set Cmask=0CH.",
"EventCode": "0xA3",
"Counter": "2",
@@ -642,14 +903,23 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
- "EventCode": "0xB1",
+ "EventCode": "0xA8",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CORE",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on the core.",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -665,24 +935,127 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Number of instructions at retirement.",
- "EventCode": "0xC0",
+ "PublicDescription": "This events counts the cycles where at least one uop was executed. It is counted per thread.",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "Errata": "HSD11, HSD140",
- "EventName": "INST_RETIRED.ANY_P",
+ "UMask": "0x1",
+ "Errata": "HSD144, HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "This events counts the cycles where at least two uop were executed. It is counted per thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD144, HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "This events counts the cycles where at least three uop were executed. It is counted per thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD144, HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD144, HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of uops executed on the core.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
- "EventCode": "0xC0",
+ "EventCode": "0xb1",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "INST_RETIRED.X87",
+ "Errata": "HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"SampleAfterValue": "2000003",
- "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions: Counts also flows that have several X87 or flows that use X87 uops in the exception handling.",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of instructions at retirement.",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "Errata": "HSD11, HSD140",
+ "EventName": "INST_RETIRED.ANY_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -698,6 +1071,16 @@
"CounterHTOff": "1"
},
{
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "INST_RETIRED.X87",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions: Counts also flows that have several X87 or flows that use X87 uops in the exception handling.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Number of microcode assists invoked by HW upon uop writeback.",
"EventCode": "0xC1",
"Counter": "0,1,2,3",
@@ -709,7 +1092,6 @@
},
{
"PEBS": "1",
- "PublicDescription": "Counts the number of micro-ops retired. Use Cmask=1 and invert to count active cycles or stalled cycles.",
"EventCode": "0xC2",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -720,17 +1102,6 @@
"Data_LA": "1"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts the number of retirement slots used each cycle. There are potentially 4 slots that can be used each cycle - meaning, 4 uops or 4 instructions could retire each cycle.",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Retirement slots used.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0xC2",
"Invert": "1",
"Counter": "0,1,2,3",
@@ -765,6 +1136,16 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Retirement slots used.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC3",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -774,6 +1155,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event is incremented when self-modifying code (SMC) is detected, which causes a machine clear. Machine clears can have a significant performance impact if they are happening frequently.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
@@ -793,8 +1185,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Branch instructions at retirement.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PEBS": "1",
- "PublicDescription": "Counts the number of conditional branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -814,18 +1215,27 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Branch instructions at retirement.",
+ "PEBS": "1",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "UMask": "0x2",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
"SampleAfterValue": "400009",
"BriefDescription": "All (macro) branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Counts the number of near return instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x8",
@@ -846,7 +1256,6 @@
},
{
"PEBS": "1",
- "PublicDescription": "Number of near taken branches retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x20",
@@ -866,14 +1275,14 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "2",
- "EventCode": "0xC4",
+ "PublicDescription": "Mispredicted branch instructions at retirement.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "UMask": "0x0",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired.",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PEBS": "1",
@@ -886,16 +1295,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Mispredicted branch instructions at retirement.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All mispredicted macro branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PEBS": "2",
"PublicDescription": "This event counts all mispredicted branch instructions retired. This is a precise event.",
"EventCode": "0xC5",
@@ -903,121 +1302,11 @@
"UMask": "0x4",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
"SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted macro branch instructions retired. ",
+ "BriefDescription": "Mispredicted macro branch instructions retired.",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Count cases of saving new LBR records by hardware.",
- "EventCode": "0xCC",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Count cases of saving new LBR",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Thread cycles when thread is not in halt state",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xa0",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 0.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 1.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PEBS": "1",
- "PublicDescription": "Number of near branch instructions retired that were taken but mispredicted.",
"EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0x20",
@@ -1027,51 +1316,14 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This events counts the cycles where at least one uop was executed. It is counted per thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD144, HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "This events counts the cycles where at least two uop were executed. It is counted per thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD144, HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "This events counts the cycles where at least three uop were executed. It is counted per thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD144, HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
+ "PublicDescription": "Count cases of saving new LBR records by hardware.",
+ "EventCode": "0xCC",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD144, HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "UMask": "0x20",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Count cases of saving new LBR",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PublicDescription": "Number of front end re-steers due to BPU misprediction.",
@@ -1082,248 +1334,5 @@
"SampleAfterValue": "100003",
"BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "MACHINE_CLEARS.COUNT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_4_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "RS_EVENTS.EMPTY_END",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 0.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 1.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 3.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 4.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 5.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 6.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 7.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x00",
- "Counter": "Fixed counter 2",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "AnyThread": "1",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke)",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
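Note on the pipeline.json entries above: each JSON object maps onto the x86 core-PMU event encoding that perf exposes through sysfs. EventCode and UMask select the event, while CounterMask, Invert, EdgeDetect and AnyThread correspond to the cmask, inv, edge and any format fields. The sketch below is illustrative only and not part of the patch; raw_event_spec is a hypothetical helper, and the bit positions assume the usual cpu PMU format (event 0-7, umask 8-15, edge 18, any 21, inv 23, cmask 24-31), which should be checked against /sys/bus/event_source/devices/cpu/format on the target machine.

# Illustrative sketch: turn a pmu-events JSON entry into a raw perf event specifier.
import json

def raw_event_spec(entry):
    cfg  = int(entry["EventCode"].split(",")[0], 16)    # event select, bits 0-7
    cfg |= int(entry["UMask"], 16) << 8                  # unit mask, bits 8-15
    cfg |= int(entry.get("EdgeDetect", "0"), 0) << 18    # edge detect
    cfg |= int(entry.get("AnyThread", "0"), 0) << 21     # count both SMT threads
    cfg |= int(entry.get("Invert", "0"), 0) << 23        # invert counter-mask comparison
    cfg |= int(entry.get("CounterMask", "0"), 0) << 24   # cmask, bits 24-31
    return "cpu/config=%#x/" % cfg

# Fields taken from the MACHINE_CLEARS.COUNT entry added in this patch.
machine_clears_count = json.loads('{"EventCode": "0xC3", "UMask": "0x1", '
                                  '"EdgeDetect": "1", "CounterMask": "1", '
                                  '"EventName": "MACHINE_CLEARS.COUNT"}')
print(raw_event_spec(machine_clears_count))   # -> cpu/config=0x10401c3/

The resulting specifier can be passed directly to perf (for example, perf stat -e cpu/config=0x10401c3/), which is useful when no symbolic alias for the event is available.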
diff --git a/tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json b/tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json
index ce80a08d0f08..777b500a5c9f 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json
@@ -39,6 +39,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Completed page walks in any TLB of any page size due to demand load misses.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB load misses.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
@@ -69,6 +79,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Number of cache load STLB hits. No page walk.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x60",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "DTLB demand load misses with low part of linear-to-physical address translation missed.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
@@ -118,6 +138,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Completed page walks due to store miss in any TLB levels of any page size (4K/2M/4M/1G).",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB store misses.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
@@ -148,6 +178,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x60",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "DTLB store misses with low part of linear-to-physical address translation missed.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
@@ -206,6 +246,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Completed page walks in ITLB of any page size.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by ITLB misses.",
"EventCode": "0x85",
"Counter": "0,1,2,3",
@@ -236,6 +286,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "ITLB misses that hit STLB. No page walk.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x60",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts the number of ITLB flushes, includes 4k/2M/4M pages.",
"EventCode": "0xae",
"Counter": "0,1,2,3",
@@ -256,41 +316,45 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Number of ITLB page walker loads that hit in the L1+FB.",
+ "PublicDescription": "Number of DTLB page walker loads that hit in the L2.",
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x21",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
+ "UMask": "0x12",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of ITLB page walker hits in the L1+FB",
+ "BriefDescription": "Number of DTLB page walker hits in the L2",
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Number of DTLB page walker loads that hit in the L3.",
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L1",
+ "UMask": "0x14",
+ "Errata": "HSD25",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L1 and FB.",
+ "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP",
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Number of DTLB page walker loads from memory.",
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L1",
+ "UMask": "0x18",
+ "Errata": "HSD25",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L1 and FB.",
+ "BriefDescription": "Number of DTLB page walker hits in Memory",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Number of DTLB page walker loads that hit in the L2.",
+ "PublicDescription": "Number of ITLB page walker loads that hit in the L1+FB.",
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x12",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
+ "UMask": "0x21",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of DTLB page walker hits in the L2",
+ "BriefDescription": "Number of ITLB page walker hits in the L1+FB",
"CounterHTOff": "0,1,2,3"
},
{
@@ -304,43 +368,43 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Number of ITLB page walker loads that hit in the L3.",
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x42",
- "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L2",
+ "UMask": "0x24",
+ "Errata": "HSD25",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L2.",
+ "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP",
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Number of ITLB page walker loads from memory.",
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L2",
+ "UMask": "0x28",
+ "Errata": "HSD25",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_MEMORY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
+ "BriefDescription": "Number of ITLB page walker hits in Memory",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Number of DTLB page walker loads that hit in the L3.",
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x14",
- "Errata": "HSD25",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
+ "UMask": "0x41",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L1 and FB.",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Number of ITLB page walker loads that hit in the L3.",
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x24",
- "Errata": "HSD25",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
+ "UMask": "0x42",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L2.",
"CounterHTOff": "0,1,2,3"
},
{
@@ -355,41 +419,37 @@
{
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x84",
- "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L3",
+ "UMask": "0x48",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_MEMORY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in memory.",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Number of DTLB page walker loads from memory.",
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x18",
- "Errata": "HSD25",
- "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
+ "UMask": "0x81",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of DTLB page walker hits in Memory",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L1 and FB.",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Number of ITLB page walker loads from memory.",
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x28",
- "Errata": "HSD25",
- "EventName": "PAGE_WALKER_LOADS.ITLB_MEMORY",
+ "UMask": "0x82",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of ITLB page walker hits in Memory",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x48",
- "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_MEMORY",
+ "UMask": "0x84",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in memory.",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
"CounterHTOff": "0,1,2,3"
},
{
@@ -420,65 +480,5 @@
"SampleAfterValue": "100003",
"BriefDescription": "STLB flush attempts",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Completed page walks in any TLB of any page size due to demand load misses.",
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of cache load STLB hits. No page walk.",
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x60",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Completed page walks due to store miss in any TLB levels of any page size (4K/2M/4M/1G).",
- "EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
- "SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
- "EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x60",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Completed page walks in ITLB of any page size.",
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
- "SampleAfterValue": "100003",
- "BriefDescription": "Misses in all ITLB levels that cause completed page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "ITLB misses that hit STLB. No page walk.",
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x60",
- "EventName": "ITLB_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
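A quick way to read the virtual-memory.json additions above: DTLB_LOAD_MISSES.STLB_HIT counts first-level load misses that the second-level TLB satisfied, while DTLB_LOAD_MISSES.WALK_COMPLETED counts those that went on to a full page walk. The ratio below is an illustrative sketch, not a metric defined by these files, and stlb_absorption is a hypothetical helper; it estimates how many first-level misses the STLB absorbed from two counter readings taken over the same interval (for example, via perf stat using the event names added above).

# Illustrative only: derive an STLB hit ratio from two counter readings.
def stlb_absorption(stlb_hit, walk_completed):
    misses = stlb_hit + walk_completed
    return stlb_hit / misses if misses else 0.0

print(stlb_absorption(stlb_hit=950_000, walk_completed=50_000))   # -> 0.95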
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/cache.json b/tools/perf/pmu-events/arch/x86/haswellx/cache.json
index f1bae0817a6f..b2fbd617306a 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/cache.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/cache.json
@@ -12,12 +12,32 @@
},
{
"EventCode": "0x24",
- "UMask": "0x41",
- "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "UMask": "0x22",
+ "BriefDescription": "RFO requests that miss L2 cache",
"Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "PublicDescription": "Counts the number of store RFO requests that miss the L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x24",
+ "BriefDescription": "L2 cache misses when fetching instructions",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "PublicDescription": "Number of instruction fetches that missed the L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x27",
+ "BriefDescription": "Demand requests that miss L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"Errata": "HSD78",
- "PublicDescription": "Demand data read requests that hit L2 cache.",
+ "PublicDescription": "Demand requests that miss L2 cache.",
"SampleAfterValue": "200003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -33,6 +53,48 @@
},
{
"EventCode": "0x24",
+ "UMask": "0x3f",
+ "BriefDescription": "All requests that miss L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.MISS",
+ "Errata": "HSD78",
+ "PublicDescription": "All requests that missed L2.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x41",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "Errata": "HSD78",
+ "PublicDescription": "Demand data read requests that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x42",
+ "BriefDescription": "RFO requests that hit L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "PublicDescription": "Counts the number of store RFO requests that hit the L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x44",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "PublicDescription": "Number of instruction fetches that hit the L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
"UMask": "0x50",
"BriefDescription": "L2 prefetch requests that hit L2 cache",
"Counter": "0,1,2,3",
@@ -74,6 +136,17 @@
},
{
"EventCode": "0x24",
+ "UMask": "0xe7",
+ "BriefDescription": "Demand requests to L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "Errata": "HSD78",
+ "PublicDescription": "Demand requests to L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
"UMask": "0xf8",
"BriefDescription": "Requests from L2 hardware prefetchers",
"Counter": "0,1,2,3",
@@ -83,6 +156,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x24",
+ "UMask": "0xff",
+ "BriefDescription": "All L2 requests",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "Errata": "HSD78",
+ "PublicDescription": "All requests to L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x27",
"UMask": "0x50",
"BriefDescription": "Not rejected writebacks that hit L2 cache",
@@ -124,6 +208,27 @@
},
{
"EventCode": "0x48",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "Counter": "2",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "Counter": "2",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "AnyThread": "1",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
"UMask": "0x2",
"BriefDescription": "Number of times a request needed a FB entry but there was no entry available for it. That is the FB unavailability was dominant reason for blocking the request. A request includes cacheable/uncacheable demands that is load, store or SW prefetch. HWP are e.",
"Counter": "0,1,2,3",
@@ -133,13 +238,13 @@
},
{
"EventCode": "0x48",
- "UMask": "0x1",
- "BriefDescription": "Cycles with L1D load Misses outstanding.",
- "Counter": "2",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
+ "Counter": "0,1,2,3",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
"CounterMask": "1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "2"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x51",
@@ -164,6 +269,28 @@
},
{
"EventCode": "0x60",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "CounterMask": "1",
+ "Errata": "HSD78, HSD62, HSD61",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "CounterMask": "6",
+ "Errata": "HSD78, HSD62, HSD61",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
"UMask": "0x2",
"BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"Counter": "0,1,2,3",
@@ -186,23 +313,23 @@
},
{
"EventCode": "0x60",
- "UMask": "0x8",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "UMask": "0x4",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "CounterMask": "1",
"Errata": "HSD62, HSD61",
- "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "UMask": "0x8",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
- "CounterMask": "1",
- "Errata": "HSD78, HSD62, HSD61",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "Errata": "HSD62, HSD61",
+ "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -218,17 +345,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x60",
- "UMask": "0x4",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
- "CounterMask": "1",
- "Errata": "HSD62, HSD61",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0x63",
"UMask": "0x2",
"BriefDescription": "Cycles when L1D is locked",
@@ -289,9 +405,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0xD0",
"UMask": "0x11",
- "BriefDescription": "Retired load uops that miss the STLB.",
+ "BriefDescription": "Retired load uops that miss the STLB. (precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -303,20 +428,20 @@
{
"EventCode": "0xD0",
"UMask": "0x12",
- "BriefDescription": "Retired store uops that miss the STLB.",
+ "BriefDescription": "Retired store uops that miss the STLB. (precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
"Errata": "HSD29, HSM30",
- "SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
+ "SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x21",
- "BriefDescription": "Retired load uops with locked access.",
+ "BriefDescription": "Retired load uops with locked access. (precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -328,32 +453,34 @@
{
"EventCode": "0xD0",
"UMask": "0x41",
- "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary. (precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"Errata": "HSD29, HSM30",
+ "PublicDescription": "This event counts load uops retired which had memory addresses spilt across 2 cache lines. A line split is across 64B cache-lines which may include a page split (4K). This is a precise event.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x42",
- "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"Errata": "HSD29, HSM30",
- "SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
+ "PublicDescription": "This event counts store uops retired which had memory addresses spilt across 2 cache lines. A line split is across 64B cache-lines which may include a page split (4K). This is a precise event.",
+ "SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x81",
- "BriefDescription": "All retired load uops.",
+ "BriefDescription": "All retired load uops. (precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -365,14 +492,15 @@
{
"EventCode": "0xD0",
"UMask": "0x82",
- "BriefDescription": "All retired store uops.",
+ "BriefDescription": "All retired store uops. (precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"Errata": "HSD29, HSM30",
- "SampleAfterValue": "2000003",
"L1_Hit_Indication": "1",
+ "PublicDescription": "This event counts all store uops retired. This is a precise event.",
+ "SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
@@ -402,13 +530,13 @@
{
"EventCode": "0xD1",
"UMask": "0x4",
- "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
"Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
- "PublicDescription": "Retired load uops with L3 cache hits as data sources.",
+ "PublicDescription": "This event counts retired load uops in which data sources were data hits in the L3 cache without snoops required. This does not include hardware prefetches. This is a precise event.",
"SampleAfterValue": "50021",
"CounterHTOff": "0,1,2,3"
},
@@ -421,20 +549,19 @@
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"Errata": "HSM30",
- "PublicDescription": "Retired load uops missed L1 cache as data sources.",
+ "PublicDescription": "This event counts retired load uops in which data sources missed in the L1 cache. This does not include hardware prefetches. This is a precise event.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x10",
- "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Retired load uops with L2 cache misses as data sources.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
"Errata": "HSD29, HSM30",
- "PublicDescription": "Retired load uops missed L2. Unknown data source excluded.",
"SampleAfterValue": "50021",
"CounterHTOff": "0,1,2,3"
},
@@ -447,7 +574,6 @@
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_MISS",
"Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
- "PublicDescription": "Retired load uops missed L3. Excludes unknown data source .",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -478,24 +604,26 @@
{
"EventCode": "0xD2",
"UMask": "0x2",
- "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache. ",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
"Errata": "HSD29, HSD25, HSM26, HSM30",
+ "PublicDescription": "This event counts retired load uops that hit in the L3 cache, but required a cross-core snoop which resulted in a HIT in an on-pkg core cache. This does not include hardware prefetches. This is a precise event.",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x4",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3. ",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
"Errata": "HSD29, HSD25, HSM26, HSM30",
+ "PublicDescription": "This event counts retired load uops that hit in the L3 cache, but required a cross-core snoop which resulted in a HITM (hit modified) in an on-pkg core cache. This does not include hardware prefetches. This is a precise event.",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
@@ -514,20 +642,19 @@
{
"EventCode": "0xD3",
"UMask": "0x1",
- "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
"Errata": "HSD74, HSD29, HSD25, HSM30",
- "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches.",
+ "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches. This is a precise event.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD3",
"UMask": "0x4",
- "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI) (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -539,7 +666,7 @@
{
"EventCode": "0xD3",
"UMask": "0x10",
- "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM",
+ "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -551,7 +678,7 @@
{
"EventCode": "0xD3",
"UMask": "0x20",
- "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache",
+ "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -706,135 +833,11 @@
"BriefDescription": "Split locks in SQ",
"Counter": "0,1,2,3",
"EventName": "SQ_MISC.SPLIT_LOCK",
+ "PublicDescription": "",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x24",
- "UMask": "0x42",
- "BriefDescription": "RFO requests that hit L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_HIT",
- "PublicDescription": "Counts the number of store RFO requests that hit the L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x22",
- "BriefDescription": "RFO requests that miss L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_MISS",
- "PublicDescription": "Counts the number of store RFO requests that miss the L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x44",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.CODE_RD_HIT",
- "PublicDescription": "Number of instruction fetches that hit the L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x24",
- "BriefDescription": "L2 cache misses when fetching instructions",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
- "PublicDescription": "Number of instruction fetches that missed the L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x27",
- "BriefDescription": "Demand requests that miss L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
- "Errata": "HSD78",
- "PublicDescription": "Demand requests that miss L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0xe7",
- "BriefDescription": "Demand requests to L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
- "Errata": "HSD78",
- "PublicDescription": "Demand requests to L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x3f",
- "BriefDescription": "All requests that miss L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.MISS",
- "Errata": "HSD78",
- "PublicDescription": "All requests that missed L2.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0xff",
- "BriefDescription": "All L2 requests",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.REFERENCES",
- "Errata": "HSD78",
- "PublicDescription": "All requests to L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "CounterMask": "6",
- "Errata": "HSD78, HSD62, HSD61",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x48",
- "UMask": "0x1",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "Counter": "2",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "AnyThread": "1",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0x48",
- "UMask": "0x2",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "Counter": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
@@ -843,6 +846,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -855,6 +859,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -867,6 +872,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -879,6 +885,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -891,6 +898,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -903,6 +911,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -915,6 +924,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -927,6 +937,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -939,6 +950,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -951,6 +963,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -963,6 +976,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -975,6 +989,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -987,6 +1002,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -999,6 +1015,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1011,6 +1028,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1023,6 +1041,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1035,6 +1054,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1047,6 +1067,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1059,6 +1080,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1071,6 +1093,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all requests that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
}
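Editor's note on the OFFCORE_RESPONSE.* entries above (not part of the patch): the PublicDescription strings added here all restate that these events are programmed through the auxiliary MSR pair listed in MSRIndex, 0x1a6/0x1a7 (MSR_OFFCORE_RSP_0/1), which carries the request/response attributes of the offcore transaction. A minimal sketch of how such an event can be requested through perf_event_open(2) follows; the function name open_offcore_response and the mask argument are illustrative placeholders, not anything defined by this patch. On Intel core PMUs perf passes the offcore mask in config1, while config carries the usual event/umask encoding (here 0xB7, umask 0x1); the kernel then picks between the two OFFCORE_RSP MSRs depending on which event code/counter the event lands on.

/* Sketch only: open one OFFCORE_RESPONSE event with a caller-supplied mask. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <string.h>
#include <unistd.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                            int cpu, int group_fd, unsigned long flags)
{
        /* glibc has no wrapper for this syscall */
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int open_offcore_response(unsigned long long offcore_rsp_value)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_RAW;
        attr.config = 0x01b7;              /* event 0xB7, umask 0x1 */
        attr.config1 = offcore_rsp_value;  /* forwarded to MSR_OFFCORE_RSP_0/1 */
        attr.disabled = 1;

        /* count for the calling thread on any CPU */
        return perf_event_open(&attr, 0, -1, -1, 0);
}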
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/floating-point.json b/tools/perf/pmu-events/arch/x86/haswellx/floating-point.json
index 6282aed6e090..bc08cc1f2f7e 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/floating-point.json
@@ -20,6 +20,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xC6",
+ "UMask": "0x7",
+ "BriefDescription": "Approximate counts of AVX & AVX2 256-bit instructions, including non-arithmetic instructions, loads, and stores. May count non-AVX instructions that employ 256-bit operations, including (but not necessarily limited to) rep string instructions that use 256-bit loads and stores for optimized performance, XSAVE* and XRSTOR*, and operations that transition the x87 FPU data registers between x87 and MMX.",
+ "Counter": "0,1,2,3",
+ "EventName": "AVX_INSTS.ALL",
+ "PublicDescription": "Note that a whole rep string only counts AVX_INST.ALL once.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xCA",
"UMask": "0x2",
"BriefDescription": "Number of X87 assists due to output value.",
@@ -69,15 +79,5 @@
"PublicDescription": "Cycles with any input/output SSE* or FP assists.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC6",
- "UMask": "0x7",
- "BriefDescription": "Approximate counts of AVX & AVX2 256-bit instructions, including non-arithmetic instructions, loads, and stores. May count non-AVX instructions that employ 256-bit operations, including (but not necessarily limited to) rep string instructions that use 256-bit loads and stores for optimized performance, XSAVE* and XRSTOR*, and operations that transition the x87 FPU data registers between x87 and MMX.",
- "Counter": "0,1,2,3",
- "EventName": "AVX_INSTS.ALL",
- "PublicDescription": "Note that a whole rep string only counts AVX_INST.ALL once.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/frontend.json b/tools/perf/pmu-events/arch/x86/haswellx/frontend.json
index 2d0c7aac1e61..a4d9f1fcf940 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/frontend.json
@@ -22,72 +22,41 @@
},
{
"EventCode": "0x79",
- "UMask": "0x8",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.DSB_UOPS",
- "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x10",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_DSB_UOPS",
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x20",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_MITE_UOPS",
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MS_UOPS",
- "PublicDescription": "This event counts uops delivered by the Front-end with the assistance of the microcode sequencer. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
+ "EventName": "IDQ.MITE_CYCLES",
+ "CounterMask": "1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "UMask": "0x8",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MS_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the Front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x4",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MITE_CYCLES",
+ "EventName": "IDQ.DSB_CYCLES",
"CounterMask": "1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x8",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
+ "UMask": "0x10",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"Counter": "0,1,2,3",
- "EventName": "IDQ.DSB_CYCLES",
- "CounterMask": "1",
+ "EventName": "IDQ.MS_DSB_UOPS",
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -136,6 +105,16 @@
},
{
"EventCode": "0x79",
+ "UMask": "0x20",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
"UMask": "0x24",
"BriefDescription": "Cycles MITE is delivering 4 Uops",
"Counter": "0,1,2,3",
@@ -158,6 +137,38 @@
},
{
"EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "This event counts uops delivered by the Front-end with the assistance of the microcode sequencer. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the Front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_SWITCHES",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
"UMask": "0x3c",
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"Counter": "0,1,2,3",
@@ -195,6 +206,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x80",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
+ "Counter": "0,1,2,3",
+ "EventName": "ICACHE.IFDATA_STALL",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x9C",
"UMask": "0x1",
"BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
@@ -270,25 +290,5 @@
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EdgeDetect": "1",
- "EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_SWITCHES",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x80",
- "UMask": "0x4",
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
- "Counter": "0,1,2,3",
- "EventName": "ICACHE.IFDATA_STALL",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
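Editor's note on the IDQ.* entries above (not part of the patch): JSON fields such as CounterMask, EdgeDetect, AnyThread and Invert correspond to the cmask/edge/any/inv bits of the x86 IA32_PERFEVTSELx register, which is also the layout perf uses for a raw event config on this PMU. A small sketch of that encoding, using IDQ.MS_SWITCHES from the hunk above (event 0x79, umask 0x30, cmask 1, edge 1) as the worked example; the helper name x86_raw_event is illustrative, not something the patch defines.

#include <stdint.h>

/* Compose a raw perf config from the fields used in the JSON entries. */
static inline uint64_t x86_raw_event(uint8_t event, uint8_t umask,
                                     uint8_t cmask, int edge, int inv, int any)
{
        return (uint64_t)event          |       /* bits  0-7  */
               ((uint64_t)umask  << 8)  |       /* bits  8-15 */
               ((uint64_t)!!edge << 18) |       /* EdgeDetect */
               ((uint64_t)!!any  << 21) |       /* AnyThread  */
               ((uint64_t)!!inv  << 23) |       /* Invert     */
               ((uint64_t)cmask  << 24);        /* CounterMask */
}

/* IDQ.MS_SWITCHES as listed above: event 0x79, umask 0x30, cmask 1, edge 1 */
#define IDQ_MS_SWITCHES_RAW x86_raw_event(0x79, 0x30, 1, 1, 0, 0)

The command-line equivalent would be roughly perf stat -e cpu/event=0x79,umask=0x30,cmask=1,edge=1/, using the format terms the core PMU exposes under /sys/bus/event_source/devices/cpu/format.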
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/memory.json b/tools/perf/pmu-events/arch/x86/haswellx/memory.json
index 0886cc000d22..56b0f24b8029 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/memory.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/memory.json
@@ -409,6 +409,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts demand data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -421,6 +422,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -433,6 +435,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -445,6 +448,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -457,6 +461,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -469,6 +474,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -481,6 +487,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -493,6 +500,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -505,6 +513,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -517,6 +526,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -529,6 +539,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -541,6 +552,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -553,6 +565,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -565,6 +578,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -577,6 +591,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -589,6 +604,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -601,6 +617,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -613,6 +630,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -625,6 +643,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -637,6 +656,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -649,6 +669,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -661,6 +682,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -673,6 +695,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -685,6 +708,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -697,6 +721,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from remote dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -709,6 +734,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -721,6 +747,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and clean or shared data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -733,6 +760,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all requests that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
}
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/other.json b/tools/perf/pmu-events/arch/x86/haswellx/other.json
index 4e1b6ce96ca3..800e65df31bc 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/other.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/other.json
@@ -10,16 +10,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x5C",
- "UMask": "0x2",
- "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
- "Counter": "0,1,2,3",
- "EventName": "CPL_CYCLES.RING123",
- "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EdgeDetect": "1",
"EventCode": "0x5C",
"UMask": "0x1",
@@ -31,6 +21,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x5C",
+ "UMask": "0x2",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "Counter": "0,1,2,3",
+ "EventName": "CPL_CYCLES.RING123",
+ "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x63",
"UMask": "0x1",
"BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json b/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json
index c3a163d34bd7..8a18bfe9e3e4 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json
@@ -3,32 +3,42 @@
"EventCode": "0x00",
"UMask": "0x1",
"BriefDescription": "Instructions retired from execution.",
- "Counter": "Fixed counter 1",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"Errata": "HSD140, HSD143",
"PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. INST_RETIRED.ANY is counted by a designated fixed counter, leaving the programmable counters available for other events. Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 1"
+ "CounterHTOff": "Fixed counter 0"
},
{
"EventCode": "0x00",
"UMask": "0x2",
"BriefDescription": "Core cycles when the thread is not in halt state.",
- "Counter": "Fixed counter 2",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "This event counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "EventCode": "0x00",
+ "UMask": "0x2",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "Fixed counter 1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "Fixed counter 1"
},
{
"EventCode": "0x00",
"UMask": "0x3",
"BriefDescription": "Reference cycles when the core is not in halt state.",
- "Counter": "Fixed counter 3",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 3"
+ "CounterHTOff": "Fixed counter 2"
},
{
"EventCode": "0x03",
@@ -63,7 +73,7 @@
{
"EventCode": "0x0D",
"UMask": "0x3",
- "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...)",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
"Counter": "0,1,2,3",
"EventName": "INT_MISC.RECOVERY_CYCLES",
"CounterMask": "1",
@@ -72,6 +82,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x0D",
+ "UMask": "0x3",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke)",
+ "Counter": "0,1,2,3",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "AnyThread": "1",
+ "CounterMask": "1",
+ "PublicDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x0E",
"UMask": "0x1",
"BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
@@ -82,6 +104,29 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "Invert": "1",
+ "EventCode": "0x0E",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0x0E",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "AnyThread": "1",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0x0E",
"UMask": "0x10",
"BriefDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.",
@@ -112,34 +157,31 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "Invert": "1",
- "EventCode": "0x0E",
- "UMask": "0x1",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
+ "EventCode": "0x14",
+ "UMask": "0x2",
+ "BriefDescription": "Any uop executed by the Divider. (This includes all divide uops, sqrt, ...)",
"Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
- "CounterMask": "1",
+ "EventName": "ARITH.DIVIDER_UOPS",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "Invert": "1",
- "EventCode": "0x0E",
- "UMask": "0x1",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
+ "EventCode": "0x3C",
+ "UMask": "0x0",
+ "BriefDescription": "Thread cycles when thread is not in halt state",
"Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
- "AnyThread": "1",
- "CounterMask": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "Counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x14",
- "UMask": "0x2",
- "BriefDescription": "Any uop executed by the Divider. (This includes all divide uops, sqrt, ...)",
+ "EventCode": "0x3C",
+ "UMask": "0x0",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"Counter": "0,1,2,3",
- "EventName": "ARITH.DIVIDER_UOPS",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "AnyThread": "1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -154,6 +196,38 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "AnyThread": "1",
+ "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "AnyThread": "1",
+ "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x3c",
"UMask": "0x2",
"BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
@@ -163,6 +237,15 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0x3C",
+ "UMask": "0x2",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x4c",
"UMask": "0x1",
"BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
@@ -233,6 +316,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EdgeDetect": "1",
+ "Invert": "1",
+ "EventCode": "0x5E",
+ "UMask": "0x1",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "Counter": "0,1,2,3",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "CounterMask": "1",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x87",
"UMask": "0x1",
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
@@ -408,6 +503,15 @@
},
{
"EventCode": "0x89",
+ "UMask": "0xa0",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
"UMask": "0xc1",
"BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
"Counter": "0,1,2,3",
@@ -446,6 +550,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per core when uops are executed in port 0.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
+ "AnyThread": "1",
+ "PublicDescription": "Cycles per core when uops are exectuted in port 0.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x2",
"BriefDescription": "Cycles per thread when uops are executed in port 1",
"Counter": "0,1,2,3",
@@ -456,6 +580,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles per core when uops are executed in port 1.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
+ "AnyThread": "1",
+ "PublicDescription": "Cycles per core when uops are exectuted in port 1.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x4",
"BriefDescription": "Cycles per thread when uops are executed in port 2",
"Counter": "0,1,2,3",
@@ -466,6 +610,25 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x8",
"BriefDescription": "Cycles per thread when uops are executed in port 3",
"Counter": "0,1,2,3",
@@ -476,6 +639,25 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x10",
"BriefDescription": "Cycles per thread when uops are executed in port 4",
"Counter": "0,1,2,3",
@@ -486,6 +668,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles per core when uops are executed in port 4.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
+ "AnyThread": "1",
+ "PublicDescription": "Cycles per core when uops are exectuted in port 4.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x20",
"BriefDescription": "Cycles per thread when uops are executed in port 5",
"Counter": "0,1,2,3",
@@ -496,6 +698,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x20",
+ "BriefDescription": "Cycles per core when uops are executed in port 5.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
+ "AnyThread": "1",
+ "PublicDescription": "Cycles per core when uops are exectuted in port 5.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x20",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x40",
"BriefDescription": "Cycles per thread when uops are executed in port 6",
"Counter": "0,1,2,3",
@@ -506,6 +728,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x40",
+ "BriefDescription": "Cycles per core when uops are executed in port 6.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
+ "AnyThread": "1",
+ "PublicDescription": "Cycles per core when uops are exectuted in port 6.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x40",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x80",
"BriefDescription": "Cycles per thread when uops are executed in port 7",
"Counter": "0,1,2,3",
@@ -515,6 +757,25 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "UMask": "0x80",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x80",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xA2",
"UMask": "0x1",
"BriefDescription": "Resource-related stall cycles",
@@ -567,17 +828,6 @@
},
{
"EventCode": "0xA3",
- "UMask": "0x8",
- "BriefDescription": "Cycles with pending L1 cache miss loads.",
- "Counter": "2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
- "CounterMask": "8",
- "PublicDescription": "Cycles with pending L1 data cache miss loads. Set Cmask=8 to count cycle.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
"UMask": "0x2",
"BriefDescription": "Cycles with pending memory loads.",
"Counter": "0,1,2,3",
@@ -590,7 +840,7 @@
{
"EventCode": "0xA3",
"UMask": "0x4",
- "BriefDescription": "Total execution stalls",
+ "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
"Counter": "0,1,2,3",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
"CounterMask": "4",
@@ -622,6 +872,17 @@
},
{
"EventCode": "0xA3",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles with pending L1 cache miss loads.",
+ "Counter": "2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "CounterMask": "8",
+ "PublicDescription": "Cycles with pending L1 data cache miss loads. Set Cmask=8 to count cycle.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
"UMask": "0xc",
"BriefDescription": "Execution stalls due to L1 data cache misses",
"Counter": "2",
@@ -642,13 +903,22 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
- "UMask": "0x2",
- "BriefDescription": "Number of uops executed on the core.",
+ "EventCode": "0xA8",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE",
- "Errata": "HSD30, HSM31",
- "PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "CounterMask": "4",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -665,23 +935,126 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xC0",
- "UMask": "0x0",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
"Counter": "0,1,2,3",
- "EventName": "INST_RETIRED.ANY_P",
- "Errata": "HSD11, HSD140",
- "PublicDescription": "Number of instructions at retirement.",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "CounterMask": "1",
+ "Errata": "HSD144, HSD30, HSM31",
+    "PublicDescription": "This event counts the cycles where at least one uop was executed. It is counted per thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "CounterMask": "2",
+ "Errata": "HSD144, HSD30, HSM31",
+    "PublicDescription": "This event counts the cycles where at least two uops were executed. It is counted per thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "CounterMask": "3",
+ "Errata": "HSD144, HSD30, HSM31",
+    "PublicDescription": "This event counts the cycles where at least three uops were executed. It is counted per thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "CounterMask": "4",
+ "Errata": "HSD144, HSD30, HSM31",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x2",
+ "BriefDescription": "Number of uops executed on the core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "Errata": "HSD30, HSM31",
+ "PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC0",
+ "EventCode": "0xb1",
"UMask": "0x2",
- "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions: Counts also flows that have several X87 or flows that use X87 uops in the exception handling.",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "INST_RETIRED.X87",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "CounterMask": "1",
+ "Errata": "HSD30, HSM31",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+    "BriefDescription": "Cycles at least 2 micro-ops are executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "CounterMask": "2",
+ "Errata": "HSD30, HSM31",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+    "BriefDescription": "Cycles at least 3 micro-ops are executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "CounterMask": "3",
+ "Errata": "HSD30, HSM31",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+    "BriefDescription": "Cycles at least 4 micro-ops are executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "CounterMask": "4",
+ "Errata": "HSD30, HSM31",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "Errata": "HSD30, HSM31",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC0",
+ "UMask": "0x0",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3",
+ "EventName": "INST_RETIRED.ANY_P",
+ "Errata": "HSD11, HSD140",
+ "PublicDescription": "Number of instructions at retirement.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -698,6 +1071,16 @@
"CounterHTOff": "1"
},
{
+ "EventCode": "0xC0",
+ "UMask": "0x2",
+ "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions: Counts also flows that have several X87 or flows that use X87 uops in the exception handling.",
+ "Counter": "0,1,2,3",
+ "EventName": "INST_RETIRED.X87",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC1",
"UMask": "0x40",
"BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
@@ -715,18 +1098,6 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "UOPS_RETIRED.ALL",
- "PublicDescription": "Counts the number of micro-ops retired. Use Cmask=1 and invert to count active cycles or stalled cycles.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC2",
- "UMask": "0x2",
- "BriefDescription": "Retirement slots used.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "PublicDescription": "This event counts the number of retirement slots used each cycle. There are potentially 4 slots that can be used each cycle - meaning, 4 uops or 4 instructions could retire each cycle.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -765,6 +1136,16 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xC2",
+ "UMask": "0x2",
+ "BriefDescription": "Retirement slots used.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC3",
"UMask": "0x1",
"BriefDescription": "Cycles there was a Nuke. Account for both thread-specific and All Thread Nukes.",
@@ -774,6 +1155,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EdgeDetect": "1",
+ "EventCode": "0xC3",
+ "UMask": "0x1",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "CounterMask": "1",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC3",
"UMask": "0x4",
"BriefDescription": "Self-modifying code (SMC) detected.",
@@ -794,12 +1186,21 @@
},
{
"EventCode": "0xC4",
+ "UMask": "0x0",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Branch instructions at retirement.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
"UMask": "0x1",
"BriefDescription": "Conditional branch instructions retired.",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
- "PublicDescription": "Counts the number of conditional branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -815,13 +1216,23 @@
},
{
"EventCode": "0xC4",
- "UMask": "0x0",
+ "UMask": "0x2",
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x4",
"BriefDescription": "All (macro) branch instructions retired.",
+ "PEBS": "2",
"Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "PublicDescription": "Branch instructions at retirement.",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
"SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xC4",
@@ -830,7 +1241,6 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "PublicDescription": "Counts the number of near return instructions retired.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -851,7 +1261,6 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Number of near taken branches retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -866,14 +1275,14 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC4",
- "UMask": "0x4",
- "BriefDescription": "All (macro) branch instructions retired.",
- "PEBS": "2",
+ "EventCode": "0xC5",
+ "UMask": "0x0",
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
"Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Mispredicted branch instructions at retirement.",
"SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC5",
@@ -887,18 +1296,8 @@
},
{
"EventCode": "0xC5",
- "UMask": "0x0",
- "BriefDescription": "All mispredicted macro branch instructions retired.",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "PublicDescription": "Mispredicted branch instructions at retirement.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC5",
"UMask": "0x4",
- "BriefDescription": "Mispredicted macro branch instructions retired. ",
+ "BriefDescription": "Mispredicted macro branch instructions retired.",
"PEBS": "2",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
@@ -907,171 +1306,24 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xCC",
- "UMask": "0x20",
- "BriefDescription": "Count cases of saving new LBR",
- "Counter": "0,1,2,3",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
- "PublicDescription": "Count cases of saving new LBR records by hardware.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x0",
- "BriefDescription": "Thread cycles when thread is not in halt state",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "PublicDescription": "Counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x89",
- "UMask": "0xa0",
- "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x1",
- "BriefDescription": "Cycles per core when uops are exectuted in port 0.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x2",
- "BriefDescription": "Cycles per core when uops are exectuted in port 1.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x4",
- "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x8",
- "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x10",
- "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x20",
- "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x40",
- "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x80",
- "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0xC5",
"UMask": "0x20",
"BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Number of near branch instructions retired that were taken but mispredicted.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
- "CounterMask": "1",
- "Errata": "HSD144, HSD30, HSM31",
- "PublicDescription": "This events counts the cycles where at least one uop was executed. It is counted per thread.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
- "CounterMask": "2",
- "Errata": "HSD144, HSD30, HSM31",
- "PublicDescription": "This events counts the cycles where at least two uop were executed. It is counted per thread.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
- "CounterMask": "3",
- "Errata": "HSD144, HSD30, HSM31",
- "PublicDescription": "This events counts the cycles where at least three uop were executed. It is counted per thread.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "EventCode": "0xCC",
+ "UMask": "0x20",
+ "BriefDescription": "Count cases of saving new LBR",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
- "CounterMask": "4",
- "Errata": "HSD144, HSD30, HSM31",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "PublicDescription": "Count cases of saving new LBR records by hardware.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xe6",
@@ -1082,248 +1334,5 @@
"PublicDescription": "Number of front end re-steers due to BPU misprediction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EdgeDetect": "1",
- "EventCode": "0xC3",
- "UMask": "0x1",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "Counter": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.COUNT",
- "CounterMask": "1",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA8",
- "UMask": "0x1",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "Counter": "0,1,2,3",
- "EventName": "LSD.CYCLES_ACTIVE",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA8",
- "UMask": "0x1",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "Counter": "0,1,2,3",
- "EventName": "LSD.CYCLES_4_UOPS",
- "CounterMask": "4",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EdgeDetect": "1",
- "Invert": "1",
- "EventCode": "0x5E",
- "UMask": "0x1",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "Counter": "0,1,2,3",
- "EventName": "RS_EVENTS.EMPTY_END",
- "CounterMask": "1",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x1",
- "BriefDescription": "Cycles per thread when uops are executed in port 0.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x2",
- "BriefDescription": "Cycles per thread when uops are executed in port 1.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x4",
- "BriefDescription": "Cycles per thread when uops are executed in port 2.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x8",
- "BriefDescription": "Cycles per thread when uops are executed in port 3.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x10",
- "BriefDescription": "Cycles per thread when uops are executed in port 4.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x20",
- "BriefDescription": "Cycles per thread when uops are executed in port 5.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x40",
- "BriefDescription": "Cycles per thread when uops are executed in port 6.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x80",
- "BriefDescription": "Cycles per thread when uops are executed in port 7.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x00",
- "UMask": "0x2",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "Fixed counter 2",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 2"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x0",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "AnyThread": "1",
- "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "UMask": "0x3",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke)",
- "Counter": "0,1,2,3",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "AnyThread": "1",
- "CounterMask": "1",
- "PublicDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
- "CounterMask": "1",
- "Errata": "HSD30, HSM31",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "CounterMask": "2",
- "Errata": "HSD30, HSM31",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
- "CounterMask": "3",
- "Errata": "HSD30, HSM31",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "CounterMask": "4",
- "Errata": "HSD30, HSM31",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "Invert": "1",
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
- "Errata": "HSD30, HSM31",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "AnyThread": "1",
- "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x2",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json b/tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json
index 9c00f8ef6a07..168df552b1a8 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json
@@ -40,6 +40,16 @@
},
{
"EventCode": "0x08",
+ "UMask": "0xe",
+    "BriefDescription": "Demand load miss in all translation lookaside buffer (TLB) levels that causes a completed page walk of any page size.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Completed page walks in any TLB of any page size due to demand load misses.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
"UMask": "0x10",
"BriefDescription": "Cycles when PMH is busy with page walks",
"Counter": "0,1,2,3",
@@ -70,6 +80,16 @@
},
{
"EventCode": "0x08",
+ "UMask": "0x60",
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "PublicDescription": "Number of cache load STLB hits. No page walk.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
"UMask": "0x80",
"BriefDescription": "DTLB demand load misses with low part of linear-to-physical address translation missed",
"Counter": "0,1,2,3",
@@ -119,6 +139,16 @@
},
{
"EventCode": "0x49",
+ "UMask": "0xe",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Completed page walks due to store miss in any TLB levels of any page size (4K/2M/4M/1G).",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
"UMask": "0x10",
"BriefDescription": "Cycles when PMH is busy with page walks",
"Counter": "0,1,2,3",
@@ -149,6 +179,16 @@
},
{
"EventCode": "0x49",
+ "UMask": "0x60",
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "PublicDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
"UMask": "0x80",
"BriefDescription": "DTLB store misses with low part of linear-to-physical address translation missed",
"Counter": "0,1,2,3",
@@ -207,6 +247,16 @@
},
{
"EventCode": "0x85",
+ "UMask": "0xe",
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Completed page walks in ITLB of any page size.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
"UMask": "0x10",
"BriefDescription": "Cycles when PMH is busy with page walks",
"Counter": "0,1,2,3",
@@ -236,6 +286,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x85",
+ "UMask": "0x60",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "PublicDescription": "ITLB misses that hit STLB. No page walk.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xae",
"UMask": "0x1",
"BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
@@ -257,39 +317,43 @@
},
{
"EventCode": "0xBC",
- "UMask": "0x21",
- "BriefDescription": "Number of ITLB page walker hits in the L1+FB",
+ "UMask": "0x12",
+ "BriefDescription": "Number of DTLB page walker hits in the L2",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
- "PublicDescription": "Number of ITLB page walker loads that hit in the L1+FB.",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
+ "PublicDescription": "Number of DTLB page walker loads that hit in the L2.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x41",
- "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L1 and FB.",
+ "UMask": "0x14",
+ "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L1",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
+ "Errata": "HSD25",
+ "PublicDescription": "Number of DTLB page walker loads that hit in the L3.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x81",
- "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L1 and FB.",
+ "UMask": "0x18",
+ "BriefDescription": "Number of DTLB page walker hits in Memory",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L1",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
+ "Errata": "HSD25",
+ "PublicDescription": "Number of DTLB page walker loads from memory.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x12",
- "BriefDescription": "Number of DTLB page walker hits in the L2",
+ "UMask": "0x21",
+ "BriefDescription": "Number of ITLB page walker hits in the L1+FB",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
- "PublicDescription": "Number of DTLB page walker loads that hit in the L2.",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
+ "PublicDescription": "Number of ITLB page walker loads that hit in the L1+FB.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -305,41 +369,41 @@
},
{
"EventCode": "0xBC",
- "UMask": "0x42",
- "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L2.",
+ "UMask": "0x24",
+ "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L2",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
+ "Errata": "HSD25",
+ "PublicDescription": "Number of ITLB page walker loads that hit in the L3.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x82",
- "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
+ "UMask": "0x28",
+ "BriefDescription": "Number of ITLB page walker hits in Memory",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L2",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_MEMORY",
+ "Errata": "HSD25",
+ "PublicDescription": "Number of ITLB page walker loads from memory.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x14",
- "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP",
+ "UMask": "0x41",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L1 and FB.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
- "Errata": "HSD25",
- "PublicDescription": "Number of DTLB page walker loads that hit in the L3.",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x24",
- "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP",
+ "UMask": "0x42",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L2.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
- "Errata": "HSD25",
- "PublicDescription": "Number of ITLB page walker loads that hit in the L3.",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L2",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -354,41 +418,37 @@
},
{
"EventCode": "0xBC",
- "UMask": "0x84",
- "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
+ "UMask": "0x48",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in memory.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L3",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_MEMORY",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x18",
- "BriefDescription": "Number of DTLB page walker hits in Memory",
+ "UMask": "0x81",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L1 and FB.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
- "Errata": "HSD25",
- "PublicDescription": "Number of DTLB page walker loads from memory.",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x28",
- "BriefDescription": "Number of ITLB page walker hits in Memory",
+ "UMask": "0x82",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_MEMORY",
- "Errata": "HSD25",
- "PublicDescription": "Number of ITLB page walker loads from memory.",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L2",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x48",
- "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in memory.",
+ "UMask": "0x84",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_MEMORY",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L3",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -420,65 +480,5 @@
"PublicDescription": "Count number of STLB flush attempts.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x08",
- "UMask": "0xe",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
- "PublicDescription": "Completed page walks in any TLB of any page size due to demand load misses.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x08",
- "UMask": "0x60",
- "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
- "PublicDescription": "Number of cache load STLB hits. No page walk.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x49",
- "UMask": "0xe",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
- "PublicDescription": "Completed page walks due to store miss in any TLB levels of any page size (4K/2M/4M/1G).",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x49",
- "UMask": "0x60",
- "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
- "PublicDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "UMask": "0xe",
- "BriefDescription": "Misses in all ITLB levels that cause completed page walks",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
- "PublicDescription": "Completed page walks in ITLB of any page size.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "UMask": "0x60",
- "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.STLB_HIT",
- "PublicDescription": "ITLB misses that hit STLB. No page walk.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/cache.json b/tools/perf/pmu-events/arch/x86/ivybridge/cache.json
index f1ee6d4853c5..999a01bc6467 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/cache.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/cache.json
@@ -10,6 +10,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts any demand and L1 HW prefetch data load requests to L2.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "RFO requests that hit L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -30,6 +40,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts all L2 store RFO requests.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests to L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Number of instruction fetches that hit the L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -50,6 +70,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts all L2 code requests.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 code requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts all L2 HW prefetcher requests that hit L2.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -70,36 +100,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts any demand and L1 HW prefetch data load requests to L2.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts all L2 store RFO requests.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "L2_RQSTS.ALL_RFO",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests to L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts all L2 code requests.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "L2_RQSTS.ALL_CODE_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 code requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PublicDescription": "Counts all L2 HW prefetcher requests.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -219,6 +219,29 @@
"CounterHTOff": "2"
},
{
+ "PublicDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core",
+ "CounterMask": "1",
+ "CounterHTOff": "2"
+ },
+ {
+    "PublicDescription": "Cycles a demand request was blocked due to Fill Buffers unavailability.",
+ "EventCode": "0x48",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "SampleAfterValue": "2000003",
+    "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers unavailability",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts the number of lines brought into the L1 data cache.",
"EventCode": "0x51",
"Counter": "0,1,2,3",
@@ -239,76 +262,87 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding Demand Code Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "PublicDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding RFO store transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "PublicDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue",
+ "CounterMask": "6",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "PublicDescription": "Offcore outstanding Demand Code Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "PublicDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "PublicDescription": "Offcore outstanding RFO store transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
- "CounterMask": "1",
+ "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "PublicDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -379,7 +413,7 @@
"UMask": "0x11",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that miss the STLB.",
+ "BriefDescription": "Retired load uops that miss the STLB. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -389,7 +423,7 @@
"UMask": "0x12",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that miss the STLB.",
+ "BriefDescription": "Retired store uops that miss the STLB. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -399,7 +433,7 @@
"UMask": "0x21",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired load uops with locked access.",
+ "BriefDescription": "Retired load uops with locked access. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -409,7 +443,7 @@
"UMask": "0x41",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -419,7 +453,7 @@
"UMask": "0x42",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -429,7 +463,7 @@
"UMask": "0x81",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"SampleAfterValue": "2000003",
- "BriefDescription": "All retired load uops.",
+ "BriefDescription": "All retired load uops. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -439,67 +473,61 @@
"UMask": "0x82",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"SampleAfterValue": "2000003",
- "BriefDescription": "All retired store uops.",
+ "BriefDescription": "All retired store uops. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops with L1 cache hits as data sources.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
"SampleAfterValue": "2000003",
- "BriefDescription": "Retired load uops with L1 cache hits as data sources. ",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops with L2 cache hits as data sources.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops with L2 cache hits as data sources. ",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source was LLC hit with no snoop required.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "MEM_LOAD_UOPS_RETIRED.LLC_HIT",
"SampleAfterValue": "50021",
- "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required. ",
+ "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source followed an L1 miss.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources following L1 data-cache miss",
+ "BriefDescription": "Retired load uops which data sources following L1 data-cache miss.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops that missed L2, excluding unknown sources.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x10",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
"SampleAfterValue": "50021",
- "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Retired load uops with L2 cache misses as data sources.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source is LLC miss.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x20",
@@ -510,61 +538,56 @@
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x40",
"EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready. ",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source was an on-package core cache LLC hit and cross-core snoop missed.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache. ",
+ "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source was an on-package LLC hit and cross-core snoop hits.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache. ",
+ "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source was an on-package core cache with HitM responses.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC. ",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source was LLC hit with no snoop required.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required. ",
+ "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required.",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "PublicDescription": "Retired load uops whose data source was local memory (cross-socket snoop not needed or missed).",
"EventCode": "0xD3",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -753,50 +776,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Retired load uops whose data source was local memory (cross-socket snoop not needed or missed).",
- "EventCode": "0xD3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load uops which data sources missed LLC but serviced from local dram.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core",
- "CounterMask": "1",
- "CounterHTOff": "2"
- },
- {
- "PublicDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0244",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/frontend.json b/tools/perf/pmu-events/arch/x86/ivybridge/frontend.json
index de72b84b3536..efaa949ead31 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/frontend.json
@@ -20,76 +20,45 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "IDQ.MS_MITE_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ from MS by either DSB or MITE. Set Cmask = 1 to count cycles.",
+ "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_UOPS",
+ "UMask": "0x4",
+ "EventName": "IDQ.MITE_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_CYCLES",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterMask": "1",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_CYCLES",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_CYCLES",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
- "CounterMask": "1",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -138,6 +107,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts cycles MITE is delivered four uops. Set Cmask = 4.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
@@ -160,6 +139,39 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ from MS by either DSB or MITE. Set Cmask = 1 to count cycles.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_SWITCHES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Number of uops delivered to IDQ from any path.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
@@ -206,7 +218,7 @@
"UMask": "0x1",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled ",
+ "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
"CounterHTOff": "0,1,2,3"
},
{
@@ -289,17 +301,5 @@
"SampleAfterValue": "2000003",
"BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_SWITCHES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/memory.json b/tools/perf/pmu-events/arch/x86/ivybridge/memory.json
index e1c6a1d4a4d5..a74d54f56192 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/memory.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/memory.json
@@ -39,18 +39,6 @@
},
{
"PEBS": "2",
- "EventCode": "0xCD",
- "Counter": "3",
- "UMask": "0x2",
- "EventName": "MEM_TRANS_RETIRED.PRECISE_STORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Sample stores and collect precise store operation via PEBS record. PMC3 only.",
- "PRECISE_STORE": "1",
- "TakenAlone": "1",
- "CounterHTOff": "3"
- },
- {
- "PEBS": "2",
"PublicDescription": "Loads with latency value being above 4.",
"EventCode": "0xCD",
"MSRValue": "0x4",
@@ -162,6 +150,18 @@
"CounterHTOff": "3"
},
{
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "Counter": "3",
+ "UMask": "0x2",
+ "EventName": "MEM_TRANS_RETIRED.PRECISE_STORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Sample stores and collect precise store operation via PEBS record. PMC3 only.",
+ "PRECISE_STORE": "1",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x300400244",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/other.json b/tools/perf/pmu-events/arch/x86/ivybridge/other.json
index 9c2dd0511a32..4eb83ee40412 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/other.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/other.json
@@ -10,16 +10,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
- "EventCode": "0x5C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPL_CYCLES.RING123",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PublicDescription": "Number of intervals between processor halts while thread is in ring 0.",
"EventCode": "0x5C",
"Counter": "0,1,2,3",
@@ -32,6 +22,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
+ "EventCode": "0x5C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPL_CYCLES.RING123",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles in which the L1D and L2 are locked, due to a UC lock or split lock.",
"EventCode": "0x63",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json b/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json
index 2145c28193f7..0afbfd95ea30 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json
@@ -1,30 +1,41 @@
[
{
"EventCode": "0x00",
- "Counter": "Fixed counter 1",
+ "Counter": "Fixed counter 0",
"UMask": "0x1",
"EventName": "INST_RETIRED.ANY",
"SampleAfterValue": "2000003",
"BriefDescription": "Instructions retired from execution.",
- "CounterHTOff": "Fixed counter 1"
+ "CounterHTOff": "Fixed counter 0"
},
{
"EventCode": "0x00",
- "Counter": "Fixed counter 2",
+ "Counter": "Fixed counter 1",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000003",
"BriefDescription": "Core cycles when the thread is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
},
{
+ "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"EventCode": "0x00",
- "Counter": "Fixed counter 3",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 2",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"SampleAfterValue": "2000003",
"BriefDescription": "Reference cycles when the core is not in halt state.",
- "CounterHTOff": "Fixed counter 3"
+ "CounterHTOff": "Fixed counter 2"
},
{
"PublicDescription": "Loads blocked by overlapping with store buffer that cannot be forwarded.",
@@ -78,6 +89,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "AnyThread": "1",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Increments each cycle the # of Uops issued by the RAT to RS. Set Cmask = 1, Inv = 1, Any= 1to count stalled cycles of this core.",
"EventCode": "0x0E",
"Counter": "0,1,2,3",
@@ -175,6 +197,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Increments at the frequency of XCLK (100 MHz) when not halted.",
"EventCode": "0x3C",
"Counter": "0,1,2,3",
@@ -187,6 +220,36 @@
{
"EventCode": "0x3C",
"Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "2000003",
@@ -194,6 +257,15 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for S/W prefetch.",
"EventCode": "0x4C",
"Counter": "0,1,2,3",
@@ -216,37 +288,37 @@
{
"EventCode": "0x58",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
+ "UMask": "0x1",
+ "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
"SampleAfterValue": "1000003",
- "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x58",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
+ "UMask": "0x2",
+ "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
"SampleAfterValue": "1000003",
- "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x58",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
+ "UMask": "0x4",
+ "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
"SampleAfterValue": "1000003",
- "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x58",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
+ "UMask": "0x8",
+ "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
"SampleAfterValue": "1000003",
- "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -260,6 +332,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x5E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x87",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -498,118 +582,118 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 1.",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 0.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 0",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 4.",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 1.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "UMask": "0x2",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 4",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 5.",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 1.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 5",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 0.",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 2.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
+ "UMask": "0xc",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 0",
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 1.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "UMask": "0xc",
"AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 1",
+ "BriefDescription": "Uops dispatched to port 2, loads and stores per core (speculative and retired).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 4.",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 3.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
+ "UMask": "0x30",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 4",
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 5.",
+ "PublicDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x80",
+ "UMask": "0x30",
"AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 5",
+ "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 2.",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 4.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "UMask": "0x40",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 3.",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 4.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "UMask": "0x40",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles which a Uop is dispatched on port 5.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0xc",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
+ "UMask": "0x80",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops dispatched to port 2, loads and stores per core (speculative and retired).",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 5",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 5.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x30",
+ "UMask": "0x80",
"AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 5",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -662,15 +746,14 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles with pending L1 cache miss loads. Set AnyThread to count per core.",
"EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with pending L1 cache miss loads.",
- "CounterMask": "8",
- "CounterHTOff": "2"
+ "BriefDescription": "Cycles while L2 cache miss load* is outstanding.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PublicDescription": "Cycles with pending memory loads. Set AnyThread to count per core.",
@@ -684,13 +767,33 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"PublicDescription": "Total execution stalls.",
"EventCode": "0xA3",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Total execution stalls",
+ "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Total execution stalls.",
"CounterMask": "4",
"CounterHTOff": "0,1,2,3"
},
@@ -708,6 +811,16 @@
{
"EventCode": "0xA3",
"Counter": "0,1,2,3",
+ "UMask": "0x5",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L2 cache miss load* is outstanding.",
+ "CounterMask": "5",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
"UMask": "0x6",
"EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
"SampleAfterValue": "2000003",
@@ -716,6 +829,37 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x6",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Cycles with pending L1 cache miss loads. Set AnyThread to count per core.",
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with pending L1 cache miss loads.",
+ "CounterMask": "8",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "CounterHTOff": "2"
+ },
+ {
"PublicDescription": "Execution stalls due to L1 data cache miss loads. Set Cmask=0CH.",
"EventCode": "0xA3",
"Counter": "2",
@@ -727,6 +871,16 @@
"CounterHTOff": "2"
},
{
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0xc",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
+ "CounterHTOff": "2"
+ },
+ {
"EventCode": "0xA8",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -747,6 +901,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts total number of uops to be executed per-thread each cycle. Set Cmask = 1, INV =1 to count stall cycles.",
"EventCode": "0xB1",
"Counter": "0,1,2,3",
@@ -757,6 +922,61 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
"EventCode": "0xB1",
"Counter": "0,1,2,3",
@@ -767,15 +987,59 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
"EventCode": "0xB1",
- "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PublicDescription": "Number of instructions at retirement.",
@@ -809,24 +1073,12 @@
},
{
"PEBS": "1",
- "PublicDescription": "Counts the number of micro-ops retired, Use cmask=1 and invert to count active cycles or stalled cycles.",
"EventCode": "0xC2",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "UOPS_RETIRED.ALL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Actually retired uops. ",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Counts the number of retirement slots used each cycle.",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Retirement slots used. ",
+ "BriefDescription": "Retired uops.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -864,6 +1116,27 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Retirement slots used.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Number of self-modifying-code machine clears detected.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
@@ -880,50 +1153,67 @@
"UMask": "0x20",
"EventName": "MACHINE_CLEARS.MASKMOV",
"SampleAfterValue": "100003",
- "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0. ",
+ "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Branch instructions at retirement.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PEBS": "1",
- "PublicDescription": "Counts the number of conditional branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
"SampleAfterValue": "400009",
- "BriefDescription": "Conditional branch instructions retired. ",
+ "BriefDescription": "Conditional branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PEBS": "1",
- "PublicDescription": "Direct and indirect near call instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect near call instructions retired. ",
+ "BriefDescription": "Direct and indirect near call instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Branch instructions at retirement.",
+ "PEBS": "1",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "UMask": "0x2",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
"SampleAfterValue": "400009",
"BriefDescription": "All (macro) branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Counts the number of near return instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
"SampleAfterValue": "100007",
- "BriefDescription": "Return instructions retired. ",
+ "BriefDescription": "Return instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -933,18 +1223,17 @@
"UMask": "0x10",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
"SampleAfterValue": "400009",
- "BriefDescription": "Not taken branch instructions retired. ",
+ "BriefDescription": "Not taken branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PEBS": "1",
- "PublicDescription": "Number of near taken branches retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x20",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"SampleAfterValue": "400009",
- "BriefDescription": "Taken branch instructions retired. ",
+ "BriefDescription": "Taken branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -954,28 +1243,7 @@
"UMask": "0x40",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"SampleAfterValue": "100007",
- "BriefDescription": "Far branch instructions retired. ",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PEBS": "2",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Mispredicted conditional branch instructions retired.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted conditional branch instructions retired. ",
+ "BriefDescription": "Far branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -990,13 +1258,12 @@
},
{
"PEBS": "1",
- "PublicDescription": "Mispredicted taken branch instructions retired.",
"EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "UMask": "0x1",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
"SampleAfterValue": "400009",
- "BriefDescription": "number of near branch instructions retired that were mispredicted and taken. ",
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -1010,6 +1277,16 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Count cases of saving new LBR records by hardware.",
"EventCode": "0xCC",
"Counter": "0,1,2,3",
@@ -1028,280 +1305,5 @@
"SampleAfterValue": "100003",
"BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "RS_EVENTS.EMPTY_END",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "MACHINE_CLEARS.COUNT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_4_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "CounterMask": "8",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L2 cache miss load* is outstanding.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Total execution stalls.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0xc",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "CounterMask": "12",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x5",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L2 cache miss load* is outstanding.",
- "CounterMask": "5",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x6",
- "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "EventCode": "0x00",
- "Counter": "Fixed counter 2",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
- "CounterHTOff": "Fixed counter 2"
- },
- {
- "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "AnyThread": "1",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json b/tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json
index f036f5398906..f243551b4d12 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json
@@ -1,5 +1,35 @@
[
{
+ "PublicDescription": "Misses in all TLB levels that cause a page walk of any page size from demand loads.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x81",
+ "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes an page walk of any page size.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Misses in all TLB levels that caused page walk completed of any size by demand loads.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x82",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycle PMH is busy with a walk due to demand loads.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x84",
+ "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Demand load cycles page miss handler (PMH) is busy with this walk.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x88",
@@ -146,35 +176,5 @@
"SampleAfterValue": "100007",
"BriefDescription": "STLB flush attempts",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Misses in all TLB levels that cause a page walk of any page size from demand loads.",
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes an page walk of any page size.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Misses in all TLB levels that caused page walk completed of any size by demand loads.",
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycle PMH is busy with a walk due to demand loads.",
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x84",
- "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Demand load cycles page miss handler (PMH) is busy with this walk.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/cache.json b/tools/perf/pmu-events/arch/x86/ivytown/cache.json
index ff27a620edd8..6dad3ad6b102 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/cache.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/cache.json
@@ -10,6 +10,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts any demand and L1 HW prefetch data load requests to L2.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "RFO requests that hit L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -30,6 +40,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts all L2 store RFO requests.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests to L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Number of instruction fetches that hit the L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -50,6 +70,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts all L2 code requests.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 code requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts all L2 HW prefetcher requests that hit L2.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -70,36 +100,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts any demand and L1 HW prefetch data load requests to L2.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts all L2 store RFO requests.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "L2_RQSTS.ALL_RFO",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests to L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts all L2 code requests.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "L2_RQSTS.ALL_CODE_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 code requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PublicDescription": "Counts all L2 HW prefetcher requests.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -219,6 +219,29 @@
"CounterHTOff": "2"
},
{
+ "PublicDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core",
+ "CounterMask": "1",
+ "CounterHTOff": "2"
+ },
+ {
+ "PublicDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
+ "EventCode": "0x48",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts the number of lines brought into the L1 data cache.",
"EventCode": "0x51",
"Counter": "0,1,2,3",
@@ -239,76 +262,87 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding Demand Code Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "PublicDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding RFO store transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "PublicDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue",
+ "CounterMask": "6",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "PublicDescription": "Offcore outstanding Demand Code Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "PublicDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "PublicDescription": "Offcore outstanding RFO store transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
- "CounterMask": "1",
+ "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "PublicDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -379,7 +413,7 @@
"UMask": "0x11",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that miss the STLB.",
+ "BriefDescription": "Retired load uops that miss the STLB. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -389,7 +423,7 @@
"UMask": "0x12",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that miss the STLB.",
+ "BriefDescription": "Retired store uops that miss the STLB. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -399,7 +433,7 @@
"UMask": "0x21",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired load uops with locked access.",
+ "BriefDescription": "Retired load uops with locked access. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -409,7 +443,7 @@
"UMask": "0x41",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -419,7 +453,7 @@
"UMask": "0x42",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -429,7 +463,7 @@
"UMask": "0x81",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"SampleAfterValue": "2000003",
- "BriefDescription": "All retired load uops.",
+ "BriefDescription": "All retired load uops. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -439,67 +473,61 @@
"UMask": "0x82",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"SampleAfterValue": "2000003",
- "BriefDescription": "All retired store uops.",
+ "BriefDescription": "All retired store uops. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops with L1 cache hits as data sources.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
"SampleAfterValue": "2000003",
- "BriefDescription": "Retired load uops with L1 cache hits as data sources. ",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops with L2 cache hits as data sources.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops with L2 cache hits as data sources. ",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source was LLC hit with no snoop required.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "MEM_LOAD_UOPS_RETIRED.LLC_HIT",
"SampleAfterValue": "50021",
- "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required. ",
+ "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source followed an L1 miss.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources following L1 data-cache miss",
+ "BriefDescription": "Retired load uops which data sources following L1 data-cache miss.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops that missed L2, excluding unknown sources.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x10",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
"SampleAfterValue": "50021",
- "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Retired load uops with L2 cache misses as data sources.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source is LLC miss.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x20",
@@ -510,67 +538,61 @@
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x40",
"EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready. ",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source was an on-package core cache LLC hit and cross-core snoop missed.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache. ",
+ "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source was an on-package LLC hit and cross-core snoop hits.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache. ",
+ "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source was an on-package core cache with HitM responses.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC. ",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source was LLC hit with no snoop required.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required. ",
+ "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required.",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI)",
"EventCode": "0xD3",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "UMask": "0x3",
"EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired load uops which data sources missed LLC but serviced from local dram.",
+ "BriefDescription": "Retired load uops whose data source was local DRAM (Snoop not needed, Snoop Miss, or Snoop Hit data not forwarded).",
"CounterHTOff": "0,1,2,3"
},
{
@@ -780,40 +802,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core",
- "CounterMask": "1",
- "CounterHTOff": "2"
- },
- {
- "PublicDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x4003c0091",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/frontend.json b/tools/perf/pmu-events/arch/x86/ivytown/frontend.json
index de72b84b3536..efaa949ead31 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/frontend.json
@@ -20,76 +20,45 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "IDQ.MS_MITE_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ from MS by either DSB or MITE. Set Cmask = 1 to count cycles.",
+ "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_UOPS",
+ "UMask": "0x4",
+ "EventName": "IDQ.MITE_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_CYCLES",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterMask": "1",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_CYCLES",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_CYCLES",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
- "CounterMask": "1",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -138,6 +107,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts cycles MITE is delivered four uops. Set Cmask = 4.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
@@ -160,6 +139,39 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ from MS by either DSB or MITE. Set Cmask = 1 to count cycles.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_SWITCHES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Number of uops delivered to IDQ from any path.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
@@ -206,7 +218,7 @@
"UMask": "0x1",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled ",
+ "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
"CounterHTOff": "0,1,2,3"
},
{
@@ -289,17 +301,5 @@
"SampleAfterValue": "2000003",
"BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_SWITCHES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/memory.json b/tools/perf/pmu-events/arch/x86/ivytown/memory.json
index 437d98f3e344..3a7b86af8816 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/memory.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/memory.json
@@ -30,18 +30,6 @@
},
{
"PEBS": "2",
- "EventCode": "0xCD",
- "Counter": "3",
- "UMask": "0x2",
- "EventName": "MEM_TRANS_RETIRED.PRECISE_STORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Sample stores and collect precise store operation via PEBS record. PMC3 only.",
- "PRECISE_STORE": "1",
- "TakenAlone": "1",
- "CounterHTOff": "3"
- },
- {
- "PEBS": "2",
"PublicDescription": "Loads with latency value being above 4.",
"EventCode": "0xCD",
"MSRValue": "0x4",
@@ -153,6 +141,18 @@
"CounterHTOff": "3"
},
{
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "Counter": "3",
+ "UMask": "0x2",
+ "EventName": "MEM_TRANS_RETIRED.PRECISE_STORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Sample stores and collect precise store operation via PEBS record. PMC3 only.",
+ "PRECISE_STORE": "1",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00244",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/other.json b/tools/perf/pmu-events/arch/x86/ivytown/other.json
index 9c2dd0511a32..4eb83ee40412 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/other.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/other.json
@@ -10,16 +10,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
- "EventCode": "0x5C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPL_CYCLES.RING123",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PublicDescription": "Number of intervals between processor halts while thread is in ring 0.",
"EventCode": "0x5C",
"Counter": "0,1,2,3",
@@ -32,6 +22,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
+ "EventCode": "0x5C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPL_CYCLES.RING123",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles in which the L1D and L2 are locked, due to a UC lock or split lock.",
"EventCode": "0x63",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json b/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json
index 2145c28193f7..0afbfd95ea30 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json
@@ -1,30 +1,41 @@
[
{
"EventCode": "0x00",
- "Counter": "Fixed counter 1",
+ "Counter": "Fixed counter 0",
"UMask": "0x1",
"EventName": "INST_RETIRED.ANY",
"SampleAfterValue": "2000003",
"BriefDescription": "Instructions retired from execution.",
- "CounterHTOff": "Fixed counter 1"
+ "CounterHTOff": "Fixed counter 0"
},
{
"EventCode": "0x00",
- "Counter": "Fixed counter 2",
+ "Counter": "Fixed counter 1",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000003",
"BriefDescription": "Core cycles when the thread is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
},
{
+ "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"EventCode": "0x00",
- "Counter": "Fixed counter 3",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 2",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"SampleAfterValue": "2000003",
"BriefDescription": "Reference cycles when the core is not in halt state.",
- "CounterHTOff": "Fixed counter 3"
+ "CounterHTOff": "Fixed counter 2"
},
{
"PublicDescription": "Loads blocked by overlapping with store buffer that cannot be forwarded.",
@@ -78,6 +89,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "AnyThread": "1",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Increments each cycle the # of Uops issued by the RAT to RS. Set Cmask = 1, Inv = 1, Any= 1to count stalled cycles of this core.",
"EventCode": "0x0E",
"Counter": "0,1,2,3",
@@ -175,6 +197,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Increments at the frequency of XCLK (100 MHz) when not halted.",
"EventCode": "0x3C",
"Counter": "0,1,2,3",
@@ -187,6 +220,36 @@
{
"EventCode": "0x3C",
"Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "2000003",
@@ -194,6 +257,15 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for S/W prefetch.",
"EventCode": "0x4C",
"Counter": "0,1,2,3",
@@ -216,37 +288,37 @@
{
"EventCode": "0x58",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
+ "UMask": "0x1",
+ "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
"SampleAfterValue": "1000003",
- "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x58",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
+ "UMask": "0x2",
+ "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
"SampleAfterValue": "1000003",
- "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x58",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
+ "UMask": "0x4",
+ "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
"SampleAfterValue": "1000003",
- "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x58",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
+ "UMask": "0x8",
+ "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
"SampleAfterValue": "1000003",
- "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -260,6 +332,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x5E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x87",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -498,118 +582,118 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 1.",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 0.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 0",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 4.",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 1.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "UMask": "0x2",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 4",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 5.",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 1.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 5",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 0.",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 2.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
+ "UMask": "0xc",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 0",
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 1.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "UMask": "0xc",
"AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 1",
+ "BriefDescription": "Uops dispatched to port 2, loads and stores per core (speculative and retired).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 4.",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 3.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
+ "UMask": "0x30",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 4",
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 5.",
+ "PublicDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x80",
+ "UMask": "0x30",
"AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 5",
+ "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 2.",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 4.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "UMask": "0x40",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 3.",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 4.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "UMask": "0x40",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles which a Uop is dispatched on port 5.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0xc",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
+ "UMask": "0x80",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops dispatched to port 2, loads and stores per core (speculative and retired).",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 5",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 5.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x30",
+ "UMask": "0x80",
"AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 5",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -662,15 +746,14 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles with pending L1 cache miss loads. Set AnyThread to count per core.",
"EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with pending L1 cache miss loads.",
- "CounterMask": "8",
- "CounterHTOff": "2"
+ "BriefDescription": "Cycles while L2 cache miss load* is outstanding.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PublicDescription": "Cycles with pending memory loads. Set AnyThread to count per core.",
@@ -684,13 +767,33 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"PublicDescription": "Total execution stalls.",
"EventCode": "0xA3",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Total execution stalls",
+ "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Total execution stalls.",
"CounterMask": "4",
"CounterHTOff": "0,1,2,3"
},
@@ -708,6 +811,16 @@
{
"EventCode": "0xA3",
"Counter": "0,1,2,3",
+ "UMask": "0x5",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L2 cache miss load* is outstanding.",
+ "CounterMask": "5",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
"UMask": "0x6",
"EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
"SampleAfterValue": "2000003",
@@ -716,6 +829,37 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x6",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Cycles with pending L1 cache miss loads. Set AnyThread to count per core.",
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with pending L1 cache miss loads.",
+ "CounterMask": "8",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "CounterHTOff": "2"
+ },
+ {
"PublicDescription": "Execution stalls due to L1 data cache miss loads. Set Cmask=0CH.",
"EventCode": "0xA3",
"Counter": "2",
@@ -727,6 +871,16 @@
"CounterHTOff": "2"
},
{
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0xc",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
+ "CounterHTOff": "2"
+ },
+ {
"EventCode": "0xA8",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -747,6 +901,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts total number of uops to be executed per-thread each cycle. Set Cmask = 1, INV =1 to count stall cycles.",
"EventCode": "0xB1",
"Counter": "0,1,2,3",
@@ -757,6 +922,61 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
"EventCode": "0xB1",
"Counter": "0,1,2,3",
@@ -767,15 +987,59 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
"EventCode": "0xB1",
- "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PublicDescription": "Number of instructions at retirement.",
@@ -809,24 +1073,12 @@
},
{
"PEBS": "1",
- "PublicDescription": "Counts the number of micro-ops retired, Use cmask=1 and invert to count active cycles or stalled cycles.",
"EventCode": "0xC2",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "UOPS_RETIRED.ALL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Actually retired uops. ",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Counts the number of retirement slots used each cycle.",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Retirement slots used. ",
+ "BriefDescription": "Retired uops.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -864,6 +1116,27 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Retirement slots used.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Number of self-modifying-code machine clears detected.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
@@ -880,50 +1153,67 @@
"UMask": "0x20",
"EventName": "MACHINE_CLEARS.MASKMOV",
"SampleAfterValue": "100003",
- "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0. ",
+ "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Branch instructions at retirement.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PEBS": "1",
- "PublicDescription": "Counts the number of conditional branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
"SampleAfterValue": "400009",
- "BriefDescription": "Conditional branch instructions retired. ",
+ "BriefDescription": "Conditional branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PEBS": "1",
- "PublicDescription": "Direct and indirect near call instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect near call instructions retired. ",
+ "BriefDescription": "Direct and indirect near call instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Branch instructions at retirement.",
+ "PEBS": "1",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "UMask": "0x2",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
"SampleAfterValue": "400009",
"BriefDescription": "All (macro) branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Counts the number of near return instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
"SampleAfterValue": "100007",
- "BriefDescription": "Return instructions retired. ",
+ "BriefDescription": "Return instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -933,18 +1223,17 @@
"UMask": "0x10",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
"SampleAfterValue": "400009",
- "BriefDescription": "Not taken branch instructions retired. ",
+ "BriefDescription": "Not taken branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PEBS": "1",
- "PublicDescription": "Number of near taken branches retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x20",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"SampleAfterValue": "400009",
- "BriefDescription": "Taken branch instructions retired. ",
+ "BriefDescription": "Taken branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -954,28 +1243,7 @@
"UMask": "0x40",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"SampleAfterValue": "100007",
- "BriefDescription": "Far branch instructions retired. ",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PEBS": "2",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Mispredicted conditional branch instructions retired.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted conditional branch instructions retired. ",
+ "BriefDescription": "Far branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -990,13 +1258,12 @@
},
{
"PEBS": "1",
- "PublicDescription": "Mispredicted taken branch instructions retired.",
"EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "UMask": "0x1",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
"SampleAfterValue": "400009",
- "BriefDescription": "number of near branch instructions retired that were mispredicted and taken. ",
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -1010,6 +1277,16 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Count cases of saving new LBR records by hardware.",
"EventCode": "0xCC",
"Counter": "0,1,2,3",
@@ -1028,280 +1305,5 @@
"SampleAfterValue": "100003",
"BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "RS_EVENTS.EMPTY_END",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "MACHINE_CLEARS.COUNT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_4_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "CounterMask": "8",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L2 cache miss load* is outstanding.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Total execution stalls.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0xc",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "CounterMask": "12",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x5",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L2 cache miss load* is outstanding.",
- "CounterMask": "5",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x6",
- "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "EventCode": "0x00",
- "Counter": "Fixed counter 2",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
- "CounterHTOff": "Fixed counter 2"
- },
- {
- "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "AnyThread": "1",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
 ]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json b/tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json
index c8de548b78fa..4645e9d3f460 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json
@@ -1,5 +1,15 @@
[
{
+ "PublicDescription": "Misses in all TLB levels that cause a page walk of any page size from demand loads.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x81",
+ "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes an page walk of any page size.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x82",
@@ -9,6 +19,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Misses in all TLB levels that caused page walk completed of any size by demand loads.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x82",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x84",
@@ -18,6 +38,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycle PMH is busy with a walk due to demand loads.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x84",
+ "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Demand load cycles page miss handler (PMH) is busy with this walk.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x88",
@@ -164,35 +194,5 @@
"SampleAfterValue": "100007",
"BriefDescription": "STLB flush attempts",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Misses in all TLB levels that cause a page walk of any page size from demand loads.",
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes an page walk of any page size.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Misses in all TLB levels that caused page walk completed of any size by demand loads.",
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycle PMH is busy with a walk due to demand loads.",
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x84",
- "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Demand load cycles page miss handler (PMH) is busy with this walk.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
 ]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv
index fe1a2c47cabf..93656f2fd53a 100644
--- a/tools/perf/pmu-events/arch/x86/mapfile.csv
+++ b/tools/perf/pmu-events/arch/x86/mapfile.csv
@@ -23,10 +23,7 @@ GenuineIntel-6-1E,v2,nehalemep,core
GenuineIntel-6-1F,v2,nehalemep,core
GenuineIntel-6-1A,v2,nehalemep,core
GenuineIntel-6-2E,v2,nehalemex,core
-GenuineIntel-6-4E,v24,skylake,core
-GenuineIntel-6-5E,v24,skylake,core
-GenuineIntel-6-8E,v24,skylake,core
-GenuineIntel-6-9E,v24,skylake,core
+GenuineIntel-6-[4589]E,v24,skylake,core
GenuineIntel-6-37,v13,silvermont,core
GenuineIntel-6-4D,v13,silvermont,core
GenuineIntel-6-4C,v13,silvermont,core
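Note on the mapfile.csv hunk above: collapsing the four Skylake client rows into the single entry "GenuineIntel-6-[4589]E" relies on the CPUID column being matched as a pattern rather than compared as a literal string. A minimal sketch of that matching behaviour, assuming POSIX extended-regex semantics (this is an illustration only, not the in-tree jevents/perf code):

/* Illustration: one bracket-expression row covers all four Skylake
 * client model strings that were previously listed separately. */
#include <regex.h>
#include <stdio.h>

int main(void)
{
	const char *pattern = "GenuineIntel-6-[4589]E";
	const char *cpuids[] = {
		"GenuineIntel-6-4E", "GenuineIntel-6-5E",
		"GenuineIntel-6-8E", "GenuineIntel-6-9E",
		"GenuineIntel-6-55",	/* hypothetical non-Skylake id, should not match */
	};
	regex_t re;
	unsigned int i;

	if (regcomp(&re, pattern, REG_EXTENDED | REG_NOSUB))
		return 1;

	for (i = 0; i < sizeof(cpuids) / sizeof(cpuids[0]); i++)
		printf("%s: %s\n", cpuids[i],
		       regexec(&re, cpuids[i], 0, NULL, 0) ? "no match" : "match");

	regfree(&re);
	return 0;
}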
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/cache.json b/tools/perf/pmu-events/arch/x86/silvermont/cache.json
index 0bd1bc5302de..82be7d1b8b81 100644
--- a/tools/perf/pmu-events/arch/x86/silvermont/cache.json
+++ b/tools/perf/pmu-events/arch/x86/silvermont/cache.json
@@ -36,12 +36,13 @@
"BriefDescription": "L2 cache request misses"
},
{
+ "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ICache miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ICache miss. Note: this event is not the same as the total number of cycles spent retrieving instruction cache lines from the memory hierarchy.\r\nCounts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This will include cycles due to an ITLB miss, ICache miss and other events. \r\n",
"EventCode": "0x86",
"Counter": "0,1",
"UMask": "0x4",
"EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of cycles the NIP stalls because of an icache miss. This is a cumulative count of cycles the NIP stalled for all icache misses."
+ "BriefDescription": "Cycles code-fetch stalled due to an outstanding ICache miss."
},
{
"PEBS": "1",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/cache.json b/tools/perf/pmu-events/arch/x86/skylake/cache.json
index 0551a9ba865d..54bfe9e4045c 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/cache.json
@@ -1,442 +1,6 @@
[
{
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x11",
- "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions that miss the STLB.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x12",
- "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired store instructions that miss the STLB.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "L1_Hit_Indication": "1"
- },
- {
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x21",
- "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load instructions with locked access.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x42",
- "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "L1_Hit_Indication": "1"
- },
- {
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "MEM_INST_RETIRED.ALL_LOADS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "All retired load instructions.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "MEM_INST_RETIRED.ALL_STORES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "All retired store instructions.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "L1_Hit_Indication": "1"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Retired load instructions with L1 cache hits as data sources.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_LOAD_RETIRED.L1_HIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Retired load instructions with L2 cache hits as data sources.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_LOAD_RETIRED.L2_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Retired load instructions with L3 cache hits as data sources.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MEM_LOAD_RETIRED.L3_HIT",
- "SampleAfterValue": "50021",
- "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Retired load instructions missed L1 cache as data sources.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MEM_LOAD_RETIRED.L1_MISS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions missed L1 cache as data sources",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Retired load instructions missed L2 cache as data sources.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "MEM_LOAD_RETIRED.L2_MISS",
- "SampleAfterValue": "50021",
- "BriefDescription": "Retired load instructions missed L2 cache as data sources",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Retired load instructions missed L3 cache as data sources.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "MEM_LOAD_RETIRED.L3_MISS",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load instructions missed L3 cache as data sources",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Retired load instructions which data sources were load missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "MEM_LOAD_RETIRED.FB_HIT",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load instructions which data sources were load missed L1 but hit FB due to preceding miss to the same cache line with data not ready",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load instructions which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Retired load instructions which data sources were HitM responses from shared L3.",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load instructions which data sources were HitM responses from shared L3",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Retired load instructions which data sources were hits in L3 without snoops required.",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions which data sources were hits in L3 without snoops required",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "EventCode": "0xD4",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MEM_LOAD_MISC_RETIRED.UC",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PublicDescription": "This event counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
- "EventCode": "0x51",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L1D.REPLACEMENT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "L1D data line replacements",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand\n from the demand Hit FB, if it is allocated by hardware or software prefetch.\nNote: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L1D_PEND_MISS.PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "L1D miss outstandings duration in cycles",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times a request needed a FB entry but there was no entry available for it. That is the FB unavailability was dominant reason for blocking the request. A request includes cacheable/uncacheable demands that is load, store or SW prefetch.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts duration of L1D miss outstanding in cycles.",
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand Data Read requests sent to uncore",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts both cacheable and noncachaeble code read requests.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Cacheable and noncachaeble code read requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand and prefetch data reads",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, and so on.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Any memory transaction that reached the SQ.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.\nNote: A prefetch promoted to Demand is counted from the promotion point.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle. ",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of cases when the offcore requests buffer cannot take more entries for the core. This can happen when the superqueue does not contain eligible entries, or when L1D writeback pending FIFO requests is full.\nNote: Writeback pending FIFO has six entries.",
- "EventCode": "0xB2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts L2 writebacks that access L2 cache.",
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "L2_TRANS.L2_WB",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 writebacks that access L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts core-originated cacheable demand requests that miss the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
- "EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "Errata": "SKL057",
- "EventName": "LONGEST_LAT_CACHE.MISS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Core-originated cacheable demand requests missed L3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts core-originated cacheable demand requests that refer to the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
- "EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x4f",
- "Errata": "SKL057",
- "EventName": "LONGEST_LAT_CACHE.REFERENCE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Core-originated cacheable demand requests that refer to L3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of cache line split locks sent to the uncore.",
- "EventCode": "0xF4",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "SQ_MISC.SPLIT_LOCK",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of cache line split locks sent to uncore.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "EventCode": "0xB7, 0xBB",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "This event counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
+ "PublicDescription": "Counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
"UMask": "0x21",
@@ -446,122 +10,123 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "UMask": "0x22",
+ "EventName": "L2_RQSTS.RFO_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "BriefDescription": "RFO requests that miss L2 cache",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
+ "PublicDescription": "Counts L2 cache misses when fetching instructions.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0xe1",
- "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "UMask": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests",
+ "BriefDescription": "L2 cache misses when fetching instructions",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
+ "PublicDescription": "Demand requests that miss L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0xe2",
- "EventName": "L2_RQSTS.ALL_RFO",
+ "UMask": "0x27",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests to L2 cache",
+ "BriefDescription": "Demand requests that miss L2 cache",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the total number of L2 code requests.",
+ "PublicDescription": "Counts requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0xe4",
- "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "UMask": "0x38",
+ "EventName": "L2_RQSTS.PF_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 code requests",
+ "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the total number of requests from the L2 hardware prefetchers.",
+ "PublicDescription": "All requests that miss L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0xf8",
- "EventName": "L2_RQSTS.ALL_PF",
+ "UMask": "0x3f",
+ "EventName": "L2_RQSTS.MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches",
+ "BriefDescription": "All requests that miss L2 cache",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache.",
+ "PublicDescription": "Counts the number of demand Data Read requests that hit L2 cache. Only non rejected loads are counted.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x38",
- "EventName": "L2_RQSTS.PF_MISS",
+ "UMask": "0x41",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache.",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0xd8",
- "EventName": "L2_RQSTS.PF_HIT",
+ "UMask": "0x42",
+ "EventName": "L2_RQSTS.RFO_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache",
+ "BriefDescription": "RFO requests that hit L2 cache",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "RFO requests that hit L2 cache.",
+ "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x42",
- "EventName": "L2_RQSTS.RFO_HIT",
+ "UMask": "0x44",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that hit L2 cache",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "RFO requests that miss L2 cache.",
+ "PublicDescription": "Counts requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x22",
- "EventName": "L2_RQSTS.RFO_MISS",
+ "UMask": "0xd8",
+ "EventName": "L2_RQSTS.PF_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that miss L2 cache",
+ "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x44",
- "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "UMask": "0xe1",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "BriefDescription": "Demand Data Read requests",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "L2 cache misses when fetching instructions.",
+ "PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "UMask": "0xe2",
+ "EventName": "L2_RQSTS.ALL_RFO",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache misses when fetching instructions",
+ "BriefDescription": "RFO requests to L2 cache",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Demand requests that miss L2 cache.",
+ "PublicDescription": "Counts the total number of L2 code requests.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x27",
- "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "UMask": "0xe4",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand requests that miss L2 cache",
+ "BriefDescription": "L2 code requests",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -575,13 +140,13 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "All requests that miss L2 cache.",
+ "PublicDescription": "Counts the total number of requests from the L2 hardware prefetchers.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x3f",
- "EventName": "L2_RQSTS.MISS",
+ "UMask": "0xf8",
+ "EventName": "L2_RQSTS.ALL_PF",
"SampleAfterValue": "200003",
- "BriefDescription": "All requests that miss L2 cache",
+ "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -595,62 +160,45 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_LINES_OUT.SILENT",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L2_LINES_OUT.NON_SILENT",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines are in Modified state. Modified lines are written back to L3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache.",
- "EventCode": "0xF2",
+ "PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all misses to the L3.",
+ "EventCode": "0x2E",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_LINES_OUT.USELESS_PREF",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
+ "UMask": "0x41",
+ "Errata": "SKL057",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Core-originated cacheable demand requests missed L3",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
- "EventCode": "0xF1",
+ "PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all accesses to the L3.",
+ "EventCode": "0x2E",
"Counter": "0,1,2,3",
- "UMask": "0x1f",
- "EventName": "L2_LINES_IN.ALL",
+ "UMask": "0x4f",
+ "Errata": "SKL057",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
"SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines filling L2",
+ "BriefDescription": "Core-originated cacheable demand requests that refer to L3",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
+ "PublicDescription": "Counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch.Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "EventCode": "0x48",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
+ "UMask": "0x1",
+ "EventName": "L1D_PEND_MISS.PENDING",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore.",
- "CounterMask": "1",
+ "BriefDescription": "L1D miss outstandings duration in cycles",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
+ "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
+ "EventCode": "0x48",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "UMask": "0x1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.",
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -666,3121 +214,483 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x60",
+ "PublicDescription": "Number of times a request needed a FB (Fill Buffer) entry but there was no entry available for it. A request includes cacheable/uncacheable demands that are load, store or SW prefetch instructions.",
+ "EventCode": "0x48",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "UMask": "0x2",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_LINES_OUT.USELESS_HWPF",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
+ "BriefDescription": "Number of times a request needed a FB entry but there was no entry available for it. That is the FB unavailability was dominant reason for blocking the request. A request includes cacheable/uncacheable demands that is load, store or SW prefetch.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0408000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000408000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400408000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200408000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100408000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080408000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040408000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc01c8000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10001c8000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04001c8000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02001c8000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests that hit in the L3 and the snoops sent to sibling cores return clean response.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01001c8000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00801c8000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00401c8000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0108000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_S & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000108000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_S & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400108000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_S & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200108000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_S & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100108000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_S & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080108000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_S & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040108000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_S & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0088000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_E & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000088000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_E & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400088000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_E & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200088000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_E & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100088000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_E & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080088000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_E & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040088000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_E & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0048000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_M & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000048000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_M & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400048000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_M & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200048000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_M & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100048000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_M & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080048000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_M & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040048000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_M & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0028000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000028000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400028000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200028000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100028000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080028000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040028000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000018000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests that have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0400800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000400800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400400800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200400800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100400800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080400800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040400800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc01c0800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10001c0800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04001c0800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts streaming stores that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02001c0800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts streaming stores that hit in the L3 and the snoops sent to sibling cores return clean response.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01001c0800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts streaming stores that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00801c0800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00401c0800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0100800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_S & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000100800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_S & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400100800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_S & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200100800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_S & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100100800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_S & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080100800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_S & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040100800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_S & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0080800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_E & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000080800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_E & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400080800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_E & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200080800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_E & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100080800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_E & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080080800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_E & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040080800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_E & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0040800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_M & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000040800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_M & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400040800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_M & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200040800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_M & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100040800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_M & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080040800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_M & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040040800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_M & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0020800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040020800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts streaming stores that have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0400100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000400100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400400100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200400100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100400100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080400100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040400100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc01c0100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10001c0100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04001c0100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02001c0100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoops sent to sibling cores return clean response.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01001c0100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00801c0100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00401c0100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0100100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_S & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000100100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_S & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400100100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_S & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200100100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_S & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100100100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_S & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080100100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_S & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040100100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_S & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0080100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_E & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000080100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_E & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400080100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_E & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200080100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_E & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100080100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_E & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080080100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_E & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040080100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_E & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0040100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_M & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000040100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_M & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400040100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_M & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200040100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_M & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100040100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_M & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080040100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_M & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040040100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_M & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0020100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040020100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0400080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000400080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400400080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200400080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100400080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080400080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040400080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc01c0080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10001c0080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04001c0080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02001c0080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoops sent to sibling cores return clean response.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01001c0080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00801c0080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00401c0080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0100080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000100080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400100080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200100080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100100080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080100080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040100080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0080080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000080080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400080080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200080080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100080080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080080080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040080080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0040080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000040080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400040080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200040080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100040080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080040080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040040080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0020080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040020080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0400004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000400004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400400004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200400004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100400004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080400004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040400004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc01c0004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10001c0004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04001c0004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02001c0004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoops sent to sibling cores return clean response.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01001c0004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00801c0004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00401c0004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0100004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000100004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400100004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200100004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100100004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080100004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040100004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0080004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000080004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400080004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200080004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100080004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080080004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040080004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0040004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000040004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400040004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200040004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100040004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080040004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040040004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0020004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040020004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0400002 ",
+ "PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
+ "EventCode": "0x51",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "EventName": "L1D.REPLACEMENT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "L1D data line replacements",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000400002 ",
+ "PublicDescription": "Counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.Note: A prefetch promoted to Demand is counted from the promotion point.",
+ "EventCode": "0x60",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400400002 ",
+ "PublicDescription": "Counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "EventCode": "0x60",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200400002 ",
+ "EventCode": "0x60",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100400002 ",
+ "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080400002 ",
+ "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040400002 ",
+ "PublicDescription": "Counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc01c0002 ",
+ "PublicDescription": "Counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10001c0002 ",
+ "PublicDescription": "Counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04001c0002 ",
+ "PublicDescription": "Counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02001c0002 ",
+ "PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
+ "EventCode": "0xB0",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops sent to sibling cores return clean response.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01001c0002 ",
+ "PublicDescription": "Counts both cacheable and non-cacheable code read requests.",
+ "EventCode": "0xB0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Cacheable and noncachaeble code read requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00801c0002 ",
+ "PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
+ "EventCode": "0xB0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00401c0002 ",
+ "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "EventCode": "0xB0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Demand and prefetch data reads",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0100002 ",
+ "PublicDescription": "Counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, etc..",
+ "EventCode": "0xB0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x80",
+ "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_S & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Any memory transaction that reached the SQ.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000100002 ",
+ "PublicDescription": "Counts the number of cases when the offcore requests buffer cannot take more entries for the core. This can happen when the superqueue does not contain eligible entries, or when L1D writeback pending FIFO requests is full.Note: Writeback pending FIFO has six entries.",
+ "EventCode": "0xB2",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_S & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400100002 ",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_S & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction",
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200100002 ",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions that miss the STLB.",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x11",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_S & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Retired load instructions that miss the STLB. (Precise Event)",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100100002 ",
+ "PEBS": "1",
+ "PublicDescription": "Retired store instructions that miss the STLB.",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x12",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_S & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Retired store instructions that miss the STLB. (Precise Event)",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "L1_Hit_Indication": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080100002 ",
+ "PEBS": "1",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_S & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x21",
+ "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired load instructions with locked access. (Precise Event)",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040100002 ",
+ "PEBS": "1",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x41",
+ "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_S & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Retired load instructions that split across a cacheline boundary. (Precise Event)",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0080002 ",
+ "PEBS": "1",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x42",
+ "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_E & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Retired store instructions that split across a cacheline boundary. (Precise Event)",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "L1_Hit_Indication": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000080002 ",
+ "PEBS": "1",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_E & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x81",
+ "EventName": "MEM_INST_RETIRED.ALL_LOADS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "All retired load instructions. (Precise Event)",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400080002 ",
+ "PEBS": "1",
+ "PublicDescription": "All retired store instructions.",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_E & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x82",
+ "EventName": "MEM_INST_RETIRED.ALL_STORES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "All retired store instructions. (Precise Event)",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "L1_Hit_Indication": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200080002 ",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.\r\n",
+ "EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_E & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "EventName": "MEM_LOAD_RETIRED.L1_HIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100080002 ",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions with L2 cache hits as data sources.",
+ "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x2",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_E & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080080002 ",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions with L3 cache hits as data sources.",
+ "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_E & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4",
+ "EventName": "MEM_LOAD_RETIRED.L3_HIT",
+ "SampleAfterValue": "50021",
+ "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040080002 ",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
+ "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x8",
+ "EventName": "MEM_LOAD_RETIRED.L1_MISS",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_E & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Retired load instructions missed L1 cache as data sources",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0040002 ",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions missed L2 cache as data sources.",
+ "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_M & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10",
+ "EventName": "MEM_LOAD_RETIRED.L2_MISS",
+ "SampleAfterValue": "50021",
+ "BriefDescription": "Retired load instructions missed L2 cache as data sources",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000040002 ",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions missed L3 cache as data sources.",
+ "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_M & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x20",
+ "EventName": "MEM_LOAD_RETIRED.L3_MISS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400040002 ",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready. \r\n",
+ "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_M & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x40",
+ "EventName": "MEM_LOAD_RETIRED.FB_HIT",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired load instructions which data sources were load missed L1 but hit FB due to preceding miss to the same cache line with data not ready",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200040002 ",
+ "PEBS": "1",
+ "EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_M & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load instructions which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100040002 ",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "EventCode": "0xD2",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_M & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080040002 ",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions which data sources were HitM responses from shared L3.",
+ "EventCode": "0xD2",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_M & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load instructions which data sources were HitM responses from shared L3",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040040002 ",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions which data sources were hits in L3 without snoops required.",
+ "EventCode": "0xD2",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x8",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_M & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Retired load instructions which data sources were hits in L3 without snoops required",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0020002 ",
+ "PEBS": "1",
+ "EventCode": "0xD4",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4",
+ "EventName": "MEM_LOAD_MISC_RETIRED.UC",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020002 ",
+ "PublicDescription": "Counts L2 writebacks that access L2 cache.",
+ "EventCode": "0xF0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x40",
+ "EventName": "L2_TRANS.L2_WB",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020002 ",
+ "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
+ "EventCode": "0xF1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x1f",
+ "EventName": "L2_LINES_IN.ALL",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "L2 cache lines filling L2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020002 ",
+ "EventCode": "0xF2",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "EventName": "L2_LINES_OUT.SILENT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020002 ",
+ "EventCode": "0xF2",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2",
+ "EventName": "L2_LINES_OUT.NON_SILENT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines are in Modified state. Modified lines are written back to L3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020002 ",
+ "PublicDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache.",
+ "EventCode": "0xF2",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4",
+ "EventName": "L2_LINES_OUT.USELESS_PREF",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040020002 ",
+ "EventCode": "0xF2",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4",
+ "EventName": "L2_LINES_OUT.USELESS_HWPF",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010002 ",
+ "PublicDescription": "Counts the number of cache line split locks sent to the uncore.",
+ "EventCode": "0xF4",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x10",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Number of cache line split locks sent to uncore.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fc0400001 ",
"Counter": "0,1,2,3",
@@ -3793,6 +703,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000400001 ",
"Counter": "0,1,2,3",
@@ -3805,6 +716,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400400001 ",
"Counter": "0,1,2,3",
@@ -3817,6 +729,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200400001 ",
"Counter": "0,1,2,3",
@@ -3829,6 +742,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400001 ",
"Counter": "0,1,2,3",
@@ -3841,6 +755,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080400001 ",
"Counter": "0,1,2,3",
@@ -3853,18 +768,7 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040400001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fc01c0001 ",
"Counter": "0,1,2,3",
@@ -3877,6 +781,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10001c0001 ",
"Counter": "0,1,2,3",
@@ -3889,6 +794,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04001c0001 ",
"Counter": "0,1,2,3",
@@ -3901,6 +807,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops sent to sibling cores return clean response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02001c0001 ",
"Counter": "0,1,2,3",
@@ -3913,6 +820,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01001c0001 ",
"Counter": "0,1,2,3",
@@ -3925,6 +833,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00801c0001 ",
"Counter": "0,1,2,3",
@@ -3937,270 +846,7 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00401c0001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0100001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000100001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400100001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200100001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100100001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080100001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040100001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0080001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000080001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400080001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200080001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100080001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080080001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040080001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0040001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000040001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400040001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200040001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100040001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080040001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040040001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fc0020001 ",
"Counter": "0,1,2,3",
@@ -4213,6 +859,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020001 ",
"Counter": "0,1,2,3",
@@ -4225,6 +872,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020001 ",
"Counter": "0,1,2,3",
@@ -4237,6 +885,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020001 ",
"Counter": "0,1,2,3",
@@ -4249,6 +898,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020001 ",
"Counter": "0,1,2,3",
@@ -4261,6 +911,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020001 ",
"Counter": "0,1,2,3",
@@ -4273,18 +924,7 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040020001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "PublicDescription": "Counts demand data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010001 ",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/floating-point.json b/tools/perf/pmu-events/arch/x86/skylake/floating-point.json
index 3c6b59af5d54..213dd6230cf2 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/floating-point.json
@@ -27,13 +27,12 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"EventCode": "0xC7",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ",
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -55,7 +54,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.",
+ "PublicDescription": "Counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x1e",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/frontend.json b/tools/perf/pmu-events/arch/x86/skylake/frontend.json
index e697dbd63e6e..578dff5bd823 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/frontend.json
@@ -1,62 +1,81 @@
[
{
- "EventCode": "0x80",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x4",
- "EventName": "ICACHE_16B.IFDATA_STALL",
+ "EventName": "IDQ.MITE_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x83",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ.",
+ "EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ICACHE_64B.IFTAG_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "UMask": "0x4",
+ "EventName": "IDQ.MITE_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x83",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ICACHE_64B.IFTAG_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x83",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ICACHE_64B.IFTAG_STALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "Counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_UOPS",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_UOPS",
+ "UMask": "0x18",
+ "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "CounterMask": "4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
+ "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x18",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x20",
@@ -66,95 +85,99 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may 'bypass' the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "PublicDescription": "Counts the number of cycles 4 uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_CYCLES",
+ "UMask": "0x24",
+ "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterMask": "1",
+ "BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "CounterMask": "4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ.",
+ "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_CYCLES",
+ "UMask": "0x24",
+ "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "BriefDescription": "Cycles MITE is delivering any Uop",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_CYCLES",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_CYCLES",
+ "UMask": "0x30",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_SWITCHES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x18",
- "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
- "CounterMask": "4",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
- "EventCode": "0x79",
+ "PublicDescription": "Cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity.",
+ "EventCode": "0x80",
"Counter": "0,1,2,3",
- "UMask": "0x18",
- "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "UMask": "0x4",
+ "EventName": "ICACHE_16B.IFDATA_STALL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
- "CounterMask": "1",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
- "EventCode": "0x79",
+ "EventCode": "0x83",
"Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering 4 Uops",
- "CounterMask": "4",
+ "UMask": "0x1",
+ "EventName": "ICACHE_64B.IFTAG_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
- "EventCode": "0x79",
+ "EventCode": "0x83",
"Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering any Uop",
- "CounterMask": "1",
+ "UMask": "0x2",
+ "EventName": "ICACHE_64B.IFTAG_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x83",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ICACHE_64B.IFTAG_STALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding ?4 ? x? when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread\n\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions)\n \n c. Instruction Decode Queue (IDQ) delivers four uops.",
+ "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.",
"EventCode": "0x9C",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -164,7 +187,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.",
+ "PublicDescription": "Counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.",
"EventCode": "0x9C",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -175,7 +198,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >=3.",
+ "PublicDescription": "Counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >= 3.",
"EventCode": "0x9C",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -186,6 +209,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles with less than 2 uops delivered by the front-end.",
"EventCode": "0x9C",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -196,6 +220,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles with less than 3 uops delivered by the front-end.",
"EventCode": "0x9C",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -217,7 +242,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0?2 cycles.",
+ "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
"EventCode": "0xAB",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -228,6 +253,7 @@
},
{
"PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. \r\n",
"EventCode": "0xC6",
"MSRValue": "0x11",
"Counter": "0,1,2,3",
@@ -235,7 +261,7 @@
"EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced decode stream buffer (DSB - the decoded instruction-cache) miss.",
+ "BriefDescription": "Retired Instructions who experienced decode stream buffer (DSB - the decoded instruction-cache) miss. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
@@ -248,7 +274,7 @@
"EventName": "FRONTEND_RETIRED.L1I_MISS",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
+ "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
@@ -261,12 +287,13 @@
"EventName": "FRONTEND_RETIRED.L2_MISS",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
"EventCode": "0xC6",
"MSRValue": "0x14",
"Counter": "0,1,2,3",
@@ -274,12 +301,13 @@
"EventName": "FRONTEND_RETIRED.ITLB_MISS",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "BriefDescription": "Retired Instructions who experienced iTLB true miss. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
"EventCode": "0xC6",
"MSRValue": "0x15",
"Counter": "0,1,2,3",
@@ -287,7 +315,7 @@
"EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
@@ -300,7 +328,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
@@ -313,7 +341,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_2",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
@@ -326,34 +354,13 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_SWITCHES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may 'bypass' the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops. \r\n",
"EventCode": "0xC6",
"MSRValue": "0x400806",
"Counter": "0,1,2,3",
@@ -367,6 +374,7 @@
},
{
"PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.\r\n",
"EventCode": "0xC6",
"MSRValue": "0x401006",
"Counter": "0,1,2,3",
@@ -374,12 +382,13 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.\r\n",
"EventCode": "0xC6",
"MSRValue": "0x402006",
"Counter": "0,1,2,3",
@@ -387,7 +396,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
@@ -400,7 +409,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
@@ -413,7 +422,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
@@ -426,7 +435,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
@@ -439,12 +448,13 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.\r\n",
"EventCode": "0xC6",
"MSRValue": "0x100206",
"Counter": "0,1,2,3",
@@ -452,7 +462,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
@@ -465,7 +475,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_3",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
}
diff --git a/tools/perf/pmu-events/arch/x86/skylake/memory.json b/tools/perf/pmu-events/arch/x86/skylake/memory.json
index d7fd5b06825b..3bd8b712c889 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/memory.json
@@ -1,6 +1,74 @@
[
{
- "PublicDescription": "Unfriendly TSX abort triggered by a flowmarker.",
+ "PublicDescription": "Number of times a TSX line had a cache conflict.",
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "TX_MEM.ABORT_CONFLICT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "TX_MEM.ABORT_CAPACITY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data capacity limitation for transactional reads or writes.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times we could not allocate Lock Buffer.",
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x5d",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -10,7 +78,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Unfriendly TSX abort triggered by a vzeroupper instruction.",
+ "PublicDescription": "Unfriendly TSX abort triggered by a vzeroupper instruction.",
"EventCode": "0x5d",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -50,7 +118,77 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of times we entered an HLE region\n does not count nested transactions.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts number of Offcore outstanding Demand Data Read requests that miss L3 cache in the superQ every cycle.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with at least 6 Demand Data Read requests that miss L3 cache in the superQ.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while L3 cache miss demand load is outstanding.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x6",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Demand Data Read requests who miss L3 cache.",
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand Data Read requests who miss L3 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following:a. memory disambiguation,b. external snoop, orc. cross SMT-HW-thread snoop (stores) hitting load buffer.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "SKL089",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times we entered an HLE region. Does not count nested transactions.",
"EventCode": "0xC8",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -71,7 +209,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "Number of times HLE abort was triggered.",
+ "PublicDescription": "Number of times HLE abort was triggered. (PEBS)",
"EventCode": "0xC8",
"Counter": "0,1,2,3",
"UMask": "0x4",
@@ -99,13 +237,12 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
"EventCode": "0xC8",
"Counter": "0,1,2,3",
"UMask": "0x20",
"EventName": "HLE_RETIRED.ABORTED_UNFRIENDLY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.). ",
+ "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -128,7 +265,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of times we entered an RTM region\n does not count nested transactions.",
+ "PublicDescription": "Number of times we entered an RTM region. Does not count nested transactions.",
"EventCode": "0xC9",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -149,7 +286,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "Number of times RTM abort was triggered.",
+ "PublicDescription": "Number of times RTM abort was triggered. (PEBS)",
"EventCode": "0xC9",
"Counter": "0,1,2,3",
"UMask": "0x4",
@@ -208,17 +345,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following:\n1. memory disambiguation,\n2. external snoop, or\n3. cross SMT-HW-thread snoop (stores) hitting load buffer.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "SKL089",
- "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PEBS": "2",
"PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
"EventCode": "0xCD",
@@ -331,1718 +457,7 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Number of times a TSX line had a cache conflict.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "TX_MEM.ABORT_CONFLICT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "TX_MEM.ABORT_CAPACITY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times a transactional abort was signaled due to a data capacity limitation for transactional reads or writes.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times we could not allocate Lock Buffer.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Demand Data Read requests who miss L3 cache.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand Data Read requests who miss L3 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts number of Offcore outstanding Demand Data Read requests that miss L3 cache in the superQ every cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L3 cache miss demand load is outstanding.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x6",
- "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 6 Demand Data Read requests that miss L3 cache in the superQ.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3ffc008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x203c008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x103c008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x007c008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc4008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0044008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000408000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20001c8000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000108000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_S & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000088000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_E & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000048000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_M & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000028000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3ffc000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x203c000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x103c000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x007c000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc4000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0044000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000400800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20001c0800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000100800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_S & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000080800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_E & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000040800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_M & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3ffc000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x203c000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x103c000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x007c000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc4000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0044000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000400100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20001c0100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000100100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_S & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000080100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_E & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000040100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_M & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3ffc000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x203c000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x103c000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x007c000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc4000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0044000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000400080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20001c0080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000100080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000080080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000040080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3ffc000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x203c000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x103c000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x007c000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc4000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0044000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000400004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20001c0004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000100004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000080004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000040004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3ffc000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x203c000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x103c000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x007c000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc4000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0044000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000400002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20001c0002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000100002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_S & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000080002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_E & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000040002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_M & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3ffc000001 ",
"Counter": "0,1,2,3",
@@ -2055,18 +470,7 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x203c000001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x103c000001 ",
"Counter": "0,1,2,3",
@@ -2079,6 +483,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000001 ",
"Counter": "0,1,2,3",
@@ -2091,6 +496,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000001 ",
"Counter": "0,1,2,3",
@@ -2103,6 +509,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000001 ",
"Counter": "0,1,2,3",
@@ -2115,6 +522,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000001 ",
"Counter": "0,1,2,3",
@@ -2127,18 +535,7 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x007c000001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fc4000001 ",
"Counter": "0,1,2,3",
@@ -2151,18 +548,7 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000001 ",
"Counter": "0,1,2,3",
@@ -2175,6 +561,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000001 ",
"Counter": "0,1,2,3",
@@ -2187,6 +574,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000001 ",
"Counter": "0,1,2,3",
@@ -2199,6 +587,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000001 ",
"Counter": "0,1,2,3",
@@ -2211,6 +600,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000001 ",
"Counter": "0,1,2,3",
@@ -2221,89 +611,5 @@
"BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0044000001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000400001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20001c0001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000100001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000080001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000040001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
}
]
\ No newline at end of file
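The MSRValue fields of the OFFCORE_RESPONSE.* entries above follow a visible pattern: the low bits select the request type (DEMAND_DATA_RD, DEMAND_RFO, DEMAND_CODE_RD, PF_L3_DATA_RD, PF_L3_RFO, STREAMING_STORES) and the upper bits select the supplier/snoop combination (L3_MISS.ANY_SNOOP, L3_MISS_LOCAL_DRAM.SNOOP_HITM, and so on), with the result programmed into the MSRs named by MSRIndex (0x1a6/0x1a7). The short Python sketch below, which is not part of the patch, decodes an MSRValue back into that event-name form; the bit positions and masks are inferred only from the EventName/MSRValue pairs in this file and are illustrative, not a complete description of the offcore response MSR encoding.

#!/usr/bin/env python3
# Minimal sketch: decode an OFFCORE_RESPONSE MSRValue from this file into
# its request / supplier / snoop parts. Bit assignments are inferred from
# the entries above and are illustrative only.

# Request-type bits (low 16 bits of MSRValue), as seen in the entries above.
REQUEST_BITS = {
    0: "DEMAND_DATA_RD",
    1: "DEMAND_RFO",
    2: "DEMAND_CODE_RD",
    7: "PF_L3_DATA_RD",
    8: "PF_L3_RFO",
    11: "STREAMING_STORES",
}

# Supplier/snoop masks (bits above 15), copied from MSRValues in this file
# with the request bits cleared.
RESPONSE_MASKS = {
    0x3ffc000000: "L3_MISS.ANY_SNOOP",
    0x203c000000: "L3_MISS.SNOOP_NON_DRAM",
    0x103c000000: "L3_MISS.SNOOP_HITM",
    0x3fc4000000: "L3_MISS_LOCAL_DRAM.ANY_SNOOP",
    0x2000400000: "L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
    0x20001c0000: "L3_HIT.SNOOP_NON_DRAM",
    0x2000020000: "SUPPLIER_NONE.SNOOP_NON_DRAM",
}

def decode(msr_value):
    """Return an OFFCORE_RESPONSE.<request>.<supplier>.<snoop>-style name."""
    request = msr_value & 0xffff
    response = msr_value & ~0xffff
    requests = [name for bit, name in REQUEST_BITS.items() if request & (1 << bit)]
    supplier = RESPONSE_MASKS.get(response, hex(response))
    return "OFFCORE_RESPONSE." + "|".join(requests or ["?"]) + "." + supplier

if __name__ == "__main__":
    # 0x3ffc000001 is the MSRValue of DEMAND_DATA_RD.L3_MISS.ANY_SNOOP above.
    print(decode(0x3ffc000001))
    # 0x2000020800 is STREAMING_STORES.SUPPLIER_NONE.SNOOP_NON_DRAM above.
    print(decode(0x2000020800))

Composing a new MSRValue is simply the reverse: OR one (or more) request bits with one of the response masks.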
diff --git a/tools/perf/pmu-events/arch/x86/skylake/other.json b/tools/perf/pmu-events/arch/x86/skylake/other.json
index cfdc323acc82..84a316d380ac 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/other.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/other.json
@@ -1,11 +1,47 @@
[
{
- "PublicDescription": "This event counts the number of hardware interruptions received by the processor.",
+ "EventCode": "0x32",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SW_PREFETCH_ACCESS.NTA",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x32",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "SW_PREFETCH_ACCESS.T0",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x32",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "SW_PREFETCH_ACCESS.T1_T2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x32",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of PREFETCHW instructions executed.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of hardware interruptions received by the processor.",
"EventCode": "0xCB",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "HW_INTERRUPTS.RECEIVED",
- "SampleAfterValue": "100003",
+ "SampleAfterValue": "203",
"BriefDescription": "Number of hardware interrupts received by the processor.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
}
diff --git a/tools/perf/pmu-events/arch/x86/skylake/pipeline.json b/tools/perf/pmu-events/arch/x86/skylake/pipeline.json
index 0f7adb809be3..bc6d2afbcd8a 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/pipeline.json
@@ -1,80 +1,92 @@
[
{
- "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. \nNotes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. \nCounting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
+ "PublicDescription": "Counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, Counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. Counting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"EventCode": "0x00",
- "Counter": "Fixed counter 1",
+ "Counter": "Fixed counter 0",
"UMask": "0x1",
"EventName": "INST_RETIRED.ANY",
"SampleAfterValue": "2000003",
"BriefDescription": "Instructions retired from execution.",
- "CounterHTOff": "Fixed counter 1"
+ "CounterHTOff": "Fixed counter 0"
},
{
- "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
+ "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"EventCode": "0x00",
- "Counter": "Fixed counter 2",
+ "Counter": "Fixed counter 1",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000003",
"BriefDescription": "Core cycles when the thread is not in halt state",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "CounterHTOff": "Fixed counter 1"
},
{
- "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"EventCode": "0x00",
- "Counter": "Fixed counter 3",
+ "Counter": "Fixed counter 2",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"SampleAfterValue": "2000003",
"BriefDescription": "Reference cycles when the core is not in halt state.",
- "CounterHTOff": "Fixed counter 3"
+ "CounterHTOff": "Fixed counter 2"
},
{
- "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
- "EventCode": "0x3C",
+ "PublicDescription": "Counts how many times the load operation got the true Block-on-Store blocking code preventing store forwarding. This includes cases when:a. preceding store conflicts with the load (incomplete overlap),b. store forwarding is impossible due to u-arch limitations,c. preceding lock RMW operations are not forwarded,d. store has the no-forward bit set (uncacheable/page-split/masked stores),e. all-blocking stores are used (mostly, fences and port I/O), and others.The most common case is a load blocked due to its address range overlapping with a preceding smaller uncompleted store. Note: This event does not take into account cases of out-of-SW-control (for example, SbTailHit), unknown physical STA, and cases of blocking loads on store due to being non-WB memory type or a lock. These cases are covered by other events. See the table of not supported store forwards in the Optimization Guide.",
+ "EventCode": "0x03",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Thread cycles when thread is not in halt state",
+ "UMask": "0x2",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Loads blocked by overlapping with store buffer that cannot be forwarded .",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xE6",
+ "PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "EventCode": "0x03",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BACLEARS.ANY",
+ "UMask": "0x8",
+ "EventName": "LD_BLOCKS.NO_SR",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA8",
+ "PublicDescription": "Counts false dependencies in MOB when the partial comparison upon loose net check and dependency was resolved by the Enhanced Loose net mechanism. This may not result in high performance penalties. Loose net checks can fail when loads and stores are 4k aliased.",
+ "EventCode": "0x07",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "LSD.UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of Uops delivered by the LSD.",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "False dependencies in MOB due to partial compare on address.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts stalls occured due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
- "EventCode": "0x87",
+ "PublicDescription": "Core cycles the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
+ "EventCode": "0x0D",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "ILD_STALL.LCP",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles checkpoints in Resource Allocation Table (RAT) are recovering from JEClear or machine clear.",
"EventCode": "0x0D",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "AnyThread": "1",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -87,33 +99,35 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts resource-related stall cycles. Reasons for stalls can be as follows:\n - *any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots)\n - *any* u-arch structure got empty (like INT/SIMD FreeLists)\n - FPU control word (FPCW), MXCSR\nand others. This counts cycles that the pipeline backend blocked uop delivery from the front end.",
- "EventCode": "0xA2",
+ "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
+ "EventCode": "0x0E",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "RESOURCE_STALLS.ANY",
+ "EventName": "UOPS_ISSUED.ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Resource-related stall cycles",
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts stall cycles caused by the store buffer (SB) overflow (excluding draining from synch). This counts cycles that the pipeline backend blocked uop delivery from the front end.",
- "EventCode": "0xA2",
+ "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "EventCode": "0x0E",
+ "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "RESOURCE_STALLS.SB",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS).",
+ "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to Mixing Intel AVX and Intel SSE Code section of the Optimization Guide.",
"EventCode": "0x0E",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_ISSUED.ANY",
+ "UMask": "0x2",
+ "EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -126,361 +140,318 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
- "EventCode": "0x0E",
- "Invert": "1",
+ "EventCode": "0x14",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "EventName": "ARITH.DIVIDER_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which the reservation station (RS) is empty for the thread.\nNote: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
- "EventCode": "0x5E",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "BriefDescription": "Thread cycles when thread is not in halt state",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x5E",
- "Invert": "1",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "RS_EVENTS.EMPTY_END",
+ "UMask": "0x0",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "CounterMask": "1",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
- "EventCode": "0xCC",
+ "PublicDescription": "Counts when the Current Privilege Level (CPL) transitions from ring 1, 2 or 3 to ring 0 (Kernel).",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "UMask": "0x0",
+ "EdgeDetect": "1",
+ "EventName": "CPU_CLK_UNHALTED.RING0_TRANS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts when there is a transition from ring 1, 2 or 3 to ring 0.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of machine clears (nukes) of any type.",
- "EventCode": "0xC3",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "MACHINE_CLEARS.COUNT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of machine clears (nukes) of any type. ",
- "CounterMask": "1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2503",
+ "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts self-modifying code (SMC) detected, which causes a machine clear.",
- "EventCode": "0xC3",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MACHINE_CLEARS.SMC",
- "SampleAfterValue": "100003",
- "BriefDescription": "Self-modifying code (SMC) detected.",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2503",
+ "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
- "EventCode": "0xC0",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "Errata": "SKL091, SKL044",
- "EventName": "INST_RETIRED.ANY_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2503",
+ "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "2",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts instructions retired.",
- "EventCode": "0xC0",
- "Counter": "1",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
"UMask": "0x1",
- "Errata": "SKL091, SKL044",
- "EventName": "INST_RETIRED.PREC_DIST",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
- "CounterHTOff": "1"
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2503",
+ "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of retirement slots used.",
- "EventCode": "0xC2",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Retirement slots used.",
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.",
- "EventCode": "0xC2",
- "Invert": "1",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.STALL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles without actually retired uops.",
- "CounterMask": "1",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2503",
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
- "EventCode": "0xC2",
- "Invert": "1",
+ "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
+ "EventCode": "0x4C",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with less than 10 actually retired uops.",
- "CounterMask": "10",
+ "EventName": "LOAD_HIT_PRE.SW_PF",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts conditional branch instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for the thread.; Note: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
+ "EventCode": "0x5E",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Conditional branch instructions retired.",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts both direct and indirect near call instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate front-end Latency Bound issues.",
+ "EventCode": "0x5E",
+ "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.NEAR_CALL",
- "SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect near call instructions retired.",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts all (macro) branch instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
+ "EventCode": "0x87",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired.",
+ "UMask": "0x1",
+ "EventName": "ILD_STALL.LCP",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts return instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 0.",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "SampleAfterValue": "100007",
- "BriefDescription": "Return instructions retired.",
+ "UMask": "0x1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts not taken branch instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 1.",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.NOT_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Not taken branch instructions retired.",
+ "UMask": "0x2",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts taken branch instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 2.",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Taken branch instructions retired.",
+ "UMask": "0x4",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts far branch instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 3.",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "SampleAfterValue": "100007",
- "BriefDescription": "Far branch instructions retired.",
+ "UMask": "0x8",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "2",
- "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired. ",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted conditional branch instructions retired.",
- "EventCode": "0xC5",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 4.",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "UMask": "0x10",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts both taken and not taken retired mispredicted direct and indirect near calls, including both register and memory indirect.",
- "EventCode": "0xC5",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 5.",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "BR_MISP_RETIRED.NEAR_CALL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted direct and indirect near call instructions retired.",
+ "UMask": "0x20",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts all mispredicted macro branch instructions retired.",
- "EventCode": "0xC5",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 6.",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All mispredicted macro branch instructions retired.",
+ "UMask": "0x40",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xC5",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 7.",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "UMask": "0x80",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "2",
- "PublicDescription": "This is a precise version of BR_MISP_RETIRED.ALL_BRANCHES that counts all mispredicted macro branch instructions retired.",
- "EventCode": "0xC5",
+ "PublicDescription": "Counts resource-related stall cycles. Reasons for stalls can be as follows:a. *any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots).b. *any* u-arch structure got empty (like INT/SIMD FreeLists).c. FPU control word (FPCW), MXCSR.and others. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
+ "EventCode": "0xA2",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted macro branch instructions retired. ",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Resource-related stall cycles",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of uops to be executed per-thread each cycle.",
- "EventCode": "0xB1",
+ "PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
+ "EventCode": "0xA2",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.THREAD",
+ "UMask": "0x8",
+ "EventName": "RESOURCE_STALLS.SB",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of uops executed from any thread.",
- "EventCode": "0xB1",
+ "EventCode": "0xA3",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE",
+ "UMask": "0x1",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on the core.",
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
+ "EventCode": "0xA3",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "UOPS_EXECUTED.X87",
+ "UMask": "0x4",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of x87 uops dispatched.",
+ "BriefDescription": "Total execution stalls.",
+ "CounterMask": "4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
- "EventCode": "0xB1",
- "Invert": "1",
+ "EventCode": "0xA3",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "UMask": "0x5",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
- "CounterMask": "1",
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "CounterMask": "5",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
- "EventCode": "0xB1",
+ "EventCode": "0xA3",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
- "CounterMask": "1",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
- "EventCode": "0xB1",
+ "EventCode": "0xA3",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "UMask": "0xc",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
- "CounterMask": "2",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
- "EventCode": "0xB1",
+ "EventCode": "0xA3",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "UMask": "0x10",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
- "CounterMask": "3",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "16",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
- "EventCode": "0xB1",
+ "EventCode": "0xA3",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "UMask": "0x14",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "20",
+ "CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts cycles during which no uops were executed on all ports and Reservation Station (RS) was not empty.",
"EventCode": "0xA6",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -490,6 +461,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
"EventCode": "0xA6",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -499,6 +471,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
"EventCode": "0xA6",
"Counter": "0,1,2,3",
"UMask": "0x4",
@@ -508,6 +481,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
"EventCode": "0xA6",
"Counter": "0,1,2,3",
"UMask": "0x8",
@@ -517,6 +491,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
"EventCode": "0xA6",
"Counter": "0,1,2,3",
"UMask": "0x10",
@@ -535,212 +510,196 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
- "EventCode": "0xA1",
+ "PublicDescription": "Number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
+ "EventCode": "0xA8",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "EventName": "LSD.UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "BriefDescription": "Number of Uops delivered by the LSD.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
- "EventCode": "0xA1",
+ "PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).",
+ "EventCode": "0xA8",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
- "EventCode": "0xA1",
+ "PublicDescription": "Counts the cycles when 4 uops are delivered by the LSD (Loop-stream detector).",
+ "EventCode": "0xA8",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_4_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
- "EventCode": "0xA1",
+ "PublicDescription": "Number of uops to be executed per-thread each cycle.",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.THREAD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
- "EventCode": "0xA1",
+ "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "EventCode": "0xB1",
+ "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
- "EventCode": "0xA1",
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
- "EventCode": "0xA1",
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CounterMask": "2",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
- "EventCode": "0xA1",
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CounterMask": "3",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA3",
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
"SampleAfterValue": "2000003",
- "BriefDescription": "Total execution stalls.",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
"CounterMask": "4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA3",
+ "PublicDescription": "Number of uops executed from any thread.",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "CounterMask": "8",
+ "BriefDescription": "Number of uops executed on the core.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA3",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "CounterMask": "12",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by asm inspection of the nearby instructions.",
- "EventCode": "0x4C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LOAD_HIT_PRE.SW_PF",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts how many times the load operation got the true Block-on-Store blocking code preventing store forwarding. This includes cases when:\n - preceding store conflicts with the load (incomplete overlap)\n\n - store forwarding is impossible due to u-arch limitations\n\n - preceding lock RMW operations are not forwarded\n\n - store has the no-forward bit set (uncacheable/page-split/masked stores)\n\n - all-blocking stores are used (mostly, fences and port I/O)\n\nand others.\nThe most common case is a load blocked due to its address range overlapping with a preceding smaller uncompleted store. Note: This event does not take into account cases of out-of-SW-control (for example, SbTailHit), unknown physical STA, and cases of blocking loads on store due to being non-WB memory type or a lock. These cases are covered by other events.\nSee the table of not supported store forwards in the Optimization Guide.",
- "EventCode": "0x03",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "LD_BLOCKS.STORE_FORWARD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Loads blocked by overlapping with store buffer that cannot be forwarded .",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "LD_BLOCKS.NO_SR",
- "SampleAfterValue": "100003",
- "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CounterMask": "2",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts false dependencies in MOB when the partial comparison upon loose net check and dependency was resolved by the Enhanced Loose net mechanism. This may not result in high performance penalties. Loose net checks can fail when loads and stores are 4k aliased.",
- "EventCode": "0x07",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
- "SampleAfterValue": "100003",
- "BriefDescription": "False dependencies in MOB due to partial compare on address.",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CounterMask": "3",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA3",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
- "CounterMask": "1",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CounterMask": "4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA3",
+ "EventCode": "0xB1",
+ "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x5",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
- "CounterMask": "5",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA3",
+ "PublicDescription": "Counts the number of x87 uops executed.",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
"UMask": "0x10",
- "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "EventName": "UOPS_EXECUTED.X87",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "CounterMask": "16",
+ "BriefDescription": "Counts the number of x87 uops dispatched.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA3",
+ "PublicDescription": "Counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
+ "EventCode": "0xC0",
"Counter": "0,1,2,3",
- "UMask": "0x14",
- "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "UMask": "0x0",
+ "Errata": "SKL091, SKL044",
+ "EventName": "INST_RETIRED.ANY_P",
"SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "CounterMask": "20",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2503",
- "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
+ "PEBS": "2",
+ "PublicDescription": "A version of INST_RETIRED that allows for a more unbiased distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR) feature to mitigate some bias in how retired instructions get sampled.",
+ "EventCode": "0xC0",
+ "Counter": "1",
+ "UMask": "0x1",
+ "Errata": "SKL091, SKL044",
+ "EventName": "INST_RETIRED.PREC_DIST",
"SampleAfterValue": "2000003",
- "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "CounterHTOff": "1"
},
{
"PEBS": "2",
@@ -757,183 +716,235 @@
"CounterHTOff": "0,2,3"
},
{
- "EventCode": "0x14",
+ "EventCode": "0xC1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ARITH.DIVIDER_ACTIVE",
+ "UMask": "0x3f",
+ "EventName": "OTHER_ASSISTS.ANY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of times a microcode assist is invoked by HW other than FP-assist. Examples include AD (page Access Dirty) and AVX* related assists.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the retirement slots used.",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
- "CounterMask": "1",
+ "BriefDescription": "Retirement slots used.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA8",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.",
+ "EventCode": "0xC2",
+ "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_ACTIVE",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "BriefDescription": "Cycles without actually retired uops.",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA8",
+ "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
+ "EventCode": "0xC2",
+ "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_4_UOPS",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "4",
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "CounterMask": "10",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC1",
+ "EventCode": "0xC3",
"Counter": "0,1,2,3",
- "UMask": "0x3f",
- "EventName": "OTHER_ASSISTS.ANY",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "MACHINE_CLEARS.COUNT",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of times a microcode assist is invoked by HW other than FP-assist. Examples include AD (page Access Dirty) and AVX* related assists.",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register.\r\nFor more information, refer to ?Mixing Intel AVX and Intel SSE Code? section of the Optimization Guide.",
- "EventCode": "0x0E",
+ "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
+ "EventCode": "0xC3",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
+ "UMask": "0x4",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Self-modifying code (SMC) detected.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x00",
- "Counter": "Fixed counter 2",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
- },
- {
- "EventCode": "0x3C",
+ "PublicDescription": "Counts all (macro) branch instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x0",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x3C",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts conditional branch instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2503",
- "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Conditional branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x0D",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect near call instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "UMask": "0x2",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Direct and indirect near call instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xB1",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts return instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "CounterMask": "2",
+ "UMask": "0x8",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Return instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts not taken branch instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "CounterMask": "3",
+ "UMask": "0x10",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.NOT_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Not taken branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts taken branch instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "CounterMask": "4",
+ "UMask": "0x20",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Taken branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
- "Invert": "1",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts far branch instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "CounterMask": "1",
+ "UMask": "0x40",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of far branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts when the Current Privilege Level (CPL) transitions from ring 1, 2 or 3 to ring 0 (Kernel).",
- "EventCode": "0x3C",
+ "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0x0",
- "EdgeDetect": "1",
- "EventName": "CPU_CLK_UNHALTED.RING0_TRANS",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts when there is a transition from ring 1, 2 or 3 to ring 0.",
- "CounterMask": "1",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x3C",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted conditional branch instructions retired.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2503",
- "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x3C",
+ "PEBS": "1",
+ "PublicDescription": "This event counts both taken and not taken retired mispredicted direct and indirect near calls, including both register and memory indirect.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2503",
- "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
+ "UMask": "0x2",
+ "EventName": "BR_MISP_RETIRED.NEAR_CALL",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted direct and indirect near call instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x3C",
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version of BR_MISP_RETIRED.ALL_BRANCHES that counts all mispredicted macro branch instructions retired.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2503",
- "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "UMask": "0x4",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted macro branch instructions retired.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken. ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
+ "EventCode": "0xCC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
+ "EventCode": "0xE6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BACLEARS.ANY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
}
]
\ No newline at end of file
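
The hunks above are data consumed by perf's pmu-events build step; each object's EventCode, UMask, EventName, CounterMask and related fields describe one hardware event on Skylake. As a minimal sketch (not part of this patch) of what such an entry encodes, the C program below counts UOPS_ISSUED.ANY, whose EventCode 0x0E and UMask 0x1 appear in the table above, by opening a raw perf event. It assumes an Intel core PMU, where a raw config is conventionally built as (UMask << 8) | EventCode, and a Linux host with perf_event_open() available; in normal use perf resolves the symbolic EventName from these JSON tables instead.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Thin wrapper; glibc does not export perf_event_open() directly. */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	/* UOPS_ISSUED.ANY from the table above: EventCode 0x0E, UMask 0x01.
	 * On Intel core PMUs a raw config is (UMask << 8) | EventCode.
	 */
	attr.config = (0x01ULL << 8) | 0x0E;
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* this thread, any CPU */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload being measured would run here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("UOPS_ISSUED.ANY: %llu uops issued\n",
		       (unsigned long long)count);

	close(fd);
	return 0;
}

Once the generated pmu-events tables include these entries, the same count is normally obtained symbolically, for example with something like "perf stat -e uops_issued.any -- <workload>".
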
diff --git a/tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json
index 02f32cbf6789..2bcba7daca14 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json
@@ -1,83 +1,6 @@
[
{
- "PublicDescription": "This event counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
- "EventCode": "0xAE",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ITLB.ITLB_FLUSH",
- "SampleAfterValue": "100007",
- "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x4F",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "EPT.WALK_PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a EPT (Extended Page Table) walk for any request type.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts store misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
- "SampleAfterValue": "100003",
- "BriefDescription": "Misses at all ITLB levels that cause page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
- "SampleAfterValue": "100003",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
- "SampleAfterValue": "100003",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
- "SampleAfterValue": "100003",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (1G)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "ITLB_MISSES.WALK_PENDING",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake. ",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "ITLB_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts load misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
+ "PublicDescription": "Counts demand data loads that caused a page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels, but the walk need not have completed.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -87,45 +10,68 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
"SampleAfterValue": "2000003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (4K).",
+ "BriefDescription": "Page walk completed due to a demand data load to a 4K page",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
"SampleAfterValue": "2000003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (2M/4M).",
+ "BriefDescription": "Page walk completed due to a demand data load to a 2M/4M page",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
"SampleAfterValue": "2000003",
- "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (1G)",
+ "BriefDescription": "Page walk completed due to a demand data load to a 1G page",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts demand data loads that caused a completed page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake microarchitecture.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x10",
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake. ",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a load.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x20",
@@ -135,7 +81,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts store misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
+ "PublicDescription": "Counts demand data stores that caused a page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels, but the walk need not have completed.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -145,45 +91,68 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
"SampleAfterValue": "100003",
- "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (4K)",
+ "BriefDescription": "Page walk completed due to a demand data store to a 4K page",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (2M/4M)",
+ "BriefDescription": "Page walk completed due to a demand data store to a 2M/4M page",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 1G pages. The page walks can end with or without a page fault.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (1G)",
+ "BriefDescription": "Page walk completed due to a demand data store to a 1G page",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts demand data stores that caused a completed page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake microarchitecture.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x10",
"EventName": "DTLB_STORE_MISSES.WALK_PENDING",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake. ",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
"EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x20",
@@ -193,73 +162,77 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of DTLB flush attempts of the thread-specific entries.",
- "EventCode": "0xBD",
+ "PublicDescription": "Counts cycles for each PMH (Page Miss Handler) that is busy with an EPT (Extended Page Table) walk for any request type.",
+ "EventCode": "0x4F",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "TLB_FLUSH.DTLB_THREAD",
- "SampleAfterValue": "100007",
- "BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "UMask": "0x10",
+ "EventName": "EPT.WALK_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a EPT (Extended Page Table) walk for any request type.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, and so on).",
- "EventCode": "0xBD",
+ "PublicDescription": "Counts page walks of any page size (4K/2M/4M/1G) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB, but the walk need not have completed.",
+ "EventCode": "0x85",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "TLB_FLUSH.STLB_ANY",
- "SampleAfterValue": "100007",
- "BriefDescription": "STLB flush attempts",
+ "UMask": "0x1",
+ "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Misses at all ITLB levels that cause page walks",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts completed page walks (4K page size) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB. The page walk can end with or without a fault.",
"EventCode": "0x85",
"Counter": "0,1,2,3",
- "UMask": "0xe",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "UMask": "0x2",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
"SampleAfterValue": "100003",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x08",
+ "PublicDescription": "Counts code misses in all ITLB levels that caused a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "EventCode": "0x85",
"Counter": "0,1,2,3",
- "UMask": "0xe",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "UMask": "0x4",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
"SampleAfterValue": "100003",
- "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x49",
+ "PublicDescription": "Counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "EventCode": "0x85",
"Counter": "0,1,2,3",
- "UMask": "0xe",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "UMask": "0x8",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (1G)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x49",
+ "PublicDescription": "Counts completed page walks (2M and 4M page sizes) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB. The page walk can end with or without a fault.",
+ "EventCode": "0x85",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
+ "UMask": "0xe",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
- "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store. EPT page walk duration are excluded in Skylake. ",
- "CounterMask": "1",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x08",
+ "PublicDescription": "Counts 1 per cycle for each PMH (Page Miss Handler) that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake michroarchitecture.",
+ "EventCode": "0x85",
"Counter": "0,1,2,3",
"UMask": "0x10",
- "EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
+ "EventName": "ITLB_MISSES.WALK_PENDING",
"SampleAfterValue": "100003",
- "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a load. EPT page walk duration are excluded in Skylake. ",
- "CounterMask": "1",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake microarchitecture.",
"EventCode": "0x85",
"Counter": "0,1,2,3",
"UMask": "0x10",
@@ -268,5 +241,44 @@
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake.",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
+ "EventCode": "0xAE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ITLB.ITLB_FLUSH",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of DTLB flush attempts of the thread-specific entries.",
+ "EventCode": "0xBD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "TLB_FLUSH.DTLB_THREAD",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, etc.).",
+ "EventCode": "0xBD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "TLB_FLUSH.STLB_ANY",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "STLB flush attempts",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
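For reference while reading the reworked entries above, a minimal sketch (assuming a local kernel checkout; the path and field names are taken from this file, everything else is illustrative) that loads the JSON array and prints the per-event fields these hunks touch:

#!/usr/bin/env python3
# Minimal sketch: list the per-event fields carried by a pmu-events JSON file.
# The path is an assumption; point it at your own kernel tree.
import json

PATH = "tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json"

with open(PATH) as f:
    events = json.load(f)  # each file is a single JSON array of event objects

for ev in events:
    # EventCode/UMask select the hardware event; EventName is the name perf exposes.
    print("%-40s code=%s umask=%s period=%s" % (
        ev.get("EventName", "?"),
        ev.get("EventCode", "?"),
        ev.get("UMask", "?"),
        ev.get("SampleAfterValue", "?")))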
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/cache.json b/tools/perf/pmu-events/arch/x86/skylakex/cache.json
index b5bc742b6fbc..5c9940866acd 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/cache.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/cache.json
@@ -265,7 +265,7 @@
{
"EventCode": "0x60",
"UMask": "0x2",
- "BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle. ",
+ "BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle.",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
"PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
@@ -398,22 +398,24 @@
{
"EventCode": "0xD0",
"UMask": "0x11",
- "BriefDescription": "Retired load instructions that miss the STLB.",
+ "BriefDescription": "Retired load instructions that miss the STLB. (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
+ "PublicDescription": "Retired load instructions that miss the STLB.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x12",
- "BriefDescription": "Retired store instructions that miss the STLB.",
+ "BriefDescription": "Retired store instructions that miss the STLB. (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
+ "PublicDescription": "Retired store instructions that miss the STLB.",
"SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -421,7 +423,7 @@
{
"EventCode": "0xD0",
"UMask": "0x21",
- "BriefDescription": "Retired load instructions with locked access.",
+ "BriefDescription": "Retired load instructions with locked access. (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -432,24 +434,22 @@
{
"EventCode": "0xD0",
"UMask": "0x41",
- "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "BriefDescription": "Retired load instructions that split across a cacheline boundary. (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
- "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x42",
- "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "BriefDescription": "Retired store instructions that split across a cacheline boundary. (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_INST_RETIRED.SPLIT_STORES",
- "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
"SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -457,7 +457,7 @@
{
"EventCode": "0xD0",
"UMask": "0x81",
- "BriefDescription": "All retired load instructions.",
+ "BriefDescription": "All retired load instructions. (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -468,11 +468,12 @@
{
"EventCode": "0xD0",
"UMask": "0x82",
- "BriefDescription": "All retired store instructions.",
+ "BriefDescription": "All retired store instructions. (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_INST_RETIRED.ALL_STORES",
+ "PublicDescription": "All retired store instructions.",
"SampleAfterValue": "2000003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -485,7 +486,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_RETIRED.L1_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.\r\n",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -509,7 +510,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_RETIRED.L3_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache. ",
+ "PublicDescription": "Retired load instructions with L3 cache hits as data sources.",
"SampleAfterValue": "50021",
"CounterHTOff": "0,1,2,3"
},
@@ -545,7 +546,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_RETIRED.L3_MISS",
- "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache. ",
+ "PublicDescription": "Retired load instructions missed L3 cache as data sources.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
@@ -557,7 +558,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_RETIRED.FB_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready. ",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready. \r\n",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
@@ -616,7 +617,6 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
- "PublicDescription": "Retired load instructions which data sources missed L3 but serviced from local DRAM.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
@@ -639,7 +639,6 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
- "PublicDescription": "Retired load instructions whose data sources was remote HITM.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
@@ -648,9 +647,9 @@
"UMask": "0x8",
"BriefDescription": "Retired load instructions whose data sources was forwarded from a remote cache",
"Data_LA": "1",
+ "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD",
- "PublicDescription": "Retired load instructions whose data sources was forwarded from a remote cache.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
@@ -697,7 +696,7 @@
{
"EventCode": "0xF2",
"UMask": "0x2",
- "BriefDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped ",
+ "BriefDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped",
"Counter": "0,1,2,3",
"EventName": "L2_LINES_OUT.NON_SILENT",
"PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped.",
@@ -742,7 +741,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -755,7 +754,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -768,7 +767,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -781,7 +780,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -794,7 +793,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -807,7 +806,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -820,7 +819,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -833,7 +832,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -846,7 +845,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -859,7 +858,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -872,7 +871,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -885,7 +884,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -898,7 +897,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -911,7 +910,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -924,7 +923,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -937,7 +936,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -950,7 +949,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -963,7 +962,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -976,7 +975,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -989,7 +988,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1002,7 +1001,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1015,7 +1014,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1028,7 +1027,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1041,7 +1040,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1054,7 +1053,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1067,7 +1066,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1080,7 +1079,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1093,7 +1092,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1106,7 +1105,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1119,7 +1118,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1132,7 +1131,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1145,7 +1144,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1158,7 +1157,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1171,7 +1170,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1184,7 +1183,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1197,7 +1196,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1210,7 +1209,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1223,7 +1222,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1236,7 +1235,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1249,7 +1248,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1262,7 +1261,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1275,7 +1274,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1288,7 +1287,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1301,7 +1300,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1314,7 +1313,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1327,7 +1326,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1340,7 +1339,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1353,7 +1352,85 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that have any response type.",
+ "MSRValue": "0x0000018000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "MSRValue": "0x01003c8000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "MSRValue": "0x04003c8000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "OTHER & L3_HIT & SNOOP_HIT_WITH_FWD",
+ "MSRValue": "0x08003c8000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "MSRValue": "0x10003c8000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that hit in the L3.",
+ "MSRValue": "0x3f803c8000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1366,7 +1443,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1379,7 +1456,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1392,7 +1469,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1405,7 +1482,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1418,7 +1495,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1431,7 +1508,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1444,7 +1521,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1457,7 +1534,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1470,7 +1547,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1483,7 +1560,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1496,7 +1573,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1509,7 +1586,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1522,7 +1599,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1535,7 +1612,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1548,7 +1625,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1561,7 +1638,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1574,7 +1651,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1587,7 +1664,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1600,7 +1677,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1613,7 +1690,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1626,7 +1703,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1639,7 +1716,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1652,7 +1729,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1665,7 +1742,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
}
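
The OFFCORE_RESPONSE.* entries touched above all share the programming model their PublicDescription text repeats: the event select (0xB7 or 0xBB) picks an offcore-response slot, and the request/supplier/snoop filter goes into the companion MSR pair 0x1A6/0x1A7, carried in these files as MSRValue. The following is a minimal, hedged sketch of driving one of these events through a raw perf_event_open() call; the event encoding is taken from the OFFCORE_RESPONSE.OTHER.L3_HIT.ANY_SNOOP entry above, while the use of attr.config1 to carry the offcore-response mask reflects perf's usual mapping of that MSR and is an assumption here, not something this patch defines.

/*
 * Hedged sketch: count one offcore-response event with a raw
 * perf_event_open() call.  EventCode 0xB7 / UMask 0x1 and the MSRValue
 * 0x3f803c8000 come from the JSON entry above; passing the MSRValue in
 * attr.config1 is assumed to match perf's offcore_rsp mapping.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <linux/perf_event.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x01b7;		/* EventCode 0xB7, UMask 0x1 */
	attr.config1 = 0x3f803c8000ULL;	/* MSRValue: OTHER.L3_HIT.ANY_SNOOP */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* this thread, any CPU */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement would run here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("offcore responses: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}
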
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json b/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json
index 1c09a328df36..286ed1a37ec9 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json
@@ -29,10 +29,9 @@
{
"EventCode": "0xC7",
"UMask": "0x8",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ",
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
- "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/frontend.json b/tools/perf/pmu-events/arch/x86/skylakex/frontend.json
index 40abc0852cd6..403a4f89e9b2 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/frontend.json
@@ -182,7 +182,7 @@
"BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
"Counter": "0,1,2,3",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
- "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding \u201c4 \u2013 x\u201d when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.",
+ "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -247,20 +247,20 @@
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
"Counter": "0,1,2,3",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
- "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0\u20132 cycles.",
+ "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced decode stream buffer (DSB - the decoded instruction-cache) miss.",
+ "BriefDescription": "Retired Instructions who experienced decode stream buffer (DSB - the decoded instruction-cache) miss. Precise Event.",
"PEBS": "1",
"MSRValue": "0x11",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. ",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. \r\n",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -268,7 +268,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
+ "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss. Precise Event.",
"PEBS": "1",
"MSRValue": "0x12",
"Counter": "0,1,2,3",
@@ -281,7 +281,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss. Precise Event.",
"PEBS": "1",
"MSRValue": "0x13",
"Counter": "0,1,2,3",
@@ -294,7 +294,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "BriefDescription": "Retired Instructions who experienced iTLB true miss. Precise Event.",
"PEBS": "1",
"MSRValue": "0x14",
"Counter": "0,1,2,3",
@@ -308,13 +308,13 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss. Precise Event.",
"PEBS": "1",
"MSRValue": "0x15",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss. ",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -322,7 +322,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x400206",
"Counter": "0,1,2,3",
@@ -335,7 +335,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x200206",
"Counter": "0,1,2,3",
@@ -348,7 +348,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x400406",
"Counter": "0,1,2,3",
@@ -367,7 +367,7 @@
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops. \r\n",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -375,13 +375,13 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x401006",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.\r\n",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -389,13 +389,13 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x402006",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.\r\n",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -403,7 +403,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x404006",
"Counter": "0,1,2,3",
@@ -416,7 +416,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x408006",
"Counter": "0,1,2,3",
@@ -429,7 +429,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x410006",
"Counter": "0,1,2,3",
@@ -442,7 +442,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x420006",
"Counter": "0,1,2,3",
@@ -455,13 +455,13 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x100206",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.\r\n",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -469,7 +469,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x300206",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/memory.json b/tools/perf/pmu-events/arch/x86/skylakex/memory.json
index ca22a22c1abd..e7f1aa31226d 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/memory.json
@@ -214,7 +214,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "HLE_RETIRED.ABORTED",
- "PublicDescription": "Number of times HLE abort was triggered.",
+ "PublicDescription": "Number of times HLE abort was triggered. (PEBS)",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -239,10 +239,9 @@
{
"EventCode": "0xC8",
"UMask": "0x20",
- "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.). ",
+ "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
"Counter": "0,1,2,3",
"EventName": "HLE_RETIRED.ABORTED_UNFRIENDLY",
- "PublicDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -292,7 +291,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "RTM_RETIRED.ABORTED",
- "PublicDescription": "Number of times RTM abort was triggered.",
+ "PublicDescription": "Number of times RTM abort was triggered. (PEBS)",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -466,7 +465,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss in the L3. ",
+ "PublicDescription": "Counts demand data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -479,7 +478,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts demand data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -492,7 +491,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts demand data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -505,7 +504,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -518,7 +517,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -531,7 +530,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -544,7 +543,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss in the L3. ",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -557,7 +556,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -570,7 +569,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -583,7 +582,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -596,7 +595,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -609,7 +608,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -622,7 +621,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss in the L3. ",
+ "PublicDescription": "Counts all demand code reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -635,7 +634,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts all demand code reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -648,7 +647,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts all demand code reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -661,7 +660,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -674,7 +673,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -687,7 +686,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -700,7 +699,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3. ",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -713,7 +712,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -726,7 +725,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -739,7 +738,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -752,7 +751,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -765,7 +764,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -778,7 +777,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3. ",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -791,7 +790,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -804,7 +803,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -817,7 +816,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -830,7 +829,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -843,7 +842,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -856,7 +855,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -869,7 +868,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -882,7 +881,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -895,7 +894,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -908,7 +907,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -921,7 +920,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -934,7 +933,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -947,7 +946,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -960,7 +959,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -973,7 +972,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -986,7 +985,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -999,7 +998,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1012,7 +1011,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss in the L3. ",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1025,7 +1024,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1038,7 +1037,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1051,7 +1050,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1064,7 +1063,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1077,7 +1076,85 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that miss in the L3.",
+ "MSRValue": "0x3fbc008000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that miss the L3 and clean or shared data is transferred from remote cache.",
+ "MSRValue": "0x083fc08000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that miss the L3 and the modified data is transferred from remote cache.",
+ "MSRValue": "0x103fc08000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that miss the L3 and the data is returned from local or remote dram.",
+ "MSRValue": "0x063fc08000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that miss the L3 and the data is returned from remote dram.",
+ "MSRValue": "0x063b808000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that miss the L3 and the data is returned from local dram.",
+ "MSRValue": "0x0604008000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1090,7 +1167,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that miss in the L3. ",
+ "PublicDescription": "Counts all prefetch data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1103,7 +1180,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts all prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1116,7 +1193,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts all prefetch data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1129,7 +1206,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1142,7 +1219,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1155,7 +1232,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1168,7 +1245,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that miss in the L3. ",
+ "PublicDescription": "Counts prefetch RFOs that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1181,7 +1258,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1194,7 +1271,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts prefetch RFOs that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1207,7 +1284,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1220,7 +1297,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1233,7 +1310,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1246,7 +1323,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss in the L3. ",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1259,7 +1336,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1272,7 +1349,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1285,7 +1362,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1298,7 +1375,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1311,7 +1388,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1324,7 +1401,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss in the L3. ",
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1337,7 +1414,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1350,7 +1427,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1363,7 +1440,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1376,7 +1453,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1389,8 +1466,8 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram.",
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
}
-]
+]
\ No newline at end of file
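A minimal sketch of how one of the offcore-response events added above could be counted by hand, assuming a Skylake-X system with perf_event_paranoid permitting system-wide counting; the event code (0xB7), umask (0x1) and MSRValue (0x3fbc008000, written by the kernel to MSR_OFFCORE_RSP_0 at 0x1a6 via attr.config1) are taken from the OFFCORE_RESPONSE.OTHER.L3_MISS.ANY_SNOOP entry, everything else is illustrative:

/* Hedged sketch: count offcore_response.other.l3_miss.any_snoop on CPU 0. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x01b7;            /* umask 0x1 << 8 | event code 0xB7 */
	attr.config1 = 0x3fbc008000ULL;  /* MSRValue from the JSON entry above */
	attr.disabled = 1;

	/* system-wide on CPU 0; needs CAP_PERFMON or a low perf_event_paranoid */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	sleep(1);
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("offcore_response.other.l3_miss.any_snoop: %lld\n", count);
	close(fd);
	return 0;
}

Once these JSON tables are built into perf, the same count is more simply obtained by event name, e.g. "perf stat -e offcore_response.other.l3_miss.any_snoop -a -- sleep 1".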
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/other.json b/tools/perf/pmu-events/arch/x86/skylakex/other.json
index 70243b0b0586..778a541463eb 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/other.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/other.json
@@ -40,6 +40,42 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x32",
+ "UMask": "0x1",
+ "BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventName": "SW_PREFETCH_ACCESS.NTA",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x32",
+ "UMask": "0x2",
+ "BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventName": "SW_PREFETCH_ACCESS.T0",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x32",
+ "UMask": "0x4",
+ "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventName": "SW_PREFETCH_ACCESS.T1_T2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x32",
+ "UMask": "0x8",
+ "BriefDescription": "Number of PREFETCHW instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xCB",
"UMask": "0x1",
"BriefDescription": "Number of hardware interrupts received by the processor.",
@@ -50,6 +86,62 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xEF",
+ "UMask": "0x1",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IHITI",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xEF",
+ "UMask": "0x2",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IHITFSE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xEF",
+ "UMask": "0x4",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_SHITFSE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xEF",
+ "UMask": "0x8",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDM",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xEF",
+ "UMask": "0x10",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDM",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xEF",
+ "UMask": "0x20",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDFE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xEF",
+ "UMask": "0x40",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDFE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xFE",
"UMask": "0x2",
"BriefDescription": "Counts number of cache lines that are allocated and written back to L3 with the intention that they are more likely to be reused shortly",
@@ -69,4 +161,4 @@
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
}
-]
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json b/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
index 0895d1e52a4a..f99f7ae27820 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
@@ -3,41 +3,41 @@
"EventCode": "0x00",
"UMask": "0x1",
"BriefDescription": "Instructions retired from execution.",
- "Counter": "Fixed counter 1",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PublicDescription": "Counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, Counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. Counting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 1"
+ "CounterHTOff": "Fixed counter 0"
},
{
"EventCode": "0x00",
"UMask": "0x2",
"BriefDescription": "Core cycles when the thread is not in halt state",
- "Counter": "Fixed counter 2",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
},
{
"EventCode": "0x00",
"UMask": "0x2",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "Fixed counter 2",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
"AnyThread": "1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
},
{
"EventCode": "0x00",
"UMask": "0x3",
"BriefDescription": "Reference cycles when the core is not in halt state.",
- "Counter": "Fixed counter 3",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 3"
+ "CounterHTOff": "Fixed counter 2"
},
{
"EventCode": "0x03",
@@ -126,7 +126,7 @@
"BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
"Counter": "0,1,2,3",
"EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
- "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to \u201cMixing Intel AVX and Intel SSE Code\u201d section of the Optimization Guide.",
+ "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to Mixing Intel AVX and Intel SSE Code section of the Optimization Guide.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -762,11 +762,10 @@
"EdgeDetect": "1",
"EventCode": "0xC3",
"UMask": "0x1",
- "BriefDescription": "Number of machine clears (nukes) of any type. ",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
"Counter": "0,1,2,3",
"EventName": "MACHINE_CLEARS.COUNT",
"CounterMask": "1",
- "PublicDescription": "Number of machine clears (nukes) of any type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -799,7 +798,7 @@
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
"Errata": "SKL091",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts conditional branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts conditional branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -811,14 +810,14 @@
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"Errata": "SKL091",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts both direct and indirect near call instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect near call instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
"UMask": "0x4",
- "BriefDescription": "All (macro) branch instructions retired. ",
+ "BriefDescription": "All (macro) branch instructions retired.",
"PEBS": "2",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
@@ -835,7 +834,7 @@
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
"Errata": "SKL091",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts return instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts return instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -858,19 +857,19 @@
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"Errata": "SKL091",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts taken branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts taken branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
"UMask": "0x40",
- "BriefDescription": "Far branch instructions retired.",
+ "BriefDescription": "Counts the number of far branch instructions retired.",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"Errata": "SKL091",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts far branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts far branch instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -891,7 +890,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted conditional branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted conditional branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -902,14 +901,14 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.NEAR_CALL",
- "PublicDescription": "Counts both taken and not taken retired mispredicted direct and indirect near calls, including both register and memory indirect.",
+ "PublicDescription": "This event counts both taken and not taken retired mispredicted direct and indirect near calls, including both register and memory indirect.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC5",
"UMask": "0x4",
- "BriefDescription": "Mispredicted macro branch instructions retired. ",
+ "BriefDescription": "Mispredicted macro branch instructions retired.",
"PEBS": "2",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
@@ -920,10 +919,11 @@
{
"EventCode": "0xC5",
"UMask": "0x20",
- "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken. ",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
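
The fixed-counter renumbering at the top of this file assigns INST_RETIRED.ANY to fixed counter 0, CPU_CLK_UNHALTED.THREAD to fixed counter 1 and CPU_CLK_UNHALTED.REF_TSC to fixed counter 2, matching the hardware numbering of the three architectural fixed-function counters. As a minimal sketch of what that numbering means outside of perf (not part of the patch), the counters can be read with RDPMC by setting bit 30 of the index, assuming the kernel permits user-space RDPMC via CR4.PCE; otherwise the program faults:

#include <stdint.h>
#include <stdio.h>

static inline uint64_t rdpmc(uint32_t idx)
{
	uint32_t lo, hi;

	__asm__ volatile("rdpmc" : "=a"(lo), "=d"(hi) : "c"(idx));
	return ((uint64_t)hi << 32) | lo;
}

#define FIXED_CTR(n)	((1u << 30) | (n))	/* bit 30 selects the fixed-function counters */

int main(void)
{
	uint64_t inst = rdpmc(FIXED_CTR(0));	/* INST_RETIRED.ANY */
	uint64_t core = rdpmc(FIXED_CTR(1));	/* CPU_CLK_UNHALTED.THREAD */
	uint64_t ref  = rdpmc(FIXED_CTR(2));	/* CPU_CLK_UNHALTED.REF_TSC */

	printf("inst=%llu core=%llu ref=%llu\n",
	       (unsigned long long)inst, (unsigned long long)core,
	       (unsigned long long)ref);
	return 0;
}
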
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json b/tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json
index 70750dab7ead..7f466c97e485 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json
@@ -12,30 +12,30 @@
{
"EventCode": "0x08",
"UMask": "0x2",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (4K).",
+ "BriefDescription": "Page walk completed due to a demand data load to a 4K page",
"Counter": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
- "PublicDescription": "Counts demand data loads that caused a completed page walk (4K page size). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x08",
"UMask": "0x4",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (2M/4M).",
+ "BriefDescription": "Page walk completed due to a demand data load to a 2M/4M page",
"Counter": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
- "PublicDescription": "Counts demand data loads that caused a completed page walk (2M and 4M page sizes). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x08",
"UMask": "0x8",
- "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (1G)",
+ "BriefDescription": "Page walk completed due to a demand data load to a 1G page",
"Counter": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
- "PublicDescription": "Counts load misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -52,17 +52,17 @@
{
"EventCode": "0x08",
"UMask": "0x10",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake. ",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
"Counter": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
- "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake microarchitecture. ",
+ "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake microarchitecture.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x08",
"UMask": "0x10",
- "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a load. EPT page walk duration are excluded in Skylake. ",
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
"Counter": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
"CounterMask": "1",
@@ -93,30 +93,30 @@
{
"EventCode": "0x49",
"UMask": "0x2",
- "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (4K)",
+ "BriefDescription": "Page walk completed due to a demand data store to a 4K page",
"Counter": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
- "PublicDescription": "Counts demand data stores that caused a completed page walk (4K page size). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x49",
"UMask": "0x4",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (2M/4M)",
+ "BriefDescription": "Page walk completed due to a demand data store to a 2M/4M page",
"Counter": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
- "PublicDescription": "Counts demand data stores that caused a completed page walk (2M and 4M page sizes). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x49",
"UMask": "0x8",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (1G)",
+ "BriefDescription": "Page walk completed due to a demand data store to a 1G page",
"Counter": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
- "PublicDescription": "Counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 1G pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -133,17 +133,17 @@
{
"EventCode": "0x49",
"UMask": "0x10",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake. ",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
"Counter": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_PENDING",
- "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake microarchitecture. ",
+ "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake microarchitecture.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x49",
"UMask": "0x10",
- "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store. EPT page walk duration are excluded in Skylake. ",
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
"Counter": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
"CounterMask": "1",
@@ -197,7 +197,7 @@
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
"Counter": "0,1,2,3",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
- "PublicDescription": "Counts completed page walks of any page size (4K/2M/4M/1G) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB. The page walk can end with or without a fault.",
+ "PublicDescription": "Counts code misses in all ITLB levels that caused a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -224,10 +224,10 @@
{
"EventCode": "0x85",
"UMask": "0x10",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake. ",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake.",
"Counter": "0,1,2,3",
"EventName": "ITLB_MISSES.WALK_PENDING",
- "PublicDescription": "Counts 1 per cycle for each PMH (Page Miss Handler) that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake michroarchitecture. ",
+ "PublicDescription": "Counts 1 per cycle for each PMH (Page Miss Handler) that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake michroarchitecture.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
index 9eb7047bafe4..b578aa26e375 100644
--- a/tools/perf/pmu-events/jevents.c
+++ b/tools/perf/pmu-events/jevents.c
@@ -116,6 +116,43 @@ static void fixdesc(char *s)
*e = 0;
}
+/* Add escapes for '\' so they are proper C strings. */
+static char *fixregex(char *s)
+{
+ int len = 0;
+ int esc_count = 0;
+ char *fixed = NULL;
+ char *p, *q;
+
+ /* Count the number of '\' in string */
+ for (p = s; *p; p++) {
+ ++len;
+ if (*p == '\\')
+ ++esc_count;
+ }
+
+ if (esc_count == 0)
+ return s;
+
+ /* allocate space for a new string, with room for the doubled '\' characters */
+ fixed = (char *) malloc(len + esc_count + 1);
+ if (!fixed)
+ return NULL;
+
+ /* copy over the characters */
+ q = fixed;
+ for (p = s; *p; p++) {
+ if (*p == '\\') {
+ *q = '\\';
+ ++q;
+ }
+ *q = *p;
+ ++q;
+ }
+ *q = '\0';
+ return fixed;
+}
+
static struct msrmap {
const char *num;
const char *pname;
@@ -648,7 +685,7 @@ static int process_mapfile(FILE *outfp, char *fpath)
}
line[strlen(line)-1] = '\0';
- cpuid = strtok_r(p, ",", &save);
+ cpuid = fixregex(strtok_r(p, ",", &save));
version = strtok_r(NULL, ",", &save);
fname = strtok_r(NULL, ",", &save);
type = strtok_r(NULL, ",", &save);
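
The fixregex() helper added above doubles every '\' in the mapfile cpuid field because that field is pasted verbatim into the generated pmu-events.c as a C string literal; a lone backslash (say, in a hypothetical cpuid regex using "\d") would be swallowed by the C compiler before the regex ever runs. A self-contained sketch of the same transformation, with an invented sample pattern:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Double each '\' so the string survives being emitted as a C string literal. */
static char *escape_backslashes(const char *s)
{
	size_t len = strlen(s), extra = 0;
	const char *p;
	char *out, *q;

	for (p = s; *p; p++)
		if (*p == '\\')
			extra++;

	out = malloc(len + extra + 1);	/* room for the doubled '\' plus NUL */
	if (!out)
		return NULL;

	for (p = s, q = out; *p; p++) {
		if (*p == '\\')
			*q++ = '\\';
		*q++ = *p;
	}
	*q = '\0';
	return out;
}

int main(void)
{
	const char *cpuid = "GenuineIntel-6-55-\\d+";	/* hypothetical mapfile regex */
	char *escaped = escape_backslashes(cpuid);

	if (!escaped)
		return 1;
	printf("\t.cpuid = \"%s\",\n", escaped);	/* rough shape of a generated table entry */
	free(escaped);
	return 0;
}
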
diff --git a/tools/perf/scripts/python/bin/mem-phys-addr-record b/tools/perf/scripts/python/bin/mem-phys-addr-record
new file mode 100644
index 000000000000..5a875122a904
--- /dev/null
+++ b/tools/perf/scripts/python/bin/mem-phys-addr-record
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+#
+# Profiling physical memory by all retired load instructions/uops event
+# MEM_INST_RETIRED.ALL_LOADS or MEM_UOPS_RETIRED.ALL_LOADS
+#
+
+load=`perf list | grep mem_inst_retired.all_loads`
+if [ -z "$load" ]; then
+ load=`perf list | grep mem_uops_retired.all_loads`
+fi
+if [ -z "$load" ]; then
+ echo "There is no event to count all retired load instructions/uops."
+ exit 1
+fi
+
+arg=$(echo $load | tr -d ' ')
+arg="$arg:P"
+perf record --phys-data -e $arg $@
diff --git a/tools/perf/scripts/python/bin/mem-phys-addr-report b/tools/perf/scripts/python/bin/mem-phys-addr-report
new file mode 100644
index 000000000000..3f2b847e2eab
--- /dev/null
+++ b/tools/perf/scripts/python/bin/mem-phys-addr-report
@@ -0,0 +1,3 @@
+#!/bin/bash
+# description: resolve physical address samples
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/mem-phys-addr.py
diff --git a/tools/perf/scripts/python/mem-phys-addr.py b/tools/perf/scripts/python/mem-phys-addr.py
new file mode 100644
index 000000000000..ebee2c5ae496
--- /dev/null
+++ b/tools/perf/scripts/python/mem-phys-addr.py
@@ -0,0 +1,95 @@
+# mem-phys-addr.py: Resolve physical address samples
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2018, Intel Corporation.
+
+from __future__ import division
+import os
+import sys
+import struct
+import re
+import bisect
+import collections
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+#physical address ranges for System RAM
+system_ram = []
+#physical address ranges for Persistent Memory
+pmem = []
+#file object for proc iomem
+f = None
+#Count for each type of memory
+load_mem_type_cnt = collections.Counter()
+#perf event name
+event_name = None
+
+def parse_iomem():
+ global f
+ f = open('/proc/iomem', 'r')
+ for i, j in enumerate(f):
+ m = re.split('-|:',j,2)
+ if m[2].strip() == 'System RAM':
+ system_ram.append(long(m[0], 16))
+ system_ram.append(long(m[1], 16))
+ if m[2].strip() == 'Persistent Memory':
+ pmem.append(long(m[0], 16))
+ pmem.append(long(m[1], 16))
+
+def print_memory_type():
+ print "Event: %s" % (event_name)
+ print "%-40s %10s %10s\n" % ("Memory type", "count", "percentage"),
+ print "%-40s %10s %10s\n" % ("----------------------------------------", \
+ "-----------", "-----------"),
+ total = sum(load_mem_type_cnt.values())
+ for mem_type, count in sorted(load_mem_type_cnt.most_common(), \
+ key = lambda(k, v): (v, k), reverse = True):
+ print "%-40s %10d %10.1f%%\n" % (mem_type, count, 100 * count / total),
+
+def trace_begin():
+ parse_iomem()
+
+def trace_end():
+ print_memory_type()
+ f.close()
+
+def is_system_ram(phys_addr):
+ #/proc/iomem is sorted
+ position = bisect.bisect(system_ram, phys_addr)
+ if position % 2 == 0:
+ return False
+ return True
+
+def is_persistent_mem(phys_addr):
+ position = bisect.bisect(pmem, phys_addr)
+ if position % 2 == 0:
+ return False
+ return True
+
+def find_memory_type(phys_addr):
+ if phys_addr == 0:
+ return "N/A"
+ if is_system_ram(phys_addr):
+ return "System RAM"
+
+ if is_persistent_mem(phys_addr):
+ return "Persistent Memory"
+
+ #slow path, search all
+ f.seek(0, 0)
+ for j in f:
+ m = re.split('-|:',j,2)
+ if long(m[0], 16) <= phys_addr <= long(m[1], 16):
+ return m[2]
+ return "N/A"
+
+def process_event(param_dict):
+ name = param_dict["ev_name"]
+ sample = param_dict["sample"]
+ phys_addr = sample["phys_addr"]
+
+ global event_name
+ if event_name == None:
+ event_name = name
+ load_mem_type_cnt[find_memory_type(phys_addr)] += 1
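
The script stores /proc/iomem ranges as a flat, sorted list of boundaries [start0, end0, start1, end1, ...] and classifies an address by its bisect insertion point: odd means inside a range, even means outside. A small C rendering of that lookup trick, with made-up ranges (the patch itself keeps this logic in Python):

#include <stdio.h>

/*
 * boundaries[] holds start/end pairs back to back, kept sorted.
 * An upper-bound binary search (the C analogue of Python's bisect.bisect)
 * returns an odd index exactly when addr falls inside one of the ranges.
 */
static int in_ranges(const unsigned long long *boundaries, int n, unsigned long long addr)
{
	int lo = 0, hi = n;

	while (lo < hi) {
		int mid = lo + (hi - lo) / 2;

		if (boundaries[mid] <= addr)
			lo = mid + 1;
		else
			hi = mid;
	}
	return lo & 1;	/* odd insertion point -> inside a range */
}

int main(void)
{
	/* Two made-up "System RAM" ranges: [0x1000, 0x9fff] and [0x100000, 0x1fffff]. */
	unsigned long long ram[] = { 0x1000, 0x9fff, 0x100000, 0x1fffff };

	printf("%d %d %d\n",
	       in_ranges(ram, 4, 0x2000),	/* 1: inside the first range  */
	       in_ranges(ram, 4, 0xa000),	/* 0: in the gap              */
	       in_ranges(ram, 4, 0x150000));	/* 1: inside the second range */
	return 0;
}
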
diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c
index 0e1367f90af5..97f64ad7fa08 100644
--- a/tools/perf/tests/attr.c
+++ b/tools/perf/tests/attr.c
@@ -124,6 +124,12 @@ static int store_event(struct perf_event_attr *attr, pid_t pid, int cpu,
WRITE_ASS(exclude_guest, "d");
WRITE_ASS(exclude_callchain_kernel, "d");
WRITE_ASS(exclude_callchain_user, "d");
+ WRITE_ASS(mmap2, "d");
+ WRITE_ASS(comm_exec, "d");
+ WRITE_ASS(context_switch, "d");
+ WRITE_ASS(write_backward, "d");
+ WRITE_ASS(namespaces, "d");
+ WRITE_ASS(use_clockid, "d");
WRITE_ASS(wakeup_events, PRIu32);
WRITE_ASS(bp_type, PRIu32);
WRITE_ASS(config1, "llu");
diff --git a/tools/perf/tests/backward-ring-buffer.c b/tools/perf/tests/backward-ring-buffer.c
index 71b9a0b613d2..e0b1b414d466 100644
--- a/tools/perf/tests/backward-ring-buffer.c
+++ b/tools/perf/tests/backward-ring-buffer.c
@@ -31,10 +31,12 @@ static int count_samples(struct perf_evlist *evlist, int *sample_count,
int i;
for (i = 0; i < evlist->nr_mmaps; i++) {
+ struct perf_mmap *map = &evlist->overwrite_mmap[i];
union perf_event *event;
+ u64 start, end;
- perf_mmap__read_catchup(&evlist->backward_mmap[i]);
- while ((event = perf_mmap__read_backward(&evlist->backward_mmap[i])) != NULL) {
+ perf_mmap__read_init(map, true, &start, &end);
+ while ((event = perf_mmap__read_event(map, true, &start, end)) != NULL) {
const u32 type = event->header.type;
switch (type) {
@@ -49,6 +51,7 @@ static int count_samples(struct perf_evlist *evlist, int *sample_count,
return TEST_FAIL;
}
}
+ perf_mmap__read_done(map);
}
return TEST_OK;
}
@@ -59,7 +62,7 @@ static int do_test(struct perf_evlist *evlist, int mmap_pages,
int err;
char sbuf[STRERR_BUFSIZE];
- err = perf_evlist__mmap(evlist, mmap_pages, true);
+ err = perf_evlist__mmap(evlist, mmap_pages);
if (err < 0) {
pr_debug("perf_evlist__mmap: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c
index 335b695f4970..a467615c5a0e 100644
--- a/tools/perf/tests/bp_signal.c
+++ b/tools/perf/tests/bp_signal.c
@@ -296,7 +296,7 @@ bool test__bp_signal_is_supported(void)
* instruction breakpoint using the perf event interface.
* Once it's there we can release this.
*/
-#ifdef __powerpc__
+#if defined(__powerpc__) || defined(__s390x__)
return false;
#else
return true;
diff --git a/tools/perf/tests/bpf-script-example.c b/tools/perf/tests/bpf-script-example.c
index 268e5f8e4aa2..e4123c1b0e88 100644
--- a/tools/perf/tests/bpf-script-example.c
+++ b/tools/perf/tests/bpf-script-example.c
@@ -31,8 +31,8 @@ struct bpf_map_def SEC("maps") flip_table = {
.max_entries = 1,
};
-SEC("func=SyS_epoll_wait")
-int bpf_func__SyS_epoll_wait(void *ctx)
+SEC("func=SyS_epoll_pwait")
+int bpf_func__SyS_epoll_pwait(void *ctx)
{
int ind =0;
int *flag = bpf_map_lookup_elem(&flip_table, &ind);
diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c
index 34c22cdf4d5d..e8399beca62b 100644
--- a/tools/perf/tests/bpf.c
+++ b/tools/perf/tests/bpf.c
@@ -3,6 +3,7 @@
#include <sys/epoll.h>
#include <sys/types.h>
#include <sys/stat.h>
+#include <fcntl.h>
#include <util/util.h>
#include <util/bpf-loader.h>
#include <util/evlist.h>
@@ -19,13 +20,13 @@
#ifdef HAVE_LIBBPF_SUPPORT
-static int epoll_wait_loop(void)
+static int epoll_pwait_loop(void)
{
int i;
/* Should fail NR_ITERS times */
for (i = 0; i < NR_ITERS; i++)
- epoll_wait(-(i + 1), NULL, 0, 0);
+ epoll_pwait(-(i + 1), NULL, 0, 0, NULL);
return 0;
}
@@ -63,46 +64,41 @@ static struct {
bool pin;
} bpf_testcase_table[] = {
{
- LLVM_TESTCASE_BASE,
- "Basic BPF filtering",
- "[basic_bpf_test]",
- "fix 'perf test LLVM' first",
- "load bpf object failed",
- &epoll_wait_loop,
- (NR_ITERS + 1) / 2,
- false,
+ .prog_id = LLVM_TESTCASE_BASE,
+ .desc = "Basic BPF filtering",
+ .name = "[basic_bpf_test]",
+ .msg_compile_fail = "fix 'perf test LLVM' first",
+ .msg_load_fail = "load bpf object failed",
+ .target_func = &epoll_pwait_loop,
+ .expect_result = (NR_ITERS + 1) / 2,
},
{
- LLVM_TESTCASE_BASE,
- "BPF pinning",
- "[bpf_pinning]",
- "fix kbuild first",
- "check your vmlinux setting?",
- &epoll_wait_loop,
- (NR_ITERS + 1) / 2,
- true,
+ .prog_id = LLVM_TESTCASE_BASE,
+ .desc = "BPF pinning",
+ .name = "[bpf_pinning]",
+ .msg_compile_fail = "fix kbuild first",
+ .msg_load_fail = "check your vmlinux setting?",
+ .target_func = &epoll_pwait_loop,
+ .expect_result = (NR_ITERS + 1) / 2,
+ .pin = true,
},
#ifdef HAVE_BPF_PROLOGUE
{
- LLVM_TESTCASE_BPF_PROLOGUE,
- "BPF prologue generation",
- "[bpf_prologue_test]",
- "fix kbuild first",
- "check your vmlinux setting?",
- &llseek_loop,
- (NR_ITERS + 1) / 4,
- false,
+ .prog_id = LLVM_TESTCASE_BPF_PROLOGUE,
+ .desc = "BPF prologue generation",
+ .name = "[bpf_prologue_test]",
+ .msg_compile_fail = "fix kbuild first",
+ .msg_load_fail = "check your vmlinux setting?",
+ .target_func = &llseek_loop,
+ .expect_result = (NR_ITERS + 1) / 4,
},
#endif
{
- LLVM_TESTCASE_BPF_RELOCATION,
- "BPF relocation checker",
- "[bpf_relocation_test]",
- "fix 'perf test LLVM' first",
- "libbpf error when dealing with relocation",
- NULL,
- 0,
- false,
+ .prog_id = LLVM_TESTCASE_BPF_RELOCATION,
+ .desc = "BPF relocation checker",
+ .name = "[bpf_relocation_test]",
+ .msg_compile_fail = "fix 'perf test LLVM' first",
+ .msg_load_fail = "libbpf error when dealing with relocation",
},
};
@@ -167,7 +163,7 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
goto out_delete_evlist;
}
- err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
+ err = perf_evlist__mmap(evlist, opts.mmap_pages);
if (err < 0) {
pr_debug("perf_evlist__mmap: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
@@ -190,7 +186,7 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
}
if (count != expect) {
- pr_debug("BPF filter result incorrect\n");
+ pr_debug("BPF filter result incorrect, expected %d, got %d samples\n", expect, count);
goto out_delete_evlist;
}
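
The bpf_testcase_table conversion above replaces positional initializers with designated ones, so each entry names only the fields it sets, omitted fields such as .pin default to zero, and reordering or adding struct members can no longer silently shift values into the wrong slots. A minimal sketch with a hypothetical struct:

#include <stdbool.h>
#include <stdio.h>

struct testcase {
	int		prog_id;
	const char	*desc;
	bool		pin;
};

/* Positional initializers depend on member order; the designated form does not,
 * and any field left out (.pin in the first entry) is zero-initialized. */
static struct testcase table[] = {
	{ .prog_id = 1, .desc = "basic filtering" },
	{ .prog_id = 1, .desc = "pinning", .pin = true },
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		printf("%s: pin=%d\n", table[i].desc, table[i].pin);
	return 0;
}
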
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 766573e236e4..fafa014240cd 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -411,9 +411,9 @@ static const char *shell_test__description(char *description, size_t size,
return description ? trim(description + 1) : NULL;
}
-#define for_each_shell_test(dir, ent) \
+#define for_each_shell_test(dir, base, ent) \
while ((ent = readdir(dir)) != NULL) \
- if (ent->d_type == DT_REG && ent->d_name[0] != '.')
+ if (!is_directory(base, ent))
static const char *shell_tests__dir(char *path, size_t size)
{
@@ -452,7 +452,7 @@ static int shell_tests__max_desc_width(void)
if (!dir)
return -1;
- for_each_shell_test(dir, ent) {
+ for_each_shell_test(dir, path, ent) {
char bf[256];
const char *desc = shell_test__description(bf, sizeof(bf), path, ent->d_name);
@@ -504,7 +504,7 @@ static int run_shell_tests(int argc, const char *argv[], int i, int width)
if (!dir)
return -1;
- for_each_shell_test(dir, ent) {
+ for_each_shell_test(dir, st.dir, ent) {
int curr = i++;
char desc[256];
struct test test = {
@@ -614,7 +614,7 @@ static int perf_test__list_shell(int argc, const char **argv, int i)
if (!dir)
return -1;
- for_each_shell_test(dir, ent) {
+ for_each_shell_test(dir, path, ent) {
int curr = i++;
char bf[256];
struct test t = {
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index fcc8984bc329..3bf7b145b826 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -639,7 +639,7 @@ static int do_test_code_reading(bool try_kcore)
break;
}
- ret = perf_evlist__mmap(evlist, UINT_MAX, false);
+ ret = perf_evlist__mmap(evlist, UINT_MAX);
if (ret < 0) {
pr_debug("perf_evlist__mmap failed\n");
goto out_put;
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
index ac40e05bcab4..260418969120 100644
--- a/tools/perf/tests/dwarf-unwind.c
+++ b/tools/perf/tests/dwarf-unwind.c
@@ -173,6 +173,7 @@ int test__dwarf_unwind(struct test *test __maybe_unused, int subtest __maybe_unu
}
callchain_param.record_mode = CALLCHAIN_DWARF;
+ dwarf_callchain_users = true;
if (init_live_machine(machine)) {
pr_err("Could not init machine\n");
diff --git a/tools/perf/tests/keep-tracking.c b/tools/perf/tests/keep-tracking.c
index 842d33637a18..c46530918938 100644
--- a/tools/perf/tests/keep-tracking.c
+++ b/tools/perf/tests/keep-tracking.c
@@ -95,7 +95,7 @@ int test__keep_tracking(struct test *test __maybe_unused, int subtest __maybe_un
goto out_err;
}
- CHECK__(perf_evlist__mmap(evlist, UINT_MAX, false));
+ CHECK__(perf_evlist__mmap(evlist, UINT_MAX));
/*
* First, test that a 'comm' event can be found when the event is
diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c
index 5a8bf318f8a7..c0e971da965c 100644
--- a/tools/perf/tests/mmap-basic.c
+++ b/tools/perf/tests/mmap-basic.c
@@ -94,7 +94,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
expected_nr_events[i] = 1 + rand() % 127;
}
- if (perf_evlist__mmap(evlist, 128, true) < 0) {
+ if (perf_evlist__mmap(evlist, 128) < 0) {
pr_debug("failed to mmap events: %d (%s)\n", errno,
str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
diff --git a/tools/perf/tests/openat-syscall-tp-fields.c b/tools/perf/tests/openat-syscall-tp-fields.c
index d9619d265314..43519267b93b 100644
--- a/tools/perf/tests/openat-syscall-tp-fields.c
+++ b/tools/perf/tests/openat-syscall-tp-fields.c
@@ -1,5 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
#include "perf.h"
#include "evlist.h"
#include "evsel.h"
@@ -64,7 +67,7 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
goto out_delete_evlist;
}
- err = perf_evlist__mmap(evlist, UINT_MAX, false);
+ err = perf_evlist__mmap(evlist, UINT_MAX);
if (err < 0) {
pr_debug("perf_evlist__mmap: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index f0679613bd18..18b06444f230 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -13,7 +13,6 @@
#include <unistd.h>
#include <linux/kernel.h>
#include <linux/hw_breakpoint.h>
-#include <api/fs/fs.h>
#include <api/fs/tracing_path.h>
#define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \
diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c
index c34904d37705..0afafab85238 100644
--- a/tools/perf/tests/perf-record.c
+++ b/tools/perf/tests/perf-record.c
@@ -141,7 +141,7 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
* fds in the same CPU to be injected in the same mmap ring buffer
* (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
*/
- err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
+ err = perf_evlist__mmap(evlist, opts.mmap_pages);
if (err < 0) {
pr_debug("perf_evlist__mmap: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c
index 3ec6302b6498..0e2d00d69e6e 100644
--- a/tools/perf/tests/sample-parsing.c
+++ b/tools/perf/tests/sample-parsing.c
@@ -248,7 +248,7 @@ static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
event->header.size = sz;
err = perf_event__synthesize_sample(event, sample_type, read_format,
- &sample, false);
+ &sample);
if (err) {
pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
"perf_event__synthesize_sample", sample_type, err);
diff --git a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
index 7a84d73324e3..c446c894b297 100755
--- a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
@@ -10,8 +10,8 @@
. $(dirname $0)/lib/probe.sh
-ld=$(realpath /lib64/ld*.so.* | uniq)
-libc=$(echo $ld | sed 's/ld/libc/g')
+libc=$(grep -w libc /proc/self/maps | head -1 | sed -r 's/.*[[:space:]](\/.*)/\1/g')
+nm -g $libc 2>/dev/null | fgrep -q inet_pton || exit 254
trace_libc_inet_pton_backtrace() {
idx=0
@@ -22,10 +22,23 @@ trace_libc_inet_pton_backtrace() {
expected[4]="rtt min.*"
expected[5]="[0-9]+\.[0-9]+[[:space:]]+probe_libc:inet_pton:\([[:xdigit:]]+\)"
expected[6]=".*inet_pton[[:space:]]\($libc\)$"
- expected[7]="getaddrinfo[[:space:]]\($libc\)$"
- expected[8]=".*\(.*/bin/ping.*\)$"
+ case "$(uname -m)" in
+ s390x)
+ eventattr='call-graph=dwarf'
+ expected[7]="gaih_inet[[:space:]]\(inlined\)$"
+ expected[8]="__GI_getaddrinfo[[:space:]]\(inlined\)$"
+ expected[9]="main[[:space:]]\(.*/bin/ping.*\)$"
+ expected[10]="__libc_start_main[[:space:]]\($libc\)$"
+ expected[11]="_start[[:space:]]\(.*/bin/ping.*\)$"
+ ;;
+ *)
+ eventattr='max-stack=3'
+ expected[7]="getaddrinfo[[:space:]]\($libc\)$"
+ expected[8]=".*\(.*/bin/ping.*\)$"
+ ;;
+ esac
- perf trace --no-syscalls -e probe_libc:inet_pton/max-stack=3/ ping -6 -c 1 ::1 2>&1 | grep -v ^$ | while read line ; do
+ perf trace --no-syscalls -e probe_libc:inet_pton/$eventattr/ ping -6 -c 1 ::1 2>&1 | grep -v ^$ | while read line ; do
echo $line
echo "$line" | egrep -q "${expected[$idx]}"
if [ $? -ne 0 ] ; then
@@ -33,10 +46,13 @@ trace_libc_inet_pton_backtrace() {
exit 1
fi
let idx+=1
- [ $idx -eq 9 ] && break
+ [ -z "${expected[$idx]}" ] && break
done
}
+# Check for IPv6 interface existence
+ip a sh lo | fgrep -q inet6 || exit 2
+
skip_if_no_perf_probe && \
perf probe -q $libc inet_pton && \
trace_libc_inet_pton_backtrace
diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
index 2e68c5f120da..55ad9793d544 100755
--- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
@@ -17,8 +17,9 @@ skip_if_no_perf_probe || exit 2
file=$(mktemp /tmp/temporary_file.XXXXX)
trace_open_vfs_getname() {
- perf trace -e open touch $file 2>&1 | \
- egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ open\(filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$"
+ evts=$(echo $(perf list syscalls:sys_enter_open* |& egrep 'open(at)? ' | sed -r 's/.*sys_enter_([a-z]+) +\[.*$/\1/') | sed 's/ /,/')
+ perf trace -e $evts touch $file 2>&1 | \
+ egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ open(at)?\((dfd: +CWD, +)?filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$"
}
diff --git a/tools/perf/tests/sw-clock.c b/tools/perf/tests/sw-clock.c
index 725a196991a8..f6c72f915d48 100644
--- a/tools/perf/tests/sw-clock.c
+++ b/tools/perf/tests/sw-clock.c
@@ -78,7 +78,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
goto out_delete_evlist;
}
- err = perf_evlist__mmap(evlist, 128, true);
+ err = perf_evlist__mmap(evlist, 128);
if (err < 0) {
pr_debug("failed to mmap event: %d (%s)\n", errno,
str_error_r(errno, sbuf, sizeof(sbuf)));
diff --git a/tools/perf/tests/switch-tracking.c b/tools/perf/tests/switch-tracking.c
index 7d3f4bf9534f..33e00295a972 100644
--- a/tools/perf/tests/switch-tracking.c
+++ b/tools/perf/tests/switch-tracking.c
@@ -449,7 +449,7 @@ int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_
goto out;
}
- err = perf_evlist__mmap(evlist, UINT_MAX, false);
+ err = perf_evlist__mmap(evlist, UINT_MAX);
if (err) {
pr_debug("perf_evlist__mmap failed!\n");
goto out_err;
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
index bc4a7344e274..01b62b81751b 100644
--- a/tools/perf/tests/task-exit.c
+++ b/tools/perf/tests/task-exit.c
@@ -84,7 +84,11 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
evsel = perf_evlist__first(evlist);
evsel->attr.task = 1;
+#ifdef __s390x__
+ evsel->attr.sample_freq = 1000000;
+#else
evsel->attr.sample_freq = 1;
+#endif
evsel->attr.inherit = 0;
evsel->attr.watermark = 0;
evsel->attr.wakeup_events = 1;
@@ -97,7 +101,7 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
goto out_delete_evlist;
}
- if (perf_evlist__mmap(evlist, 128, true) < 0) {
+ if (perf_evlist__mmap(evlist, 128) < 0) {
pr_debug("failed to mmap events: %d (%s)\n", errno,
str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
diff --git a/tools/perf/tests/thread-map.c b/tools/perf/tests/thread-map.c
index dbcb6a19b375..4de1939b58ba 100644
--- a/tools/perf/tests/thread-map.c
+++ b/tools/perf/tests/thread-map.c
@@ -105,7 +105,7 @@ int test__thread_map_remove(struct test *test __maybe_unused, int subtest __mayb
TEST_ASSERT_VAL("failed to allocate map string",
asprintf(&str, "%d,%d", getpid(), getppid()) >= 0);
- threads = thread_map__new_str(str, NULL, 0);
+ threads = thread_map__new_str(str, NULL, 0, false);
TEST_ASSERT_VAL("failed to allocate thread_map",
threads);
diff --git a/tools/perf/trace/beauty/Build b/tools/perf/trace/beauty/Build
index 066bbf0f4a74..66330d4b739b 100644
--- a/tools/perf/trace/beauty/Build
+++ b/tools/perf/trace/beauty/Build
@@ -1,5 +1,6 @@
libperf-y += clone.o
libperf-y += fcntl.o
+libperf-y += flock.o
ifeq ($(SRCARCH),$(filter $(SRCARCH),x86))
libperf-y += ioctl.o
endif
diff --git a/tools/perf/trace/beauty/arch_errno_names.c b/tools/perf/trace/beauty/arch_errno_names.c
new file mode 100644
index 000000000000..ede031c3a9e0
--- /dev/null
+++ b/tools/perf/trace/beauty/arch_errno_names.c
@@ -0,0 +1 @@
+#include "trace/beauty/generated/arch_errno_name_array.c"
diff --git a/tools/perf/trace/beauty/arch_errno_names.sh b/tools/perf/trace/beauty/arch_errno_names.sh
new file mode 100755
index 000000000000..22c9fc900c84
--- /dev/null
+++ b/tools/perf/trace/beauty/arch_errno_names.sh
@@ -0,0 +1,100 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Generate C file mapping errno codes to errno names.
+#
+# Copyright IBM Corp. 2018
+# Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+
+gcc="$1"
+toolsdir="$2"
+include_path="-I$toolsdir/include/uapi"
+
+arch_string()
+{
+ echo "$1" |sed -e 'y/- /__/' |tr '[[:upper:]]' '[[:lower:]]'
+}
+
+asm_errno_file()
+{
+ local arch="$1"
+ local header
+
+ header="$toolsdir/arch/$arch/include/uapi/asm/errno.h"
+ if test -r "$header"; then
+ echo "$header"
+ else
+ echo "$toolsdir/include/uapi/asm-generic/errno.h"
+ fi
+}
+
+create_errno_lookup_func()
+{
+ local arch=$(arch_string "$1")
+ local nr name
+
+ cat <<EoFuncBegin
+static const char *errno_to_name__$arch(int err)
+{
+ switch (err) {
+EoFuncBegin
+
+ while read name nr; do
+ printf '\tcase %d: return "%s";\n' $nr $name
+ done
+
+ cat <<EoFuncEnd
+ default:
+ return "(unknown)";
+ }
+}
+
+EoFuncEnd
+}
+
+process_arch()
+{
+ local arch="$1"
+ local asm_errno=$(asm_errno_file "$arch")
+
+ $gcc $include_path -E -dM -x c $asm_errno \
+ |grep -hE '^#define[[:blank:]]+(E[^[:blank:]]+)[[:blank:]]+([[:digit:]]+).*' \
+ |awk '{ print $2","$3; }' \
+ |sort -t, -k2 -nu \
+ |IFS=, create_errno_lookup_func "$arch"
+}
+
+create_arch_errno_table_func()
+{
+ local archlist="$1"
+ local default="$2"
+ local arch
+
+ printf 'const char *arch_syscalls__strerrno(const char *arch, int err)\n'
+ printf '{\n'
+ for arch in $archlist; do
+ printf '\tif (!strcmp(arch, "%s"))\n' $(arch_string "$arch")
+ printf '\t\treturn errno_to_name__%s(err);\n' $(arch_string "$arch")
+ done
+ printf '\treturn errno_to_name__%s(err);\n' $(arch_string "$default")
+ printf '}\n'
+}
+
+cat <<EoHEADER
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <string.h>
+
+EoHEADER
+
+# Create list of architectures and ignore those that do not appear
+# in tools/perf/arch
+archlist=""
+for arch in $(find $toolsdir/arch -maxdepth 1 -mindepth 1 -type d -printf "%f\n" | grep -v x86 | sort); do
+ test -d arch/$arch && archlist="$archlist $arch"
+done
+
+for arch in x86 $archlist generic; do
+ process_arch "$arch"
+done
+create_arch_errno_table_func "x86 $archlist" "generic"
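
The generated arch_errno_name_array.c consists of one errno_to_name__<arch>() switch per architecture plus the arch_syscalls__strerrno() dispatcher emitted by create_arch_errno_table_func(). Its rough shape looks like this (errno values abbreviated; illustrative rather than the real generated file):

#include <stdio.h>
#include <string.h>

static const char *errno_to_name__x86(int err)
{
	switch (err) {
	case 1: return "EPERM";
	case 2: return "ENOENT";
	default: return "(unknown)";
	}
}

static const char *errno_to_name__generic(int err)
{
	switch (err) {
	case 1: return "EPERM";
	case 2: return "ENOENT";
	default: return "(unknown)";
	}
}

const char *arch_syscalls__strerrno(const char *arch, int err)
{
	if (!strcmp(arch, "x86"))
		return errno_to_name__x86(err);
	return errno_to_name__generic(err);	/* fallback, per the script's "generic" default */
}

int main(void)
{
	printf("%s\n", arch_syscalls__strerrno("x86", 2));	/* ENOENT */
	return 0;
}
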
diff --git a/tools/perf/trace/beauty/beauty.h b/tools/perf/trace/beauty/beauty.h
index a6dfd04beaee..984a504d335c 100644
--- a/tools/perf/trace/beauty/beauty.h
+++ b/tools/perf/trace/beauty/beauty.h
@@ -79,6 +79,9 @@ size_t syscall_arg__scnprintf_fcntl_cmd(char *bf, size_t size, struct syscall_ar
size_t syscall_arg__scnprintf_fcntl_arg(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_FCNTL_ARG syscall_arg__scnprintf_fcntl_arg
+size_t syscall_arg__scnprintf_flock(char *bf, size_t size, struct syscall_arg *arg);
+#define SCA_FLOCK syscall_arg__scnprintf_flock
+
size_t syscall_arg__scnprintf_ioctl_cmd(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_IOCTL_CMD syscall_arg__scnprintf_ioctl_cmd
@@ -114,4 +117,6 @@ size_t open__scnprintf_flags(unsigned long flags, char *bf, size_t size);
void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg));
+const char *arch_syscalls__strerrno(const char *arch, int err);
+
#endif /* _PERF_TRACE_BEAUTY_H */
diff --git a/tools/perf/trace/beauty/flock.c b/tools/perf/trace/beauty/flock.c
index f9707f57566c..c4ff6ad30b06 100644
--- a/tools/perf/trace/beauty/flock.c
+++ b/tools/perf/trace/beauty/flock.c
@@ -1,5 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-#include <fcntl.h>
+
+#include "trace/beauty/beauty.h"
+#include <linux/kernel.h>
+#include <uapi/linux/fcntl.h>
#ifndef LOCK_MAND
#define LOCK_MAND 32
@@ -17,8 +20,7 @@
#define LOCK_RW 192
#endif
-static size_t syscall_arg__scnprintf_flock(char *bf, size_t size,
- struct syscall_arg *arg)
+size_t syscall_arg__scnprintf_flock(char *bf, size_t size, struct syscall_arg *arg)
{
int printed = 0, op = arg->val;
@@ -45,5 +47,3 @@ static size_t syscall_arg__scnprintf_flock(char *bf, size_t size,
return printed;
}
-
-#define SCA_FLOCK syscall_arg__scnprintf_flock
diff --git a/tools/perf/trace/beauty/futex_val3.c b/tools/perf/trace/beauty/futex_val3.c
new file mode 100644
index 000000000000..26f6b3253511
--- /dev/null
+++ b/tools/perf/trace/beauty/futex_val3.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/futex.h>
+
+#ifndef FUTEX_BITSET_MATCH_ANY
+#define FUTEX_BITSET_MATCH_ANY 0xffffffff
+#endif
+
+static size_t syscall_arg__scnprintf_futex_val3(char *bf, size_t size, struct syscall_arg *arg)
+{
+ unsigned int bitset = arg->val;
+
+ if (bitset == FUTEX_BITSET_MATCH_ANY)
+ return scnprintf(bf, size, "MATCH_ANY");
+
+ return scnprintf(bf, size, "%#xd", bitset);
+}
+
+#define SCA_FUTEX_VAL3 syscall_arg__scnprintf_futex_val3
diff --git a/tools/perf/trace/beauty/mmap.c b/tools/perf/trace/beauty/mmap.c
index 9e1668b2c5d7..417e3ecfe9d7 100644
--- a/tools/perf/trace/beauty/mmap.c
+++ b/tools/perf/trace/beauty/mmap.c
@@ -62,6 +62,9 @@ static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
P_MMAP_FLAG(POPULATE);
P_MMAP_FLAG(STACK);
P_MMAP_FLAG(UNINITIALIZED);
+#ifdef MAP_SYNC
+ P_MMAP_FLAG(SYNC);
+#endif
#undef P_MMAP_FLAG
if (flags)
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 8f7f59d1a2b5..286427975112 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -25,16 +25,10 @@ struct disasm_line_samples {
#define IPC_WIDTH 6
#define CYCLES_WIDTH 6
-struct browser_disasm_line {
- struct rb_node rb_node;
- u32 idx;
- int idx_asm;
- int jump_sources;
- /*
- * actual length of this array is saved on the nr_events field
- * of the struct annotate_browser
- */
- struct disasm_line_samples samples[1];
+struct browser_line {
+ u32 idx;
+ int idx_asm;
+ int jump_sources;
};
static struct annotate_browser_opt {
@@ -53,39 +47,43 @@ static struct annotate_browser_opt {
struct arch;
struct annotate_browser {
- struct ui_browser b;
- struct rb_root entries;
- struct rb_node *curr_hot;
- struct disasm_line *selection;
- struct disasm_line **offsets;
- struct arch *arch;
- int nr_events;
- u64 start;
- int nr_asm_entries;
- int nr_entries;
- int max_jump_sources;
- int nr_jumps;
- bool searching_backwards;
- bool have_cycles;
- u8 addr_width;
- u8 jumps_width;
- u8 target_width;
- u8 min_addr_width;
- u8 max_addr_width;
- char search_bf[128];
+ struct ui_browser b;
+ struct rb_root entries;
+ struct rb_node *curr_hot;
+ struct annotation_line *selection;
+ struct annotation_line **offsets;
+ struct arch *arch;
+ int nr_events;
+ u64 start;
+ int nr_asm_entries;
+ int nr_entries;
+ int max_jump_sources;
+ int nr_jumps;
+ bool searching_backwards;
+ bool have_cycles;
+ u8 addr_width;
+ u8 jumps_width;
+ u8 target_width;
+ u8 min_addr_width;
+ u8 max_addr_width;
+ char search_bf[128];
};
-static inline struct browser_disasm_line *disasm_line__browser(struct disasm_line *dl)
+static inline struct browser_line *browser_line(struct annotation_line *al)
{
- return (struct browser_disasm_line *)(dl + 1);
+ void *ptr = al;
+
+ ptr = container_of(al, struct disasm_line, al);
+ return ptr - sizeof(struct browser_line);
}
static bool disasm_line__filter(struct ui_browser *browser __maybe_unused,
void *entry)
{
if (annotate_browser__opts.hide_src_code) {
- struct disasm_line *dl = list_entry(entry, struct disasm_line, node);
- return dl->offset == -1;
+ struct annotation_line *al = list_entry(entry, struct annotation_line, node);
+
+ return al->offset == -1;
}
return false;
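
The browser_line() helper above assumes a specific memory layout: a struct browser_line sits immediately in front of each disasm_line in a single allocation, so it can be recovered by subtracting sizeof(struct browser_line) from the line pointer. A stripped-down sketch of that prefix-allocation idiom (types simplified; the real code sizes the prefix through the annotation allocation path):

#include <stdlib.h>

struct browser_line {
	unsigned int	idx;
	int		idx_asm;
	int		jump_sources;
};

struct disasm_line {
	int		offset;
	/* ... the rest of the per-line annotation state ... */
};

/* One allocation: [ struct browser_line ][ struct disasm_line ] */
static struct disasm_line *disasm_line__new(void)
{
	void *mem = calloc(1, sizeof(struct browser_line) + sizeof(struct disasm_line));

	return mem ? (struct disasm_line *)((char *)mem + sizeof(struct browser_line)) : NULL;
}

static struct browser_line *browser_line(struct disasm_line *dl)
{
	return (struct browser_line *)((char *)dl - sizeof(struct browser_line));
}

static void disasm_line__delete(struct disasm_line *dl)
{
	if (dl)
		free(browser_line(dl));	/* free from the true start of the allocation */
}

int main(void)
{
	struct disasm_line *dl = disasm_line__new();

	if (dl) {
		browser_line(dl)->idx = 42;
		disasm_line__delete(dl);
	}
	return 0;
}
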
@@ -120,11 +118,37 @@ static int annotate_browser__cycles_width(struct annotate_browser *ab)
return ab->have_cycles ? IPC_WIDTH + CYCLES_WIDTH : 0;
}
+static void disasm_line__write(struct disasm_line *dl, struct ui_browser *browser,
+ char *bf, size_t size)
+{
+ if (dl->ins.ops && dl->ins.ops->scnprintf) {
+ if (ins__is_jump(&dl->ins)) {
+ bool fwd = dl->ops.target.offset > dl->al.offset;
+
+ ui_browser__write_graph(browser, fwd ? SLSMG_DARROW_CHAR :
+ SLSMG_UARROW_CHAR);
+ SLsmg_write_char(' ');
+ } else if (ins__is_call(&dl->ins)) {
+ ui_browser__write_graph(browser, SLSMG_RARROW_CHAR);
+ SLsmg_write_char(' ');
+ } else if (ins__is_ret(&dl->ins)) {
+ ui_browser__write_graph(browser, SLSMG_LARROW_CHAR);
+ SLsmg_write_char(' ');
+ } else {
+ ui_browser__write_nstring(browser, " ", 2);
+ }
+ } else {
+ ui_browser__write_nstring(browser, " ", 2);
+ }
+
+ disasm_line__scnprintf(dl, bf, size, !annotate_browser__opts.use_offset);
+}
+
static void annotate_browser__write(struct ui_browser *browser, void *entry, int row)
{
struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
- struct disasm_line *dl = list_entry(entry, struct disasm_line, node);
- struct browser_disasm_line *bdl = disasm_line__browser(dl);
+ struct annotation_line *al = list_entry(entry, struct annotation_line, node);
+ struct browser_line *bl = browser_line(al);
bool current_entry = ui_browser__is_current_entry(browser, row);
bool change_color = (!annotate_browser__opts.hide_src_code &&
(!current_entry || (browser->use_navkeypressed &&
@@ -137,32 +161,32 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
bool show_title = false;
for (i = 0; i < ab->nr_events; i++) {
- if (bdl->samples[i].percent > percent_max)
- percent_max = bdl->samples[i].percent;
+ if (al->samples[i].percent > percent_max)
+ percent_max = al->samples[i].percent;
}
- if ((row == 0) && (dl->offset == -1 || percent_max == 0.0)) {
+ if ((row == 0) && (al->offset == -1 || percent_max == 0.0)) {
if (ab->have_cycles) {
- if (dl->ipc == 0.0 && dl->cycles == 0)
+ if (al->ipc == 0.0 && al->cycles == 0)
show_title = true;
} else
show_title = true;
}
- if (dl->offset != -1 && percent_max != 0.0) {
+ if (al->offset != -1 && percent_max != 0.0) {
for (i = 0; i < ab->nr_events; i++) {
ui_browser__set_percent_color(browser,
- bdl->samples[i].percent,
+ al->samples[i].percent,
current_entry);
if (annotate_browser__opts.show_total_period) {
ui_browser__printf(browser, "%11" PRIu64 " ",
- bdl->samples[i].he.period);
+ al->samples[i].he.period);
} else if (annotate_browser__opts.show_nr_samples) {
ui_browser__printf(browser, "%6" PRIu64 " ",
- bdl->samples[i].he.nr_samples);
+ al->samples[i].he.nr_samples);
} else {
ui_browser__printf(browser, "%6.2f ",
- bdl->samples[i].percent);
+ al->samples[i].percent);
}
}
} else {
@@ -177,16 +201,16 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
}
}
if (ab->have_cycles) {
- if (dl->ipc)
- ui_browser__printf(browser, "%*.2f ", IPC_WIDTH - 1, dl->ipc);
+ if (al->ipc)
+ ui_browser__printf(browser, "%*.2f ", IPC_WIDTH - 1, al->ipc);
else if (!show_title)
ui_browser__write_nstring(browser, " ", IPC_WIDTH);
else
ui_browser__printf(browser, "%*s ", IPC_WIDTH - 1, "IPC");
- if (dl->cycles)
+ if (al->cycles)
ui_browser__printf(browser, "%*" PRIu64 " ",
- CYCLES_WIDTH - 1, dl->cycles);
+ CYCLES_WIDTH - 1, al->cycles);
else if (!show_title)
ui_browser__write_nstring(browser, " ", CYCLES_WIDTH);
else
@@ -199,19 +223,19 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
if (!browser->navkeypressed)
width += 1;
- if (!*dl->line)
+ if (!*al->line)
ui_browser__write_nstring(browser, " ", width - pcnt_width - cycles_width);
- else if (dl->offset == -1) {
- if (dl->line_nr && annotate_browser__opts.show_linenr)
+ else if (al->offset == -1) {
+ if (al->line_nr && annotate_browser__opts.show_linenr)
printed = scnprintf(bf, sizeof(bf), "%-*d ",
- ab->addr_width + 1, dl->line_nr);
+ ab->addr_width + 1, al->line_nr);
else
printed = scnprintf(bf, sizeof(bf), "%*s ",
ab->addr_width, " ");
ui_browser__write_nstring(browser, bf, printed);
- ui_browser__write_nstring(browser, dl->line, width - printed - pcnt_width - cycles_width + 1);
+ ui_browser__write_nstring(browser, al->line, width - printed - pcnt_width - cycles_width + 1);
} else {
- u64 addr = dl->offset;
+ u64 addr = al->offset;
int color = -1;
if (!annotate_browser__opts.use_offset)
@@ -220,13 +244,13 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
if (!annotate_browser__opts.use_offset) {
printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
} else {
- if (bdl->jump_sources) {
+ if (bl->jump_sources) {
if (annotate_browser__opts.show_nr_jumps) {
int prev;
printed = scnprintf(bf, sizeof(bf), "%*d ",
ab->jumps_width,
- bdl->jump_sources);
- prev = annotate_browser__set_jumps_percent_color(ab, bdl->jump_sources,
+ bl->jump_sources);
+ prev = annotate_browser__set_jumps_percent_color(ab, bl->jump_sources,
current_entry);
ui_browser__write_nstring(browser, bf, printed);
ui_browser__set_color(browser, prev);
@@ -245,32 +269,14 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
ui_browser__write_nstring(browser, bf, printed);
if (change_color)
ui_browser__set_color(browser, color);
- if (dl->ins.ops && dl->ins.ops->scnprintf) {
- if (ins__is_jump(&dl->ins)) {
- bool fwd = dl->ops.target.offset > dl->offset;
-
- ui_browser__write_graph(browser, fwd ? SLSMG_DARROW_CHAR :
- SLSMG_UARROW_CHAR);
- SLsmg_write_char(' ');
- } else if (ins__is_call(&dl->ins)) {
- ui_browser__write_graph(browser, SLSMG_RARROW_CHAR);
- SLsmg_write_char(' ');
- } else if (ins__is_ret(&dl->ins)) {
- ui_browser__write_graph(browser, SLSMG_LARROW_CHAR);
- SLsmg_write_char(' ');
- } else {
- ui_browser__write_nstring(browser, " ", 2);
- }
- } else {
- ui_browser__write_nstring(browser, " ", 2);
- }
- disasm_line__scnprintf(dl, bf, sizeof(bf), !annotate_browser__opts.use_offset);
+ disasm_line__write(disasm_line(al), browser, bf, sizeof(bf));
+
ui_browser__write_nstring(browser, bf, width - pcnt_width - cycles_width - 3 - printed);
}
if (current_entry)
- ab->selection = dl;
+ ab->selection = al;
}
static bool disasm_line__is_valid_jump(struct disasm_line *dl, struct symbol *sym)
@@ -286,7 +292,7 @@ static bool disasm_line__is_valid_jump(struct disasm_line *dl, struct symbol *sy
static bool is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
{
- struct disasm_line *pos = list_prev_entry(cursor, node);
+ struct disasm_line *pos = list_prev_entry(cursor, al.node);
const char *name;
if (!pos)
@@ -306,8 +312,9 @@ static bool is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
static void annotate_browser__draw_current_jump(struct ui_browser *browser)
{
struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
- struct disasm_line *cursor = ab->selection, *target;
- struct browser_disasm_line *btarget, *bcursor;
+ struct disasm_line *cursor = disasm_line(ab->selection);
+ struct annotation_line *target;
+ struct browser_line *btarget, *bcursor;
unsigned int from, to;
struct map_symbol *ms = ab->b.priv;
struct symbol *sym = ms->sym;
@@ -321,11 +328,9 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
return;
target = ab->offsets[cursor->ops.target.offset];
- if (!target)
- return;
- bcursor = disasm_line__browser(cursor);
- btarget = disasm_line__browser(target);
+ bcursor = browser_line(&cursor->al);
+ btarget = browser_line(target);
if (annotate_browser__opts.hide_src_code) {
from = bcursor->idx_asm;
@@ -361,12 +366,11 @@ static unsigned int annotate_browser__refresh(struct ui_browser *browser)
return ret;
}
-static int disasm__cmp(struct browser_disasm_line *a,
- struct browser_disasm_line *b, int nr_pcnt)
+static int disasm__cmp(struct annotation_line *a, struct annotation_line *b)
{
int i;
- for (i = 0; i < nr_pcnt; i++) {
+ for (i = 0; i < a->samples_nr; i++) {
if (a->samples[i].percent == b->samples[i].percent)
continue;
return a->samples[i].percent < b->samples[i].percent;
@@ -374,28 +378,27 @@ static int disasm__cmp(struct browser_disasm_line *a,
return 0;
}
-static void disasm_rb_tree__insert(struct rb_root *root, struct browser_disasm_line *bdl,
- int nr_events)
+static void disasm_rb_tree__insert(struct rb_root *root, struct annotation_line *al)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
- struct browser_disasm_line *l;
+ struct annotation_line *l;
while (*p != NULL) {
parent = *p;
- l = rb_entry(parent, struct browser_disasm_line, rb_node);
+ l = rb_entry(parent, struct annotation_line, rb_node);
- if (disasm__cmp(bdl, l, nr_events))
+ if (disasm__cmp(al, l))
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
- rb_link_node(&bdl->rb_node, parent, p);
- rb_insert_color(&bdl->rb_node, root);
+ rb_link_node(&al->rb_node, parent, p);
+ rb_insert_color(&al->rb_node, root);
}
static void annotate_browser__set_top(struct annotate_browser *browser,
- struct disasm_line *pos, u32 idx)
+ struct annotation_line *pos, u32 idx)
{
unsigned back;
@@ -404,7 +407,7 @@ static void annotate_browser__set_top(struct annotate_browser *browser,
browser->b.top_idx = browser->b.index = idx;
while (browser->b.top_idx != 0 && back != 0) {
- pos = list_entry(pos->node.prev, struct disasm_line, node);
+ pos = list_entry(pos->node.prev, struct annotation_line, node);
if (disasm_line__filter(&browser->b, &pos->node))
continue;
@@ -420,12 +423,13 @@ static void annotate_browser__set_top(struct annotate_browser *browser,
static void annotate_browser__set_rb_top(struct annotate_browser *browser,
struct rb_node *nd)
{
- struct browser_disasm_line *bpos;
- struct disasm_line *pos;
+ struct browser_line *bpos;
+ struct annotation_line *pos;
u32 idx;
- bpos = rb_entry(nd, struct browser_disasm_line, rb_node);
- pos = ((struct disasm_line *)bpos) - 1;
+ pos = rb_entry(nd, struct annotation_line, rb_node);
+ bpos = browser_line(pos);
+
idx = bpos->idx;
if (annotate_browser__opts.hide_src_code)
idx = bpos->idx_asm;
@@ -439,46 +443,35 @@ static void annotate_browser__calc_percent(struct annotate_browser *browser,
struct map_symbol *ms = browser->b.priv;
struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(sym);
- struct disasm_line *pos, *next;
- s64 len = symbol__size(sym);
+ struct disasm_line *pos;
browser->entries = RB_ROOT;
pthread_mutex_lock(&notes->lock);
- list_for_each_entry(pos, &notes->src->source, node) {
- struct browser_disasm_line *bpos = disasm_line__browser(pos);
- const char *path = NULL;
+ symbol__calc_percent(sym, evsel);
+
+ list_for_each_entry(pos, &notes->src->source, al.node) {
double max_percent = 0.0;
int i;
- if (pos->offset == -1) {
- RB_CLEAR_NODE(&bpos->rb_node);
+ if (pos->al.offset == -1) {
+ RB_CLEAR_NODE(&pos->al.rb_node);
continue;
}
- next = disasm__get_next_ip_line(&notes->src->source, pos);
-
- for (i = 0; i < browser->nr_events; i++) {
- struct sym_hist_entry sample;
-
- bpos->samples[i].percent = disasm__calc_percent(notes,
- evsel->idx + i,
- pos->offset,
- next ? next->offset : len,
- &path, &sample);
- bpos->samples[i].he = sample;
+ for (i = 0; i < pos->al.samples_nr; i++) {
+ struct annotation_data *sample = &pos->al.samples[i];
- if (max_percent < bpos->samples[i].percent)
- max_percent = bpos->samples[i].percent;
+ if (max_percent < sample->percent)
+ max_percent = sample->percent;
}
- if (max_percent < 0.01 && pos->ipc == 0) {
- RB_CLEAR_NODE(&bpos->rb_node);
+ if (max_percent < 0.01 && pos->al.ipc == 0) {
+ RB_CLEAR_NODE(&pos->al.rb_node);
continue;
}
- disasm_rb_tree__insert(&browser->entries, bpos,
- browser->nr_events);
+ disasm_rb_tree__insert(&browser->entries, &pos->al);
}
pthread_mutex_unlock(&notes->lock);
@@ -487,38 +480,38 @@ static void annotate_browser__calc_percent(struct annotate_browser *browser,
static bool annotate_browser__toggle_source(struct annotate_browser *browser)
{
- struct disasm_line *dl;
- struct browser_disasm_line *bdl;
+ struct annotation_line *al;
+ struct browser_line *bl;
off_t offset = browser->b.index - browser->b.top_idx;
browser->b.seek(&browser->b, offset, SEEK_CUR);
- dl = list_entry(browser->b.top, struct disasm_line, node);
- bdl = disasm_line__browser(dl);
+ al = list_entry(browser->b.top, struct annotation_line, node);
+ bl = browser_line(al);
if (annotate_browser__opts.hide_src_code) {
- if (bdl->idx_asm < offset)
- offset = bdl->idx;
+ if (bl->idx_asm < offset)
+ offset = bl->idx;
browser->b.nr_entries = browser->nr_entries;
annotate_browser__opts.hide_src_code = false;
browser->b.seek(&browser->b, -offset, SEEK_CUR);
- browser->b.top_idx = bdl->idx - offset;
- browser->b.index = bdl->idx;
+ browser->b.top_idx = bl->idx - offset;
+ browser->b.index = bl->idx;
} else {
- if (bdl->idx_asm < 0) {
+ if (bl->idx_asm < 0) {
ui_helpline__puts("Only available for assembly lines.");
browser->b.seek(&browser->b, -offset, SEEK_CUR);
return false;
}
- if (bdl->idx_asm < offset)
- offset = bdl->idx_asm;
+ if (bl->idx_asm < offset)
+ offset = bl->idx_asm;
browser->b.nr_entries = browser->nr_asm_entries;
annotate_browser__opts.hide_src_code = true;
browser->b.seek(&browser->b, -offset, SEEK_CUR);
- browser->b.top_idx = bdl->idx_asm - offset;
- browser->b.index = bdl->idx_asm;
+ browser->b.top_idx = bl->idx_asm - offset;
+ browser->b.index = bl->idx_asm;
}
return true;
@@ -543,7 +536,7 @@ static bool annotate_browser__callq(struct annotate_browser *browser,
struct hist_browser_timer *hbt)
{
struct map_symbol *ms = browser->b.priv;
- struct disasm_line *dl = browser->selection;
+ struct disasm_line *dl = disasm_line(browser->selection);
struct annotation *notes;
struct addr_map_symbol target = {
.map = ms->map,
@@ -589,10 +582,10 @@ struct disasm_line *annotate_browser__find_offset(struct annotate_browser *brows
struct disasm_line *pos;
*idx = 0;
- list_for_each_entry(pos, &notes->src->source, node) {
- if (pos->offset == offset)
+ list_for_each_entry(pos, &notes->src->source, al.node) {
+ if (pos->al.offset == offset)
return pos;
- if (!disasm_line__filter(&browser->b, &pos->node))
+ if (!disasm_line__filter(&browser->b, &pos->al.node))
++*idx;
}
@@ -601,7 +594,7 @@ struct disasm_line *annotate_browser__find_offset(struct annotate_browser *brows
static bool annotate_browser__jump(struct annotate_browser *browser)
{
- struct disasm_line *dl = browser->selection;
+ struct disasm_line *dl = disasm_line(browser->selection);
u64 offset;
s64 idx;
@@ -615,29 +608,29 @@ static bool annotate_browser__jump(struct annotate_browser *browser)
return true;
}
- annotate_browser__set_top(browser, dl, idx);
+ annotate_browser__set_top(browser, &dl->al, idx);
return true;
}
static
-struct disasm_line *annotate_browser__find_string(struct annotate_browser *browser,
+struct annotation_line *annotate_browser__find_string(struct annotate_browser *browser,
char *s, s64 *idx)
{
struct map_symbol *ms = browser->b.priv;
struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(sym);
- struct disasm_line *pos = browser->selection;
+ struct annotation_line *al = browser->selection;
*idx = browser->b.index;
- list_for_each_entry_continue(pos, &notes->src->source, node) {
- if (disasm_line__filter(&browser->b, &pos->node))
+ list_for_each_entry_continue(al, &notes->src->source, node) {
+ if (disasm_line__filter(&browser->b, &al->node))
continue;
++*idx;
- if (pos->line && strstr(pos->line, s) != NULL)
- return pos;
+ if (al->line && strstr(al->line, s) != NULL)
+ return al;
}
return NULL;
@@ -645,38 +638,38 @@ struct disasm_line *annotate_browser__find_string(struct annotate_browser *brows
static bool __annotate_browser__search(struct annotate_browser *browser)
{
- struct disasm_line *dl;
+ struct annotation_line *al;
s64 idx;
- dl = annotate_browser__find_string(browser, browser->search_bf, &idx);
- if (dl == NULL) {
+ al = annotate_browser__find_string(browser, browser->search_bf, &idx);
+ if (al == NULL) {
ui_helpline__puts("String not found!");
return false;
}
- annotate_browser__set_top(browser, dl, idx);
+ annotate_browser__set_top(browser, al, idx);
browser->searching_backwards = false;
return true;
}
static
-struct disasm_line *annotate_browser__find_string_reverse(struct annotate_browser *browser,
+struct annotation_line *annotate_browser__find_string_reverse(struct annotate_browser *browser,
char *s, s64 *idx)
{
struct map_symbol *ms = browser->b.priv;
struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(sym);
- struct disasm_line *pos = browser->selection;
+ struct annotation_line *al = browser->selection;
*idx = browser->b.index;
- list_for_each_entry_continue_reverse(pos, &notes->src->source, node) {
- if (disasm_line__filter(&browser->b, &pos->node))
+ list_for_each_entry_continue_reverse(al, &notes->src->source, node) {
+ if (disasm_line__filter(&browser->b, &al->node))
continue;
--*idx;
- if (pos->line && strstr(pos->line, s) != NULL)
- return pos;
+ if (al->line && strstr(al->line, s) != NULL)
+ return al;
}
return NULL;
@@ -684,16 +677,16 @@ struct disasm_line *annotate_browser__find_string_reverse(struct annotate_browse
static bool __annotate_browser__search_reverse(struct annotate_browser *browser)
{
- struct disasm_line *dl;
+ struct annotation_line *al;
s64 idx;
- dl = annotate_browser__find_string_reverse(browser, browser->search_bf, &idx);
- if (dl == NULL) {
+ al = annotate_browser__find_string_reverse(browser, browser->search_bf, &idx);
+ if (al == NULL) {
ui_helpline__puts("String not found!");
return false;
}
- annotate_browser__set_top(browser, dl, idx);
+ annotate_browser__set_top(browser, al, idx);
browser->searching_backwards = true;
return true;
}
@@ -899,13 +892,16 @@ show_help:
continue;
case K_ENTER:
case K_RIGHT:
+ {
+ struct disasm_line *dl = disasm_line(browser->selection);
+
if (browser->selection == NULL)
ui_helpline__puts("Huh? No selection. Report to linux-kernel@vger.kernel.org");
else if (browser->selection->offset == -1)
ui_helpline__puts("Actions are only available for assembly lines.");
- else if (!browser->selection->ins.ops)
+ else if (!dl->ins.ops)
goto show_sup_ins;
- else if (ins__is_ret(&browser->selection->ins))
+ else if (ins__is_ret(&dl->ins))
goto out;
else if (!(annotate_browser__jump(browser) ||
annotate_browser__callq(browser, evsel, hbt))) {
@@ -913,6 +909,7 @@ show_sup_ins:
ui_helpline__puts("Actions are only available for function call/return & jump/branch instructions.");
}
continue;
+ }
case 't':
if (annotate_browser__opts.show_total_period) {
annotate_browser__opts.show_total_period = false;
@@ -990,10 +987,10 @@ static void count_and_fill(struct annotate_browser *browser, u64 start, u64 end,
return;
for (offset = start; offset <= end; offset++) {
- struct disasm_line *dl = browser->offsets[offset];
+ struct annotation_line *al = browser->offsets[offset];
- if (dl)
- dl->ipc = ipc;
+ if (al)
+ al->ipc = ipc;
}
}
}
@@ -1018,13 +1015,13 @@ static void annotate__compute_ipc(struct annotate_browser *browser, size_t size,
ch = &notes->src->cycles_hist[offset];
if (ch && ch->cycles) {
- struct disasm_line *dl;
+ struct annotation_line *al;
if (ch->have_start)
count_and_fill(browser, ch->start, offset, ch);
- dl = browser->offsets[offset];
- if (dl && ch->num_aggr)
- dl->cycles = ch->cycles_aggr / ch->num_aggr;
+ al = browser->offsets[offset];
+ if (al && ch->num_aggr)
+ al->cycles = ch->cycles_aggr / ch->num_aggr;
browser->have_cycles = true;
}
}
@@ -1043,23 +1040,27 @@ static void annotate_browser__mark_jump_targets(struct annotate_browser *browser
return;
for (offset = 0; offset < size; ++offset) {
- struct disasm_line *dl = browser->offsets[offset], *dlt;
- struct browser_disasm_line *bdlt;
+ struct annotation_line *al = browser->offsets[offset];
+ struct disasm_line *dl;
+ struct browser_line *blt;
+
+ dl = disasm_line(al);
if (!disasm_line__is_valid_jump(dl, sym))
continue;
- dlt = browser->offsets[dl->ops.target.offset];
+ al = browser->offsets[dl->ops.target.offset];
+
/*
* FIXME: Oops, no jump target? Buggy disassembler? Or do we
* have to adjust to the previous offset?
*/
- if (dlt == NULL)
+ if (al == NULL)
continue;
- bdlt = disasm_line__browser(dlt);
- if (++bdlt->jump_sources > browser->max_jump_sources)
- browser->max_jump_sources = bdlt->jump_sources;
+ blt = browser_line(al);
+ if (++blt->jump_sources > browser->max_jump_sources)
+ browser->max_jump_sources = blt->jump_sources;
++browser->nr_jumps;
}
@@ -1078,7 +1079,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
struct perf_evsel *evsel,
struct hist_browser_timer *hbt)
{
- struct disasm_line *pos, *n;
+ struct annotation_line *al;
struct annotation *notes;
size_t size;
struct map_symbol ms = {
@@ -1097,7 +1098,6 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
};
int ret = -1, err;
int nr_pcnt = 1;
- size_t sizeof_bdl = sizeof(struct browser_disasm_line);
if (sym == NULL)
return -1;
@@ -1107,21 +1107,16 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
if (map->dso->annotate_warned)
return -1;
- browser.offsets = zalloc(size * sizeof(struct disasm_line *));
+ browser.offsets = zalloc(size * sizeof(struct annotation_line *));
if (browser.offsets == NULL) {
ui__error("Not enough memory!");
return -1;
}
- if (perf_evsel__is_group_event(evsel)) {
+ if (perf_evsel__is_group_event(evsel))
nr_pcnt = evsel->nr_members;
- sizeof_bdl += sizeof(struct disasm_line_samples) *
- (nr_pcnt - 1);
- }
- err = symbol__disassemble(sym, map, perf_evsel__env_arch(evsel),
- sizeof_bdl, &browser.arch,
- perf_evsel__env_cpuid(evsel));
+ err = symbol__annotate(sym, map, evsel, sizeof(struct browser_line), &browser.arch);
if (err) {
char msg[BUFSIZ];
symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));
@@ -1129,20 +1124,22 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
goto out_free_offsets;
}
+ symbol__calc_percent(sym, evsel);
+
ui_helpline__push("Press ESC to exit");
notes = symbol__annotation(sym);
browser.start = map__rip_2objdump(map, sym->start);
- list_for_each_entry(pos, &notes->src->source, node) {
- struct browser_disasm_line *bpos;
- size_t line_len = strlen(pos->line);
+ list_for_each_entry(al, &notes->src->source, node) {
+ struct browser_line *bpos;
+ size_t line_len = strlen(al->line);
if (browser.b.width < line_len)
browser.b.width = line_len;
- bpos = disasm_line__browser(pos);
+ bpos = browser_line(al);
bpos->idx = browser.nr_entries++;
- if (pos->offset != -1) {
+ if (al->offset != -1) {
bpos->idx_asm = browser.nr_asm_entries++;
/*
* FIXME: short term bandaid to cope with assembly
@@ -1151,8 +1148,8 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
*
* E.g. copy_user_generic_unrolled
*/
- if (pos->offset < (s64)size)
- browser.offsets[pos->offset] = pos;
+ if (al->offset < (s64)size)
+ browser.offsets[al->offset] = al;
} else
bpos->idx_asm = -1;
}
@@ -1174,10 +1171,8 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
annotate_browser__update_addr_width(&browser);
ret = annotate_browser__run(&browser, evsel, hbt);
- list_for_each_entry_safe(pos, n, &notes->src->source, node) {
- list_del(&pos->node);
- disasm_line__free(pos);
- }
+
+ annotated_source__purge(notes->src);
out_free_offsets:
free(browser.offsets);
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 68146f4620a5..6495ee55d9c3 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -608,7 +608,8 @@ static int hist_browser__title(struct hist_browser *browser, char *bf, size_t si
return browser->title ? browser->title(browser, bf, size) : 0;
}
-int hist_browser__run(struct hist_browser *browser, const char *help)
+int hist_browser__run(struct hist_browser *browser, const char *help,
+ bool warn_lost_event)
{
int key;
char title[160];
@@ -638,8 +639,9 @@ int hist_browser__run(struct hist_browser *browser, const char *help)
nr_entries = hist_browser__nr_entries(browser);
ui_browser__update_nr_entries(&browser->b, nr_entries);
- if (browser->hists->stats.nr_lost_warned !=
- browser->hists->stats.nr_events[PERF_RECORD_LOST]) {
+ if (warn_lost_event &&
+ (browser->hists->stats.nr_lost_warned !=
+ browser->hists->stats.nr_events[PERF_RECORD_LOST])) {
browser->hists->stats.nr_lost_warned =
browser->hists->stats.nr_events[PERF_RECORD_LOST];
ui_browser__warn_lost_events(&browser->b);
@@ -2763,7 +2765,8 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
bool left_exits,
struct hist_browser_timer *hbt,
float min_pcnt,
- struct perf_env *env)
+ struct perf_env *env,
+ bool warn_lost_event)
{
struct hists *hists = evsel__hists(evsel);
struct hist_browser *browser = perf_evsel_browser__new(evsel, hbt, env);
@@ -2844,7 +2847,8 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
nr_options = 0;
- key = hist_browser__run(browser, helpline);
+ key = hist_browser__run(browser, helpline,
+ warn_lost_event);
if (browser->he_selection != NULL) {
thread = hist_browser__selected_thread(browser);
@@ -3184,7 +3188,8 @@ static void perf_evsel_menu__write(struct ui_browser *browser,
static int perf_evsel_menu__run(struct perf_evsel_menu *menu,
int nr_events, const char *help,
- struct hist_browser_timer *hbt)
+ struct hist_browser_timer *hbt,
+ bool warn_lost_event)
{
struct perf_evlist *evlist = menu->b.priv;
struct perf_evsel *pos;
@@ -3203,7 +3208,9 @@ static int perf_evsel_menu__run(struct perf_evsel_menu *menu,
case K_TIMER:
hbt->timer(hbt->arg);
- if (!menu->lost_events_warned && menu->lost_events) {
+ if (!menu->lost_events_warned &&
+ menu->lost_events &&
+ warn_lost_event) {
ui_browser__warn_lost_events(&menu->b);
menu->lost_events_warned = true;
}
@@ -3224,7 +3231,8 @@ browse_hists:
key = perf_evsel__hists_browse(pos, nr_events, help,
true, hbt,
menu->min_pcnt,
- menu->env);
+ menu->env,
+ warn_lost_event);
ui_browser__show_title(&menu->b, title);
switch (key) {
case K_TAB:
@@ -3282,7 +3290,8 @@ static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist,
int nr_entries, const char *help,
struct hist_browser_timer *hbt,
float min_pcnt,
- struct perf_env *env)
+ struct perf_env *env,
+ bool warn_lost_event)
{
struct perf_evsel *pos;
struct perf_evsel_menu menu = {
@@ -3309,13 +3318,15 @@ static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist,
menu.b.width = line_len;
}
- return perf_evsel_menu__run(&menu, nr_entries, help, hbt);
+ return perf_evsel_menu__run(&menu, nr_entries, help,
+ hbt, warn_lost_event);
}
int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
struct hist_browser_timer *hbt,
float min_pcnt,
- struct perf_env *env)
+ struct perf_env *env,
+ bool warn_lost_event)
{
int nr_entries = evlist->nr_entries;
@@ -3325,7 +3336,7 @@ single_entry:
return perf_evsel__hists_browse(first, nr_entries, help,
false, hbt, min_pcnt,
- env);
+ env, warn_lost_event);
}
if (symbol_conf.event_group) {
@@ -3342,5 +3353,6 @@ single_entry:
}
return __perf_evlist__tui_browse_hists(evlist, nr_entries, help,
- hbt, min_pcnt, env);
+ hbt, min_pcnt, env,
+ warn_lost_event);
}
diff --git a/tools/perf/ui/browsers/hists.h b/tools/perf/ui/browsers/hists.h
index ba431777f559..9428bee076f2 100644
--- a/tools/perf/ui/browsers/hists.h
+++ b/tools/perf/ui/browsers/hists.h
@@ -28,7 +28,8 @@ struct hist_browser {
struct hist_browser *hist_browser__new(struct hists *hists);
void hist_browser__delete(struct hist_browser *browser);
-int hist_browser__run(struct hist_browser *browser, const char *help);
+int hist_browser__run(struct hist_browser *browser, const char *help,
+ bool warn_lost_event);
void hist_browser__init(struct hist_browser *browser,
struct hists *hists);
#endif /* _PERF_UI_BROWSER_HISTS_H_ */
diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c
index fc7a2e105bfd..aeeaf15029f0 100644
--- a/tools/perf/ui/gtk/annotate.c
+++ b/tools/perf/ui/gtk/annotate.c
@@ -31,14 +31,14 @@ static int perf_gtk__get_percent(char *buf, size_t size, struct symbol *sym,
strcpy(buf, "");
- if (dl->offset == (s64) -1)
+ if (dl->al.offset == (s64) -1)
return 0;
symhist = annotation__histogram(symbol__annotation(sym), evidx);
- if (!symbol_conf.event_group && !symhist->addr[dl->offset].nr_samples)
+ if (!symbol_conf.event_group && !symhist->addr[dl->al.offset].nr_samples)
return 0;
- percent = 100.0 * symhist->addr[dl->offset].nr_samples / symhist->nr_samples;
+ percent = 100.0 * symhist->addr[dl->al.offset].nr_samples / symhist->nr_samples;
markup = perf_gtk__get_percent_color(percent);
if (markup)
@@ -57,16 +57,16 @@ static int perf_gtk__get_offset(char *buf, size_t size, struct symbol *sym,
strcpy(buf, "");
- if (dl->offset == (s64) -1)
+ if (dl->al.offset == (s64) -1)
return 0;
- return scnprintf(buf, size, "%"PRIx64, start + dl->offset);
+ return scnprintf(buf, size, "%"PRIx64, start + dl->al.offset);
}
static int perf_gtk__get_line(char *buf, size_t size, struct disasm_line *dl)
{
int ret = 0;
- char *line = g_markup_escape_text(dl->line, -1);
+ char *line = g_markup_escape_text(dl->al.line, -1);
const char *markup = "<span fgcolor='gray'>";
strcpy(buf, "");
@@ -74,7 +74,7 @@ static int perf_gtk__get_line(char *buf, size_t size, struct disasm_line *dl)
if (!line)
return 0;
- if (dl->offset != (s64) -1)
+ if (dl->al.offset != (s64) -1)
markup = NULL;
if (markup)
@@ -119,7 +119,7 @@ static int perf_gtk__annotate_symbol(GtkWidget *window, struct symbol *sym,
gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store));
g_object_unref(GTK_TREE_MODEL(store));
- list_for_each_entry(pos, &notes->src->source, node) {
+ list_for_each_entry(pos, &notes->src->source, al.node) {
GtkTreeIter iter;
int ret = 0;
@@ -148,8 +148,8 @@ static int perf_gtk__annotate_symbol(GtkWidget *window, struct symbol *sym,
gtk_container_add(GTK_CONTAINER(window), view);
- list_for_each_entry_safe(pos, n, &notes->src->source, node) {
- list_del(&pos->node);
+ list_for_each_entry_safe(pos, n, &notes->src->source, al.node) {
+ list_del(&pos->al.node);
disasm_line__free(pos);
}
@@ -169,8 +169,7 @@ static int symbol__gtk_annotate(struct symbol *sym, struct map *map,
if (map->dso->annotate_warned)
return -1;
- err = symbol__disassemble(sym, map, perf_evsel__env_arch(evsel),
- 0, NULL, NULL);
+ err = symbol__annotate(sym, map, evsel, 0, NULL);
if (err) {
char msg[BUFSIZ];
symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));
@@ -178,6 +177,8 @@ static int symbol__gtk_annotate(struct symbol *sym, struct map *map,
return -1;
}
+ symbol__calc_percent(sym, evsel);
+
if (perf_gtk__is_active_context(pgctx)) {
window = pgctx->main_window;
notebook = pgctx->notebook;
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index a3de7916fe63..ea0a452550b0 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -44,7 +44,7 @@ libperf-y += machine.o
libperf-y += map.o
libperf-y += pstack.o
libperf-y += session.o
-libperf-$(CONFIG_AUDIT) += syscalltbl.o
+libperf-$(CONFIG_TRACE) += syscalltbl.o
libperf-y += ordered-events.o
libperf-y += namespaces.o
libperf-y += comm.o
@@ -86,6 +86,14 @@ libperf-$(CONFIG_AUXTRACE) += auxtrace.o
libperf-$(CONFIG_AUXTRACE) += intel-pt-decoder/
libperf-$(CONFIG_AUXTRACE) += intel-pt.o
libperf-$(CONFIG_AUXTRACE) += intel-bts.o
+libperf-$(CONFIG_AUXTRACE) += arm-spe.o
+libperf-$(CONFIG_AUXTRACE) += arm-spe-pkt-decoder.o
+
+ifdef CONFIG_LIBOPENCSD
+libperf-$(CONFIG_AUXTRACE) += cs-etm.o
+libperf-$(CONFIG_AUXTRACE) += cs-etm-decoder/
+endif
+
libperf-y += parse-branch-options.o
libperf-y += dump-insn.o
libperf-y += parse-regs-options.o
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index da1c4c4a0dd8..28b233c3dcbe 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -26,7 +26,6 @@
#include <pthread.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
-#include <sys/utsname.h>
#include "sane_ctype.h"
@@ -165,7 +164,7 @@ static void ins__delete(struct ins_operands *ops)
static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops)
{
- return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->raw);
+ return scnprintf(bf, size, "%-6s %s", ins->name, ops->raw);
}
int ins__scnprintf(struct ins *ins, char *bf, size_t size,
@@ -230,12 +229,12 @@ static int call__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops)
{
if (ops->target.name)
- return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->target.name);
+ return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.name);
if (ops->target.addr == 0)
return ins__raw_scnprintf(ins, bf, size, ops);
- return scnprintf(bf, size, "%-6.6s *%" PRIx64, ins->name, ops->target.addr);
+ return scnprintf(bf, size, "%-6s *%" PRIx64, ins->name, ops->target.addr);
}
static struct ins_ops call_ops = {
@@ -299,7 +298,7 @@ static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
c++;
}
- return scnprintf(bf, size, "%-6.6s %.*s%" PRIx64,
+ return scnprintf(bf, size, "%-6s %.*s%" PRIx64,
ins->name, c ? c - ops->raw : 0, ops->raw,
ops->target.offset);
}
@@ -322,6 +321,8 @@ static int comment__symbol(char *raw, char *comment, u64 *addrp, char **namep)
return 0;
*addrp = strtoull(comment, &endptr, 16);
+ if (endptr == comment)
+ return 0;
name = strchr(endptr, '<');
if (name == NULL)
return -1;
@@ -372,7 +373,7 @@ static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
if (ops->locked.ins.ops == NULL)
return ins__raw_scnprintf(ins, bf, size, ops);
- printed = scnprintf(bf, size, "%-6.6s ", ins->name);
+ printed = scnprintf(bf, size, "%-6s ", ins->name);
return printed + ins__scnprintf(&ops->locked.ins, bf + printed,
size - printed, ops->locked.ops);
}
@@ -435,8 +436,8 @@ static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map *m
return 0;
comment = ltrim(comment);
- comment__symbol(ops->source.raw, comment, &ops->source.addr, &ops->source.name);
- comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name);
+ comment__symbol(ops->source.raw, comment + 1, &ops->source.addr, &ops->source.name);
+ comment__symbol(ops->target.raw, comment + 1, &ops->target.addr, &ops->target.name);
return 0;
@@ -448,7 +449,7 @@ out_free_source:
static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops)
{
- return scnprintf(bf, size, "%-6.6s %s,%s", ins->name,
+ return scnprintf(bf, size, "%-6s %s,%s", ins->name,
ops->source.name ?: ops->source.raw,
ops->target.name ?: ops->target.raw);
}
@@ -480,7 +481,7 @@ static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops
return 0;
comment = ltrim(comment);
- comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name);
+ comment__symbol(ops->target.raw, comment + 1, &ops->target.addr, &ops->target.name);
return 0;
}
@@ -488,7 +489,7 @@ static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops
static int dec__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops)
{
- return scnprintf(bf, size, "%-6.6s %s", ins->name,
+ return scnprintf(bf, size, "%-6s %s", ins->name,
ops->target.name ?: ops->target.raw);
}
@@ -500,7 +501,7 @@ static struct ins_ops dec_ops = {
static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size,
struct ins_operands *ops __maybe_unused)
{
- return scnprintf(bf, size, "%-6.6s", "nop");
+ return scnprintf(bf, size, "%-6s", "nop");
}
static struct ins_ops nop_ops = {
@@ -878,32 +879,99 @@ out_free_name:
return -1;
}
-static struct disasm_line *disasm_line__new(s64 offset, char *line,
- size_t privsize, int line_nr,
- struct arch *arch,
- struct map *map)
+struct annotate_args {
+ size_t privsize;
+ struct arch *arch;
+ struct map *map;
+ struct perf_evsel *evsel;
+ s64 offset;
+ char *line;
+ int line_nr;
+};
+
+static void annotation_line__delete(struct annotation_line *al)
{
- struct disasm_line *dl = zalloc(sizeof(*dl) + privsize);
+ void *ptr = (void *) al - al->privsize;
+
+ free_srcline(al->path);
+ zfree(&al->line);
+ free(ptr);
+}
+
+/*
+ * Allocating the annotation line data with the following
+ * structure:
+ *
+ * --------------------------------------
+ * private space | struct annotation_line
+ * --------------------------------------
+ *
+ * Size of the private space is stored in 'struct annotation_line'.
+ *
+ */
+static struct annotation_line *
+annotation_line__new(struct annotate_args *args, size_t privsize)
+{
+ struct annotation_line *al;
+ struct perf_evsel *evsel = args->evsel;
+ size_t size = privsize + sizeof(*al);
+ int nr = 1;
+
+ if (perf_evsel__is_group_event(evsel))
+ nr = evsel->nr_members;
+
+ size += sizeof(al->samples[0]) * nr;
+
+ al = zalloc(size);
+ if (al) {
+ al = (void *) al + privsize;
+ al->privsize = privsize;
+ al->offset = args->offset;
+ al->line = strdup(args->line);
+ al->line_nr = args->line_nr;
+ al->samples_nr = nr;
+ }
+
+ return al;
+}
+
+/*
+ * Allocating the disasm annotation line data with the
+ * following structure:
+ *
+ * ------------------------------------------------------------
+ * privsize space | struct disasm_line | struct annotation_line
+ * ------------------------------------------------------------
+ *
+ * The 'struct annotation_line' member is placed last in
+ * 'struct disasm_line' so the enclosing structure is easy
+ * to reach from a 'struct annotation_line' pointer.
+ *
+ */
+static struct disasm_line *disasm_line__new(struct annotate_args *args)
+{
+ struct disasm_line *dl = NULL;
+ struct annotation_line *al;
+ size_t privsize = args->privsize + offsetof(struct disasm_line, al);
+
+ al = annotation_line__new(args, privsize);
+ if (al != NULL) {
+ dl = disasm_line(al);
- if (dl != NULL) {
- dl->offset = offset;
- dl->line = strdup(line);
- dl->line_nr = line_nr;
- if (dl->line == NULL)
+ if (dl->al.line == NULL)
goto out_delete;
- if (offset != -1) {
- if (disasm_line__parse(dl->line, &dl->ins.name, &dl->ops.raw) < 0)
+ if (args->offset != -1) {
+ if (disasm_line__parse(dl->al.line, &dl->ins.name, &dl->ops.raw) < 0)
goto out_free_line;
- disasm_line__init_ins(dl, arch, map);
+ disasm_line__init_ins(dl, args->arch, args->map);
}
}
return dl;
out_free_line:
- zfree(&dl->line);
+ zfree(&dl->al.line);
out_delete:
free(dl);
return NULL;
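/*
 * Editor's illustrative sketch (not part of this patch): it mimics the
 * split allocation done by annotation_line__new()/disasm_line__new()
 * above with simplified stand-in types, showing how the returned
 * 'annotation_line' pointer maps back to the enclosing 'disasm_line'
 * via container_of().  All names and sizes below are invented.
 */
#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>

struct toy_annotation_line { long offset; size_t privsize; };
struct toy_disasm_line { const char *ins_name; struct toy_annotation_line al; };

#define toy_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	size_t caller_priv = 16;	/* e.g. the TUI browser_line area */
	size_t privsize = caller_priv + offsetof(struct toy_disasm_line, al);
	char *blob = calloc(1, privsize + sizeof(struct toy_annotation_line));
	struct toy_annotation_line *al;
	struct toy_disasm_line *dl;

	if (!blob)
		return 1;

	al = (void *)(blob + privsize);
	al->privsize = privsize;	/* remembered for freeing */
	al->offset = 0x40;

	/* same recovery the disasm_line() helper in annotate.h performs */
	dl = toy_container_of(al, struct toy_disasm_line, al);
	printf("offset %ld, dl at blob+%ld\n",
	       dl->al.offset, (long)((char *)dl - blob));

	free((char *)al - al->privsize);	/* cf. annotation_line__delete() */
	return 0;
}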
@@ -911,30 +979,30 @@ out_delete:
void disasm_line__free(struct disasm_line *dl)
{
- zfree(&dl->line);
if (dl->ins.ops && dl->ins.ops->free)
dl->ins.ops->free(&dl->ops);
else
ins__delete(&dl->ops);
free((void *)dl->ins.name);
dl->ins.name = NULL;
- free(dl);
+ annotation_line__delete(&dl->al);
}
int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw)
{
if (raw || !dl->ins.ops)
- return scnprintf(bf, size, "%-6.6s %s", dl->ins.name, dl->ops.raw);
+ return scnprintf(bf, size, "%-6s %s", dl->ins.name, dl->ops.raw);
return ins__scnprintf(&dl->ins, bf, size, &dl->ops);
}
-static void disasm__add(struct list_head *head, struct disasm_line *line)
+static void annotation_line__add(struct annotation_line *al, struct list_head *head)
{
- list_add_tail(&line->node, head);
+ list_add_tail(&al->node, head);
}
-struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disasm_line *pos)
+struct annotation_line *
+annotation_line__next(struct annotation_line *pos, struct list_head *head)
{
list_for_each_entry_continue(pos, head, node)
if (pos->offset >= 0)
@@ -943,50 +1011,6 @@ struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disa
return NULL;
}
-double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset,
- s64 end, const char **path, struct sym_hist_entry *sample)
-{
- struct source_line *src_line = notes->src->lines;
- double percent = 0.0;
-
- sample->nr_samples = sample->period = 0;
-
- if (src_line) {
- size_t sizeof_src_line = sizeof(*src_line) +
- sizeof(src_line->samples) * (src_line->nr_pcnt - 1);
-
- while (offset < end) {
- src_line = (void *)notes->src->lines +
- (sizeof_src_line * offset);
-
- if (*path == NULL)
- *path = src_line->path;
-
- percent += src_line->samples[evidx].percent;
- sample->nr_samples += src_line->samples[evidx].nr;
- offset++;
- }
- } else {
- struct sym_hist *h = annotation__histogram(notes, evidx);
- unsigned int hits = 0;
- u64 period = 0;
-
- while (offset < end) {
- hits += h->addr[offset].nr_samples;
- period += h->addr[offset].period;
- ++offset;
- }
-
- if (h->nr_samples) {
- sample->period = period;
- sample->nr_samples = hits;
- percent = 100.0 * hits / h->nr_samples;
- }
- }
-
- return percent;
-}
-
static const char *annotate__address_color(struct block_range *br)
{
double cov = block_range__coverage(br);
@@ -1069,50 +1093,39 @@ static void annotate__branch_printf(struct block_range *br, u64 addr)
}
}
+static int disasm_line__print(struct disasm_line *dl, u64 start, int addr_fmt_width)
+{
+ s64 offset = dl->al.offset;
+ const u64 addr = start + offset;
+ struct block_range *br;
+
+ br = block_range__find(addr);
+ color_fprintf(stdout, annotate__address_color(br), " %*" PRIx64 ":", addr_fmt_width, addr);
+ color_fprintf(stdout, annotate__asm_color(br), "%s", dl->al.line);
+ annotate__branch_printf(br, addr);
+ return 0;
+}
-static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 start,
- struct perf_evsel *evsel, u64 len, int min_pcnt, int printed,
- int max_lines, struct disasm_line *queue)
+static int
+annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start,
+ struct perf_evsel *evsel, u64 len, int min_pcnt, int printed,
+ int max_lines, struct annotation_line *queue, int addr_fmt_width)
{
+ struct disasm_line *dl = container_of(al, struct disasm_line, al);
static const char *prev_line;
static const char *prev_color;
- if (dl->offset != -1) {
- const char *path = NULL;
- double percent, max_percent = 0.0;
- double *ppercents = &percent;
- struct sym_hist_entry sample;
- struct sym_hist_entry *psamples = &sample;
+ if (al->offset != -1) {
+ double max_percent = 0.0;
int i, nr_percent = 1;
const char *color;
struct annotation *notes = symbol__annotation(sym);
- s64 offset = dl->offset;
- const u64 addr = start + offset;
- struct disasm_line *next;
- struct block_range *br;
-
- next = disasm__get_next_ip_line(&notes->src->source, dl);
-
- if (perf_evsel__is_group_event(evsel)) {
- nr_percent = evsel->nr_members;
- ppercents = calloc(nr_percent, sizeof(double));
- psamples = calloc(nr_percent, sizeof(struct sym_hist_entry));
- if (ppercents == NULL || psamples == NULL) {
- return -1;
- }
- }
- for (i = 0; i < nr_percent; i++) {
- percent = disasm__calc_percent(notes,
- notes->src->lines ? i : evsel->idx + i,
- offset,
- next ? next->offset : (s64) len,
- &path, &sample);
-
- ppercents[i] = percent;
- psamples[i] = sample;
- if (percent > max_percent)
- max_percent = percent;
+ for (i = 0; i < al->samples_nr; i++) {
+ struct annotation_data *sample = &al->samples[i];
+
+ if (sample->percent > max_percent)
+ max_percent = sample->percent;
}
if (max_percent < min_pcnt)
@@ -1123,10 +1136,10 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st
if (queue != NULL) {
list_for_each_entry_from(queue, &notes->src->source, node) {
- if (queue == dl)
+ if (queue == al)
break;
- disasm_line__print(queue, sym, start, evsel, len,
- 0, 0, 1, NULL);
+ annotation_line__print(queue, sym, start, evsel, len,
+ 0, 0, 1, NULL, addr_fmt_width);
}
}
@@ -1137,44 +1150,34 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st
* the same color than the percentage. Don't print it
* twice for close colored addr with the same filename:line
*/
- if (path) {
- if (!prev_line || strcmp(prev_line, path)
+ if (al->path) {
+ if (!prev_line || strcmp(prev_line, al->path)
|| color != prev_color) {
- color_fprintf(stdout, color, " %s", path);
- prev_line = path;
+ color_fprintf(stdout, color, " %s", al->path);
+ prev_line = al->path;
prev_color = color;
}
}
for (i = 0; i < nr_percent; i++) {
- percent = ppercents[i];
- sample = psamples[i];
- color = get_percent_color(percent);
+ struct annotation_data *sample = &al->samples[i];
+
+ color = get_percent_color(sample->percent);
if (symbol_conf.show_total_period)
color_fprintf(stdout, color, " %11" PRIu64,
- sample.period);
+ sample->he.period);
else if (symbol_conf.show_nr_samples)
color_fprintf(stdout, color, " %7" PRIu64,
- sample.nr_samples);
+ sample->he.nr_samples);
else
- color_fprintf(stdout, color, " %7.2f", percent);
+ color_fprintf(stdout, color, " %7.2f", sample->percent);
}
- printf(" : ");
+ printf(" : ");
- br = block_range__find(addr);
- color_fprintf(stdout, annotate__address_color(br), " %" PRIx64 ":", addr);
- color_fprintf(stdout, annotate__asm_color(br), "%s", dl->line);
- annotate__branch_printf(br, addr);
+ disasm_line__print(dl, start, addr_fmt_width);
printf("\n");
-
- if (ppercents != &percent)
- free(ppercents);
-
- if (psamples != &sample)
- free(psamples);
-
} else if (max_lines && printed >= max_lines)
return 1;
else {
@@ -1186,10 +1189,10 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st
if (perf_evsel__is_group_event(evsel))
width *= evsel->nr_members;
- if (!*dl->line)
+ if (!*al->line)
printf(" %*s:\n", width, " ");
else
- printf(" %*s: %s\n", width, " ", dl->line);
+ printf(" %*s: %*s %s\n", width, " ", addr_fmt_width, " ", al->line);
}
return 0;
@@ -1215,11 +1218,11 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st
* means that it's not a disassembly line so should be treated differently.
* The ops.raw part will be parsed further according to type of the instruction.
*/
-static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
- struct arch *arch,
- FILE *file, size_t privsize,
+static int symbol__parse_objdump_line(struct symbol *sym, FILE *file,
+ struct annotate_args *args,
int *line_nr)
{
+ struct map *map = args->map;
struct annotation *notes = symbol__annotation(sym);
struct disasm_line *dl;
char *line = NULL, *parsed_line, *tmp, *tmp2;
@@ -1263,7 +1266,11 @@ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
parsed_line = tmp2 + 1;
}
- dl = disasm_line__new(offset, parsed_line, privsize, *line_nr, arch, map);
+ args->offset = offset;
+ args->line = parsed_line;
+ args->line_nr = *line_nr;
+
+ dl = disasm_line__new(args);
free(line);
(*line_nr)++;
@@ -1288,7 +1295,7 @@ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
dl->ops.target.name = strdup(target.sym->name);
}
- disasm__add(&notes->src->source, dl);
+ annotation_line__add(&dl->al, &notes->src->source);
return 0;
}
@@ -1305,19 +1312,19 @@ static void delete_last_nop(struct symbol *sym)
struct disasm_line *dl;
while (!list_empty(list)) {
- dl = list_entry(list->prev, struct disasm_line, node);
+ dl = list_entry(list->prev, struct disasm_line, al.node);
if (dl->ins.ops) {
if (dl->ins.ops != &nop_ops)
return;
} else {
- if (!strstr(dl->line, " nop ") &&
- !strstr(dl->line, " nopl ") &&
- !strstr(dl->line, " nopw "))
+ if (!strstr(dl->al.line, " nop ") &&
+ !strstr(dl->al.line, " nopl ") &&
+ !strstr(dl->al.line, " nopw "))
return;
}
- list_del(&dl->node);
+ list_del(&dl->al.node);
disasm_line__free(dl);
}
}
@@ -1412,25 +1419,11 @@ fallback:
return 0;
}
-static const char *annotate__norm_arch(const char *arch_name)
-{
- struct utsname uts;
-
- if (!arch_name) { /* Assume we are annotating locally. */
- if (uname(&uts) < 0)
- return NULL;
- arch_name = uts.machine;
- }
- return normalize_arch((char *)arch_name);
-}
-
-int symbol__disassemble(struct symbol *sym, struct map *map,
- const char *arch_name, size_t privsize,
- struct arch **parch, char *cpuid)
+static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
{
+ struct map *map = args->map;
struct dso *dso = map->dso;
char command[PATH_MAX * 2];
- struct arch *arch = NULL;
FILE *file;
char symfs_filename[PATH_MAX];
struct kcore_extract kce;
@@ -1444,25 +1437,6 @@ int symbol__disassemble(struct symbol *sym, struct map *map,
if (err)
return err;
- arch_name = annotate__norm_arch(arch_name);
- if (!arch_name)
- return -1;
-
- arch = arch__find(arch_name);
- if (arch == NULL)
- return -ENOTSUP;
-
- if (parch)
- *parch = arch;
-
- if (arch->init) {
- err = arch->init(arch, cpuid);
- if (err) {
- pr_err("%s: failed to initialize %s arch priv area\n", __func__, arch->name);
- return err;
- }
- }
-
pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
symfs_filename, sym->name, map->unmap_ip(map, sym->start),
map->unmap_ip(map, sym->end));
@@ -1546,8 +1520,7 @@ int symbol__disassemble(struct symbol *sym, struct map *map,
* can associate it with the instructions till the next one.
* See disasm_line__new() and struct disasm_line::line_nr.
*/
- if (symbol__parse_objdump_line(sym, map, arch, file, privsize,
- &lineno) < 0)
+ if (symbol__parse_objdump_line(sym, file, args, &lineno) < 0)
break;
nline++;
}
@@ -1580,21 +1553,110 @@ out_close_stdout:
goto out_remove_tmp;
}
-static void insert_source_line(struct rb_root *root, struct source_line *src_line)
+static void calc_percent(struct sym_hist *hist,
+ struct annotation_data *sample,
+ s64 offset, s64 end)
+{
+ unsigned int hits = 0;
+ u64 period = 0;
+
+ while (offset < end) {
+ hits += hist->addr[offset].nr_samples;
+ period += hist->addr[offset].period;
+ ++offset;
+ }
+
+ if (hist->nr_samples) {
+ sample->he.period = period;
+ sample->he.nr_samples = hits;
+ sample->percent = 100.0 * hits / hist->nr_samples;
+ }
+}
+
+static void annotation__calc_percent(struct annotation *notes,
+ struct perf_evsel *evsel, s64 len)
+{
+ struct annotation_line *al, *next;
+
+ list_for_each_entry(al, &notes->src->source, node) {
+ s64 end;
+ int i;
+
+ if (al->offset == -1)
+ continue;
+
+ next = annotation_line__next(al, &notes->src->source);
+ end = next ? next->offset : len;
+
+ for (i = 0; i < al->samples_nr; i++) {
+ struct annotation_data *sample;
+ struct sym_hist *hist;
+
+ hist = annotation__histogram(notes, evsel->idx + i);
+ sample = &al->samples[i];
+
+ calc_percent(hist, sample, al->offset, end);
+ }
+ }
+}
+
+void symbol__calc_percent(struct symbol *sym, struct perf_evsel *evsel)
+{
+ struct annotation *notes = symbol__annotation(sym);
+
+ annotation__calc_percent(notes, evsel, symbol__size(sym));
+}
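/*
 * Editor's illustrative sketch (not part of this patch): a toy version
 * of calc_percent() above.  The percentage attributed to one line is
 * the sum of histogram hits in [offset, end) divided by the symbol's
 * total samples.  The histogram values below are invented.
 */
#include <stdio.h>

static double toy_calc_percent(const unsigned int *addr_hits,
			       unsigned int nr_samples, long offset, long end)
{
	unsigned int hits = 0;

	while (offset < end)
		hits += addr_hits[offset++];

	return nr_samples ? 100.0 * hits / nr_samples : 0.0;
}

int main(void)
{
	/* 8-byte symbol, 10 samples in total, one instruction covers [0, 4) */
	unsigned int hist[8] = { 3, 0, 2, 0, 4, 0, 1, 0 };

	printf("%.2f%%\n", toy_calc_percent(hist, 10, 0, 4));	/* 50.00% */
	return 0;
}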
+
+int symbol__annotate(struct symbol *sym, struct map *map,
+ struct perf_evsel *evsel, size_t privsize,
+ struct arch **parch)
{
- struct source_line *iter;
+ struct annotate_args args = {
+ .privsize = privsize,
+ .map = map,
+ .evsel = evsel,
+ };
+ struct perf_env *env = perf_evsel__env(evsel);
+ const char *arch_name = perf_env__arch(env);
+ struct arch *arch;
+ int err;
+
+ if (!arch_name)
+ return -1;
+
+ args.arch = arch = arch__find(arch_name);
+ if (arch == NULL)
+ return -ENOTSUP;
+
+ if (parch)
+ *parch = arch;
+
+ if (arch->init) {
+ err = arch->init(arch, env ? env->cpuid : NULL);
+ if (err) {
+ pr_err("%s: failed to initialize %s arch priv area\n", __func__, arch->name);
+ return err;
+ }
+ }
+
+ return symbol__disassemble(sym, &args);
+}
+
+static void insert_source_line(struct rb_root *root, struct annotation_line *al)
+{
+ struct annotation_line *iter;
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
int i, ret;
while (*p != NULL) {
parent = *p;
- iter = rb_entry(parent, struct source_line, node);
+ iter = rb_entry(parent, struct annotation_line, rb_node);
- ret = strcmp(iter->path, src_line->path);
+ ret = strcmp(iter->path, al->path);
if (ret == 0) {
- for (i = 0; i < src_line->nr_pcnt; i++)
- iter->samples[i].percent_sum += src_line->samples[i].percent;
+ for (i = 0; i < al->samples_nr; i++)
+ iter->samples[i].percent_sum += al->samples[i].percent;
return;
}
@@ -1604,18 +1666,18 @@ static void insert_source_line(struct rb_root *root, struct source_line *src_lin
p = &(*p)->rb_right;
}
- for (i = 0; i < src_line->nr_pcnt; i++)
- src_line->samples[i].percent_sum = src_line->samples[i].percent;
+ for (i = 0; i < al->samples_nr; i++)
+ al->samples[i].percent_sum = al->samples[i].percent;
- rb_link_node(&src_line->node, parent, p);
- rb_insert_color(&src_line->node, root);
+ rb_link_node(&al->rb_node, parent, p);
+ rb_insert_color(&al->rb_node, root);
}
-static int cmp_source_line(struct source_line *a, struct source_line *b)
+static int cmp_source_line(struct annotation_line *a, struct annotation_line *b)
{
int i;
- for (i = 0; i < a->nr_pcnt; i++) {
+ for (i = 0; i < a->samples_nr; i++) {
if (a->samples[i].percent_sum == b->samples[i].percent_sum)
continue;
return a->samples[i].percent_sum > b->samples[i].percent_sum;
@@ -1624,135 +1686,47 @@ static int cmp_source_line(struct source_line *a, struct source_line *b)
return 0;
}
-static void __resort_source_line(struct rb_root *root, struct source_line *src_line)
+static void __resort_source_line(struct rb_root *root, struct annotation_line *al)
{
- struct source_line *iter;
+ struct annotation_line *iter;
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
while (*p != NULL) {
parent = *p;
- iter = rb_entry(parent, struct source_line, node);
+ iter = rb_entry(parent, struct annotation_line, rb_node);
- if (cmp_source_line(src_line, iter))
+ if (cmp_source_line(al, iter))
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
- rb_link_node(&src_line->node, parent, p);
- rb_insert_color(&src_line->node, root);
+ rb_link_node(&al->rb_node, parent, p);
+ rb_insert_color(&al->rb_node, root);
}
static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root)
{
- struct source_line *src_line;
+ struct annotation_line *al;
struct rb_node *node;
node = rb_first(src_root);
while (node) {
struct rb_node *next;
- src_line = rb_entry(node, struct source_line, node);
+ al = rb_entry(node, struct annotation_line, rb_node);
next = rb_next(node);
rb_erase(node, src_root);
- __resort_source_line(dest_root, src_line);
+ __resort_source_line(dest_root, al);
node = next;
}
}
-static void symbol__free_source_line(struct symbol *sym, int len)
-{
- struct annotation *notes = symbol__annotation(sym);
- struct source_line *src_line = notes->src->lines;
- size_t sizeof_src_line;
- int i;
-
- sizeof_src_line = sizeof(*src_line) +
- (sizeof(src_line->samples) * (src_line->nr_pcnt - 1));
-
- for (i = 0; i < len; i++) {
- free_srcline(src_line->path);
- src_line = (void *)src_line + sizeof_src_line;
- }
-
- zfree(&notes->src->lines);
-}
-
-/* Get the filename:line for the colored entries */
-static int symbol__get_source_line(struct symbol *sym, struct map *map,
- struct perf_evsel *evsel,
- struct rb_root *root, int len)
-{
- u64 start;
- int i, k;
- int evidx = evsel->idx;
- struct source_line *src_line;
- struct annotation *notes = symbol__annotation(sym);
- struct sym_hist *h = annotation__histogram(notes, evidx);
- struct rb_root tmp_root = RB_ROOT;
- int nr_pcnt = 1;
- u64 nr_samples = h->nr_samples;
- size_t sizeof_src_line = sizeof(struct source_line);
-
- if (perf_evsel__is_group_event(evsel)) {
- for (i = 1; i < evsel->nr_members; i++) {
- h = annotation__histogram(notes, evidx + i);
- nr_samples += h->nr_samples;
- }
- nr_pcnt = evsel->nr_members;
- sizeof_src_line += (nr_pcnt - 1) * sizeof(src_line->samples);
- }
-
- if (!nr_samples)
- return 0;
-
- src_line = notes->src->lines = calloc(len, sizeof_src_line);
- if (!notes->src->lines)
- return -1;
-
- start = map__rip_2objdump(map, sym->start);
-
- for (i = 0; i < len; i++) {
- u64 offset;
- double percent_max = 0.0;
-
- src_line->nr_pcnt = nr_pcnt;
-
- for (k = 0; k < nr_pcnt; k++) {
- double percent = 0.0;
-
- h = annotation__histogram(notes, evidx + k);
- nr_samples = h->addr[i].nr_samples;
- if (h->nr_samples)
- percent = 100.0 * nr_samples / h->nr_samples;
-
- if (percent > percent_max)
- percent_max = percent;
- src_line->samples[k].percent = percent;
- src_line->samples[k].nr = nr_samples;
- }
-
- if (percent_max <= 0.5)
- goto next;
-
- offset = start + i;
- src_line->path = get_srcline(map->dso, offset, NULL,
- false, true);
- insert_source_line(&tmp_root, src_line);
-
- next:
- src_line = (void *)src_line + sizeof_src_line;
- }
-
- resort_source_line(root, &tmp_root);
- return 0;
-}
-
static void print_summary(struct rb_root *root, const char *filename)
{
- struct source_line *src_line;
+ struct annotation_line *al;
struct rb_node *node;
printf("\nSorted summary for file %s\n", filename);
@@ -1770,9 +1744,9 @@ static void print_summary(struct rb_root *root, const char *filename)
char *path;
int i;
- src_line = rb_entry(node, struct source_line, node);
- for (i = 0; i < src_line->nr_pcnt; i++) {
- percent = src_line->samples[i].percent_sum;
+ al = rb_entry(node, struct annotation_line, rb_node);
+ for (i = 0; i < al->samples_nr; i++) {
+ percent = al->samples[i].percent_sum;
color = get_percent_color(percent);
color_fprintf(stdout, color, " %7.2f", percent);
@@ -1780,7 +1754,7 @@ static void print_summary(struct rb_root *root, const char *filename)
percent_max = percent;
}
- path = src_line->path;
+ path = al->path;
color = get_percent_color(percent_max);
color_fprintf(stdout, color, " %s\n", path);
@@ -1801,6 +1775,19 @@ static void symbol__annotate_hits(struct symbol *sym, struct perf_evsel *evsel)
printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->nr_samples", h->nr_samples);
}
+static int annotated_source__addr_fmt_width(struct list_head *lines, u64 start)
+{
+ char bf[32];
+ struct annotation_line *line;
+
+ list_for_each_entry_reverse(line, lines, node) {
+ if (line->offset != -1)
+ return scnprintf(bf, sizeof(bf), "%" PRIx64, start + line->offset);
+ }
+
+ return 0;
+}
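/*
 * Editor's illustrative sketch (not part of this patch): the idea
 * behind annotated_source__addr_fmt_width() above -- format the
 * highest address once and reuse the resulting string length as the
 * field width for every address column.  Numbers are invented.
 */
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	char bf[32];
	uint64_t start = 0xffffffff81000000ULL, last_offset = 0x1f4;
	int width = snprintf(bf, sizeof(bf), "%" PRIx64, start + last_offset);

	printf("addresses will be printed %d characters wide (%s)\n", width, bf);
	return 0;
}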
+
int symbol__annotate_printf(struct symbol *sym, struct map *map,
struct perf_evsel *evsel, bool full_paths,
int min_pcnt, int max_lines, int context)
@@ -1811,9 +1798,9 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map,
const char *evsel_name = perf_evsel__name(evsel);
struct annotation *notes = symbol__annotation(sym);
struct sym_hist *h = annotation__histogram(notes, evsel->idx);
- struct disasm_line *pos, *queue = NULL;
+ struct annotation_line *pos, *queue = NULL;
u64 start = map__rip_2objdump(map, sym->start);
- int printed = 2, queue_len = 0;
+ int printed = 2, queue_len = 0, addr_fmt_width;
int more = 0;
u64 len;
int width = symbol_conf.show_total_period ? 12 : 8;
@@ -1844,15 +1831,21 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map,
if (verbose > 0)
symbol__annotate_hits(sym, evsel);
+ addr_fmt_width = annotated_source__addr_fmt_width(&notes->src->source, start);
+
list_for_each_entry(pos, &notes->src->source, node) {
+ int err;
+
if (context && queue == NULL) {
queue = pos;
queue_len = 0;
}
- switch (disasm_line__print(pos, sym, start, evsel, len,
- min_pcnt, printed, max_lines,
- queue)) {
+ err = annotation_line__print(pos, sym, start, evsel, len,
+ min_pcnt, printed, max_lines,
+ queue, addr_fmt_width);
+
+ switch (err) {
case 0:
++printed;
if (context) {
@@ -1907,13 +1900,13 @@ void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
}
}
-void disasm__purge(struct list_head *head)
+void annotated_source__purge(struct annotated_source *as)
{
- struct disasm_line *pos, *n;
+ struct annotation_line *al, *n;
- list_for_each_entry_safe(pos, n, head, node) {
- list_del(&pos->node);
- disasm_line__free(pos);
+ list_for_each_entry_safe(al, n, &as->source, node) {
+ list_del(&al->node);
+ disasm_line__free(disasm_line(al));
}
}
@@ -1921,10 +1914,10 @@ static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp)
{
size_t printed;
- if (dl->offset == -1)
- return fprintf(fp, "%s\n", dl->line);
+ if (dl->al.offset == -1)
+ return fprintf(fp, "%s\n", dl->al.line);
- printed = fprintf(fp, "%#" PRIx64 " %s", dl->offset, dl->ins.name);
+ printed = fprintf(fp, "%#" PRIx64 " %s", dl->al.offset, dl->ins.name);
if (dl->ops.raw[0] != '\0') {
printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ",
@@ -1939,38 +1932,73 @@ size_t disasm__fprintf(struct list_head *head, FILE *fp)
struct disasm_line *pos;
size_t printed = 0;
- list_for_each_entry(pos, head, node)
+ list_for_each_entry(pos, head, al.node)
printed += disasm_line__fprintf(pos, fp);
return printed;
}
+static void annotation__calc_lines(struct annotation *notes, struct map *map,
+ struct rb_root *root, u64 start)
+{
+ struct annotation_line *al;
+ struct rb_root tmp_root = RB_ROOT;
+
+ list_for_each_entry(al, &notes->src->source, node) {
+ double percent_max = 0.0;
+ int i;
+
+ for (i = 0; i < al->samples_nr; i++) {
+ struct annotation_data *sample;
+
+ sample = &al->samples[i];
+
+ if (sample->percent > percent_max)
+ percent_max = sample->percent;
+ }
+
+ if (percent_max <= 0.5)
+ continue;
+
+ al->path = get_srcline(map->dso, start + al->offset, NULL,
+ false, true, start + al->offset);
+ insert_source_line(&tmp_root, al);
+ }
+
+ resort_source_line(root, &tmp_root);
+}
+
+static void symbol__calc_lines(struct symbol *sym, struct map *map,
+ struct rb_root *root)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ u64 start = map__rip_2objdump(map, sym->start);
+
+ annotation__calc_lines(notes, map, root, start);
+}
+
int symbol__tty_annotate(struct symbol *sym, struct map *map,
struct perf_evsel *evsel, bool print_lines,
bool full_paths, int min_pcnt, int max_lines)
{
struct dso *dso = map->dso;
struct rb_root source_line = RB_ROOT;
- u64 len;
- if (symbol__disassemble(sym, map, perf_evsel__env_arch(evsel),
- 0, NULL, NULL) < 0)
+ if (symbol__annotate(sym, map, evsel, 0, NULL) < 0)
return -1;
- len = symbol__size(sym);
+ symbol__calc_percent(sym, evsel);
if (print_lines) {
srcline_full_filename = full_paths;
- symbol__get_source_line(sym, map, evsel, &source_line, len);
+ symbol__calc_lines(sym, map, &source_line);
print_summary(&source_line, dso->long_name);
}
symbol__annotate_printf(sym, map, evsel, full_paths,
min_pcnt, max_lines, 0);
- if (print_lines)
- symbol__free_source_line(sym, len);
- disasm__purge(&symbol__annotation(sym)->src->source);
+ annotated_source__purge(symbol__annotation(sym)->src);
return 0;
}
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index f6ba3560de5e..ce427445671f 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -59,33 +59,55 @@ bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2);
struct annotation;
+struct sym_hist_entry {
+ u64 nr_samples;
+ u64 period;
+};
+
+struct annotation_data {
+ double percent;
+ double percent_sum;
+ struct sym_hist_entry he;
+};
+
+struct annotation_line {
+ struct list_head node;
+ struct rb_node rb_node;
+ s64 offset;
+ char *line;
+ int line_nr;
+ float ipc;
+ u64 cycles;
+ size_t privsize;
+ char *path;
+ int samples_nr;
+ struct annotation_data samples[0];
+};
+
struct disasm_line {
- struct list_head node;
- s64 offset;
- char *line;
- struct ins ins;
- int line_nr;
- float ipc;
- u64 cycles;
- struct ins_operands ops;
+ struct ins ins;
+ struct ins_operands ops;
+
+ /* This needs to be at the end. */
+ struct annotation_line al;
};
+static inline struct disasm_line *disasm_line(struct annotation_line *al)
+{
+ return al ? container_of(al, struct disasm_line, al) : NULL;
+}
+
static inline bool disasm_line__has_offset(const struct disasm_line *dl)
{
return dl->ops.target.offset_avail;
}
-struct sym_hist_entry {
- u64 nr_samples;
- u64 period;
-};
-
void disasm_line__free(struct disasm_line *dl);
-struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disasm_line *pos);
+struct annotation_line *
+annotation_line__next(struct annotation_line *pos, struct list_head *head);
int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw);
size_t disasm__fprintf(struct list_head *head, FILE *fp);
-double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset,
- s64 end, const char **path, struct sym_hist_entry *sample);
+void symbol__calc_percent(struct symbol *sym, struct perf_evsel *evsel);
struct sym_hist {
u64 nr_samples;
@@ -104,19 +126,6 @@ struct cyc_hist {
u16 reset;
};
-struct source_line_samples {
- double percent;
- double percent_sum;
- u64 nr;
-};
-
-struct source_line {
- struct rb_node node;
- char *path;
- int nr_pcnt;
- struct source_line_samples samples[1];
-};
-
/** struct annotated_source - symbols with hits have this attached as in annotation
*
* @histogram: Array of addr hit histograms per event being monitored
@@ -132,7 +141,6 @@ struct source_line {
*/
struct annotated_source {
struct list_head source;
- struct source_line *lines;
int nr_histograms;
size_t sizeof_sym_hist;
struct cyc_hist *cycles_hist;
@@ -169,9 +177,9 @@ int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *samp
int symbol__alloc_hist(struct symbol *sym);
void symbol__annotate_zero_histograms(struct symbol *sym);
-int symbol__disassemble(struct symbol *sym, struct map *map,
- const char *arch_name, size_t privsize,
- struct arch **parch, char *cpuid);
+int symbol__annotate(struct symbol *sym, struct map *map,
+ struct perf_evsel *evsel, size_t privsize,
+ struct arch **parch);
enum symbol_disassemble_errno {
SYMBOL_ANNOTATE_ERRNO__SUCCESS = 0,
@@ -198,7 +206,7 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map,
int min_pcnt, int max_lines, int context);
void symbol__annotate_zero_histogram(struct symbol *sym, int evidx);
void symbol__annotate_decay_histogram(struct symbol *sym, int evidx);
-void disasm__purge(struct list_head *head);
+void annotated_source__purge(struct annotated_source *as);
bool ui__has_annotation(void);
diff --git a/tools/perf/util/arm-spe-pkt-decoder.c b/tools/perf/util/arm-spe-pkt-decoder.c
new file mode 100644
index 000000000000..b94001b756c7
--- /dev/null
+++ b/tools/perf/util/arm-spe-pkt-decoder.c
@@ -0,0 +1,462 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Arm Statistical Profiling Extensions (SPE) support
+ * Copyright (c) 2017-2018, Arm Ltd.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <endian.h>
+#include <byteswap.h>
+
+#include "arm-spe-pkt-decoder.h"
+
+#define BIT(n) (1ULL << (n))
+
+#define NS_FLAG BIT(63)
+#define EL_FLAG (BIT(62) | BIT(61))
+
+#define SPE_HEADER0_PAD 0x0
+#define SPE_HEADER0_END 0x1
+#define SPE_HEADER0_ADDRESS 0x30 /* address packet (short) */
+#define SPE_HEADER0_ADDRESS_MASK 0x38
+#define SPE_HEADER0_COUNTER 0x18 /* counter packet (short) */
+#define SPE_HEADER0_COUNTER_MASK 0x38
+#define SPE_HEADER0_TIMESTAMP 0x71
+#define SPE_HEADER0_TIMESTAMP 0x71
+#define SPE_HEADER0_EVENTS 0x2
+#define SPE_HEADER0_EVENTS_MASK 0xf
+#define SPE_HEADER0_SOURCE 0x3
+#define SPE_HEADER0_SOURCE_MASK 0xf
+#define SPE_HEADER0_CONTEXT 0x24
+#define SPE_HEADER0_CONTEXT_MASK 0x3c
+#define SPE_HEADER0_OP_TYPE 0x8
+#define SPE_HEADER0_OP_TYPE_MASK 0x3c
+#define SPE_HEADER1_ALIGNMENT 0x0
+#define SPE_HEADER1_ADDRESS 0xb0 /* address packet (extended) */
+#define SPE_HEADER1_ADDRESS_MASK 0xf8
+#define SPE_HEADER1_COUNTER 0x98 /* counter packet (extended) */
+#define SPE_HEADER1_COUNTER_MASK 0xf8
+
+#if __BYTE_ORDER == __BIG_ENDIAN
+#define le16_to_cpu bswap_16
+#define le32_to_cpu bswap_32
+#define le64_to_cpu bswap_64
+#define memcpy_le64(d, s, n) do { \
+ memcpy((d), (s), (n)); \
+ *(d) = le64_to_cpu(*(d)); \
+} while (0)
+#else
+#define le16_to_cpu
+#define le32_to_cpu
+#define le64_to_cpu
+#define memcpy_le64 memcpy
+#endif
+
+static const char * const arm_spe_packet_name[] = {
+ [ARM_SPE_PAD] = "PAD",
+ [ARM_SPE_END] = "END",
+ [ARM_SPE_TIMESTAMP] = "TS",
+ [ARM_SPE_ADDRESS] = "ADDR",
+ [ARM_SPE_COUNTER] = "LAT",
+ [ARM_SPE_CONTEXT] = "CONTEXT",
+ [ARM_SPE_OP_TYPE] = "OP-TYPE",
+ [ARM_SPE_EVENTS] = "EVENTS",
+ [ARM_SPE_DATA_SOURCE] = "DATA-SOURCE",
+};
+
+const char *arm_spe_pkt_name(enum arm_spe_pkt_type type)
+{
+ return arm_spe_packet_name[type];
+}
+
+/* return ARM SPE payload size from its encoding,
+ * which is in bits 5:4 of the byte.
+ * 00 : byte (1)
+ * 01 : halfword (2)
+ * 10 : word (4)
+ * 11 : doubleword (8)
+ */
+static int payloadlen(unsigned char byte)
+{
+ return 1 << ((byte & 0x30) >> 4);
+}
+
+static int arm_spe_get_payload(const unsigned char *buf, size_t len,
+ struct arm_spe_pkt *packet)
+{
+ size_t payload_len = payloadlen(buf[0]);
+
+ if (len < 1 + payload_len)
+ return ARM_SPE_NEED_MORE_BYTES;
+
+ buf++;
+
+ switch (payload_len) {
+ case 1: packet->payload = *(uint8_t *)buf; break;
+ case 2: packet->payload = le16_to_cpu(*(uint16_t *)buf); break;
+ case 4: packet->payload = le32_to_cpu(*(uint32_t *)buf); break;
+ case 8: packet->payload = le64_to_cpu(*(uint64_t *)buf); break;
+ default: return ARM_SPE_BAD_PACKET;
+ }
+
+ return 1 + payload_len;
+}
+
+static int arm_spe_get_pad(struct arm_spe_pkt *packet)
+{
+ packet->type = ARM_SPE_PAD;
+ return 1;
+}
+
+static int arm_spe_get_alignment(const unsigned char *buf, size_t len,
+ struct arm_spe_pkt *packet)
+{
+ unsigned int alignment = 1 << ((buf[0] & 0xf) + 1);
+
+ if (len < alignment)
+ return ARM_SPE_NEED_MORE_BYTES;
+
+ packet->type = ARM_SPE_PAD;
+ return alignment - (((uintptr_t)buf) & (alignment - 1));
+}
+
+static int arm_spe_get_end(struct arm_spe_pkt *packet)
+{
+ packet->type = ARM_SPE_END;
+ return 1;
+}
+
+static int arm_spe_get_timestamp(const unsigned char *buf, size_t len,
+ struct arm_spe_pkt *packet)
+{
+ packet->type = ARM_SPE_TIMESTAMP;
+ return arm_spe_get_payload(buf, len, packet);
+}
+
+static int arm_spe_get_events(const unsigned char *buf, size_t len,
+ struct arm_spe_pkt *packet)
+{
+ int ret = arm_spe_get_payload(buf, len, packet);
+
+ packet->type = ARM_SPE_EVENTS;
+
+ /* we use index to identify Events with fewer comparisons in
+ * arm_spe_pkt_desc(): e.g., the LLC-ACCESS, LLC-REFILL, and
+ * REMOTE-ACCESS events are identified only when index > 1.
+ */
+ packet->index = ret - 1;
+
+ return ret;
+}
+
+static int arm_spe_get_data_source(const unsigned char *buf, size_t len,
+ struct arm_spe_pkt *packet)
+{
+ packet->type = ARM_SPE_DATA_SOURCE;
+ return arm_spe_get_payload(buf, len, packet);
+}
+
+static int arm_spe_get_context(const unsigned char *buf, size_t len,
+ struct arm_spe_pkt *packet)
+{
+ packet->type = ARM_SPE_CONTEXT;
+ packet->index = buf[0] & 0x3;
+
+ return arm_spe_get_payload(buf, len, packet);
+}
+
+static int arm_spe_get_op_type(const unsigned char *buf, size_t len,
+ struct arm_spe_pkt *packet)
+{
+ packet->type = ARM_SPE_OP_TYPE;
+ packet->index = buf[0] & 0x3;
+ return arm_spe_get_payload(buf, len, packet);
+}
+
+static int arm_spe_get_counter(const unsigned char *buf, size_t len,
+ const unsigned char ext_hdr, struct arm_spe_pkt *packet)
+{
+ if (len < 2)
+ return ARM_SPE_NEED_MORE_BYTES;
+
+ packet->type = ARM_SPE_COUNTER;
+ if (ext_hdr)
+ packet->index = ((buf[0] & 0x3) << 3) | (buf[1] & 0x7);
+ else
+ packet->index = buf[0] & 0x7;
+
+ packet->payload = le16_to_cpu(*(uint16_t *)(buf + 1));
+
+ return 1 + ext_hdr + 2;
+}
+
+static int arm_spe_get_addr(const unsigned char *buf, size_t len,
+ const unsigned char ext_hdr, struct arm_spe_pkt *packet)
+{
+ if (len < 8)
+ return ARM_SPE_NEED_MORE_BYTES;
+
+ packet->type = ARM_SPE_ADDRESS;
+ if (ext_hdr)
+ packet->index = ((buf[0] & 0x3) << 3) | (buf[1] & 0x7);
+ else
+ packet->index = buf[0] & 0x7;
+
+ memcpy_le64(&packet->payload, buf + 1, 8);
+
+ return 1 + ext_hdr + 8;
+}
+
+static int arm_spe_do_get_packet(const unsigned char *buf, size_t len,
+ struct arm_spe_pkt *packet)
+{
+ unsigned int byte;
+
+ memset(packet, 0, sizeof(struct arm_spe_pkt));
+
+ if (!len)
+ return ARM_SPE_NEED_MORE_BYTES;
+
+ byte = buf[0];
+ if (byte == SPE_HEADER0_PAD)
+ return arm_spe_get_pad(packet);
+ else if (byte == SPE_HEADER0_END) /* no timestamp at end of record */
+ return arm_spe_get_end(packet);
+ else if (byte & 0xc0 /* 0y11xxxxxx */) {
+ if (byte & 0x80) {
+ if ((byte & SPE_HEADER0_ADDRESS_MASK) == SPE_HEADER0_ADDRESS)
+ return arm_spe_get_addr(buf, len, 0, packet);
+ if ((byte & SPE_HEADER0_COUNTER_MASK) == SPE_HEADER0_COUNTER)
+ return arm_spe_get_counter(buf, len, 0, packet);
+ } else
+ if (byte == SPE_HEADER0_TIMESTAMP)
+ return arm_spe_get_timestamp(buf, len, packet);
+ else if ((byte & SPE_HEADER0_EVENTS_MASK) == SPE_HEADER0_EVENTS)
+ return arm_spe_get_events(buf, len, packet);
+ else if ((byte & SPE_HEADER0_SOURCE_MASK) == SPE_HEADER0_SOURCE)
+ return arm_spe_get_data_source(buf, len, packet);
+ else if ((byte & SPE_HEADER0_CONTEXT_MASK) == SPE_HEADER0_CONTEXT)
+ return arm_spe_get_context(buf, len, packet);
+ else if ((byte & SPE_HEADER0_OP_TYPE_MASK) == SPE_HEADER0_OP_TYPE)
+ return arm_spe_get_op_type(buf, len, packet);
+ } else if ((byte & 0xe0) == 0x20 /* 0y001xxxxx */) {
+ /* 16-bit header */
+ byte = buf[1];
+ if (byte == SPE_HEADER1_ALIGNMENT)
+ return arm_spe_get_alignment(buf, len, packet);
+ else if ((byte & SPE_HEADER1_ADDRESS_MASK) == SPE_HEADER1_ADDRESS)
+ return arm_spe_get_addr(buf, len, 1, packet);
+ else if ((byte & SPE_HEADER1_COUNTER_MASK) == SPE_HEADER1_COUNTER)
+ return arm_spe_get_counter(buf, len, 1, packet);
+ }
+
+ return ARM_SPE_BAD_PACKET;
+}
+
+int arm_spe_get_packet(const unsigned char *buf, size_t len,
+ struct arm_spe_pkt *packet)
+{
+ int ret;
+
+ ret = arm_spe_do_get_packet(buf, len, packet);
+ /* put multiple consecutive PADs on the same line, up to
+ * the fixed-width output format of 16 bytes per line.
+ */
+ if (ret > 0 && packet->type == ARM_SPE_PAD) {
+ while (ret < 16 && len > (size_t)ret && !buf[ret])
+ ret += 1;
+ }
+ return ret;
+}
+
+int arm_spe_pkt_desc(const struct arm_spe_pkt *packet, char *buf,
+ size_t buf_len)
+{
+ int ret, ns, el, idx = packet->index;
+ unsigned long long payload = packet->payload;
+ const char *name = arm_spe_pkt_name(packet->type);
+
+ switch (packet->type) {
+ case ARM_SPE_BAD:
+ case ARM_SPE_PAD:
+ case ARM_SPE_END:
+ return snprintf(buf, buf_len, "%s", name);
+ case ARM_SPE_EVENTS: {
+ size_t blen = buf_len;
+
+ ret = 0;
+ ret = snprintf(buf, buf_len, "EV");
+ buf += ret;
+ blen -= ret;
+ if (payload & 0x1) {
+ ret = snprintf(buf, buf_len, " EXCEPTION-GEN");
+ buf += ret;
+ blen -= ret;
+ }
+ if (payload & 0x2) {
+ ret = snprintf(buf, buf_len, " RETIRED");
+ buf += ret;
+ blen -= ret;
+ }
+ if (payload & 0x4) {
+ ret = snprintf(buf, buf_len, " L1D-ACCESS");
+ buf += ret;
+ blen -= ret;
+ }
+ if (payload & 0x8) {
+ ret = snprintf(buf, buf_len, " L1D-REFILL");
+ buf += ret;
+ blen -= ret;
+ }
+ if (payload & 0x10) {
+ ret = snprintf(buf, buf_len, " TLB-ACCESS");
+ buf += ret;
+ blen -= ret;
+ }
+ if (payload & 0x20) {
+ ret = snprintf(buf, buf_len, " TLB-REFILL");
+ buf += ret;
+ blen -= ret;
+ }
+ if (payload & 0x40) {
+ ret = snprintf(buf, buf_len, " NOT-TAKEN");
+ buf += ret;
+ blen -= ret;
+ }
+ if (payload & 0x80) {
+ ret = snprintf(buf, buf_len, " MISPRED");
+ buf += ret;
+ blen -= ret;
+ }
+ if (idx > 1) {
+ if (payload & 0x100) {
+ ret = snprintf(buf, buf_len, " LLC-ACCESS");
+ buf += ret;
+ blen -= ret;
+ }
+ if (payload & 0x200) {
+ ret = snprintf(buf, buf_len, " LLC-REFILL");
+ buf += ret;
+ blen -= ret;
+ }
+ if (payload & 0x400) {
+ ret = snprintf(buf, buf_len, " REMOTE-ACCESS");
+ buf += ret;
+ blen -= ret;
+ }
+ }
+ if (ret < 0)
+ return ret;
+ blen -= ret;
+ return buf_len - blen;
+ }
+ case ARM_SPE_OP_TYPE:
+ switch (idx) {
+ case 0: return snprintf(buf, buf_len, "%s", payload & 0x1 ?
+ "COND-SELECT" : "INSN-OTHER");
+ case 1: {
+ size_t blen = buf_len;
+
+ if (payload & 0x1)
+ ret = snprintf(buf, buf_len, "ST");
+ else
+ ret = snprintf(buf, buf_len, "LD");
+ buf += ret;
+ blen -= ret;
+ if (payload & 0x2) {
+ if (payload & 0x4) {
+ ret = snprintf(buf, buf_len, " AT");
+ buf += ret;
+ blen -= ret;
+ }
+ if (payload & 0x8) {
+ ret = snprintf(buf, buf_len, " EXCL");
+ buf += ret;
+ blen -= ret;
+ }
+ if (payload & 0x10) {
+ ret = snprintf(buf, buf_len, " AR");
+ buf += ret;
+ blen -= ret;
+ }
+ } else if (payload & 0x4) {
+ ret = snprintf(buf, buf_len, " SIMD-FP");
+ buf += ret;
+ blen -= ret;
+ }
+ if (ret < 0)
+ return ret;
+ blen -= ret;
+ return buf_len - blen;
+ }
+ case 2: {
+ size_t blen = buf_len;
+
+ ret = snprintf(buf, buf_len, "B");
+ buf += ret;
+ blen -= ret;
+ if (payload & 0x1) {
+ ret = snprintf(buf, buf_len, " COND");
+ buf += ret;
+ blen -= ret;
+ }
+ if (payload & 0x2) {
+ ret = snprintf(buf, buf_len, " IND");
+ buf += ret;
+ blen -= ret;
+ }
+ if (ret < 0)
+ return ret;
+ blen -= ret;
+ return buf_len - blen;
+ }
+ default: return 0;
+ }
+ case ARM_SPE_DATA_SOURCE:
+ case ARM_SPE_TIMESTAMP:
+ return snprintf(buf, buf_len, "%s %lld", name, payload);
+ case ARM_SPE_ADDRESS:
+ switch (idx) {
+ case 0:
+ case 1: ns = !!(packet->payload & NS_FLAG);
+ el = (packet->payload & EL_FLAG) >> 61;
+ payload &= ~(0xffULL << 56);
+ return snprintf(buf, buf_len, "%s 0x%llx el%d ns=%d",
+ (idx == 1) ? "TGT" : "PC", payload, el, ns);
+ case 2: return snprintf(buf, buf_len, "VA 0x%llx", payload);
+ case 3: ns = !!(packet->payload & NS_FLAG);
+ payload &= ~(0xffULL << 56);
+ return snprintf(buf, buf_len, "PA 0x%llx ns=%d",
+ payload, ns);
+ default: return 0;
+ }
+ case ARM_SPE_CONTEXT:
+ return snprintf(buf, buf_len, "%s 0x%lx el%d", name,
+ (unsigned long)payload, idx + 1);
+ case ARM_SPE_COUNTER: {
+ size_t blen = buf_len;
+
+ ret = snprintf(buf, buf_len, "%s %d ", name,
+ (unsigned short)payload);
+ buf += ret;
+ blen -= ret;
+ switch (idx) {
+ case 0: ret = snprintf(buf, buf_len, "TOT"); break;
+ case 1: ret = snprintf(buf, buf_len, "ISSUE"); break;
+ case 2: ret = snprintf(buf, buf_len, "XLAT"); break;
+ default: ret = 0;
+ }
+ if (ret < 0)
+ return ret;
+ blen -= ret;
+ return buf_len - blen;
+ }
+ default:
+ break;
+ }
+
+ return snprintf(buf, buf_len, "%s 0x%llx (%d)",
+ name, payload, packet->index);
+}
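The payload-size encoding that payloadlen() above implements packs the size into bits 5:4 of the header byte. A minimal standalone sketch of the same arithmetic (hypothetical example values, not part of the patch):

#include <stdio.h>

/* Same encoding as payloadlen() above: bits 5:4 select 1, 2, 4 or 8 bytes. */
static int payload_size(unsigned char hdr)
{
	return 1 << ((hdr & 0x30) >> 4);
}

int main(void)
{
	/* 0x71 (SPE_HEADER0_TIMESTAMP): bits 5:4 are 0b11 -> 8-byte payload */
	printf("0x71 -> %d bytes\n", payload_size(0x71));
	/* 0x62 decodes as an events header above (low nibble 0x2); bits 5:4
	 * are 0b10 -> 4-byte payload */
	printf("0x62 -> %d bytes\n", payload_size(0x62));
	return 0;
}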
diff --git a/tools/perf/util/arm-spe-pkt-decoder.h b/tools/perf/util/arm-spe-pkt-decoder.h
new file mode 100644
index 000000000000..d786ef65113f
--- /dev/null
+++ b/tools/perf/util/arm-spe-pkt-decoder.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Arm Statistical Profiling Extensions (SPE) support
+ * Copyright (c) 2017-2018, Arm Ltd.
+ */
+
+#ifndef INCLUDE__ARM_SPE_PKT_DECODER_H__
+#define INCLUDE__ARM_SPE_PKT_DECODER_H__
+
+#include <stddef.h>
+#include <stdint.h>
+
+#define ARM_SPE_PKT_DESC_MAX 256
+
+#define ARM_SPE_NEED_MORE_BYTES -1
+#define ARM_SPE_BAD_PACKET -2
+
+enum arm_spe_pkt_type {
+ ARM_SPE_BAD,
+ ARM_SPE_PAD,
+ ARM_SPE_END,
+ ARM_SPE_TIMESTAMP,
+ ARM_SPE_ADDRESS,
+ ARM_SPE_COUNTER,
+ ARM_SPE_CONTEXT,
+ ARM_SPE_OP_TYPE,
+ ARM_SPE_EVENTS,
+ ARM_SPE_DATA_SOURCE,
+};
+
+struct arm_spe_pkt {
+ enum arm_spe_pkt_type type;
+ unsigned char index;
+ uint64_t payload;
+};
+
+const char *arm_spe_pkt_name(enum arm_spe_pkt_type);
+
+int arm_spe_get_packet(const unsigned char *buf, size_t len,
+ struct arm_spe_pkt *packet);
+
+int arm_spe_pkt_desc(const struct arm_spe_pkt *packet, char *buf, size_t len);
+#endif
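For reference, a caller drives this interface the same way arm_spe_dump() above does: pull packets out of a raw buffer with arm_spe_get_packet() and format each one with arm_spe_pkt_desc(). A minimal sketch, assuming buf/len hold raw SPE AUX data and with error handling trimmed:

#include <stdio.h>
#include "arm-spe-pkt-decoder.h"

static void print_spe_packets(const unsigned char *buf, size_t len)
{
	struct arm_spe_pkt packet;
	char desc[ARM_SPE_PKT_DESC_MAX];
	int ret;

	while (len) {
		ret = arm_spe_get_packet(buf, len, &packet);
		if (ret <= 0)		/* bad packet or truncated buffer */
			break;
		if (arm_spe_pkt_desc(&packet, desc, sizeof(desc)) > 0)
			printf("%s\n", desc);
		buf += ret;
		len -= ret;
	}
}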
diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
new file mode 100644
index 000000000000..6067267cc76c
--- /dev/null
+++ b/tools/perf/util/arm-spe.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Arm Statistical Profiling Extensions (SPE) support
+ * Copyright (c) 2017-2018, Arm Ltd.
+ */
+
+#include <endian.h>
+#include <errno.h>
+#include <byteswap.h>
+#include <inttypes.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/log2.h>
+
+#include "cpumap.h"
+#include "color.h"
+#include "evsel.h"
+#include "evlist.h"
+#include "machine.h"
+#include "session.h"
+#include "util.h"
+#include "thread.h"
+#include "debug.h"
+#include "auxtrace.h"
+#include "arm-spe.h"
+#include "arm-spe-pkt-decoder.h"
+
+struct arm_spe {
+ struct auxtrace auxtrace;
+ struct auxtrace_queues queues;
+ struct auxtrace_heap heap;
+ u32 auxtrace_type;
+ struct perf_session *session;
+ struct machine *machine;
+ u32 pmu_type;
+};
+
+struct arm_spe_queue {
+ struct arm_spe *spe;
+ unsigned int queue_nr;
+ struct auxtrace_buffer *buffer;
+ bool on_heap;
+ bool done;
+ pid_t pid;
+ pid_t tid;
+ int cpu;
+};
+
+static void arm_spe_dump(struct arm_spe *spe __maybe_unused,
+ unsigned char *buf, size_t len)
+{
+ struct arm_spe_pkt packet;
+ size_t pos = 0;
+ int ret, pkt_len, i;
+ char desc[ARM_SPE_PKT_DESC_MAX];
+ const char *color = PERF_COLOR_BLUE;
+
+ color_fprintf(stdout, color,
+ ". ... ARM SPE data: size %zu bytes\n",
+ len);
+
+ while (len) {
+ ret = arm_spe_get_packet(buf, len, &packet);
+ if (ret > 0)
+ pkt_len = ret;
+ else
+ pkt_len = 1;
+ printf(".");
+ color_fprintf(stdout, color, " %08x: ", pos);
+ for (i = 0; i < pkt_len; i++)
+ color_fprintf(stdout, color, " %02x", buf[i]);
+ for (; i < 16; i++)
+ color_fprintf(stdout, color, " ");
+ if (ret > 0) {
+ ret = arm_spe_pkt_desc(&packet, desc,
+ ARM_SPE_PKT_DESC_MAX);
+ if (ret > 0)
+ color_fprintf(stdout, color, " %s\n", desc);
+ } else {
+ color_fprintf(stdout, color, " Bad packet!\n");
+ }
+ pos += pkt_len;
+ buf += pkt_len;
+ len -= pkt_len;
+ }
+}
+
+static void arm_spe_dump_event(struct arm_spe *spe, unsigned char *buf,
+ size_t len)
+{
+ printf(".\n");
+ arm_spe_dump(spe, buf, len);
+}
+
+static int arm_spe_process_event(struct perf_session *session __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct perf_sample *sample __maybe_unused,
+ struct perf_tool *tool __maybe_unused)
+{
+ return 0;
+}
+
+static int arm_spe_process_auxtrace_event(struct perf_session *session,
+ union perf_event *event,
+ struct perf_tool *tool __maybe_unused)
+{
+ struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
+ auxtrace);
+ struct auxtrace_buffer *buffer;
+ off_t data_offset;
+ int fd = perf_data__fd(session->data);
+ int err;
+
+ if (perf_data__is_pipe(session->data)) {
+ data_offset = 0;
+ } else {
+ data_offset = lseek(fd, 0, SEEK_CUR);
+ if (data_offset == -1)
+ return -errno;
+ }
+
+ err = auxtrace_queues__add_event(&spe->queues, session, event,
+ data_offset, &buffer);
+ if (err)
+ return err;
+
+ /* Dump here now that we have copied a piped trace out of the pipe */
+ if (dump_trace) {
+ if (auxtrace_buffer__get_data(buffer, fd)) {
+ arm_spe_dump_event(spe, buffer->data,
+ buffer->size);
+ auxtrace_buffer__put_data(buffer);
+ }
+ }
+
+ return 0;
+}
+
+static int arm_spe_flush(struct perf_session *session __maybe_unused,
+ struct perf_tool *tool __maybe_unused)
+{
+ return 0;
+}
+
+static void arm_spe_free_queue(void *priv)
+{
+ struct arm_spe_queue *speq = priv;
+
+ if (!speq)
+ return;
+ free(speq);
+}
+
+static void arm_spe_free_events(struct perf_session *session)
+{
+ struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
+ auxtrace);
+ struct auxtrace_queues *queues = &spe->queues;
+ unsigned int i;
+
+ for (i = 0; i < queues->nr_queues; i++) {
+ arm_spe_free_queue(queues->queue_array[i].priv);
+ queues->queue_array[i].priv = NULL;
+ }
+ auxtrace_queues__free(queues);
+}
+
+static void arm_spe_free(struct perf_session *session)
+{
+ struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
+ auxtrace);
+
+ auxtrace_heap__free(&spe->heap);
+ arm_spe_free_events(session);
+ session->auxtrace = NULL;
+ free(spe);
+}
+
+static const char * const arm_spe_info_fmts[] = {
+ [ARM_SPE_PMU_TYPE] = " PMU Type %"PRId64"\n",
+};
+
+static void arm_spe_print_info(u64 *arr)
+{
+ if (!dump_trace)
+ return;
+
+ fprintf(stdout, arm_spe_info_fmts[ARM_SPE_PMU_TYPE], arr[ARM_SPE_PMU_TYPE]);
+}
+
+int arm_spe_process_auxtrace_info(union perf_event *event,
+ struct perf_session *session)
+{
+ struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
+ size_t min_sz = sizeof(u64) * ARM_SPE_PMU_TYPE;
+ struct arm_spe *spe;
+ int err;
+
+ if (auxtrace_info->header.size < sizeof(struct auxtrace_info_event) +
+ min_sz)
+ return -EINVAL;
+
+ spe = zalloc(sizeof(struct arm_spe));
+ if (!spe)
+ return -ENOMEM;
+
+ err = auxtrace_queues__init(&spe->queues);
+ if (err)
+ goto err_free;
+
+ spe->session = session;
+ spe->machine = &session->machines.host; /* No kvm support */
+ spe->auxtrace_type = auxtrace_info->type;
+ spe->pmu_type = auxtrace_info->priv[ARM_SPE_PMU_TYPE];
+
+ spe->auxtrace.process_event = arm_spe_process_event;
+ spe->auxtrace.process_auxtrace_event = arm_spe_process_auxtrace_event;
+ spe->auxtrace.flush_events = arm_spe_flush;
+ spe->auxtrace.free_events = arm_spe_free_events;
+ spe->auxtrace.free = arm_spe_free;
+ session->auxtrace = &spe->auxtrace;
+
+ arm_spe_print_info(&auxtrace_info->priv[0]);
+
+ return 0;
+
+err_free:
+ free(spe);
+ return err;
+}
diff --git a/tools/perf/util/arm-spe.h b/tools/perf/util/arm-spe.h
new file mode 100644
index 000000000000..98d3235781c3
--- /dev/null
+++ b/tools/perf/util/arm-spe.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Arm Statistical Profiling Extensions (SPE) support
+ * Copyright (c) 2017-2018, Arm Ltd.
+ */
+
+#ifndef INCLUDE__PERF_ARM_SPE_H__
+#define INCLUDE__PERF_ARM_SPE_H__
+
+#define ARM_SPE_PMU_NAME "arm_spe_"
+
+enum {
+ ARM_SPE_PMU_TYPE,
+ ARM_SPE_PER_CPU_MMAPS,
+ ARM_SPE_AUXTRACE_PRIV_MAX,
+};
+
+#define ARM_SPE_AUXTRACE_PRIV_SIZE (ARM_SPE_AUXTRACE_PRIV_MAX * sizeof(u64))
+
+union perf_event;
+struct perf_session;
+struct perf_pmu;
+
+struct auxtrace_record *arm_spe_recording_init(int *err,
+ struct perf_pmu *arm_spe_pmu);
+
+int arm_spe_process_auxtrace_info(union perf_event *event,
+ struct perf_session *session);
+
+struct perf_event_attr *arm_spe_pmu_default_config(struct perf_pmu *arm_spe_pmu);
+#endif
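The auxtrace info priv area is laid out according to the enum above: one u64 per entry, ARM_SPE_AUXTRACE_PRIV_SIZE bytes in total, with the PMU type at index ARM_SPE_PMU_TYPE where arm_spe_process_auxtrace_info() reads it back. A hedged sketch of how the recording side could populate it (the helper name arm_spe_fill_info_priv is hypothetical; the real code lives in the arch-specific recording support):

#include <linux/types.h>
#include "arm-spe.h"

/* Hypothetical helper: store the per-session metadata that
 * arm_spe_process_auxtrace_info() expects to find in priv[]. */
static void arm_spe_fill_info_priv(u64 *priv, u32 pmu_type)
{
	/* ARM_SPE_PER_CPU_MMAPS is the other slot defined above. */
	priv[ARM_SPE_PMU_TYPE] = pmu_type;
}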
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index a33491416400..9faf3b5367db 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -31,9 +31,6 @@
#include <sys/param.h>
#include <stdlib.h>
#include <stdio.h>
-#include <string.h>
-#include <limits.h>
-#include <errno.h>
#include <linux/list.h>
#include "../perf.h"
@@ -55,8 +52,10 @@
#include "debug.h"
#include <subcmd/parse-options.h>
+#include "cs-etm.h"
#include "intel-pt.h"
#include "intel-bts.h"
+#include "arm-spe.h"
#include "sane_ctype.h"
#include "symbol/kallsyms.h"
@@ -913,7 +912,10 @@ int perf_event__process_auxtrace_info(struct perf_tool *tool __maybe_unused,
return intel_pt_process_auxtrace_info(event, session);
case PERF_AUXTRACE_INTEL_BTS:
return intel_bts_process_auxtrace_info(event, session);
+ case PERF_AUXTRACE_ARM_SPE:
+ return arm_spe_process_auxtrace_info(event, session);
case PERF_AUXTRACE_CS_ETM:
+ return cs_etm__process_auxtrace_info(event, session);
case PERF_AUXTRACE_UNKNOWN:
default:
return -EINVAL;
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index d19e11b68de7..453c148d2158 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -43,6 +43,7 @@ enum auxtrace_type {
PERF_AUXTRACE_INTEL_PT,
PERF_AUXTRACE_INTEL_BTS,
PERF_AUXTRACE_CS_ETM,
+ PERF_AUXTRACE_ARM_SPE,
};
enum itrace_period_type {
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index 72c107fcbc5a..af7ad814b2c3 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -94,7 +94,7 @@ struct bpf_object *bpf__prepare_load(const char *filename, bool source)
err = perf_clang__compile_bpf(filename, &obj_buf, &obj_buf_sz);
perf_clang__cleanup();
if (err) {
- pr_warning("bpf: builtin compilation failed: %d, try external compiler\n", err);
+ pr_debug("bpf: builtin compilation failed: %d, try external compiler\n", err);
err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
if (err)
return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
@@ -1533,7 +1533,7 @@ int bpf__apply_obj_config(void)
(strcmp("__bpf_stdout__", \
bpf_map__name(pos)) == 0))
-int bpf__setup_stdout(struct perf_evlist *evlist __maybe_unused)
+int bpf__setup_stdout(struct perf_evlist *evlist)
{
struct bpf_map_priv *tmpl_priv = NULL;
struct bpf_object *obj, *tmp;
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 082505d08d72..32ef7bdca1cf 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -37,6 +37,15 @@ struct callchain_param callchain_param = {
CALLCHAIN_PARAM_DEFAULT
};
+/*
+ * Are there any events using DWARF callchains?
+ *
+ * I.e.
+ *
+ * -e cycles/call-graph=dwarf/
+ */
+bool dwarf_callchain_users;
+
struct callchain_param callchain_param_default = {
CALLCHAIN_PARAM_DEFAULT
};
@@ -265,6 +274,7 @@ int parse_callchain_record(const char *arg, struct callchain_param *param)
ret = 0;
param->record_mode = CALLCHAIN_DWARF;
param->dump_size = default_stack_dump_size;
+ dwarf_callchain_users = true;
tok = strtok_r(NULL, ",", &saveptr);
if (tok) {
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index b79ef2478a57..154560b1eb65 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -89,6 +89,8 @@ enum chain_value {
CCVAL_COUNT,
};
+extern bool dwarf_callchain_users;
+
struct callchain_param {
bool enabled;
enum perf_call_graph_mode record_mode;
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
index d9ffc1e6eb39..984f69144f87 100644
--- a/tools/perf/util/cgroup.c
+++ b/tools/perf/util/cgroup.c
@@ -6,6 +6,9 @@
#include "cgroup.h"
#include "evlist.h"
#include <linux/stringify.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
int nr_cgroups;
diff --git a/tools/perf/util/cs-etm-decoder/Build b/tools/perf/util/cs-etm-decoder/Build
new file mode 100644
index 000000000000..bc22c39c727f
--- /dev/null
+++ b/tools/perf/util/cs-etm-decoder/Build
@@ -0,0 +1 @@
+libperf-$(CONFIG_AUXTRACE) += cs-etm-decoder.o
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
new file mode 100644
index 000000000000..1fb01849f1c7
--- /dev/null
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
@@ -0,0 +1,513 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright(C) 2015-2018 Linaro Limited.
+ *
+ * Author: Tor Jeremiassen <tor@ti.com>
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ */
+
+#include <linux/err.h>
+#include <linux/list.h>
+#include <stdlib.h>
+#include <opencsd/c_api/opencsd_c_api.h>
+#include <opencsd/etmv4/trc_pkt_types_etmv4.h>
+#include <opencsd/ocsd_if_types.h>
+
+#include "cs-etm.h"
+#include "cs-etm-decoder.h"
+#include "intlist.h"
+#include "util.h"
+
+#define MAX_BUFFER 1024
+
+/* use raw logging */
+#ifdef CS_DEBUG_RAW
+#define CS_LOG_RAW_FRAMES
+#ifdef CS_RAW_PACKED
+#define CS_RAW_DEBUG_FLAGS (OCSD_DFRMTR_UNPACKED_RAW_OUT | \
+ OCSD_DFRMTR_PACKED_RAW_OUT)
+#else
+#define CS_RAW_DEBUG_FLAGS (OCSD_DFRMTR_UNPACKED_RAW_OUT)
+#endif
+#endif
+
+struct cs_etm_decoder {
+ void *data;
+ void (*packet_printer)(const char *msg);
+ bool trace_on;
+ dcd_tree_handle_t dcd_tree;
+ cs_etm_mem_cb_type mem_access;
+ ocsd_datapath_resp_t prev_return;
+ u32 packet_count;
+ u32 head;
+ u32 tail;
+ struct cs_etm_packet packet_buffer[MAX_BUFFER];
+};
+
+static u32
+cs_etm_decoder__mem_access(const void *context,
+ const ocsd_vaddr_t address,
+ const ocsd_mem_space_acc_t mem_space __maybe_unused,
+ const u32 req_size,
+ u8 *buffer)
+{
+ struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context;
+
+ return decoder->mem_access(decoder->data,
+ address,
+ req_size,
+ buffer);
+}
+
+int cs_etm_decoder__add_mem_access_cb(struct cs_etm_decoder *decoder,
+ u64 start, u64 end,
+ cs_etm_mem_cb_type cb_func)
+{
+ decoder->mem_access = cb_func;
+
+ if (ocsd_dt_add_callback_mem_acc(decoder->dcd_tree, start, end,
+ OCSD_MEM_SPACE_ANY,
+ cs_etm_decoder__mem_access, decoder))
+ return -1;
+
+ return 0;
+}
+
+int cs_etm_decoder__reset(struct cs_etm_decoder *decoder)
+{
+ ocsd_datapath_resp_t dp_ret;
+
+ dp_ret = ocsd_dt_process_data(decoder->dcd_tree, OCSD_OP_RESET,
+ 0, 0, NULL, NULL);
+ if (OCSD_DATA_RESP_IS_FATAL(dp_ret))
+ return -1;
+
+ return 0;
+}
+
+int cs_etm_decoder__get_packet(struct cs_etm_decoder *decoder,
+ struct cs_etm_packet *packet)
+{
+ if (!decoder || !packet)
+ return -EINVAL;
+
+ /* Nothing to do, might as well just return */
+ if (decoder->packet_count == 0)
+ return 0;
+
+ *packet = decoder->packet_buffer[decoder->head];
+
+ decoder->head = (decoder->head + 1) & (MAX_BUFFER - 1);
+
+ decoder->packet_count--;
+
+ return 1;
+}
+
+static void cs_etm_decoder__gen_etmv4_config(struct cs_etm_trace_params *params,
+ ocsd_etmv4_cfg *config)
+{
+ config->reg_configr = params->etmv4.reg_configr;
+ config->reg_traceidr = params->etmv4.reg_traceidr;
+ config->reg_idr0 = params->etmv4.reg_idr0;
+ config->reg_idr1 = params->etmv4.reg_idr1;
+ config->reg_idr2 = params->etmv4.reg_idr2;
+ config->reg_idr8 = params->etmv4.reg_idr8;
+ config->reg_idr9 = 0;
+ config->reg_idr10 = 0;
+ config->reg_idr11 = 0;
+ config->reg_idr12 = 0;
+ config->reg_idr13 = 0;
+ config->arch_ver = ARCH_V8;
+ config->core_prof = profile_CortexA;
+}
+
+static void cs_etm_decoder__print_str_cb(const void *p_context,
+ const char *msg,
+ const int str_len)
+{
+ if (p_context && str_len)
+ ((struct cs_etm_decoder *)p_context)->packet_printer(msg);
+}
+
+static int
+cs_etm_decoder__init_def_logger_printing(struct cs_etm_decoder_params *d_params,
+ struct cs_etm_decoder *decoder)
+{
+ int ret = 0;
+
+ if (d_params->packet_printer == NULL)
+ return -1;
+
+ decoder->packet_printer = d_params->packet_printer;
+
+ /*
+ * Set up a library default logger to process any printers
+ * (packet/raw frame) we add later.
+ */
+ ret = ocsd_def_errlog_init(OCSD_ERR_SEV_ERROR, 1);
+ if (ret != 0)
+ return -1;
+
+ /* no stdout / err / file output */
+ ret = ocsd_def_errlog_config_output(C_API_MSGLOGOUT_FLG_NONE, NULL);
+ if (ret != 0)
+ return -1;
+
+ /*
+ * Set the string CB for the default logger, passes strings to
+ * perf print logger.
+ */
+ ret = ocsd_def_errlog_set_strprint_cb(decoder->dcd_tree,
+ (void *)decoder,
+ cs_etm_decoder__print_str_cb);
+ if (ret != 0)
+ ret = -1;
+
+ return 0;
+}
+
+#ifdef CS_LOG_RAW_FRAMES
+static void
+cs_etm_decoder__init_raw_frame_logging(struct cs_etm_decoder_params *d_params,
+ struct cs_etm_decoder *decoder)
+{
+ /* Only log these during a --dump operation */
+ if (d_params->operation == CS_ETM_OPERATION_PRINT) {
+ /* set up a library default logger to process the
+ * raw frame printer we add later
+ */
+ ocsd_def_errlog_init(OCSD_ERR_SEV_ERROR, 1);
+
+ /* no stdout / err / file output */
+ ocsd_def_errlog_config_output(C_API_MSGLOGOUT_FLG_NONE, NULL);
+
+ /* set the string CB for the default logger,
+ * passes strings to perf print logger.
+ */
+ ocsd_def_errlog_set_strprint_cb(decoder->dcd_tree,
+ (void *)decoder,
+ cs_etm_decoder__print_str_cb);
+
+ /* use the built in library printer for the raw frames */
+ ocsd_dt_set_raw_frame_printer(decoder->dcd_tree,
+ CS_RAW_DEBUG_FLAGS);
+ }
+}
+#else
+static void
+cs_etm_decoder__init_raw_frame_logging(
+ struct cs_etm_decoder_params *d_params __maybe_unused,
+ struct cs_etm_decoder *decoder __maybe_unused)
+{
+}
+#endif
+
+static int cs_etm_decoder__create_packet_printer(struct cs_etm_decoder *decoder,
+ const char *decoder_name,
+ void *trace_config)
+{
+ u8 csid;
+
+ if (ocsd_dt_create_decoder(decoder->dcd_tree, decoder_name,
+ OCSD_CREATE_FLG_PACKET_PROC,
+ trace_config, &csid))
+ return -1;
+
+ if (ocsd_dt_set_pkt_protocol_printer(decoder->dcd_tree, csid, 0))
+ return -1;
+
+ return 0;
+}
+
+static int
+cs_etm_decoder__create_etm_packet_printer(struct cs_etm_trace_params *t_params,
+ struct cs_etm_decoder *decoder)
+{
+ const char *decoder_name;
+ ocsd_etmv4_cfg trace_config_etmv4;
+ void *trace_config;
+
+ switch (t_params->protocol) {
+ case CS_ETM_PROTO_ETMV4i:
+ cs_etm_decoder__gen_etmv4_config(t_params, &trace_config_etmv4);
+ decoder_name = OCSD_BUILTIN_DCD_ETMV4I;
+ trace_config = &trace_config_etmv4;
+ break;
+ default:
+ return -1;
+ }
+
+ return cs_etm_decoder__create_packet_printer(decoder,
+ decoder_name,
+ trace_config);
+}
+
+static void cs_etm_decoder__clear_buffer(struct cs_etm_decoder *decoder)
+{
+ int i;
+
+ decoder->head = 0;
+ decoder->tail = 0;
+ decoder->packet_count = 0;
+ for (i = 0; i < MAX_BUFFER; i++) {
+ decoder->packet_buffer[i].start_addr = 0xdeadbeefdeadbeefUL;
+ decoder->packet_buffer[i].end_addr = 0xdeadbeefdeadbeefUL;
+ decoder->packet_buffer[i].exc = false;
+ decoder->packet_buffer[i].exc_ret = false;
+ decoder->packet_buffer[i].cpu = INT_MIN;
+ }
+}
+
+static ocsd_datapath_resp_t
+cs_etm_decoder__buffer_packet(struct cs_etm_decoder *decoder,
+ const ocsd_generic_trace_elem *elem,
+ const u8 trace_chan_id,
+ enum cs_etm_sample_type sample_type)
+{
+ u32 et = 0;
+ struct int_node *inode = NULL;
+
+ if (decoder->packet_count >= MAX_BUFFER - 1)
+ return OCSD_RESP_FATAL_SYS_ERR;
+
+ /* Search the RB tree for the cpu associated with this traceID */
+ inode = intlist__find(traceid_list, trace_chan_id);
+ if (!inode)
+ return OCSD_RESP_FATAL_SYS_ERR;
+
+ et = decoder->tail;
+ decoder->packet_buffer[et].sample_type = sample_type;
+ decoder->packet_buffer[et].start_addr = elem->st_addr;
+ decoder->packet_buffer[et].end_addr = elem->en_addr;
+ decoder->packet_buffer[et].exc = false;
+ decoder->packet_buffer[et].exc_ret = false;
+ decoder->packet_buffer[et].cpu = *((int *)inode->priv);
+
+ /* Wrap around if need be */
+ et = (et + 1) & (MAX_BUFFER - 1);
+
+ decoder->tail = et;
+ decoder->packet_count++;
+
+ if (decoder->packet_count == MAX_BUFFER - 1)
+ return OCSD_RESP_WAIT;
+
+ return OCSD_RESP_CONT;
+}
+
+static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
+ const void *context,
+ const ocsd_trc_index_t indx __maybe_unused,
+ const u8 trace_chan_id __maybe_unused,
+ const ocsd_generic_trace_elem *elem)
+{
+ ocsd_datapath_resp_t resp = OCSD_RESP_CONT;
+ struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context;
+
+ switch (elem->elem_type) {
+ case OCSD_GEN_TRC_ELEM_UNKNOWN:
+ break;
+ case OCSD_GEN_TRC_ELEM_NO_SYNC:
+ decoder->trace_on = false;
+ break;
+ case OCSD_GEN_TRC_ELEM_TRACE_ON:
+ decoder->trace_on = true;
+ break;
+ case OCSD_GEN_TRC_ELEM_INSTR_RANGE:
+ resp = cs_etm_decoder__buffer_packet(decoder, elem,
+ trace_chan_id,
+ CS_ETM_RANGE);
+ break;
+ case OCSD_GEN_TRC_ELEM_EXCEPTION:
+ decoder->packet_buffer[decoder->tail].exc = true;
+ break;
+ case OCSD_GEN_TRC_ELEM_EXCEPTION_RET:
+ decoder->packet_buffer[decoder->tail].exc_ret = true;
+ break;
+ case OCSD_GEN_TRC_ELEM_PE_CONTEXT:
+ case OCSD_GEN_TRC_ELEM_EO_TRACE:
+ case OCSD_GEN_TRC_ELEM_ADDR_NACC:
+ case OCSD_GEN_TRC_ELEM_TIMESTAMP:
+ case OCSD_GEN_TRC_ELEM_CYCLE_COUNT:
+ case OCSD_GEN_TRC_ELEM_ADDR_UNKNOWN:
+ case OCSD_GEN_TRC_ELEM_EVENT:
+ case OCSD_GEN_TRC_ELEM_SWTRACE:
+ case OCSD_GEN_TRC_ELEM_CUSTOM:
+ default:
+ break;
+ }
+
+ return resp;
+}
+
+static int cs_etm_decoder__create_etm_packet_decoder(
+ struct cs_etm_trace_params *t_params,
+ struct cs_etm_decoder *decoder)
+{
+ const char *decoder_name;
+ ocsd_etmv4_cfg trace_config_etmv4;
+ void *trace_config;
+ u8 csid;
+
+ switch (t_params->protocol) {
+ case CS_ETM_PROTO_ETMV4i:
+ cs_etm_decoder__gen_etmv4_config(t_params, &trace_config_etmv4);
+ decoder_name = OCSD_BUILTIN_DCD_ETMV4I;
+ trace_config = &trace_config_etmv4;
+ break;
+ default:
+ return -1;
+ }
+
+ if (ocsd_dt_create_decoder(decoder->dcd_tree,
+ decoder_name,
+ OCSD_CREATE_FLG_FULL_DECODER,
+ trace_config, &csid))
+ return -1;
+
+ if (ocsd_dt_set_gen_elem_outfn(decoder->dcd_tree,
+ cs_etm_decoder__gen_trace_elem_printer,
+ decoder))
+ return -1;
+
+ return 0;
+}
+
+static int
+cs_etm_decoder__create_etm_decoder(struct cs_etm_decoder_params *d_params,
+ struct cs_etm_trace_params *t_params,
+ struct cs_etm_decoder *decoder)
+{
+ if (d_params->operation == CS_ETM_OPERATION_PRINT)
+ return cs_etm_decoder__create_etm_packet_printer(t_params,
+ decoder);
+ else if (d_params->operation == CS_ETM_OPERATION_DECODE)
+ return cs_etm_decoder__create_etm_packet_decoder(t_params,
+ decoder);
+
+ return -1;
+}
+
+struct cs_etm_decoder *
+cs_etm_decoder__new(int num_cpu, struct cs_etm_decoder_params *d_params,
+ struct cs_etm_trace_params t_params[])
+{
+ struct cs_etm_decoder *decoder;
+ ocsd_dcd_tree_src_t format;
+ u32 flags;
+ int i, ret;
+
+ if ((!t_params) || (!d_params))
+ return NULL;
+
+ decoder = zalloc(sizeof(*decoder));
+
+ if (!decoder)
+ return NULL;
+
+ decoder->data = d_params->data;
+ decoder->prev_return = OCSD_RESP_CONT;
+ cs_etm_decoder__clear_buffer(decoder);
+ format = (d_params->formatted ? OCSD_TRC_SRC_FRAME_FORMATTED :
+ OCSD_TRC_SRC_SINGLE);
+ flags = 0;
+ flags |= (d_params->fsyncs ? OCSD_DFRMTR_HAS_FSYNCS : 0);
+ flags |= (d_params->hsyncs ? OCSD_DFRMTR_HAS_HSYNCS : 0);
+ flags |= (d_params->frame_aligned ? OCSD_DFRMTR_FRAME_MEM_ALIGN : 0);
+
+ /*
+ * Drivers may add barrier frames when used with perf, set up to
+ * handle this. Barriers consist of a FSYNC packet repeated 4 times.
+ */
+ flags |= OCSD_DFRMTR_RESET_ON_4X_FSYNC;
+
+ /* Create decode tree for the data source */
+ decoder->dcd_tree = ocsd_create_dcd_tree(format, flags);
+
+ if (decoder->dcd_tree == 0)
+ goto err_free_decoder;
+
+ /* init library print logging support */
+ ret = cs_etm_decoder__init_def_logger_printing(d_params, decoder);
+ if (ret != 0)
+ goto err_free_decoder_tree;
+
+ /* init raw frame logging if required */
+ cs_etm_decoder__init_raw_frame_logging(d_params, decoder);
+
+ for (i = 0; i < num_cpu; i++) {
+ ret = cs_etm_decoder__create_etm_decoder(d_params,
+ &t_params[i],
+ decoder);
+ if (ret != 0)
+ goto err_free_decoder_tree;
+ }
+
+ return decoder;
+
+err_free_decoder_tree:
+ ocsd_destroy_dcd_tree(decoder->dcd_tree);
+err_free_decoder:
+ free(decoder);
+ return NULL;
+}
+
+int cs_etm_decoder__process_data_block(struct cs_etm_decoder *decoder,
+ u64 indx, const u8 *buf,
+ size_t len, size_t *consumed)
+{
+ int ret = 0;
+ ocsd_datapath_resp_t cur = OCSD_RESP_CONT;
+ ocsd_datapath_resp_t prev_return = decoder->prev_return;
+ size_t processed = 0;
+ u32 count;
+
+ while (processed < len) {
+ if (OCSD_DATA_RESP_IS_WAIT(prev_return)) {
+ cur = ocsd_dt_process_data(decoder->dcd_tree,
+ OCSD_OP_FLUSH,
+ 0,
+ 0,
+ NULL,
+ NULL);
+ } else if (OCSD_DATA_RESP_IS_CONT(prev_return)) {
+ cur = ocsd_dt_process_data(decoder->dcd_tree,
+ OCSD_OP_DATA,
+ indx + processed,
+ len - processed,
+ &buf[processed],
+ &count);
+ processed += count;
+ } else {
+ ret = -EINVAL;
+ break;
+ }
+
+ /*
+ * Return to the input code if the packet buffer is full.
+ * Flushing will get done once the packet buffer has been
+ * processed.
+ */
+ if (OCSD_DATA_RESP_IS_WAIT(cur))
+ break;
+
+ prev_return = cur;
+ }
+
+ decoder->prev_return = cur;
+ *consumed = processed;
+
+ return ret;
+}
+
+void cs_etm_decoder__free(struct cs_etm_decoder *decoder)
+{
+ if (!decoder)
+ return;
+
+ ocsd_destroy_dcd_tree(decoder->dcd_tree);
+ decoder->dcd_tree = NULL;
+ free(decoder);
+}
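The packet_buffer above is a plain power-of-two ring: head and tail advance with a mask instead of a modulo, which is only correct because MAX_BUFFER is a power of two. A small standalone illustration of that wrap-around, independent of the decoder types:

#include <assert.h>

#define RING_SIZE 1024	/* must stay a power of two, like MAX_BUFFER */

int main(void)
{
	unsigned int idx = RING_SIZE - 1;

	/* Stepping past the last slot wraps back to 0, exactly like the
	 * decoder->head / decoder->tail updates in cs-etm-decoder.c. */
	idx = (idx + 1) & (RING_SIZE - 1);
	assert(idx == 0);
	return 0;
}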
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
new file mode 100644
index 000000000000..3d2e6205d186
--- /dev/null
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
@@ -0,0 +1,105 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright(C) 2015-2018 Linaro Limited.
+ *
+ * Author: Tor Jeremiassen <tor@ti.com>
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ */
+
+#ifndef INCLUDE__CS_ETM_DECODER_H__
+#define INCLUDE__CS_ETM_DECODER_H__
+
+#include <linux/types.h>
+#include <stdio.h>
+
+struct cs_etm_decoder;
+
+struct cs_etm_buffer {
+ const unsigned char *buf;
+ size_t len;
+ u64 offset;
+ u64 ref_timestamp;
+};
+
+enum cs_etm_sample_type {
+ CS_ETM_RANGE = 1 << 0,
+};
+
+struct cs_etm_packet {
+ enum cs_etm_sample_type sample_type;
+ u64 start_addr;
+ u64 end_addr;
+ u8 exc;
+ u8 exc_ret;
+ int cpu;
+};
+
+struct cs_etm_queue;
+
+typedef u32 (*cs_etm_mem_cb_type)(struct cs_etm_queue *, u64,
+ size_t, u8 *);
+
+struct cs_etmv4_trace_params {
+ u32 reg_idr0;
+ u32 reg_idr1;
+ u32 reg_idr2;
+ u32 reg_idr8;
+ u32 reg_configr;
+ u32 reg_traceidr;
+};
+
+struct cs_etm_trace_params {
+ int protocol;
+ union {
+ struct cs_etmv4_trace_params etmv4;
+ };
+};
+
+struct cs_etm_decoder_params {
+ int operation;
+ void (*packet_printer)(const char *msg);
+ cs_etm_mem_cb_type mem_acc_cb;
+ u8 formatted;
+ u8 fsyncs;
+ u8 hsyncs;
+ u8 frame_aligned;
+ void *data;
+};
+
+/*
+ * The following enums are indexed starting with 1 to align with the
+ * open source coresight trace decoder library.
+ */
+enum {
+ CS_ETM_PROTO_ETMV3 = 1,
+ CS_ETM_PROTO_ETMV4i,
+ CS_ETM_PROTO_ETMV4d,
+};
+
+enum {
+ CS_ETM_OPERATION_PRINT = 1,
+ CS_ETM_OPERATION_DECODE,
+};
+
+int cs_etm_decoder__process_data_block(struct cs_etm_decoder *decoder,
+ u64 indx, const u8 *buf,
+ size_t len, size_t *consumed);
+
+struct cs_etm_decoder *
+cs_etm_decoder__new(int num_cpu,
+ struct cs_etm_decoder_params *d_params,
+ struct cs_etm_trace_params t_params[]);
+
+void cs_etm_decoder__free(struct cs_etm_decoder *decoder);
+
+int cs_etm_decoder__add_mem_access_cb(struct cs_etm_decoder *decoder,
+ u64 start, u64 end,
+ cs_etm_mem_cb_type cb_func);
+
+int cs_etm_decoder__get_packet(struct cs_etm_decoder *decoder,
+ struct cs_etm_packet *packet);
+
+int cs_etm_decoder__reset(struct cs_etm_decoder *decoder);
+
+#endif /* INCLUDE__CS_ETM_DECODER_H__ */
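A decoder user supplies instruction memory through cs_etm_mem_cb_type: the callback receives a queue, an address and a size, and must copy the opcodes at that address into the buffer. A minimal sketch of such a callback (the zero-filling body is a placeholder; the real implementation, cs_etm__mem_access() in cs-etm.c, resolves the address through the perf thread/map layer):

#include <string.h>
#include "cs-etm-decoder.h"

/* Placeholder callback: pretends every address reads back as zeroes.
 * A real callback must return the opcodes found at 'address'. */
static u32 dummy_mem_access(struct cs_etm_queue *etmq, u64 address,
			    size_t size, u8 *buffer)
{
	(void)etmq;
	(void)address;
	memset(buffer, 0, size);
	return size;
}

/* Registered over the whole address range, as cs_etm__alloc_queue() does:
 *	cs_etm_decoder__add_mem_access_cb(decoder, 0x0L, (u64)-1L,
 *					  dummy_mem_access);
 */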
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
new file mode 100644
index 000000000000..b9f0a53dfa65
--- /dev/null
+++ b/tools/perf/util/cs-etm.c
@@ -0,0 +1,1023 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright(C) 2015-2018 Linaro Limited.
+ *
+ * Author: Tor Jeremiassen <tor@ti.com>
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ */
+
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/types.h>
+
+#include <stdlib.h>
+
+#include "auxtrace.h"
+#include "color.h"
+#include "cs-etm.h"
+#include "cs-etm-decoder/cs-etm-decoder.h"
+#include "debug.h"
+#include "evlist.h"
+#include "intlist.h"
+#include "machine.h"
+#include "map.h"
+#include "perf.h"
+#include "thread.h"
+#include "thread_map.h"
+#include "thread-stack.h"
+#include "util.h"
+
+#define MAX_TIMESTAMP (~0ULL)
+
+struct cs_etm_auxtrace {
+ struct auxtrace auxtrace;
+ struct auxtrace_queues queues;
+ struct auxtrace_heap heap;
+ struct itrace_synth_opts synth_opts;
+ struct perf_session *session;
+ struct machine *machine;
+ struct thread *unknown_thread;
+
+ u8 timeless_decoding;
+ u8 snapshot_mode;
+ u8 data_queued;
+ u8 sample_branches;
+
+ int num_cpu;
+ u32 auxtrace_type;
+ u64 branches_sample_type;
+ u64 branches_id;
+ u64 **metadata;
+ u64 kernel_start;
+ unsigned int pmu_type;
+};
+
+struct cs_etm_queue {
+ struct cs_etm_auxtrace *etm;
+ struct thread *thread;
+ struct cs_etm_decoder *decoder;
+ struct auxtrace_buffer *buffer;
+ const struct cs_etm_state *state;
+ union perf_event *event_buf;
+ unsigned int queue_nr;
+ pid_t pid, tid;
+ int cpu;
+ u64 time;
+ u64 timestamp;
+ u64 offset;
+};
+
+static int cs_etm__update_queues(struct cs_etm_auxtrace *etm);
+static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
+ pid_t tid, u64 time_);
+
+static void cs_etm__packet_dump(const char *pkt_string)
+{
+ const char *color = PERF_COLOR_BLUE;
+ int len = strlen(pkt_string);
+
+ if (len && (pkt_string[len-1] == '\n'))
+ color_fprintf(stdout, color, " %s", pkt_string);
+ else
+ color_fprintf(stdout, color, " %s\n", pkt_string);
+
+ fflush(stdout);
+}
+
+static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
+ struct auxtrace_buffer *buffer)
+{
+ int i, ret;
+ const char *color = PERF_COLOR_BLUE;
+ struct cs_etm_decoder_params d_params;
+ struct cs_etm_trace_params *t_params;
+ struct cs_etm_decoder *decoder;
+ size_t buffer_used = 0;
+
+ fprintf(stdout, "\n");
+ color_fprintf(stdout, color,
+ ". ... CoreSight ETM Trace data: size %zu bytes\n",
+ buffer->size);
+
+ /* Use metadata to fill in trace parameters for trace decoder */
+ t_params = zalloc(sizeof(*t_params) * etm->num_cpu);
+ for (i = 0; i < etm->num_cpu; i++) {
+ t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
+ t_params[i].etmv4.reg_idr0 = etm->metadata[i][CS_ETMV4_TRCIDR0];
+ t_params[i].etmv4.reg_idr1 = etm->metadata[i][CS_ETMV4_TRCIDR1];
+ t_params[i].etmv4.reg_idr2 = etm->metadata[i][CS_ETMV4_TRCIDR2];
+ t_params[i].etmv4.reg_idr8 = etm->metadata[i][CS_ETMV4_TRCIDR8];
+ t_params[i].etmv4.reg_configr =
+ etm->metadata[i][CS_ETMV4_TRCCONFIGR];
+ t_params[i].etmv4.reg_traceidr =
+ etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
+ }
+
+ /* Set decoder parameters to simply print the trace packets */
+ d_params.packet_printer = cs_etm__packet_dump;
+ d_params.operation = CS_ETM_OPERATION_PRINT;
+ d_params.formatted = true;
+ d_params.fsyncs = false;
+ d_params.hsyncs = false;
+ d_params.frame_aligned = true;
+
+ decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);
+
+ zfree(&t_params);
+
+ if (!decoder)
+ return;
+ do {
+ size_t consumed;
+
+ ret = cs_etm_decoder__process_data_block(
+ decoder, buffer->offset,
+ &((u8 *)buffer->data)[buffer_used],
+ buffer->size - buffer_used, &consumed);
+ if (ret)
+ break;
+
+ buffer_used += consumed;
+ } while (buffer_used < buffer->size);
+
+ cs_etm_decoder__free(decoder);
+}
+
+static int cs_etm__flush_events(struct perf_session *session,
+ struct perf_tool *tool)
+{
+ int ret;
+ struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
+ struct cs_etm_auxtrace,
+ auxtrace);
+ if (dump_trace)
+ return 0;
+
+ if (!tool->ordered_events)
+ return -EINVAL;
+
+ if (!etm->timeless_decoding)
+ return -EINVAL;
+
+ ret = cs_etm__update_queues(etm);
+
+ if (ret < 0)
+ return ret;
+
+ return cs_etm__process_timeless_queues(etm, -1, MAX_TIMESTAMP - 1);
+}
+
+static void cs_etm__free_queue(void *priv)
+{
+ struct cs_etm_queue *etmq = priv;
+
+ free(etmq);
+}
+
+static void cs_etm__free_events(struct perf_session *session)
+{
+ unsigned int i;
+ struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
+ struct cs_etm_auxtrace,
+ auxtrace);
+ struct auxtrace_queues *queues = &aux->queues;
+
+ for (i = 0; i < queues->nr_queues; i++) {
+ cs_etm__free_queue(queues->queue_array[i].priv);
+ queues->queue_array[i].priv = NULL;
+ }
+
+ auxtrace_queues__free(queues);
+}
+
+static void cs_etm__free(struct perf_session *session)
+{
+ int i;
+ struct int_node *inode, *tmp;
+ struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
+ struct cs_etm_auxtrace,
+ auxtrace);
+ cs_etm__free_events(session);
+ session->auxtrace = NULL;
+
+ /* First remove all traceID/CPU# nodes for the RB tree */
+ intlist__for_each_entry_safe(inode, tmp, traceid_list)
+ intlist__remove(traceid_list, inode);
+ /* Then the RB tree itself */
+ intlist__delete(traceid_list);
+
+ for (i = 0; i < aux->num_cpu; i++)
+ zfree(&aux->metadata[i]);
+
+ zfree(&aux->metadata);
+ zfree(&aux);
+}
+
+static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u64 address,
+ size_t size, u8 *buffer)
+{
+ u8 cpumode;
+ u64 offset;
+ int len;
+ struct thread *thread;
+ struct machine *machine;
+ struct addr_location al;
+
+ if (!etmq)
+ return -1;
+
+ machine = etmq->etm->machine;
+ if (address >= etmq->etm->kernel_start)
+ cpumode = PERF_RECORD_MISC_KERNEL;
+ else
+ cpumode = PERF_RECORD_MISC_USER;
+
+ thread = etmq->thread;
+ if (!thread) {
+ if (cpumode != PERF_RECORD_MISC_KERNEL)
+ return -EINVAL;
+ thread = etmq->etm->unknown_thread;
+ }
+
+ thread__find_addr_map(thread, cpumode, MAP__FUNCTION, address, &al);
+
+ if (!al.map || !al.map->dso)
+ return 0;
+
+ if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
+ dso__data_status_seen(al.map->dso, DSO_DATA_STATUS_SEEN_ITRACE))
+ return 0;
+
+ offset = al.map->map_ip(al.map, address);
+
+ map__load(al.map);
+
+ len = dso__data_read_offset(al.map->dso, machine, offset, buffer, size);
+
+ if (len <= 0)
+ return 0;
+
+ return len;
+}
+
+static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
+ unsigned int queue_nr)
+{
+ int i;
+ struct cs_etm_decoder_params d_params;
+ struct cs_etm_trace_params *t_params;
+ struct cs_etm_queue *etmq;
+
+ etmq = zalloc(sizeof(*etmq));
+ if (!etmq)
+ return NULL;
+
+ etmq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
+ if (!etmq->event_buf)
+ goto out_free;
+
+ etmq->etm = etm;
+ etmq->queue_nr = queue_nr;
+ etmq->pid = -1;
+ etmq->tid = -1;
+ etmq->cpu = -1;
+
+ /* Use metadata to fill in trace parameters for trace decoder */
+ t_params = zalloc(sizeof(*t_params) * etm->num_cpu);
+
+ if (!t_params)
+ goto out_free;
+
+ for (i = 0; i < etm->num_cpu; i++) {
+ t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
+ t_params[i].etmv4.reg_idr0 = etm->metadata[i][CS_ETMV4_TRCIDR0];
+ t_params[i].etmv4.reg_idr1 = etm->metadata[i][CS_ETMV4_TRCIDR1];
+ t_params[i].etmv4.reg_idr2 = etm->metadata[i][CS_ETMV4_TRCIDR2];
+ t_params[i].etmv4.reg_idr8 = etm->metadata[i][CS_ETMV4_TRCIDR8];
+ t_params[i].etmv4.reg_configr =
+ etm->metadata[i][CS_ETMV4_TRCCONFIGR];
+ t_params[i].etmv4.reg_traceidr =
+ etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
+ }
+
+ /* Set decoder parameters to decode the trace packets */
+ d_params.packet_printer = cs_etm__packet_dump;
+ d_params.operation = CS_ETM_OPERATION_DECODE;
+ d_params.formatted = true;
+ d_params.fsyncs = false;
+ d_params.hsyncs = false;
+ d_params.frame_aligned = true;
+ d_params.data = etmq;
+
+ etmq->decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);
+
+ zfree(&t_params);
+
+ if (!etmq->decoder)
+ goto out_free;
+
+ /*
+ * Register a function to handle all memory accesses required by
+ * the trace decoder library.
+ */
+ if (cs_etm_decoder__add_mem_access_cb(etmq->decoder,
+ 0x0L, ((u64) -1L),
+ cs_etm__mem_access))
+ goto out_free_decoder;
+
+ etmq->offset = 0;
+
+ return etmq;
+
+out_free_decoder:
+ cs_etm_decoder__free(etmq->decoder);
+out_free:
+ zfree(&etmq->event_buf);
+ free(etmq);
+
+ return NULL;
+}
+
+static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
+ struct auxtrace_queue *queue,
+ unsigned int queue_nr)
+{
+ struct cs_etm_queue *etmq = queue->priv;
+
+ if (list_empty(&queue->head) || etmq)
+ return 0;
+
+ etmq = cs_etm__alloc_queue(etm, queue_nr);
+
+ if (!etmq)
+ return -ENOMEM;
+
+ queue->priv = etmq;
+
+ if (queue->cpu != -1)
+ etmq->cpu = queue->cpu;
+
+ etmq->tid = queue->tid;
+
+ return 0;
+}
+
+static int cs_etm__setup_queues(struct cs_etm_auxtrace *etm)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < etm->queues.nr_queues; i++) {
+ ret = cs_etm__setup_queue(etm, &etm->queues.queue_array[i], i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cs_etm__update_queues(struct cs_etm_auxtrace *etm)
+{
+ if (etm->queues.new_data) {
+ etm->queues.new_data = false;
+ return cs_etm__setup_queues(etm);
+ }
+
+ return 0;
+}
+
+static int
+cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq)
+{
+ struct auxtrace_buffer *aux_buffer = etmq->buffer;
+ struct auxtrace_buffer *old_buffer = aux_buffer;
+ struct auxtrace_queue *queue;
+
+ queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
+
+ aux_buffer = auxtrace_buffer__next(queue, aux_buffer);
+
+ /* If no more data, drop the previous auxtrace_buffer and return */
+ if (!aux_buffer) {
+ if (old_buffer)
+ auxtrace_buffer__drop_data(old_buffer);
+ buff->len = 0;
+ return 0;
+ }
+
+ etmq->buffer = aux_buffer;
+
+ /* If the aux_buffer doesn't have data associated, try to load it */
+ if (!aux_buffer->data) {
+ /* get the file desc associated with the perf data file */
+ int fd = perf_data__fd(etmq->etm->session->data);
+
+ aux_buffer->data = auxtrace_buffer__get_data(aux_buffer, fd);
+ if (!aux_buffer->data)
+ return -ENOMEM;
+ }
+
+ /* If valid, drop the previous buffer */
+ if (old_buffer)
+ auxtrace_buffer__drop_data(old_buffer);
+
+ buff->offset = aux_buffer->offset;
+ buff->len = aux_buffer->size;
+ buff->buf = aux_buffer->data;
+
+ buff->ref_timestamp = aux_buffer->reference;
+
+ return buff->len;
+}
+
+static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
+ struct auxtrace_queue *queue)
+{
+ struct cs_etm_queue *etmq = queue->priv;
+
+ /* CPU-wide tracing isn't supported yet */
+ if (queue->tid == -1)
+ return;
+
+ if ((!etmq->thread) && (etmq->tid != -1))
+ etmq->thread = machine__find_thread(etm->machine, -1,
+ etmq->tid);
+
+ if (etmq->thread) {
+ etmq->pid = etmq->thread->pid_;
+ if (queue->cpu == -1)
+ etmq->cpu = etmq->thread->cpu;
+ }
+}
+
+/*
+ * The cs etm packet encodes an instruction range between a branch target
+ * and the next taken branch. Generate sample accordingly.
+ */
+static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq,
+ struct cs_etm_packet *packet)
+{
+ int ret = 0;
+ struct cs_etm_auxtrace *etm = etmq->etm;
+ struct perf_sample sample = {.ip = 0,};
+ union perf_event *event = etmq->event_buf;
+ u64 start_addr = packet->start_addr;
+ u64 end_addr = packet->end_addr;
+
+ event->sample.header.type = PERF_RECORD_SAMPLE;
+ event->sample.header.misc = PERF_RECORD_MISC_USER;
+ event->sample.header.size = sizeof(struct perf_event_header);
+
+ sample.ip = start_addr;
+ sample.pid = etmq->pid;
+ sample.tid = etmq->tid;
+ sample.addr = end_addr;
+ sample.id = etmq->etm->branches_id;
+ sample.stream_id = etmq->etm->branches_id;
+ sample.period = 1;
+ sample.cpu = packet->cpu;
+ sample.flags = 0;
+ sample.cpumode = PERF_RECORD_MISC_USER;
+
+ ret = perf_session__deliver_synth_event(etm->session, event, &sample);
+
+ if (ret)
+ pr_err(
+ "CS ETM Trace: failed to deliver instruction event, error %d\n",
+ ret);
+
+ return ret;
+}
+
+struct cs_etm_synth {
+ struct perf_tool dummy_tool;
+ struct perf_session *session;
+};
+
+static int cs_etm__event_synth(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct cs_etm_synth *cs_etm_synth =
+ container_of(tool, struct cs_etm_synth, dummy_tool);
+
+ return perf_session__deliver_synth_event(cs_etm_synth->session,
+ event, NULL);
+}
+
+static int cs_etm__synth_event(struct perf_session *session,
+ struct perf_event_attr *attr, u64 id)
+{
+ struct cs_etm_synth cs_etm_synth;
+
+ memset(&cs_etm_synth, 0, sizeof(struct cs_etm_synth));
+ cs_etm_synth.session = session;
+
+ return perf_event__synthesize_attr(&cs_etm_synth.dummy_tool, attr, 1,
+ &id, cs_etm__event_synth);
+}
+
+static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
+ struct perf_session *session)
+{
+ struct perf_evlist *evlist = session->evlist;
+ struct perf_evsel *evsel;
+ struct perf_event_attr attr;
+ bool found = false;
+ u64 id;
+ int err;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel->attr.type == etm->pmu_type) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ pr_debug("No selected events with CoreSight Trace data\n");
+ return 0;
+ }
+
+ memset(&attr, 0, sizeof(struct perf_event_attr));
+ attr.size = sizeof(struct perf_event_attr);
+ attr.type = PERF_TYPE_HARDWARE;
+ attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
+ attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
+ PERF_SAMPLE_PERIOD;
+ if (etm->timeless_decoding)
+ attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
+ else
+ attr.sample_type |= PERF_SAMPLE_TIME;
+
+ attr.exclude_user = evsel->attr.exclude_user;
+ attr.exclude_kernel = evsel->attr.exclude_kernel;
+ attr.exclude_hv = evsel->attr.exclude_hv;
+ attr.exclude_host = evsel->attr.exclude_host;
+ attr.exclude_guest = evsel->attr.exclude_guest;
+ attr.sample_id_all = evsel->attr.sample_id_all;
+ attr.read_format = evsel->attr.read_format;
+
+ /* create new id val to be a fixed offset from evsel id */
+ id = evsel->id[0] + 1000000000;
+
+ if (!id)
+ id = 1;
+
+ if (etm->synth_opts.branches) {
+ attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
+ attr.sample_period = 1;
+ attr.sample_type |= PERF_SAMPLE_ADDR;
+ err = cs_etm__synth_event(session, &attr, id);
+ if (err)
+ return err;
+ etm->sample_branches = true;
+ etm->branches_sample_type = attr.sample_type;
+ etm->branches_id = id;
+ }
+
+ return 0;
+}
+
+static int cs_etm__sample(struct cs_etm_queue *etmq)
+{
+ int ret;
+ struct cs_etm_packet packet;
+
+ while (1) {
+ ret = cs_etm_decoder__get_packet(etmq->decoder, &packet);
+ if (ret <= 0)
+ return ret;
+
+ /*
+ * If the packet contains an instruction range, generate an
+ * instruction sequence event.
+ */
+ if (packet.sample_type & CS_ETM_RANGE)
+ cs_etm__synth_branch_sample(etmq, &packet);
+ }
+
+ return 0;
+}
+
+static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
+{
+ struct cs_etm_auxtrace *etm = etmq->etm;
+ struct cs_etm_buffer buffer;
+ size_t buffer_used, processed;
+ int err = 0;
+
+ if (!etm->kernel_start)
+ etm->kernel_start = machine__kernel_start(etm->machine);
+
+ /* Go through each buffer in the queue and decode them one by one */
+more:
+ buffer_used = 0;
+ memset(&buffer, 0, sizeof(buffer));
+ err = cs_etm__get_trace(&buffer, etmq);
+ if (err <= 0)
+ return err;
+ /*
+ * We cannot assume consecutive blocks in the data file are contiguous,
+ * reset the decoder to force re-sync.
+ */
+ err = cs_etm_decoder__reset(etmq->decoder);
+ if (err != 0)
+ return err;
+
+ /* Run trace decoder until buffer consumed or end of trace */
+ do {
+ processed = 0;
+
+ err = cs_etm_decoder__process_data_block(
+ etmq->decoder,
+ etmq->offset,
+ &buffer.buf[buffer_used],
+ buffer.len - buffer_used,
+ &processed);
+
+ if (err)
+ return err;
+
+ etmq->offset += processed;
+ buffer_used += processed;
+
+ /*
+ * Nothing to do with an error condition, let's hope the next
+ * chunk will be better.
+ */
+ err = cs_etm__sample(etmq);
+ } while (buffer.len > buffer_used);
+
+ goto more;
+
+ return err;
+}
+
+static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
+ pid_t tid, u64 time_)
+{
+ unsigned int i;
+ struct auxtrace_queues *queues = &etm->queues;
+
+ for (i = 0; i < queues->nr_queues; i++) {
+ struct auxtrace_queue *queue = &etm->queues.queue_array[i];
+ struct cs_etm_queue *etmq = queue->priv;
+
+ if (etmq && ((tid == -1) || (etmq->tid == tid))) {
+ etmq->time = time_;
+ cs_etm__set_pid_tid_cpu(etm, queue);
+ cs_etm__run_decoder(etmq);
+ }
+ }
+
+ return 0;
+}
+
+static int cs_etm__process_event(struct perf_session *session,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_tool *tool)
+{
+ int err = 0;
+ u64 timestamp;
+ struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
+ struct cs_etm_auxtrace,
+ auxtrace);
+
+ if (dump_trace)
+ return 0;
+
+ if (!tool->ordered_events) {
+ pr_err("CoreSight ETM Trace requires ordered events\n");
+ return -EINVAL;
+ }
+
+ if (!etm->timeless_decoding)
+ return -EINVAL;
+
+ if (sample->time && (sample->time != (u64) -1))
+ timestamp = sample->time;
+ else
+ timestamp = 0;
+
+ if (timestamp || etm->timeless_decoding) {
+ err = cs_etm__update_queues(etm);
+ if (err)
+ return err;
+ }
+
+ if (event->header.type == PERF_RECORD_EXIT)
+ return cs_etm__process_timeless_queues(etm,
+ event->fork.tid,
+ sample->time);
+
+ return 0;
+}
+
+static int cs_etm__process_auxtrace_event(struct perf_session *session,
+ union perf_event *event,
+ struct perf_tool *tool __maybe_unused)
+{
+ struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
+ struct cs_etm_auxtrace,
+ auxtrace);
+ if (!etm->data_queued) {
+ struct auxtrace_buffer *buffer;
+ off_t data_offset;
+ int fd = perf_data__fd(session->data);
+ bool is_pipe = perf_data__is_pipe(session->data);
+ int err;
+
+ if (is_pipe)
+ data_offset = 0;
+ else {
+ data_offset = lseek(fd, 0, SEEK_CUR);
+ if (data_offset == -1)
+ return -errno;
+ }
+
+ err = auxtrace_queues__add_event(&etm->queues, session,
+ event, data_offset, &buffer);
+ if (err)
+ return err;
+
+ if (dump_trace)
+ if (auxtrace_buffer__get_data(buffer, fd)) {
+ cs_etm__dump_event(etm, buffer);
+ auxtrace_buffer__put_data(buffer);
+ }
+ }
+
+ return 0;
+}
+
+static bool cs_etm__is_timeless_decoding(struct cs_etm_auxtrace *etm)
+{
+ struct perf_evsel *evsel;
+ struct perf_evlist *evlist = etm->session->evlist;
+ bool timeless_decoding = true;
+
+ /*
+ * Cycle through the list of events and note whether any of them
+ * has the time bit set.
+ */
+ evlist__for_each_entry(evlist, evsel) {
+ if ((evsel->attr.sample_type & PERF_SAMPLE_TIME))
+ timeless_decoding = false;
+ }
+
+ return timeless_decoding;
+}
+
+static const char * const cs_etm_global_header_fmts[] = {
+ [CS_HEADER_VERSION_0] = " Header version %llx\n",
+ [CS_PMU_TYPE_CPUS] = " PMU type/num cpus %llx\n",
+ [CS_ETM_SNAPSHOT] = " Snapshot %llx\n",
+};
+
+static const char * const cs_etm_priv_fmts[] = {
+ [CS_ETM_MAGIC] = " Magic number %llx\n",
+ [CS_ETM_CPU] = " CPU %lld\n",
+ [CS_ETM_ETMCR] = " ETMCR %llx\n",
+ [CS_ETM_ETMTRACEIDR] = " ETMTRACEIDR %llx\n",
+ [CS_ETM_ETMCCER] = " ETMCCER %llx\n",
+ [CS_ETM_ETMIDR] = " ETMIDR %llx\n",
+};
+
+static const char * const cs_etmv4_priv_fmts[] = {
+ [CS_ETM_MAGIC] = " Magic number %llx\n",
+ [CS_ETM_CPU] = " CPU %lld\n",
+ [CS_ETMV4_TRCCONFIGR] = " TRCCONFIGR %llx\n",
+ [CS_ETMV4_TRCTRACEIDR] = " TRCTRACEIDR %llx\n",
+ [CS_ETMV4_TRCIDR0] = " TRCIDR0 %llx\n",
+ [CS_ETMV4_TRCIDR1] = " TRCIDR1 %llx\n",
+ [CS_ETMV4_TRCIDR2] = " TRCIDR2 %llx\n",
+ [CS_ETMV4_TRCIDR8] = " TRCIDR8 %llx\n",
+ [CS_ETMV4_TRCAUTHSTATUS] = " TRCAUTHSTATUS %llx\n",
+};
+
+static void cs_etm__print_auxtrace_info(u64 *val, int num)
+{
+ int i, j, cpu = 0;
+
+ for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
+ fprintf(stdout, cs_etm_global_header_fmts[i], val[i]);
+
+ for (i = CS_HEADER_VERSION_0_MAX; cpu < num; cpu++) {
+ if (val[i] == __perf_cs_etmv3_magic)
+ for (j = 0; j < CS_ETM_PRIV_MAX; j++, i++)
+ fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
+ else if (val[i] == __perf_cs_etmv4_magic)
+ for (j = 0; j < CS_ETMV4_PRIV_MAX; j++, i++)
+ fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
+ else
+ /* failure.. return */
+ return;
+ }
+}
+
+int cs_etm__process_auxtrace_info(union perf_event *event,
+ struct perf_session *session)
+{
+ struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
+ struct cs_etm_auxtrace *etm = NULL;
+ struct int_node *inode;
+ unsigned int pmu_type;
+ int event_header_size = sizeof(struct perf_event_header);
+ int info_header_size;
+ int total_size = auxtrace_info->header.size;
+ int priv_size = 0;
+ int num_cpu;
+ int err = 0, idx = -1;
+ int i, j, k;
+ u64 *ptr, *hdr = NULL;
+ u64 **metadata = NULL;
+
+ /*
+ * sizeof(auxtrace_info_event::type) +
+ * sizeof(auxtrace_info_event::reserved) == 8
+ */
+ info_header_size = 8;
+
+ if (total_size < (event_header_size + info_header_size))
+ return -EINVAL;
+
+ priv_size = total_size - event_header_size - info_header_size;
+
+ /* First the global part */
+ ptr = (u64 *) auxtrace_info->priv;
+
+ /* Look for version '0' of the header */
+ if (ptr[0] != 0)
+ return -EINVAL;
+
+ hdr = zalloc(sizeof(*hdr) * CS_HEADER_VERSION_0_MAX);
+ if (!hdr)
+ return -ENOMEM;
+
+ /* Extract header information - see cs-etm.h for format */
+ for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
+ hdr[i] = ptr[i];
+ num_cpu = hdr[CS_PMU_TYPE_CPUS] & 0xffffffff;
+ pmu_type = (unsigned int) ((hdr[CS_PMU_TYPE_CPUS] >> 32) &
+ 0xffffffff);
+
+ /*
+ * Create an RB tree for traceID-CPU# tuple. Since the conversion has
+ * to be made for each packet that gets decoded, optimizing access in
+ * anything other than a sequential array is worth doing.
+ */
+ traceid_list = intlist__new(NULL);
+ if (!traceid_list) {
+ err = -ENOMEM;
+ goto err_free_hdr;
+ }
+
+ metadata = zalloc(sizeof(*metadata) * num_cpu);
+ if (!metadata) {
+ err = -ENOMEM;
+ goto err_free_traceid_list;
+ }
+
+ /*
+ * The metadata is stored in the auxtrace_info section and encodes
+ * the configuration of the ARM Embedded Trace Macrocell, which the
+ * trace decoder needs in order to properly decode the highly
+ * compressed trace.
+ */
+ for (j = 0; j < num_cpu; j++) {
+ if (ptr[i] == __perf_cs_etmv3_magic) {
+ metadata[j] = zalloc(sizeof(*metadata[j]) *
+ CS_ETM_PRIV_MAX);
+ if (!metadata[j]) {
+ err = -ENOMEM;
+ goto err_free_metadata;
+ }
+ for (k = 0; k < CS_ETM_PRIV_MAX; k++)
+ metadata[j][k] = ptr[i + k];
+
+ /* The traceID is our handle */
+ idx = metadata[j][CS_ETM_ETMTRACEIDR];
+ i += CS_ETM_PRIV_MAX;
+ } else if (ptr[i] == __perf_cs_etmv4_magic) {
+ metadata[j] = zalloc(sizeof(*metadata[j]) *
+ CS_ETMV4_PRIV_MAX);
+ if (!metadata[j]) {
+ err = -ENOMEM;
+ goto err_free_metadata;
+ }
+ for (k = 0; k < CS_ETMV4_PRIV_MAX; k++)
+ metadata[j][k] = ptr[i + k];
+
+ /* The traceID is our handle */
+ idx = metadata[j][CS_ETMV4_TRCTRACEIDR];
+ i += CS_ETMV4_PRIV_MAX;
+ }
+
+ /* Get an RB node for this CPU */
+ inode = intlist__findnew(traceid_list, idx);
+
+ /* Something went wrong, no need to continue */
+ if (!inode) {
+ err = PTR_ERR(inode);
+ goto err_free_metadata;
+ }
+
+ /*
+ * The node for that CPU should not be taken.
+ * Back out if that's the case.
+ */
+ if (inode->priv) {
+ err = -EINVAL;
+ goto err_free_metadata;
+ }
+ /* All good, associate the traceID with the CPU# */
+ inode->priv = &metadata[j][CS_ETM_CPU];
+ }
+
+ /*
+ * Each of CS_HEADER_VERSION_0_MAX, CS_ETM_PRIV_MAX and
+ * CS_ETMV4_PRIV_MAX marks how many double words are in the
+ * global metadata and in each cpu's metadata, respectively.
+ * The following tests if the correct number of double words was
+ * present in the auxtrace info section.
+ */
+ if (i * 8 != priv_size) {
+ err = -EINVAL;
+ goto err_free_metadata;
+ }
+
+ etm = zalloc(sizeof(*etm));
+
+ if (!etm) {
+ err = -ENOMEM;
+ goto err_free_metadata;
+ }
+
+ err = auxtrace_queues__init(&etm->queues);
+ if (err)
+ goto err_free_etm;
+
+ etm->session = session;
+ etm->machine = &session->machines.host;
+
+ etm->num_cpu = num_cpu;
+ etm->pmu_type = pmu_type;
+ etm->snapshot_mode = (hdr[CS_ETM_SNAPSHOT] != 0);
+ etm->metadata = metadata;
+ etm->auxtrace_type = auxtrace_info->type;
+ etm->timeless_decoding = cs_etm__is_timeless_decoding(etm);
+
+ etm->auxtrace.process_event = cs_etm__process_event;
+ etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event;
+ etm->auxtrace.flush_events = cs_etm__flush_events;
+ etm->auxtrace.free_events = cs_etm__free_events;
+ etm->auxtrace.free = cs_etm__free;
+ session->auxtrace = &etm->auxtrace;
+
+ if (dump_trace) {
+ cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu);
+ return 0;
+ }
+
+ if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
+ etm->synth_opts = *session->itrace_synth_opts;
+ } else {
+ itrace_synth_opts__set_default(&etm->synth_opts);
+ etm->synth_opts.callchain = false;
+ }
+
+ err = cs_etm__synth_events(etm, session);
+ if (err)
+ goto err_free_queues;
+
+ err = auxtrace_queues__process_index(&etm->queues, session);
+ if (err)
+ goto err_free_queues;
+
+ etm->data_queued = etm->queues.populated;
+
+ return 0;
+
+err_free_queues:
+ auxtrace_queues__free(&etm->queues);
+ session->auxtrace = NULL;
+err_free_etm:
+ zfree(&etm);
+err_free_metadata:
+ /* No need to check @metadata[j], free(NULL) is supported */
+ for (j = 0; j < num_cpu; j++)
+ free(metadata[j]);
+ zfree(&metadata);
+err_free_traceid_list:
+ intlist__delete(traceid_list);
+err_free_hdr:
+ zfree(&hdr);
+
+ return -EINVAL;
+}
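cs_etm__process_auxtrace_info() above walks a flat array of u64 values: a three-word global header (version, PMU type/CPU count, snapshot flag) followed by one per-CPU metadata block whose length depends on the leading magic number. Below is a minimal, stand-alone sketch of that walk; the constants and magic values mirror cs-etm.h, while the contents of priv[] are fabricated for illustration and not taken from a real perf.data file.

#include <stdio.h>
#include <stdint.h>

#define SKETCH_HDR_MAX		3	/* CS_HEADER_VERSION_0_MAX */
#define SKETCH_ETMV3_PRIV_MAX	6	/* CS_ETM_PRIV_MAX */
#define SKETCH_ETMV4_PRIV_MAX	9	/* CS_ETMV4_PRIV_MAX */

static const uint64_t etmv3_magic = 0x3030303030303030ULL;
static const uint64_t etmv4_magic = 0x4040404040404040ULL;

int main(void)
{
	/* header: version 0, two CPUs in the low word, PMU type 42 in the high word */
	uint64_t priv[SKETCH_HDR_MAX + SKETCH_ETMV3_PRIV_MAX + SKETCH_ETMV4_PRIV_MAX] = {
		0, (42ULL << 32) | 2, 0,
		/* CPU 0: ETMv3 block - magic, cpu, etmcr, traceid, etmccer, etmidr */
		0x3030303030303030ULL, 0, 0, 0x10, 0, 0,
		/* CPU 1: ETMv4 block - magic, cpu, trcconfigr, traceid, idr0-2, idr8, authstatus */
		0x4040404040404040ULL, 1, 0, 0x11, 0, 0, 0, 0, 0,
	};
	int i = SKETCH_HDR_MAX;
	int num_cpu = priv[1] & 0xffffffff;

	for (int j = 0; j < num_cpu; j++) {
		if (priv[i] == etmv3_magic) {
			printf("cpu %d: ETMv3, traceID %#llx\n", j,
			       (unsigned long long)priv[i + 3]);
			i += SKETCH_ETMV3_PRIV_MAX;
		} else if (priv[i] == etmv4_magic) {
			printf("cpu %d: ETMv4, traceID %#llx\n", j,
			       (unsigned long long)priv[i + 3]);
			i += SKETCH_ETMV4_PRIV_MAX;
		}
	}

	/* same sanity check as above: every double word must be accounted for */
	printf("consumed %d of %zu double words\n", i,
	       sizeof(priv) / sizeof(priv[0]));
	return 0;
}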
diff --git a/tools/perf/util/cs-etm.h b/tools/perf/util/cs-etm.h
index 3cc6bc3263fe..5864d5dca616 100644
--- a/tools/perf/util/cs-etm.h
+++ b/tools/perf/util/cs-etm.h
@@ -18,6 +18,9 @@
#ifndef INCLUDE__UTIL_PERF_CS_ETM_H__
#define INCLUDE__UTIL_PERF_CS_ETM_H__
+#include "util/event.h"
+#include "util/session.h"
+
/* Versioning header in case things need to change in the future. That way
* decoding of old snapshots is still possible.
*/
@@ -61,6 +64,9 @@ enum {
CS_ETMV4_PRIV_MAX,
};
+/* RB tree for quick conversion between traceID and CPUs */
+struct intlist *traceid_list;
+
#define KiB(x) ((x) * 1024)
#define MiB(x) ((x) * 1024 * 1024)
@@ -71,4 +77,16 @@ static const u64 __perf_cs_etmv4_magic = 0x4040404040404040ULL;
#define CS_ETMV3_PRIV_SIZE (CS_ETM_PRIV_MAX * sizeof(u64))
#define CS_ETMV4_PRIV_SIZE (CS_ETMV4_PRIV_MAX * sizeof(u64))
+#ifdef HAVE_CSTRACE_SUPPORT
+int cs_etm__process_auxtrace_info(union perf_event *event,
+ struct perf_session *session);
+#else
+static inline int
+cs_etm__process_auxtrace_info(union perf_event *event __maybe_unused,
+ struct perf_session *session __maybe_unused)
+{
+ return -1;
+}
+#endif
+
#endif
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index 48094fde0a68..d8cfc19ddb10 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -12,16 +12,6 @@
#include "util.h"
#include "debug.h"
-#ifndef O_CLOEXEC
-#ifdef __sparc__
-#define O_CLOEXEC 0x400000
-#elif defined(__alpha__) || defined(__hppa__)
-#define O_CLOEXEC 010000000
-#else
-#define O_CLOEXEC 02000000
-#endif
-#endif
-
static bool check_pipe(struct perf_data *data)
{
struct stat st;
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index d5b6f7f5baff..36ef45b2e89d 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -446,7 +446,7 @@ static int do_open(char *name)
char sbuf[STRERR_BUFSIZE];
do {
- fd = open(name, O_RDONLY);
+ fd = open(name, O_RDONLY|O_CLOEXEC);
if (fd >= 0)
return fd;
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 6276b340f893..6d311868d850 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
#include "cpumap.h"
#include "env.h"
+#include "sane_ctype.h"
#include "util.h"
#include <errno.h>
+#include <sys/utsname.h>
struct perf_env perf_env;
@@ -93,3 +95,48 @@ void cpu_cache_level__free(struct cpu_cache_level *cache)
free(cache->map);
free(cache->size);
}
+
+/*
+ * Return architecture name in a normalized form.
+ * The conversion logic comes from the Makefile.
+ */
+static const char *normalize_arch(char *arch)
+{
+ if (!strcmp(arch, "x86_64"))
+ return "x86";
+ if (arch[0] == 'i' && arch[2] == '8' && arch[3] == '6')
+ return "x86";
+ if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5))
+ return "sparc";
+ if (!strcmp(arch, "aarch64") || !strcmp(arch, "arm64"))
+ return "arm64";
+ if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110"))
+ return "arm";
+ if (!strncmp(arch, "s390", 4))
+ return "s390";
+ if (!strncmp(arch, "parisc", 6))
+ return "parisc";
+ if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3))
+ return "powerpc";
+ if (!strncmp(arch, "mips", 4))
+ return "mips";
+ if (!strncmp(arch, "sh", 2) && isdigit(arch[2]))
+ return "sh";
+
+ return arch;
+}
+
+const char *perf_env__arch(struct perf_env *env)
+{
+ struct utsname uts;
+ char *arch_name;
+
+ if (!env) { /* Assume local operation */
+ if (uname(&uts) < 0)
+ return NULL;
+ arch_name = uts.machine;
+ } else
+ arch_name = env->arch;
+
+ return normalize_arch(arch_name);
+}
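perf_env__arch() above falls back to uname(2) for local sessions and then funnels the machine string through normalize_arch(). The stand-alone sketch below copies a few of those rules and feeds them some typical uname -m strings; it is illustrative only and intentionally not the complete mapping.

#include <stdio.h>
#include <string.h>

static const char *normalize_arch_sketch(const char *arch)
{
	if (!strcmp(arch, "x86_64"))
		return "x86";
	if (arch[0] == 'i' && arch[2] == '8' && arch[3] == '6')
		return "x86";
	if (!strcmp(arch, "aarch64") || !strcmp(arch, "arm64"))
		return "arm64";
	if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110"))
		return "arm";
	if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3))
		return "powerpc";
	/* anything unrecognized is passed through unchanged */
	return arch;
}

int main(void)
{
	const char *inputs[] = { "x86_64", "i686", "aarch64", "armv7l", "ppc64le", "riscv64" };

	for (size_t i = 0; i < sizeof(inputs) / sizeof(inputs[0]); i++)
		printf("%-8s -> %s\n", inputs[i], normalize_arch_sketch(inputs[i]));
	return 0;
}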
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index 1eb35b190b34..bf970f57dce0 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -65,4 +65,6 @@ int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[]);
int perf_env__read_cpu_topology_map(struct perf_env *env);
void cpu_cache_level__free(struct cpu_cache_level *cache);
+
+const char *perf_env__arch(struct perf_env *env);
#endif /* __PERF_ENV_H */
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 97a8ef9980db..44e603c27944 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -1435,6 +1435,11 @@ size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
event->context_switch.next_prev_tid);
}
+static size_t perf_event__fprintf_lost(union perf_event *event, FILE *fp)
+{
+ return fprintf(fp, " lost %" PRIu64 "\n", event->lost.lost);
+}
+
size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
size_t ret = fprintf(fp, "PERF_RECORD_%s",
@@ -1467,6 +1472,9 @@ size_t perf_event__fprintf(union perf_event *event, FILE *fp)
case PERF_RECORD_SWITCH_CPU_WIDE:
ret += perf_event__fprintf_switch(event, fp);
break;
+ case PERF_RECORD_LOST:
+ ret += perf_event__fprintf_lost(event, fp);
+ break;
default:
ret += fprintf(fp, "\n");
}
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 1ae95efbfb95..0f794744919c 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -205,6 +205,7 @@ struct perf_sample {
u32 flags;
u16 insn_len;
u8 cpumode;
+ u16 misc;
char insn[MAX_INSN];
void *raw_data;
struct ip_callchain *callchain;
@@ -774,8 +775,7 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
u64 read_format);
int perf_event__synthesize_sample(union perf_event *event, u64 type,
u64 read_format,
- const struct perf_sample *sample,
- bool swapped);
+ const struct perf_sample *sample);
pid_t perf_event__synthesize_comm(struct perf_tool *tool,
union perf_event *event, pid_t pid,
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index c6c891e154a6..e5fc14e53c05 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -25,6 +25,7 @@
#include "parse-events.h"
#include <subcmd/parse-options.h>
+#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
@@ -125,7 +126,7 @@ static void perf_evlist__purge(struct perf_evlist *evlist)
void perf_evlist__exit(struct perf_evlist *evlist)
{
zfree(&evlist->mmap);
- zfree(&evlist->backward_mmap);
+ zfree(&evlist->overwrite_mmap);
fdarray__exit(&evlist->pollfd);
}
@@ -257,7 +258,7 @@ int perf_evlist__add_dummy(struct perf_evlist *evlist)
.config = PERF_COUNT_SW_DUMMY,
.size = sizeof(attr), /* to capture ABI version */
};
- struct perf_evsel *evsel = perf_evsel__new(&attr);
+ struct perf_evsel *evsel = perf_evsel__new_idx(&attr, evlist->nr_entries);
if (evsel == NULL)
return -ENOMEM;
@@ -675,11 +676,11 @@ static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
{
int i;
- if (!evlist->backward_mmap)
+ if (!evlist->overwrite_mmap)
return 0;
for (i = 0; i < evlist->nr_mmaps; i++) {
- int fd = evlist->backward_mmap[i].fd;
+ int fd = evlist->overwrite_mmap[i].fd;
int err;
if (fd < 0)
@@ -711,19 +712,7 @@ union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int
* No need for a read-write ring buffer: the kernel stops outputting when
* it hits md->prev (perf_mmap__consume()).
*/
- return perf_mmap__read_forward(md, evlist->overwrite);
-}
-
-union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
-{
- struct perf_mmap *md = &evlist->mmap[idx];
-
- /*
- * No need to check messup for backward ring buffer:
- * We can always read arbitrary long data from a backward
- * ring buffer unless we forget to pause it before reading.
- */
- return perf_mmap__read_backward(md);
+ return perf_mmap__read_forward(md);
}
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
@@ -731,14 +720,9 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
return perf_evlist__mmap_read_forward(evlist, idx);
}
-void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
-{
- perf_mmap__read_catchup(&evlist->mmap[idx]);
-}
-
void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
- perf_mmap__consume(&evlist->mmap[idx], evlist->overwrite);
+ perf_mmap__consume(&evlist->mmap[idx], false);
}
static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
@@ -749,16 +733,16 @@ static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
for (i = 0; i < evlist->nr_mmaps; i++)
perf_mmap__munmap(&evlist->mmap[i]);
- if (evlist->backward_mmap)
+ if (evlist->overwrite_mmap)
for (i = 0; i < evlist->nr_mmaps; i++)
- perf_mmap__munmap(&evlist->backward_mmap[i]);
+ perf_mmap__munmap(&evlist->overwrite_mmap[i]);
}
void perf_evlist__munmap(struct perf_evlist *evlist)
{
perf_evlist__munmap_nofree(evlist);
zfree(&evlist->mmap);
- zfree(&evlist->backward_mmap);
+ zfree(&evlist->overwrite_mmap);
}
static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
@@ -800,7 +784,7 @@ perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
struct mmap_params *mp, int cpu_idx,
- int thread, int *_output, int *_output_backward)
+ int thread, int *_output, int *_output_overwrite)
{
struct perf_evsel *evsel;
int revent;
@@ -812,18 +796,20 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
int fd;
int cpu;
+ mp->prot = PROT_READ | PROT_WRITE;
if (evsel->attr.write_backward) {
- output = _output_backward;
- maps = evlist->backward_mmap;
+ output = _output_overwrite;
+ maps = evlist->overwrite_mmap;
if (!maps) {
maps = perf_evlist__alloc_mmap(evlist);
if (!maps)
return -1;
- evlist->backward_mmap = maps;
+ evlist->overwrite_mmap = maps;
if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
}
+ mp->prot &= ~PROT_WRITE;
}
if (evsel->system_wide && thread)
@@ -884,14 +870,14 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
pr_debug2("perf event ring buffer mmapped per cpu\n");
for (cpu = 0; cpu < nr_cpus; cpu++) {
int output = -1;
- int output_backward = -1;
+ int output_overwrite = -1;
auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
true);
for (thread = 0; thread < nr_threads; thread++) {
if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
- thread, &output, &output_backward))
+ thread, &output, &output_overwrite))
goto out_unmap;
}
}
@@ -912,13 +898,13 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
pr_debug2("perf event ring buffer mmapped per thread\n");
for (thread = 0; thread < nr_threads; thread++) {
int output = -1;
- int output_backward = -1;
+ int output_overwrite = -1;
auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
false);
if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
- &output, &output_backward))
+ &output, &output_overwrite))
goto out_unmap;
}
@@ -1052,15 +1038,18 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
* Return: %0 on success, negative error code otherwise.
*/
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
- bool overwrite, unsigned int auxtrace_pages,
+ unsigned int auxtrace_pages,
bool auxtrace_overwrite)
{
struct perf_evsel *evsel;
const struct cpu_map *cpus = evlist->cpus;
const struct thread_map *threads = evlist->threads;
- struct mmap_params mp = {
- .prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
- };
+ /*
+ * Delay setting mp.prot: set it before calling perf_mmap__mmap.
+ * Its value is decided by evsel's write_backward.
+ * So &mp should not be passed through const pointer.
+ */
+ struct mmap_params mp;
if (!evlist->mmap)
evlist->mmap = perf_evlist__alloc_mmap(evlist);
@@ -1070,7 +1059,6 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
return -ENOMEM;
- evlist->overwrite = overwrite;
evlist->mmap_len = perf_evlist__mmap_size(pages);
pr_debug("mmap size %zuB\n", evlist->mmap_len);
mp.mask = evlist->mmap_len - page_size - 1;
@@ -1091,10 +1079,9 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
return perf_evlist__mmap_per_cpu(evlist, &mp);
}
-int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
- bool overwrite)
+int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages)
{
- return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
+ return perf_evlist__mmap_ex(evlist, pages, 0, false);
}
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
@@ -1102,7 +1089,8 @@ int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
struct cpu_map *cpus;
struct thread_map *threads;
- threads = thread_map__new_str(target->pid, target->tid, target->uid);
+ threads = thread_map__new_str(target->pid, target->tid, target->uid,
+ target->per_thread);
if (!threads)
return -1;
@@ -1582,6 +1570,17 @@ int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *even
return perf_evsel__parse_sample(evsel, event, sample);
}
+int perf_evlist__parse_sample_timestamp(struct perf_evlist *evlist,
+ union perf_event *event,
+ u64 *timestamp)
+{
+ struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
+
+ if (!evsel)
+ return -EFAULT;
+ return perf_evsel__parse_sample_timestamp(evsel, event, timestamp);
+}
+
size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
struct perf_evsel *evsel;
@@ -1739,13 +1738,13 @@ void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
RESUME,
} action = NONE;
- if (!evlist->backward_mmap)
+ if (!evlist->overwrite_mmap)
return;
switch (old_state) {
case BKW_MMAP_NOTREADY: {
if (state != BKW_MMAP_RUNNING)
- goto state_err;;
+ goto state_err;
break;
}
case BKW_MMAP_RUNNING: {
@@ -1786,3 +1785,15 @@ void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
state_err:
return;
}
+
+bool perf_evlist__exclude_kernel(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (!evsel->attr.exclude_kernel)
+ return false;
+ }
+
+ return true;
+}
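A consequence of the overwrite_mmap rework above is that mp.prot is now chosen per event: the normal ring buffer stays PROT_READ|PROT_WRITE so user space can move the tail, while a write_backward (overwrite) buffer is mapped read-only. The sketch below mirrors only the mp->prot decision from perf_evlist__mmap_per_evsel(); the helper name and the printout are made up for illustration.

#include <stdio.h>
#include <sys/mman.h>

static int ring_buffer_prot(int write_backward)
{
	int prot = PROT_READ | PROT_WRITE;

	if (write_backward)
		prot &= ~PROT_WRITE;	/* overwrite mode: the kernel is the only writer */

	return prot;
}

int main(void)
{
	printf("forward  : %s\n",
	       ring_buffer_prot(0) & PROT_WRITE ? "PROT_READ|PROT_WRITE" : "PROT_READ");
	printf("overwrite: %s\n",
	       ring_buffer_prot(1) & PROT_WRITE ? "PROT_READ|PROT_WRITE" : "PROT_READ");
	return 0;
}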
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index e72ae64c11ac..336b838e6957 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -7,7 +7,6 @@
#include <linux/refcount.h>
#include <linux/list.h>
#include <api/fd/array.h>
-#include <fcntl.h>
#include <stdio.h>
#include "../perf.h"
#include "event.h"
@@ -31,7 +30,6 @@ struct perf_evlist {
int nr_entries;
int nr_groups;
int nr_mmaps;
- bool overwrite;
bool enabled;
bool has_user_cpus;
size_t mmap_len;
@@ -45,12 +43,14 @@ struct perf_evlist {
} workload;
struct fdarray pollfd;
struct perf_mmap *mmap;
- struct perf_mmap *backward_mmap;
+ struct perf_mmap *overwrite_mmap;
struct thread_map *threads;
struct cpu_map *cpus;
struct perf_evsel *selected;
struct events_stats stats;
struct perf_env *env;
+ u64 first_sample_time;
+ u64 last_sample_time;
};
struct perf_evsel_str_handler {
@@ -133,10 +133,6 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx);
union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist,
int idx);
-union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist,
- int idx);
-void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx);
-
void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);
int perf_evlist__open(struct perf_evlist *evlist);
@@ -169,10 +165,9 @@ int perf_evlist__parse_mmap_pages(const struct option *opt,
unsigned long perf_event_mlock_kb_in_pages(void);
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
- bool overwrite, unsigned int auxtrace_pages,
+ unsigned int auxtrace_pages,
bool auxtrace_overwrite);
-int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
- bool overwrite);
+int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages);
void perf_evlist__munmap(struct perf_evlist *evlist);
size_t perf_evlist__mmap_size(unsigned long pages);
@@ -205,6 +200,10 @@ u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist);
int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
struct perf_sample *sample);
+int perf_evlist__parse_sample_timestamp(struct perf_evlist *evlist,
+ union perf_event *event,
+ u64 *timestamp);
+
bool perf_evlist__valid_sample_type(struct perf_evlist *evlist);
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist);
bool perf_evlist__valid_read_format(struct perf_evlist *evlist);
@@ -312,4 +311,6 @@ perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, const char *str);
struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
union perf_event *event);
+
+bool perf_evlist__exclude_kernel(struct perf_evlist *evlist);
#endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index f894893c203d..ef351688b797 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -36,21 +36,12 @@
#include "debug.h"
#include "trace-event.h"
#include "stat.h"
+#include "memswap.h"
#include "util/parse-branch-options.h"
#include "sane_ctype.h"
-static struct {
- bool sample_id_all;
- bool exclude_guest;
- bool mmap2;
- bool cloexec;
- bool clockid;
- bool clockid_wrong;
- bool lbr_flags;
- bool write_backward;
- bool group_read;
-} perf_missing_features;
+struct perf_missing_features perf_missing_features;
static clockid_t clockid;
@@ -650,9 +641,9 @@ int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
return ret;
}
-void perf_evsel__config_callchain(struct perf_evsel *evsel,
- struct record_opts *opts,
- struct callchain_param *param)
+static void __perf_evsel__config_callchain(struct perf_evsel *evsel,
+ struct record_opts *opts,
+ struct callchain_param *param)
{
bool function = perf_evsel__is_function_event(evsel);
struct perf_event_attr *attr = &evsel->attr;
@@ -698,6 +689,14 @@ void perf_evsel__config_callchain(struct perf_evsel *evsel,
}
}
+void perf_evsel__config_callchain(struct perf_evsel *evsel,
+ struct record_opts *opts,
+ struct callchain_param *param)
+{
+ if (param->enabled)
+ return __perf_evsel__config_callchain(evsel, opts, param);
+}
+
static void
perf_evsel__reset_callgraph(struct perf_evsel *evsel,
struct callchain_param *param)
@@ -717,28 +716,34 @@ perf_evsel__reset_callgraph(struct perf_evsel *evsel,
}
static void apply_config_terms(struct perf_evsel *evsel,
- struct record_opts *opts)
+ struct record_opts *opts, bool track)
{
struct perf_evsel_config_term *term;
struct list_head *config_terms = &evsel->config_terms;
struct perf_event_attr *attr = &evsel->attr;
- struct callchain_param param;
+ /* callgraph default */
+ struct callchain_param param = {
+ .record_mode = callchain_param.record_mode,
+ };
u32 dump_size = 0;
int max_stack = 0;
const char *callgraph_buf = NULL;
- /* callgraph default */
- param.record_mode = callchain_param.record_mode;
-
list_for_each_entry(term, config_terms, list) {
switch (term->type) {
case PERF_EVSEL__CONFIG_TERM_PERIOD:
- attr->sample_period = term->val.period;
- attr->freq = 0;
+ if (!(term->weak && opts->user_interval != ULLONG_MAX)) {
+ attr->sample_period = term->val.period;
+ attr->freq = 0;
+ perf_evsel__reset_sample_bit(evsel, PERIOD);
+ }
break;
case PERF_EVSEL__CONFIG_TERM_FREQ:
- attr->sample_freq = term->val.freq;
- attr->freq = 1;
+ if (!(term->weak && opts->user_freq != UINT_MAX)) {
+ attr->sample_freq = term->val.freq;
+ attr->freq = 1;
+ perf_evsel__set_sample_bit(evsel, PERIOD);
+ }
break;
case PERF_EVSEL__CONFIG_TERM_TIME:
if (term->val.time)
@@ -775,6 +780,8 @@ static void apply_config_terms(struct perf_evsel *evsel,
case PERF_EVSEL__CONFIG_TERM_OVERWRITE:
attr->write_backward = term->val.overwrite ? 1 : 0;
break;
+ case PERF_EVSEL__CONFIG_TERM_DRV_CFG:
+ break;
default:
break;
}
@@ -782,6 +789,8 @@ static void apply_config_terms(struct perf_evsel *evsel,
/* User explicitly set per-event callgraph, clear the old setting and reset. */
if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) {
+ bool sample_address = false;
+
if (max_stack) {
param.max_stack = max_stack;
if (callgraph_buf == NULL)
@@ -801,6 +810,8 @@ static void apply_config_terms(struct perf_evsel *evsel,
evsel->name);
return;
}
+ if (param.record_mode == CALLCHAIN_DWARF)
+ sample_address = true;
}
}
if (dump_size > 0) {
@@ -813,8 +824,14 @@ static void apply_config_terms(struct perf_evsel *evsel,
perf_evsel__reset_callgraph(evsel, &callchain_param);
/* set perf-event callgraph */
- if (param.enabled)
+ if (param.enabled) {
+ if (sample_address) {
+ perf_evsel__set_sample_bit(evsel, ADDR);
+ perf_evsel__set_sample_bit(evsel, DATA_SRC);
+ evsel->attr.mmap_data = track;
+ }
perf_evsel__config_callchain(evsel, opts, &param);
+ }
}
}
@@ -944,9 +961,6 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
if (target__has_cpu(&opts->target) || opts->sample_cpu)
perf_evsel__set_sample_bit(evsel, CPU);
- if (opts->period)
- perf_evsel__set_sample_bit(evsel, PERIOD);
-
/*
* When the user explicitly disabled time don't force it here.
*/
@@ -1045,9 +1059,17 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
* Apply event specific term settings,
* it overloads any global configuration.
*/
- apply_config_terms(evsel, opts);
+ apply_config_terms(evsel, opts, track);
evsel->ignore_missing_thread = opts->ignore_missing_thread;
+
+ /* The --period option takes the precedence. */
+ if (opts->period_set) {
+ if (opts->period)
+ perf_evsel__set_sample_bit(evsel, PERIOD);
+ else
+ perf_evsel__reset_sample_bit(evsel, PERIOD);
+ }
}
static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
@@ -1371,7 +1393,7 @@ perf_evsel__process_group_data(struct perf_evsel *leader,
static int
perf_evsel__read_group(struct perf_evsel *leader, int cpu, int thread)
{
- struct perf_stat_evsel *ps = leader->priv;
+ struct perf_stat_evsel *ps = leader->stats;
u64 read_format = leader->attr.read_format;
int size = perf_evsel__read_size(leader);
u64 *data = ps->group_data;
@@ -1570,6 +1592,7 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
PRINT_ATTRf(use_clockid, p_unsigned);
PRINT_ATTRf(context_switch, p_unsigned);
PRINT_ATTRf(write_backward, p_unsigned);
+ PRINT_ATTRf(namespaces, p_unsigned);
PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
PRINT_ATTRf(bp_type, p_unsigned);
@@ -1592,10 +1615,46 @@ static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
return fprintf(fp, " %-32s %s\n", name, val);
}
+static void perf_evsel__remove_fd(struct perf_evsel *pos,
+ int nr_cpus, int nr_threads,
+ int thread_idx)
+{
+ for (int cpu = 0; cpu < nr_cpus; cpu++)
+ for (int thread = thread_idx; thread < nr_threads - 1; thread++)
+ FD(pos, cpu, thread) = FD(pos, cpu, thread + 1);
+}
+
+static int update_fds(struct perf_evsel *evsel,
+ int nr_cpus, int cpu_idx,
+ int nr_threads, int thread_idx)
+{
+ struct perf_evsel *pos;
+
+ if (cpu_idx >= nr_cpus || thread_idx >= nr_threads)
+ return -EINVAL;
+
+ evlist__for_each_entry(evsel->evlist, pos) {
+ nr_cpus = pos != evsel ? nr_cpus : cpu_idx;
+
+ perf_evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx);
+
+ /*
+ * Since the fds for the next evsel have not been created,
+ * there is no need to iterate the whole event list.
+ */
+ if (pos == evsel)
+ break;
+ }
+ return 0;
+}
+
static bool ignore_missing_thread(struct perf_evsel *evsel,
+ int nr_cpus, int cpu,
struct thread_map *threads,
int thread, int err)
{
+ pid_t ignore_pid = thread_map__pid(threads, thread);
+
if (!evsel->ignore_missing_thread)
return false;
@@ -1611,11 +1670,18 @@ static bool ignore_missing_thread(struct perf_evsel *evsel,
if (threads->nr == 1)
return false;
+ /*
+ * We should remove the fd for the missing thread first
+ * because thread_map__remove() will decrease threads->nr.
+ */
+ if (update_fds(evsel, nr_cpus, cpu, threads->nr, thread))
+ return false;
+
if (thread_map__remove(threads, thread))
return false;
pr_warning("WARNING: Ignored open failure for pid %d\n",
- thread_map__pid(threads, thread));
+ ignore_pid);
return true;
}
@@ -1720,7 +1786,7 @@ retry_open:
if (fd < 0) {
err = -errno;
- if (ignore_missing_thread(evsel, threads, thread, err)) {
+ if (ignore_missing_thread(evsel, cpus->nr, cpu, threads, thread, err)) {
/*
* We just removed 1 thread, so take a step
* back on thread index and lower the upper
@@ -1956,6 +2022,20 @@ static inline bool overflow(const void *endp, u16 max_size, const void *offset,
#define OVERFLOW_CHECK_u64(offset) \
OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
+static int
+perf_event__check_size(union perf_event *event, unsigned int sample_size)
+{
+ /*
+ * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
+ * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to
+ * check the format does not go past the end of the event.
+ */
+ if (sample_size + sizeof(event->header) > event->header.size)
+ return -EFAULT;
+
+ return 0;
+}
+
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
struct perf_sample *data)
{
@@ -1977,6 +2057,9 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
data->stream_id = data->id = data->time = -1ULL;
data->period = evsel->attr.sample_period;
data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+ data->misc = event->header.misc;
+ data->id = -1ULL;
+ data->data_src = PERF_MEM_DATA_SRC_NONE;
if (event->header.type != PERF_RECORD_SAMPLE) {
if (!evsel->attr.sample_id_all)
@@ -1986,15 +2069,9 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
array = event->sample.array;
- /*
- * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
- * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to
- * check the format does not go past the end of the event.
- */
- if (evsel->sample_size + sizeof(event->header) > event->header.size)
+ if (perf_event__check_size(event, evsel->sample_size))
return -EFAULT;
- data->id = -1ULL;
if (type & PERF_SAMPLE_IDENTIFIER) {
data->id = *array;
array++;
@@ -2024,7 +2101,6 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
array++;
}
- data->addr = 0;
if (type & PERF_SAMPLE_ADDR) {
data->addr = *array;
array++;
@@ -2116,14 +2192,27 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
if (type & PERF_SAMPLE_RAW) {
OVERFLOW_CHECK_u64(array);
u.val64 = *array;
- if (WARN_ONCE(swapped,
- "Endianness of raw data not corrected!\n")) {
- /* undo swap of u64, then swap on individual u32s */
+
+ /*
+ * Undo swap of u64, then swap on individual u32s,
+ * get the size of the raw area and undo all of the
+ * swap. The pevent interface handles endianness by
+ * itself.
+ */
+ if (swapped) {
u.val64 = bswap_64(u.val64);
u.val32[0] = bswap_32(u.val32[0]);
u.val32[1] = bswap_32(u.val32[1]);
}
data->raw_size = u.val32[0];
+
+ /*
+ * The raw data is aligned on 64bits including the
+ * u32 size, so it's safe to use mem_bswap_64.
+ */
+ if (swapped)
+ mem_bswap_64((void *) array, data->raw_size);
+
array = (void *)array + sizeof(u32);
OVERFLOW_CHECK(array, data->raw_size, max_size);
@@ -2188,14 +2277,12 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
array++;
}
- data->data_src = PERF_MEM_DATA_SRC_NONE;
if (type & PERF_SAMPLE_DATA_SRC) {
OVERFLOW_CHECK_u64(array);
data->data_src = *array;
array++;
}
- data->transaction = 0;
if (type & PERF_SAMPLE_TRANSACTION) {
OVERFLOW_CHECK_u64(array);
data->transaction = *array;
@@ -2228,6 +2315,50 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
return 0;
}
+int perf_evsel__parse_sample_timestamp(struct perf_evsel *evsel,
+ union perf_event *event,
+ u64 *timestamp)
+{
+ u64 type = evsel->attr.sample_type;
+ const u64 *array;
+
+ if (!(type & PERF_SAMPLE_TIME))
+ return -1;
+
+ if (event->header.type != PERF_RECORD_SAMPLE) {
+ struct perf_sample data = {
+ .time = -1ULL,
+ };
+
+ if (!evsel->attr.sample_id_all)
+ return -1;
+ if (perf_evsel__parse_id_sample(evsel, event, &data))
+ return -1;
+
+ *timestamp = data.time;
+ return 0;
+ }
+
+ array = event->sample.array;
+
+ if (perf_event__check_size(event, evsel->sample_size))
+ return -EFAULT;
+
+ if (type & PERF_SAMPLE_IDENTIFIER)
+ array++;
+
+ if (type & PERF_SAMPLE_IP)
+ array++;
+
+ if (type & PERF_SAMPLE_TID)
+ array++;
+
+ if (type & PERF_SAMPLE_TIME)
+ *timestamp = *array;
+
+ return 0;
+}
+
size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
u64 read_format)
{
@@ -2338,8 +2469,7 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
int perf_event__synthesize_sample(union perf_event *event, u64 type,
u64 read_format,
- const struct perf_sample *sample,
- bool swapped)
+ const struct perf_sample *sample)
{
u64 *array;
size_t sz;
@@ -2364,15 +2494,6 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
if (type & PERF_SAMPLE_TID) {
u.val32[0] = sample->pid;
u.val32[1] = sample->tid;
- if (swapped) {
- /*
- * Inverse of what is done in perf_evsel__parse_sample
- */
- u.val32[0] = bswap_32(u.val32[0]);
- u.val32[1] = bswap_32(u.val32[1]);
- u.val64 = bswap_64(u.val64);
- }
-
*array = u.val64;
array++;
}
@@ -2399,13 +2520,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
if (type & PERF_SAMPLE_CPU) {
u.val32[0] = sample->cpu;
- if (swapped) {
- /*
- * Inverse of what is done in perf_evsel__parse_sample
- */
- u.val32[0] = bswap_32(u.val32[0]);
- u.val64 = bswap_64(u.val64);
- }
+ u.val32[1] = 0;
*array = u.val64;
array++;
}
@@ -2452,15 +2567,6 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
if (type & PERF_SAMPLE_RAW) {
u.val32[0] = sample->raw_size;
- if (WARN_ONCE(swapped,
- "Endianness of raw data not corrected!\n")) {
- /*
- * Inverse of what is done in perf_evsel__parse_sample
- */
- u.val32[0] = bswap_32(u.val32[0]);
- u.val32[1] = bswap_32(u.val32[1]);
- u.val64 = bswap_64(u.val64);
- }
*array = u.val64;
array = (void *)array + sizeof(u32);
@@ -2739,8 +2845,9 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
break;
case EOPNOTSUPP:
if (evsel->attr.sample_period != 0)
- return scnprintf(msg, size, "%s",
- "PMU Hardware doesn't support sampling/overflow-interrupts.");
+ return scnprintf(msg, size,
+ "%s: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'",
+ perf_evsel__name(evsel));
if (evsel->attr.precise_ip)
return scnprintf(msg, size, "%s",
"\'precise\' request may not be supported. Try removing 'p' modifier.");
@@ -2777,16 +2884,9 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
perf_evsel__name(evsel));
}
-char *perf_evsel__env_arch(struct perf_evsel *evsel)
-{
- if (evsel && evsel->evlist && evsel->evlist->env)
- return evsel->evlist->env->arch;
- return NULL;
-}
-
-char *perf_evsel__env_cpuid(struct perf_evsel *evsel)
+struct perf_env *perf_evsel__env(struct perf_evsel *evsel)
{
- if (evsel && evsel->evlist && evsel->evlist->env)
- return evsel->evlist->env->cpuid;
+ if (evsel && evsel->evlist)
+ return evsel->evlist->env;
return NULL;
}
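perf_evsel__parse_sample_timestamp() added above relies on the fixed ordering of the leading PERF_SAMPLE_* fields: it skips one u64 per preceding bit that is set, then reads the time. A stand-alone sketch of that walk follows; the bit values are the ones defined in uapi/linux/perf_event.h, while the sample buffer and the helper name are fabricated for illustration.

#include <stdio.h>
#include <stdint.h>

#define SAMPLE_IP         (1U << 0)
#define SAMPLE_TID        (1U << 1)
#define SAMPLE_TIME       (1U << 2)
#define SAMPLE_IDENTIFIER (1U << 16)

static int parse_timestamp(uint64_t type, const uint64_t *array, uint64_t *ts)
{
	if (!(type & SAMPLE_TIME))
		return -1;
	/* skip the fields laid out before the timestamp */
	if (type & SAMPLE_IDENTIFIER)
		array++;
	if (type & SAMPLE_IP)
		array++;
	if (type & SAMPLE_TID)
		array++;
	*ts = *array;
	return 0;
}

int main(void)
{
	/* id, ip, pid/tid, time - made-up values */
	uint64_t sample[] = { 7, 0xffffffff81000000ULL, 1234, 987654321ULL };
	uint64_t type = SAMPLE_IDENTIFIER | SAMPLE_IP | SAMPLE_TID | SAMPLE_TIME;
	uint64_t ts = 0;

	if (!parse_timestamp(type, sample, &ts))
		printf("timestamp: %llu\n", (unsigned long long)ts);
	return 0;
}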
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 9277df96ffda..a7487c6d1866 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -38,7 +38,7 @@ struct cgroup_sel;
* It is allocated within event parsing and attached to
* perf_evsel::config_terms list head.
*/
-enum {
+enum term_type {
PERF_EVSEL__CONFIG_TERM_PERIOD,
PERF_EVSEL__CONFIG_TERM_FREQ,
PERF_EVSEL__CONFIG_TERM_TIME,
@@ -49,12 +49,11 @@ enum {
PERF_EVSEL__CONFIG_TERM_OVERWRITE,
PERF_EVSEL__CONFIG_TERM_DRV_CFG,
PERF_EVSEL__CONFIG_TERM_BRANCH,
- PERF_EVSEL__CONFIG_TERM_MAX,
};
struct perf_evsel_config_term {
struct list_head list;
- int type;
+ enum term_type type;
union {
u64 period;
u64 freq;
@@ -67,6 +66,7 @@ struct perf_evsel_config_term {
bool overwrite;
char *branch;
} val;
+ bool weak;
};
struct perf_stat_evsel;
@@ -149,6 +149,20 @@ union u64_swap {
u32 val32[2];
};
+struct perf_missing_features {
+ bool sample_id_all;
+ bool exclude_guest;
+ bool mmap2;
+ bool cloexec;
+ bool clockid;
+ bool clockid_wrong;
+ bool lbr_flags;
+ bool write_backward;
+ bool group_read;
+};
+
+extern struct perf_missing_features perf_missing_features;
+
struct cpu_map;
struct target;
struct thread_map;
@@ -338,6 +352,10 @@ static inline int perf_evsel__read_on_cpu_scaled(struct perf_evsel *evsel,
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
struct perf_sample *sample);
+int perf_evsel__parse_sample_timestamp(struct perf_evsel *evsel,
+ union perf_event *event,
+ u64 *timestamp);
+
static inline struct perf_evsel *perf_evsel__next(struct perf_evsel *evsel)
{
return list_entry(evsel->node.next, struct perf_evsel, node);
@@ -442,7 +460,6 @@ typedef int (*attr__fprintf_f)(FILE *, const char *, const char *, void *);
int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
attr__fprintf_f attr__fprintf, void *priv);
-char *perf_evsel__env_arch(struct perf_evsel *evsel);
-char *perf_evsel__env_cpuid(struct perf_evsel *evsel);
+struct perf_env *perf_evsel__env(struct perf_evsel *evsel);
#endif /* __PERF_EVSEL_H */
diff --git a/tools/perf/util/generate-cmdlist.sh b/tools/perf/util/generate-cmdlist.sh
index 9bbcec4e3365..ff17920a5ebc 100755
--- a/tools/perf/util/generate-cmdlist.sh
+++ b/tools/perf/util/generate-cmdlist.sh
@@ -38,7 +38,7 @@ do
done
echo "#endif /* HAVE_LIBELF_SUPPORT */"
-echo "#ifdef HAVE_LIBAUDIT_SUPPORT"
+echo "#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE)"
sed -n -e 's/^perf-\([^ ]*\)[ ].* audit*/\1/p' command-list.txt |
sort |
while read cmd
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 7c0e9d587bfa..a326e0d8b5b6 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -15,9 +15,8 @@
#include <linux/bitops.h>
#include <linux/stringify.h>
#include <sys/stat.h>
-#include <sys/types.h>
#include <sys/utsname.h>
-#include <unistd.h>
+#include <linux/time64.h>
#include "evlist.h"
#include "evsel.h"
@@ -37,6 +36,7 @@
#include <api/fs/fs.h>
#include "asm/bug.h"
#include "tool.h"
+#include "time-utils.h"
#include "sane_ctype.h"
@@ -1182,6 +1182,20 @@ static int write_stat(struct feat_fd *ff __maybe_unused,
return 0;
}
+static int write_sample_time(struct feat_fd *ff,
+ struct perf_evlist *evlist)
+{
+ int ret;
+
+ ret = do_write(ff, &evlist->first_sample_time,
+ sizeof(evlist->first_sample_time));
+ if (ret < 0)
+ return ret;
+
+ return do_write(ff, &evlist->last_sample_time,
+ sizeof(evlist->last_sample_time));
+}
+
static void print_hostname(struct feat_fd *ff, FILE *fp)
{
fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
@@ -1507,6 +1521,28 @@ static void print_group_desc(struct feat_fd *ff, FILE *fp)
}
}
+static void print_sample_time(struct feat_fd *ff, FILE *fp)
+{
+ struct perf_session *session;
+ char time_buf[32];
+ double d;
+
+ session = container_of(ff->ph, struct perf_session, header);
+
+ timestamp__scnprintf_usec(session->evlist->first_sample_time,
+ time_buf, sizeof(time_buf));
+ fprintf(fp, "# time of first sample : %s\n", time_buf);
+
+ timestamp__scnprintf_usec(session->evlist->last_sample_time,
+ time_buf, sizeof(time_buf));
+ fprintf(fp, "# time of last sample : %s\n", time_buf);
+
+ d = (double)(session->evlist->last_sample_time -
+ session->evlist->first_sample_time) / NSEC_PER_MSEC;
+
+ fprintf(fp, "# sample duration : %10.3f ms\n", d);
+}
+
static int __event_process_build_id(struct build_id_event *bev,
char *filename,
struct perf_session *session)
@@ -2148,6 +2184,27 @@ out_free_caches:
return -1;
}
+static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
+{
+ struct perf_session *session;
+ u64 first_sample_time, last_sample_time;
+ int ret;
+
+ session = container_of(ff->ph, struct perf_session, header);
+
+ ret = do_read_u64(ff, &first_sample_time);
+ if (ret)
+ return -1;
+
+ ret = do_read_u64(ff, &last_sample_time);
+ if (ret)
+ return -1;
+
+ session->evlist->first_sample_time = first_sample_time;
+ session->evlist->last_sample_time = last_sample_time;
+ return 0;
+}
+
struct feature_ops {
int (*write)(struct feat_fd *ff, struct perf_evlist *evlist);
void (*print)(struct feat_fd *ff, FILE *fp);
@@ -2205,6 +2262,7 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
FEAT_OPN(AUXTRACE, auxtrace, false),
FEAT_OPN(STAT, stat, false),
FEAT_OPN(CACHE, cache, true),
+ FEAT_OPR(SAMPLE_TIME, sample_time, false),
};
struct header_print_data {
@@ -3258,6 +3316,74 @@ int perf_event__synthesize_attrs(struct perf_tool *tool,
return err;
}
+static bool has_unit(struct perf_evsel *counter)
+{
+ return counter->unit && *counter->unit;
+}
+
+static bool has_scale(struct perf_evsel *counter)
+{
+ return counter->scale != 1;
+}
+
+int perf_event__synthesize_extra_attr(struct perf_tool *tool,
+ struct perf_evlist *evsel_list,
+ perf_event__handler_t process,
+ bool is_pipe)
+{
+ struct perf_evsel *counter;
+ int err;
+
+ /*
+ * Synthesize other event details not carried within the
+ * attr event: unit, scale, name.
+ */
+ evlist__for_each_entry(evsel_list, counter) {
+ if (!counter->supported)
+ continue;
+
+ /*
+ * Synthesize unit and scale only if they are defined.
+ */
+ if (has_unit(counter)) {
+ err = perf_event__synthesize_event_update_unit(tool, counter, process);
+ if (err < 0) {
+ pr_err("Couldn't synthesize evsel unit.\n");
+ return err;
+ }
+ }
+
+ if (has_scale(counter)) {
+ err = perf_event__synthesize_event_update_scale(tool, counter, process);
+ if (err < 0) {
+ pr_err("Couldn't synthesize evsel counter.\n");
+ return err;
+ }
+ }
+
+ if (counter->own_cpus) {
+ err = perf_event__synthesize_event_update_cpus(tool, counter, process);
+ if (err < 0) {
+ pr_err("Couldn't synthesize evsel cpus.\n");
+ return err;
+ }
+ }
+
+ /*
+ * Name is needed only for pipe output,
+ * perf.data carries event names.
+ */
+ if (is_pipe) {
+ err = perf_event__synthesize_event_update_name(tool, counter, process);
+ if (err < 0) {
+ pr_err("Couldn't synthesize evsel name.\n");
+ return err;
+ }
+ }
+ }
+ return 0;
+}
+
int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_evlist **pevlist)
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 29ccbfdf8724..f28aaaa3a440 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include "event.h"
#include "env.h"
+#include "pmu.h"
enum {
HEADER_RESERVED = 0, /* always cleared */
@@ -34,6 +35,7 @@ enum {
HEADER_AUXTRACE,
HEADER_STAT,
HEADER_CACHE,
+ HEADER_SAMPLE_TIME,
HEADER_LAST_FEATURE,
HEADER_FEAT_BITS = 256,
};
@@ -107,6 +109,11 @@ int perf_event__synthesize_features(struct perf_tool *tool,
struct perf_evlist *evlist,
perf_event__handler_t process);
+int perf_event__synthesize_extra_attr(struct perf_tool *tool,
+ struct perf_evlist *evsel_list,
+ perf_event__handler_t process,
+ bool is_pipe);
+
int perf_event__process_feature(struct perf_tool *tool,
union perf_event *event,
struct perf_session *session);
@@ -166,5 +173,5 @@ int write_padded(struct feat_fd *fd, const void *bf,
*/
int get_cpuid(char *buffer, size_t sz);
-char *get_cpuid_str(void);
+char *get_cpuid_str(struct perf_pmu *pmu __maybe_unused);
#endif /* __PERF_HEADER_H */
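The HEADER_SAMPLE_TIME feature added above stores the evlist's first and last sample times as two consecutive u64s and prints the span in milliseconds. A stand-alone sketch of that write/read/print round trip follows; the file name and timestamps are made up, and plain stdio stands in for perf's do_write()/do_read_u64() helpers.

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_MSEC 1000000ULL

int main(void)
{
	/* first and last sample time in nanoseconds - made-up values */
	uint64_t times[2] = { 1000000000ULL, 1250000000ULL };
	uint64_t readback[2] = { 0, 0 };
	FILE *f = fopen("sample_time.bin", "w+b");

	if (!f)
		return 1;

	/* write_sample_time() equivalent: two consecutive u64s */
	fwrite(times, sizeof(times[0]), 2, f);
	rewind(f);

	/* process_sample_time() equivalent: read them back in order */
	if (fread(readback, sizeof(readback[0]), 2, f) != 2) {
		fclose(f);
		return 1;
	}
	fclose(f);

	/* print_sample_time() equivalent */
	printf("# sample duration : %10.3f ms\n",
	       (double)(readback[1] - readback[0]) / NSEC_PER_MSEC);
	return 0;
}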
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index f6630cb95eff..02721b579746 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -430,7 +430,8 @@ int hist_entry__tui_annotate(struct hist_entry *he, struct perf_evsel *evsel,
int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
struct hist_browser_timer *hbt,
float min_pcnt,
- struct perf_env *env);
+ struct perf_env *env,
+ bool warn_lost_event);
int script_browse(const char *script_opt);
#else
static inline
@@ -438,7 +439,8 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __maybe_unused,
const char *help __maybe_unused,
struct hist_browser_timer *hbt __maybe_unused,
float min_pcnt __maybe_unused,
- struct perf_env *env __maybe_unused)
+ struct perf_env *env __maybe_unused,
+ bool warn_lost_event __maybe_unused)
{
return 0;
}
diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c
index 5325e65f9711..72db2744876d 100644
--- a/tools/perf/util/intel-bts.c
+++ b/tools/perf/util/intel-bts.c
@@ -67,7 +67,6 @@ struct intel_bts {
u64 branches_sample_type;
u64 branches_id;
size_t branches_event_size;
- bool synth_needs_swap;
unsigned long num_events;
};
@@ -303,8 +302,7 @@ static int intel_bts_synth_branch_sample(struct intel_bts_queue *btsq,
event.sample.header.size = bts->branches_event_size;
ret = perf_event__synthesize_sample(&event,
bts->branches_sample_type,
- 0, &sample,
- bts->synth_needs_swap);
+ 0, &sample);
if (ret)
return ret;
}
@@ -841,8 +839,6 @@ static int intel_bts_synth_events(struct intel_bts *bts,
__perf_evsel__sample_size(attr.sample_type);
}
- bts->synth_needs_swap = evsel->needs_swap;
-
return 0;
}
diff --git a/tools/perf/util/intel-pt-decoder/Build b/tools/perf/util/intel-pt-decoder/Build
index 10e0814bb8d2..1b704fbea9de 100644
--- a/tools/perf/util/intel-pt-decoder/Build
+++ b/tools/perf/util/intel-pt-decoder/Build
@@ -11,15 +11,21 @@ $(OUTPUT)util/intel-pt-decoder/inat-tables.c: $(inat_tables_script) $(inat_table
$(OUTPUT)util/intel-pt-decoder/intel-pt-insn-decoder.o: util/intel-pt-decoder/intel-pt-insn-decoder.c util/intel-pt-decoder/inat.c $(OUTPUT)util/intel-pt-decoder/inat-tables.c
@(diff -I 2>&1 | grep -q 'option requires an argument' && \
- test -d ../../kernel -a -d ../../tools -a -d ../perf && (( \
- diff -B -I'^#include' util/intel-pt-decoder/insn.c ../../arch/x86/lib/insn.c >/dev/null && \
- diff -B -I'^#include' util/intel-pt-decoder/inat.c ../../arch/x86/lib/inat.c >/dev/null && \
- diff -B util/intel-pt-decoder/x86-opcode-map.txt ../../arch/x86/lib/x86-opcode-map.txt >/dev/null && \
- diff -B util/intel-pt-decoder/gen-insn-attr-x86.awk ../../arch/x86/tools/gen-insn-attr-x86.awk >/dev/null && \
- diff -B -I'^#include' util/intel-pt-decoder/insn.h ../../arch/x86/include/asm/insn.h >/dev/null && \
- diff -B -I'^#include' util/intel-pt-decoder/inat.h ../../arch/x86/include/asm/inat.h >/dev/null && \
- diff -B -I'^#include' util/intel-pt-decoder/inat_types.h ../../arch/x86/include/asm/inat_types.h >/dev/null) \
- || echo "Warning: Intel PT: x86 instruction decoder differs from kernel" >&2 )) || true
+ test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \
+ ((diff -B -I'^#include' util/intel-pt-decoder/insn.c ../../arch/x86/lib/insn.c >/dev/null) || \
+ (echo "Warning: Intel PT: x86 instruction decoder C file at 'tools/perf/util/intel-pt-decoder/insn.c' differs from latest version at 'arch/x86/lib/insn.c'" >&2)) && \
+ ((diff -B -I'^#include' util/intel-pt-decoder/inat.c ../../arch/x86/lib/inat.c >/dev/null) || \
+ (echo "Warning: Intel PT: x86 instruction decoder C file at 'tools/perf/util/intel-pt-decoder/inat.c' differs from latest version at 'arch/x86/lib/inat.c'" >&2)) && \
+ ((diff -B util/intel-pt-decoder/x86-opcode-map.txt ../../arch/x86/lib/x86-opcode-map.txt >/dev/null) || \
+ (echo "Warning: Intel PT: x86 instruction decoder map file at 'tools/perf/util/intel-pt-decoder/x86-opcode-map.txt' differs from latest version at 'arch/x86/lib/x86-opcode-map.txt'" >&2)) && \
+ ((diff -B util/intel-pt-decoder/gen-insn-attr-x86.awk ../../arch/x86/tools/gen-insn-attr-x86.awk >/dev/null) || \
+ (echo "Warning: Intel PT: x86 instruction decoder script at 'tools/perf/util/intel-pt-decoder/gen-insn-attr-x86.awk' differs from latest version at 'arch/x86/tools/gen-insn-attr-x86.awk'" >&2)) && \
+ ((diff -B -I'^#include' util/intel-pt-decoder/insn.h ../../arch/x86/include/asm/insn.h >/dev/null) || \
+ (echo "Warning: Intel PT: x86 instruction decoder header at 'tools/perf/util/intel-pt-decoder/insn.h' differs from latest version at 'arch/x86/include/asm/insn.h'" >&2)) && \
+ ((diff -B -I'^#include' util/intel-pt-decoder/inat.h ../../arch/x86/include/asm/inat.h >/dev/null) || \
+ (echo "Warning: Intel PT: x86 instruction decoder header at 'tools/perf/util/intel-pt-decoder/inat.h' differs from latest version at 'arch/x86/include/asm/inat.h'" >&2)) && \
+ ((diff -B -I'^#include' util/intel-pt-decoder/inat_types.h ../../arch/x86/include/asm/inat_types.h >/dev/null) || \
+ (echo "Warning: Intel PT: x86 instruction decoder header at 'tools/perf/util/intel-pt-decoder/inat_types.h' differs from latest version at 'arch/x86/include/asm/inat_types.h'" >&2)))) || true
$(call rule_mkdir)
$(call if_changed_dep,cc_o_c)
diff --git a/tools/perf/util/intel-pt-decoder/inat.h b/tools/perf/util/intel-pt-decoder/inat.h
index 125ecd2a300d..52dc8d911173 100644
--- a/tools/perf/util/intel-pt-decoder/inat.h
+++ b/tools/perf/util/intel-pt-decoder/inat.h
@@ -97,6 +97,16 @@
#define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM)
#define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS)
+/* Identifiers for segment registers */
+#define INAT_SEG_REG_IGNORE 0
+#define INAT_SEG_REG_DEFAULT 1
+#define INAT_SEG_REG_CS 2
+#define INAT_SEG_REG_SS 3
+#define INAT_SEG_REG_DS 4
+#define INAT_SEG_REG_ES 5
+#define INAT_SEG_REG_FS 6
+#define INAT_SEG_REG_GS 7
+
/* Attribute search APIs */
extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);
extern int inat_get_last_prefix_id(insn_byte_t last_pfx);
diff --git a/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt b/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt
index 12e377184ee4..e0b85930dd77 100644
--- a/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt
+++ b/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt
@@ -607,7 +607,7 @@ fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)
fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)
fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)
fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)
-ff:
+ff: UD0
EndTable
Table: 3-byte opcode 1 (0x0f 0x38)
@@ -717,7 +717,7 @@ AVXcode: 2
7e: vpermt2d/q Vx,Hx,Wx (66),(ev)
7f: vpermt2ps/d Vx,Hx,Wx (66),(ev)
80: INVEPT Gy,Mdq (66)
-81: INVPID Gy,Mdq (66)
+81: INVVPID Gy,Mdq (66)
82: INVPCID Gy,Mdq (66)
83: vpmultishiftqb Vx,Hx,Wx (66),(ev)
88: vexpandps/d Vpd,Wpd (66),(ev)
@@ -896,7 +896,7 @@ EndTable
GrpTable: Grp3_1
0: TEST Eb,Ib
-1:
+1: TEST Eb,Ib
2: NOT Eb
3: NEG Eb
4: MUL AL,Eb
@@ -970,6 +970,15 @@ GrpTable: Grp9
EndTable
GrpTable: Grp10
+# all are UD1
+0: UD1
+1: UD1
+2: UD1
+3: UD1
+4: UD1
+5: UD1
+6: UD1
+7: UD1
EndTable
# Grp11A and Grp11B are expressed as Grp11 in Intel SDM
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 23f9ba676df0..3773d9c54f45 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -104,8 +104,6 @@ struct intel_pt {
u64 pwrx_id;
u64 cbr_id;
- bool synth_needs_swap;
-
u64 tsc_bit;
u64 mtc_bit;
u64 mtc_freq_bits;
@@ -1101,11 +1099,10 @@ static void intel_pt_prep_b_sample(struct intel_pt *pt,
}
static int intel_pt_inject_event(union perf_event *event,
- struct perf_sample *sample, u64 type,
- bool swapped)
+ struct perf_sample *sample, u64 type)
{
event->header.size = perf_event__sample_event_size(sample, type, 0);
- return perf_event__synthesize_sample(event, type, 0, sample, swapped);
+ return perf_event__synthesize_sample(event, type, 0, sample);
}
static inline int intel_pt_opt_inject(struct intel_pt *pt,
@@ -1115,7 +1112,7 @@ static inline int intel_pt_opt_inject(struct intel_pt *pt,
if (!pt->synth_opts.inject)
return 0;
- return intel_pt_inject_event(event, sample, type, pt->synth_needs_swap);
+ return intel_pt_inject_event(event, sample, type);
}
static int intel_pt_deliver_synth_b_event(struct intel_pt *pt,
@@ -2329,8 +2326,6 @@ static int intel_pt_synth_events(struct intel_pt *pt,
id += 1;
}
- pt->synth_needs_swap = evsel->needs_swap;
-
return 0;
}
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 6a8d03c3d9b7..b05a67464c03 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -172,6 +172,9 @@ void machine__exit(struct machine *machine)
{
int i;
+ if (machine == NULL)
+ return;
+
machine__destroy_kernel_maps(machine);
map_groups__exit(&machine->kmaps);
dsos__exit(&machine->dsos);
@@ -1723,7 +1726,7 @@ static char *callchain_srcline(struct map *map, struct symbol *sym, u64 ip)
bool show_addr = callchain_param.key == CCKEY_ADDRESS;
srcline = get_srcline(map->dso, map__rip_2objdump(map, ip),
- sym, show_sym, show_addr);
+ sym, show_sym, show_addr, ip);
srcline__tree_insert(&map->dso->srclines, ip, srcline);
}
@@ -2201,7 +2204,7 @@ int thread__resolve_callchain(struct thread *thread,
{
int ret = 0;
- callchain_cursor_reset(&callchain_cursor);
+ callchain_cursor_reset(cursor);
if (callchain_param.order == ORDER_CALLEE) {
ret = thread__resolve_callchain_sample(thread, cursor,
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 6d40efd74402..8fe57031e1a8 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -419,7 +419,7 @@ int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
if (map && map->dso) {
srcline = get_srcline(map->dso,
map__rip_2objdump(map, addr), NULL,
- true, true);
+ true, true, addr);
if (srcline != SRCLINE_UNKNOWN)
ret = fprintf(fp, "%s%s", prefix, srcline);
free_srcline(srcline);
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index 0ddd9c199227..1ddc3d1d0147 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -20,12 +20,10 @@
#include "pmu.h"
#include "expr.h"
#include "rblist.h"
-#include "pmu.h"
#include <string.h>
#include <stdbool.h>
#include <errno.h>
#include "pmu-events/pmu-events.h"
-#include "strbuf.h"
#include "strlist.h"
#include <assert.h>
#include <ctype.h>
@@ -38,6 +36,10 @@ struct metric_event *metricgroup__lookup(struct rblist *metric_events,
struct metric_event me = {
.evsel = evsel
};
+
+ if (!metric_events)
+ return NULL;
+
nd = rblist__find(metric_events, &me);
if (nd)
return container_of(nd, struct metric_event, nd);
@@ -270,7 +272,7 @@ static void metricgroup__print_strlist(struct strlist *metrics, bool raw)
void metricgroup__print(bool metrics, bool metricgroups, char *filter,
bool raw)
{
- struct pmu_events_map *map = perf_pmu__find_map();
+ struct pmu_events_map *map = perf_pmu__find_map(NULL);
struct pmu_event *pe;
int i;
struct rblist groups;
@@ -368,7 +370,7 @@ void metricgroup__print(bool metrics, bool metricgroups, char *filter,
static int metricgroup__add_metric(const char *metric, struct strbuf *events,
struct list_head *group_list)
{
- struct pmu_events_map *map = perf_pmu__find_map();
+ struct pmu_events_map *map = perf_pmu__find_map(NULL);
struct pmu_event *pe;
int ret = -EINVAL;
int i, j;
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 9fe5f9c7d577..91531a7c8fbf 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -21,50 +21,28 @@ size_t perf_mmap__mmap_len(struct perf_mmap *map)
}
/* When check_messup is true, 'end' must point to a good entry */
-static union perf_event *perf_mmap__read(struct perf_mmap *map, bool check_messup,
- u64 start, u64 end, u64 *prev)
+static union perf_event *perf_mmap__read(struct perf_mmap *map,
+ u64 *startp, u64 end)
{
unsigned char *data = map->base + page_size;
union perf_event *event = NULL;
- int diff = end - start;
-
- if (check_messup) {
- /*
- * If we're further behind than half the buffer, there's a chance
- * the writer will bite our tail and mess up the samples under us.
- *
- * If we somehow ended up ahead of the 'end', we got messed up.
- *
- * In either case, truncate and restart at 'end'.
- */
- if (diff > map->mask / 2 || diff < 0) {
- fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
-
- /*
- * 'end' points to a known good entry, start there.
- */
- start = end;
- diff = 0;
- }
- }
+ int diff = end - *startp;
if (diff >= (int)sizeof(event->header)) {
size_t size;
- event = (union perf_event *)&data[start & map->mask];
+ event = (union perf_event *)&data[*startp & map->mask];
size = event->header.size;
- if (size < sizeof(event->header) || diff < (int)size) {
- event = NULL;
- goto broken_event;
- }
+ if (size < sizeof(event->header) || diff < (int)size)
+ return NULL;
/*
* Event straddles the mmap boundary -- header should always
* be inside due to u64 alignment of output.
*/
- if ((start & map->mask) + size != ((start + size) & map->mask)) {
- unsigned int offset = start;
+ if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
+ unsigned int offset = *startp;
unsigned int len = min(sizeof(*event), size), cpy;
void *dst = map->event_copy;
@@ -79,20 +57,19 @@ static union perf_event *perf_mmap__read(struct perf_mmap *map, bool check_messu
event = (union perf_event *)map->event_copy;
}
- start += size;
+ *startp += size;
}
-broken_event:
- if (prev)
- *prev = start;
-
return event;
}
-union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup)
+/*
+ * legacy interface for mmap read.
+ * Don't use it. Use perf_mmap__read_event().
+ */
+union perf_event *perf_mmap__read_forward(struct perf_mmap *map)
{
u64 head;
- u64 old = map->prev;
/*
* Check if event was unmapped due to a POLLHUP/POLLERR.
@@ -102,13 +79,26 @@ union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_mess
head = perf_mmap__read_head(map);
- return perf_mmap__read(map, check_messup, old, head, &map->prev);
+ return perf_mmap__read(map, &map->prev, head);
}
-union perf_event *perf_mmap__read_backward(struct perf_mmap *map)
+/*
+ * Read events from the ring buffer one by one.
+ * Return one event per call.
+ *
+ * Usage:
+ * perf_mmap__read_init()
+ * while(event = perf_mmap__read_event()) {
+ * //process the event
+ * perf_mmap__consume()
+ * }
+ * perf_mmap__read_done()
+ */
+union perf_event *perf_mmap__read_event(struct perf_mmap *map,
+ bool overwrite,
+ u64 *startp, u64 end)
{
- u64 head, end;
- u64 start = map->prev;
+ union perf_event *event;
/*
* Check if event was unmapped due to a POLLHUP/POLLERR.
@@ -116,40 +106,19 @@ union perf_event *perf_mmap__read_backward(struct perf_mmap *map)
if (!refcount_read(&map->refcnt))
return NULL;
- head = perf_mmap__read_head(map);
- if (!head)
+ if (startp == NULL)
return NULL;
- /*
- * 'head' pointer starts from 0. Kernel minus sizeof(record) form
- * it each time when kernel writes to it, so in fact 'head' is
- * negative. 'end' pointer is made manually by adding the size of
- * the ring buffer to 'head' pointer, means the validate data can
- * read is the whole ring buffer. If 'end' is positive, the ring
- * buffer has not fully filled, so we must adjust 'end' to 0.
- *
- * However, since both 'head' and 'end' is unsigned, we can't
- * simply compare 'end' against 0. Here we compare '-head' and
- * the size of the ring buffer, where -head is the number of bytes
- * kernel write to the ring buffer.
- */
- if (-head < (u64)(map->mask + 1))
- end = 0;
- else
- end = head + map->mask + 1;
+ /* non-overwrite mode doesn't pause the ring buffer */
+ if (!overwrite)
+ end = perf_mmap__read_head(map);
- return perf_mmap__read(map, false, start, end, &map->prev);
-}
+ event = perf_mmap__read(map, startp, end);
-void perf_mmap__read_catchup(struct perf_mmap *map)
-{
- u64 head;
+ if (!overwrite)
+ map->prev = *startp;
- if (!refcount_read(&map->refcnt))
- return;
-
- head = perf_mmap__read_head(map);
- map->prev = head;
+ return event;
}
static bool perf_mmap__empty(struct perf_mmap *map)
@@ -254,18 +223,18 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd)
return 0;
}
-static int backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
+static int overwrite_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
{
struct perf_event_header *pheader;
u64 evt_head = head;
int size = mask + 1;
- pr_debug2("backward_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
+ pr_debug2("overwrite_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
pheader = (struct perf_event_header *)(buf + (head & mask));
*start = head;
while (true) {
if (evt_head - head >= (unsigned int)size) {
- pr_debug("Finished reading backward ring buffer: rewind\n");
+ pr_debug("Finished reading overwrite ring buffer: rewind\n");
if (evt_head - head > (unsigned int)size)
evt_head -= pheader->size;
*end = evt_head;
@@ -275,7 +244,7 @@ static int backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64
pheader = (struct perf_event_header *)(buf + (evt_head & mask));
if (pheader->size == 0) {
- pr_debug("Finished reading backward ring buffer: get start\n");
+ pr_debug("Finished reading overwrite ring buffer: get start\n");
*end = evt_head;
return 0;
}
@@ -287,43 +256,59 @@ static int backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64
return -1;
}
-static int rb_find_range(void *data, int mask, u64 head, u64 old,
- u64 *start, u64 *end, bool backward)
+/*
+ * Report the start and end of the available data in the ring buffer
+ */
+int perf_mmap__read_init(struct perf_mmap *md, bool overwrite,
+ u64 *startp, u64 *endp)
{
- if (!backward) {
- *start = old;
- *end = head;
- return 0;
+ u64 head = perf_mmap__read_head(md);
+ u64 old = md->prev;
+ unsigned char *data = md->base + page_size;
+ unsigned long size;
+
+ *startp = overwrite ? head : old;
+ *endp = overwrite ? old : head;
+
+ if (*startp == *endp)
+ return -EAGAIN;
+
+ size = *endp - *startp;
+ if (size > (unsigned long)(md->mask) + 1) {
+ if (!overwrite) {
+ WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
+
+ md->prev = head;
+ perf_mmap__consume(md, overwrite);
+ return -EAGAIN;
+ }
+
+ /*
+ * The backward ring buffer is full. We still have a chance to read
+ * most of the data from it.
+ */
+ if (overwrite_rb_find_range(data, md->mask, head, startp, endp))
+ return -EINVAL;
}
- return backward_rb_find_range(data, mask, head, start, end);
+ return 0;
}
-int perf_mmap__push(struct perf_mmap *md, bool overwrite, bool backward,
+int perf_mmap__push(struct perf_mmap *md, bool overwrite,
void *to, int push(void *to, void *buf, size_t size))
{
u64 head = perf_mmap__read_head(md);
- u64 old = md->prev;
- u64 end = head, start = old;
+ u64 end, start;
unsigned char *data = md->base + page_size;
unsigned long size;
void *buf;
int rc = 0;
- if (rb_find_range(data, md->mask, head, old, &start, &end, backward))
- return -1;
-
- if (start == end)
- return 0;
+ rc = perf_mmap__read_init(md, overwrite, &start, &end);
+ if (rc < 0)
+ return (rc == -EAGAIN) ? 0 : -1;
size = end - start;
- if (size > (unsigned long)(md->mask) + 1) {
- WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
-
- md->prev = head;
- perf_mmap__consume(md, overwrite || backward);
- return 0;
- }
if ((start & md->mask) + size != (end & md->mask)) {
buf = &data[start & md->mask];
@@ -346,7 +331,18 @@ int perf_mmap__push(struct perf_mmap *md, bool overwrite, bool backward,
}
md->prev = head;
- perf_mmap__consume(md, overwrite || backward);
+ perf_mmap__consume(md, overwrite);
out:
return rc;
}
+
+/*
+ * Mandatory for overwrite mode.
+ * The direction of overwrite mode is backward.
+ * The last perf_mmap__read() will set tail to map->prev.
+ * map->prev needs to be corrected to head, which is the end of the next read.
+ */
+void perf_mmap__read_done(struct perf_mmap *map)
+{
+ map->prev = perf_mmap__read_head(map);
+}
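
For reference, a minimal sketch (not part of the patch) of the read loop that the perf_mmap__read_event() comment above describes. It assumes perf-internal headers and a caller-supplied process_event() callback, and only uses the signatures introduced here:

/* Sketch only: drives the mmap read API added by this patch from inside the perf tree. */
#include <linux/types.h>
#include "util/event.h"
#include "util/mmap.h"	/* perf_mmap__read_init/read_event/consume/read_done */

static int drain_one_map(struct perf_mmap *map, bool overwrite,
			 int (*process_event)(union perf_event *event))
{
	union perf_event *event;
	u64 start, end;
	int err;

	/* Snapshot the readable region; -EAGAIN simply means "nothing new". */
	err = perf_mmap__read_init(map, overwrite, &start, &end);
	if (err)
		return err == -EAGAIN ? 0 : err;

	/* One event per call until the snapshot is exhausted. */
	while ((event = perf_mmap__read_event(map, overwrite, &start, end)) != NULL) {
		err = process_event(event);
		perf_mmap__consume(map, overwrite);
		if (err)
			break;
	}

	/* Mandatory for overwrite mode: resynchronize map->prev with the head. */
	perf_mmap__read_done(map);
	return err;
}
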
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index efd78b827b05..ec7d3a24e276 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -65,12 +65,10 @@ void perf_mmap__put(struct perf_mmap *map);
void perf_mmap__consume(struct perf_mmap *map, bool overwrite);
-void perf_mmap__read_catchup(struct perf_mmap *md);
-
static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
{
struct perf_event_mmap_page *pc = mm->base;
- u64 head = ACCESS_ONCE(pc->data_head);
+ u64 head = READ_ONCE(pc->data_head);
rmb();
return head;
}
@@ -86,12 +84,18 @@ static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
pc->data_tail = tail;
}
-union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup);
-union perf_event *perf_mmap__read_backward(struct perf_mmap *map);
+union perf_event *perf_mmap__read_forward(struct perf_mmap *map);
+
+union perf_event *perf_mmap__read_event(struct perf_mmap *map,
+ bool overwrite,
+ u64 *startp, u64 end);
-int perf_mmap__push(struct perf_mmap *md, bool overwrite, bool backward,
+int perf_mmap__push(struct perf_mmap *md, bool backward,
void *to, int push(void *to, void *buf, size_t size));
size_t perf_mmap__mmap_len(struct perf_mmap *map);
+int perf_mmap__read_init(struct perf_mmap *md, bool overwrite,
+ u64 *startp, u64 *endp);
+void perf_mmap__read_done(struct perf_mmap *map);
#endif /*__PERF_MMAP_H */
diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c
index 8e09fd2d842f..bad9e0296e9a 100644
--- a/tools/perf/util/ordered-events.c
+++ b/tools/perf/util/ordered-events.c
@@ -157,9 +157,8 @@ void ordered_events__delete(struct ordered_events *oe, struct ordered_event *eve
}
int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
- struct perf_sample *sample, u64 file_offset)
+ u64 timestamp, u64 file_offset)
{
- u64 timestamp = sample->time;
struct ordered_event *oevent;
if (!timestamp || timestamp == ~0ULL)
diff --git a/tools/perf/util/ordered-events.h b/tools/perf/util/ordered-events.h
index 96e5292d88e2..8c7a2948593e 100644
--- a/tools/perf/util/ordered-events.h
+++ b/tools/perf/util/ordered-events.h
@@ -45,7 +45,7 @@ struct ordered_events {
};
int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
- struct perf_sample *sample, u64 file_offset);
+ u64 timestamp, u64 file_offset);
void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event);
int ordered_events__flush(struct ordered_events *oe, enum oe_flush how);
void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver);
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index a7fcd95961ef..34589c427e52 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -4,6 +4,9 @@
#include <dirent.h>
#include <errno.h>
#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
#include <sys/param.h>
#include "term.h"
#include "../perf.h"
@@ -1116,6 +1119,7 @@ do { \
INIT_LIST_HEAD(&__t->list); \
__t->type = PERF_EVSEL__CONFIG_TERM_ ## __type; \
__t->val.__name = __val; \
+ __t->weak = term->weak; \
list_add_tail(&__t->list, head_terms); \
} while (0)
@@ -2410,6 +2414,7 @@ static int new_term(struct parse_events_term **_term,
*term = *temp;
INIT_LIST_HEAD(&term->list);
+ term->weak = false;
switch (term->type_val) {
case PARSE_EVENTS__TERM_TYPE_NUM:
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index be337c266697..88108cd11b4c 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -101,6 +101,9 @@ struct parse_events_term {
/* error string indexes for within parsed string */
int err_term;
int err_val;
+
+ /* Coming from implicit alias */
+ bool weak;
};
struct parse_events_error {
diff --git a/tools/perf/util/path.c b/tools/perf/util/path.c
index 933f5c6bffb4..ca56ba2dd3da 100644
--- a/tools/perf/util/path.c
+++ b/tools/perf/util/path.c
@@ -18,6 +18,7 @@
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
+#include <dirent.h>
#include <unistd.h>
static char bad_path[] = "/bad-path/";
@@ -77,3 +78,16 @@ bool is_regular_file(const char *file)
return S_ISREG(st.st_mode);
}
+
+/* Helper function for filesystems that return dent->d_type as DT_UNKNOWN */
+bool is_directory(const char *base_path, const struct dirent *dent)
+{
+ char path[PATH_MAX];
+ struct stat st;
+
+ sprintf(path, "%s/%s", base_path, dent->d_name);
+ if (stat(path, &st))
+ return false;
+
+ return S_ISDIR(st.st_mode);
+}
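
As an aside, a hedged sketch of the kind of caller this helper targets: when readdir() reports d_type as DT_UNKNOWN, is_directory() falls back to stat(). The directory-walk wrapper and callback below are made up for illustration and are not part of the patch:

/* Sketch: visit sub-directories of a path even when d_type is DT_UNKNOWN. */
#include <dirent.h>
#include <string.h>
#include "util/path.h"	/* is_directory() added by this patch */

static void for_each_subdir(const char *base, void (*cb)(const char *name))
{
	struct dirent *dent;
	DIR *dir = opendir(base);

	if (!dir)
		return;

	while ((dent = readdir(dir)) != NULL) {
		if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
			continue;
		/* d_type may be DT_UNKNOWN; is_directory() stats the entry instead. */
		if (is_directory(base, dent))
			cb(dent->d_name);
	}
	closedir(dir);
}
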
diff --git a/tools/perf/util/path.h b/tools/perf/util/path.h
index 14a254ada7eb..f014f905df50 100644
--- a/tools/perf/util/path.h
+++ b/tools/perf/util/path.h
@@ -2,9 +2,12 @@
#ifndef _PERF_PATH_H
#define _PERF_PATH_H
+struct dirent;
+
int path__join(char *bf, size_t size, const char *path1, const char *path2);
int path__join3(char *bf, size_t size, const char *path1, const char *path2, const char *path3);
bool is_regular_file(const char *file);
+bool is_directory(const char *base_path, const struct dirent *dent);
#endif /* _PERF_PATH_H */
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 07cb2ac041d7..57e38fdf0b34 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -12,6 +12,7 @@
#include <dirent.h>
#include <api/fs/fs.h>
#include <locale.h>
+#include <regex.h>
#include "util.h"
#include "pmu.h"
#include "parse-events.h"
@@ -405,6 +406,11 @@ static int pmu_alias_terms(struct perf_pmu_alias *alias,
parse_events_terms__purge(&list);
return ret;
}
+ /*
+ * Weak terms don't override command line options;
+ * that is the behaviour we want for implicit terms in aliases.
+ */
+ cloned->weak = true;
list_add_tail(&cloned->list, &list);
}
list_splice(&list, terms);
@@ -532,17 +538,45 @@ static bool pmu_is_uncore(const char *name)
}
/*
+ * Core PMU devices can have a sysfs name other than "cpu" on some
+ * platforms. Look for possible sysfs files to identify them as core devices.
+ */
+static int is_pmu_core(const char *name)
+{
+ struct stat st;
+ char path[PATH_MAX];
+ const char *sysfs = sysfs__mountpoint();
+
+ if (!sysfs)
+ return 0;
+
+ /* Look for cpu sysfs (x86 and others) */
+ scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/cpu", sysfs);
+ if ((stat(path, &st) == 0) &&
+ (strncmp(name, "cpu", strlen("cpu")) == 0))
+ return 1;
+
+ /* Look for cpu sysfs (specific to arm) */
+ scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s/cpus",
+ sysfs, name);
+ if (stat(path, &st) == 0)
+ return 1;
+
+ return 0;
+}
+
+/*
* Return the CPU id as a raw string.
*
* Each architecture should provide a more precise id string that
* can be used to match the architecture's "mapfile".
*/
-char * __weak get_cpuid_str(void)
+char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
{
return NULL;
}
-static char *perf_pmu__getcpuid(void)
+static char *perf_pmu__getcpuid(struct perf_pmu *pmu)
{
char *cpuid;
static bool printed;
@@ -551,7 +585,7 @@ static char *perf_pmu__getcpuid(void)
if (cpuid)
cpuid = strdup(cpuid);
if (!cpuid)
- cpuid = get_cpuid_str();
+ cpuid = get_cpuid_str(pmu);
if (!cpuid)
return NULL;
@@ -562,22 +596,45 @@ static char *perf_pmu__getcpuid(void)
return cpuid;
}
-struct pmu_events_map *perf_pmu__find_map(void)
+struct pmu_events_map *perf_pmu__find_map(struct perf_pmu *pmu)
{
struct pmu_events_map *map;
- char *cpuid = perf_pmu__getcpuid();
+ char *cpuid = perf_pmu__getcpuid(pmu);
int i;
+ /* On some platforms which use a cpus map, cpuid can be NULL for
+ * PMUs other than core PMUs.
+ */
+ if (!cpuid)
+ return NULL;
+
i = 0;
for (;;) {
+ regex_t re;
+ regmatch_t pmatch[1];
+ int match;
+
map = &pmu_events_map[i++];
if (!map->table) {
map = NULL;
break;
}
- if (!strcmp(map->cpuid, cpuid))
+ if (regcomp(&re, map->cpuid, REG_EXTENDED) != 0) {
+ /* Warn when the mapfile cpuid pattern is not a valid regex. */
+ pr_info("Invalid regular expression %s\n", map->cpuid);
break;
+ }
+
+ match = !regexec(&re, cpuid, 1, pmatch, 0);
+ regfree(&re);
+ if (match) {
+ size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);
+
+ /* Verify the entire string matched. */
+ if (match_len == strlen(cpuid))
+ break;
+ }
}
free(cpuid);
return map;
@@ -588,13 +645,14 @@ struct pmu_events_map *perf_pmu__find_map(void)
* to the current running CPU. Then, add all PMU events from that table
* as aliases.
*/
-static void pmu_add_cpu_aliases(struct list_head *head, const char *name)
+static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
{
int i;
struct pmu_events_map *map;
struct pmu_event *pe;
+ const char *name = pmu->name;
- map = perf_pmu__find_map();
+ map = perf_pmu__find_map(pmu);
if (!map)
return;
@@ -603,7 +661,6 @@ static void pmu_add_cpu_aliases(struct list_head *head, const char *name)
*/
i = 0;
while (1) {
- const char *pname;
pe = &map->table[i++];
if (!pe->name) {
@@ -612,9 +669,13 @@ static void pmu_add_cpu_aliases(struct list_head *head, const char *name)
break;
}
- pname = pe->pmu ? pe->pmu : "cpu";
- if (strncmp(pname, name, strlen(pname)))
- continue;
+ if (!is_pmu_core(name)) {
+ /* check for uncore devices */
+ if (pe->pmu == NULL)
+ continue;
+ if (strncmp(pe->pmu, name, strlen(pe->pmu)))
+ continue;
+ }
/* need type casts to override 'const' */
__perf_pmu__new_alias(head, NULL, (char *)pe->name,
@@ -656,21 +717,20 @@ static struct perf_pmu *pmu_lookup(const char *name)
if (pmu_aliases(name, &aliases))
return NULL;
- pmu_add_cpu_aliases(&aliases, name);
pmu = zalloc(sizeof(*pmu));
if (!pmu)
return NULL;
pmu->cpus = pmu_cpumask(name);
-
+ pmu->name = strdup(name);
+ pmu->type = type;
pmu->is_uncore = pmu_is_uncore(name);
+ pmu_add_cpu_aliases(&aliases, pmu);
INIT_LIST_HEAD(&pmu->format);
INIT_LIST_HEAD(&pmu->aliases);
list_splice(&format, &pmu->format);
list_splice(&aliases, &pmu->aliases);
- pmu->name = strdup(name);
- pmu->type = type;
list_add_tail(&pmu->list, &pmus);
pmu->default_config = perf_pmu__get_default_config(pmu);
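
The cpuid lookup above compiles each mapfile entry as an extended regex and only accepts it when the match spans the whole cpuid string. A standalone sketch of that anchoring idiom, using plain libc and hypothetical cpuid strings (not perf code):

/* Demonstrates the "regex must match the entire string" check used above. */
#include <regex.h>
#include <stdio.h>
#include <string.h>

static int cpuid_matches(const char *pattern, const char *cpuid)
{
	regex_t re;
	regmatch_t pmatch[1];
	int full_match = 0;

	if (regcomp(&re, pattern, REG_EXTENDED) != 0)
		return 0;	/* invalid pattern: treat as no match */

	if (regexec(&re, cpuid, 1, pmatch, 0) == 0) {
		size_t len = pmatch[0].rm_eo - pmatch[0].rm_so;

		/* Only accept the entry if the whole cpuid matched. */
		full_match = (pmatch[0].rm_so == 0) && (len == strlen(cpuid));
	}
	regfree(&re);
	return full_match;
}

int main(void)
{
	/* Hypothetical IDs, just to show partial matches being rejected. */
	printf("%d\n", cpuid_matches("GenuineIntel-6-[45]E", "GenuineIntel-6-4E")); /* 1 */
	printf("%d\n", cpuid_matches("GenuineIntel-6-4",     "GenuineIntel-6-4E")); /* 0 */
	return 0;
}
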
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 27c75e635866..76fecec7b3f9 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -92,6 +92,6 @@ int perf_pmu__test(void);
struct perf_event_attr *perf_pmu__get_default_config(struct perf_pmu *pmu);
-struct pmu_events_map *perf_pmu__find_map(void);
+struct pmu_events_map *perf_pmu__find_map(struct perf_pmu *pmu);
#endif /* __PMU_H */
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index b7aaf9b2294d..e1dbc9821617 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -1325,27 +1325,30 @@ static int parse_perf_probe_event_name(char **arg, struct perf_probe_event *pev)
{
char *ptr;
- ptr = strchr(*arg, ':');
+ ptr = strpbrk_esc(*arg, ":");
if (ptr) {
*ptr = '\0';
if (!pev->sdt && !is_c_func_name(*arg))
goto ng_name;
- pev->group = strdup(*arg);
+ pev->group = strdup_esc(*arg);
if (!pev->group)
return -ENOMEM;
*arg = ptr + 1;
} else
pev->group = NULL;
- if (!pev->sdt && !is_c_func_name(*arg)) {
+
+ pev->event = strdup_esc(*arg);
+ if (pev->event == NULL)
+ return -ENOMEM;
+
+ if (!pev->sdt && !is_c_func_name(pev->event)) {
+ zfree(&pev->event);
ng_name:
+ zfree(&pev->group);
semantic_error("%s is bad for event name -it must "
"follow C symbol-naming rule.\n", *arg);
return -EINVAL;
}
- pev->event = strdup(*arg);
- if (pev->event == NULL)
- return -ENOMEM;
-
return 0;
}
@@ -1373,7 +1376,7 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
arg++;
}
- ptr = strpbrk(arg, ";=@+%");
+ ptr = strpbrk_esc(arg, ";=@+%");
if (pev->sdt) {
if (ptr) {
if (*ptr != '@') {
@@ -1387,7 +1390,7 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
pev->target = build_id_cache__origname(tmp);
free(tmp);
} else
- pev->target = strdup(ptr + 1);
+ pev->target = strdup_esc(ptr + 1);
if (!pev->target)
return -ENOMEM;
*ptr = '\0';
@@ -1421,13 +1424,14 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
*
* Otherwise, we consider arg to be a function specification.
*/
- if (!strpbrk(arg, "+@%") && (ptr = strpbrk(arg, ";:")) != NULL) {
+ if (!strpbrk_esc(arg, "+@%")) {
+ ptr = strpbrk_esc(arg, ";:");
/* This is a file spec if it includes a '.' before ; or : */
- if (memchr(arg, '.', ptr - arg))
+ if (ptr && memchr(arg, '.', ptr - arg))
file_spec = true;
}
- ptr = strpbrk(arg, ";:+@%");
+ ptr = strpbrk_esc(arg, ";:+@%");
if (ptr) {
nc = *ptr;
*ptr++ = '\0';
@@ -1436,7 +1440,7 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
if (arg[0] == '\0')
tmp = NULL;
else {
- tmp = strdup(arg);
+ tmp = strdup_esc(arg);
if (tmp == NULL)
return -ENOMEM;
}
@@ -1469,12 +1473,12 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
arg = ptr;
c = nc;
if (c == ';') { /* Lazy pattern must be the last part */
- pp->lazy_line = strdup(arg);
+ pp->lazy_line = strdup(arg); /* keep escapes as they are */
if (pp->lazy_line == NULL)
return -ENOMEM;
break;
}
- ptr = strpbrk(arg, ";:+@%");
+ ptr = strpbrk_esc(arg, ";:+@%");
if (ptr) {
nc = *ptr;
*ptr++ = '\0';
@@ -1501,7 +1505,7 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
semantic_error("SRC@SRC is not allowed.\n");
return -EINVAL;
}
- pp->file = strdup(arg);
+ pp->file = strdup_esc(arg);
if (pp->file == NULL)
return -ENOMEM;
break;
@@ -2573,7 +2577,8 @@ int show_perf_probe_events(struct strfilter *filter)
}
static int get_new_event_name(char *buf, size_t len, const char *base,
- struct strlist *namelist, bool allow_suffix)
+ struct strlist *namelist, bool ret_event,
+ bool allow_suffix)
{
int i, ret;
char *p, *nbase;
@@ -2584,13 +2589,13 @@ static int get_new_event_name(char *buf, size_t len, const char *base,
if (!nbase)
return -ENOMEM;
- /* Cut off the dot suffixes (e.g. .const, .isra)*/
- p = strchr(nbase, '.');
+ /* Cut off the dot suffixes (e.g. .const, .isra) and version suffixes */
+ p = strpbrk(nbase, ".@");
if (p && p != nbase)
*p = '\0';
/* Try no suffix number */
- ret = e_snprintf(buf, len, "%s", nbase);
+ ret = e_snprintf(buf, len, "%s%s", nbase, ret_event ? "__return" : "");
if (ret < 0) {
pr_debug("snprintf() failed: %d\n", ret);
goto out;
@@ -2625,6 +2630,14 @@ static int get_new_event_name(char *buf, size_t len, const char *base,
out:
free(nbase);
+
+ /* Final validation */
+ if (ret >= 0 && !is_c_func_name(buf)) {
+ pr_warning("Internal error: \"%s\" is an invalid event name.\n",
+ buf);
+ ret = -EINVAL;
+ }
+
return ret;
}
@@ -2681,8 +2694,8 @@ static int probe_trace_event__set_name(struct probe_trace_event *tev,
group = PERFPROBE_GROUP;
/* Get an unused new event name */
- ret = get_new_event_name(buf, 64, event,
- namelist, allow_suffix);
+ ret = get_new_event_name(buf, 64, event, namelist,
+ tev->point.retprobe, allow_suffix);
if (ret < 0)
return ret;
@@ -2792,16 +2805,40 @@ static int find_probe_functions(struct map *map, char *name,
int found = 0;
struct symbol *sym;
struct rb_node *tmp;
+ const char *norm, *ver;
+ char *buf = NULL;
+ bool cut_version = true;
if (map__load(map) < 0)
return 0;
+ /* If user gives a version, don't cut off the version from symbols */
+ if (strchr(name, '@'))
+ cut_version = false;
+
map__for_each_symbol(map, sym, tmp) {
- if (strglobmatch(sym->name, name)) {
+ norm = arch__normalize_symbol_name(sym->name);
+ if (!norm)
+ continue;
+
+ if (cut_version) {
+ /* We don't care whether this is the default versioned symbol or not */
+ ver = strchr(norm, '@');
+ if (ver) {
+ buf = strndup(norm, ver - norm);
+ if (!buf)
+ return -ENOMEM;
+ norm = buf;
+ }
+ }
+
+ if (strglobmatch(norm, name)) {
found++;
if (syms && found < probe_conf.max_probes)
syms[found - 1] = sym;
}
+ if (buf)
+ zfree(&buf);
}
return found;
@@ -2847,7 +2884,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
* same name but different addresses, this lists all the symbols.
*/
num_matched_functions = find_probe_functions(map, pp->function, syms);
- if (num_matched_functions == 0) {
+ if (num_matched_functions <= 0) {
pr_err("Failed to find symbol %s in %s\n", pp->function,
pev->target ? : "kernel");
ret = -ENOENT;
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index b4f2f06722a7..7aa0ea64544e 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -10,6 +10,7 @@ util/ctype.c
util/evlist.c
util/evsel.c
util/cpumap.c
+util/memswap.c
util/mmap.c
util/namespaces.c
../lib/bitmap.c
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 8e49d9cafcfc..b1e999bd21ef 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -864,7 +864,7 @@ static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
&pages, &overwrite))
return NULL;
- if (perf_evlist__mmap(evlist, pages, overwrite) < 0) {
+ if (perf_evlist__mmap(evlist, pages) < 0) {
PyErr_SetFromErrno(PyExc_OSError);
return NULL;
}
diff --git a/tools/perf/util/rblist.c b/tools/perf/util/rblist.c
index 0dfe27d99458..0efc3258c648 100644
--- a/tools/perf/util/rblist.c
+++ b/tools/perf/util/rblist.c
@@ -101,16 +101,21 @@ void rblist__init(struct rblist *rblist)
return;
}
+void rblist__exit(struct rblist *rblist)
+{
+ struct rb_node *pos, *next = rb_first(&rblist->entries);
+
+ while (next) {
+ pos = next;
+ next = rb_next(pos);
+ rblist__remove_node(rblist, pos);
+ }
+}
+
void rblist__delete(struct rblist *rblist)
{
if (rblist != NULL) {
- struct rb_node *pos, *next = rb_first(&rblist->entries);
-
- while (next) {
- pos = next;
- next = rb_next(pos);
- rblist__remove_node(rblist, pos);
- }
+ rblist__exit(rblist);
free(rblist);
}
}
diff --git a/tools/perf/util/rblist.h b/tools/perf/util/rblist.h
index 4c8638a22571..76df15c27f5f 100644
--- a/tools/perf/util/rblist.h
+++ b/tools/perf/util/rblist.h
@@ -29,6 +29,7 @@ struct rblist {
};
void rblist__init(struct rblist *rblist);
+void rblist__exit(struct rblist *rblist);
void rblist__delete(struct rblist *rblist);
int rblist__add_node(struct rblist *rblist, const void *new_entry);
void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node);
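
rblist__exit() lets an rblist that is embedded in another object release its nodes without freeing the container, which is how the stat-shadow changes further down use it via struct runtime_stat; rblist__delete() remains the teardown path for heap-allocated lists. A hedged sketch with a made-up payload type, mirroring the callback signatures seen in this patch:

/* Sketch: an rblist embedded in another object, torn down with rblist__exit(). */
#include <stdlib.h>
#include <linux/compiler.h>	/* __maybe_unused */
#include <linux/kernel.h>	/* container_of */
#include <linux/rbtree.h>
#include "util/rblist.h"

struct demo_value {			/* hypothetical payload */
	struct rb_node rb_node;
	int key;
};

static int demo_cmp(struct rb_node *rb_node, const void *entry)
{
	const struct demo_value *a = container_of(rb_node, struct demo_value, rb_node);
	const struct demo_value *b = entry;

	return a->key - b->key;
}

static struct rb_node *demo_new(struct rblist *rblist __maybe_unused,
				const void *entry)
{
	struct demo_value *v = malloc(sizeof(*v));

	if (!v)
		return NULL;
	v->key = ((const struct demo_value *)entry)->key;
	return &v->rb_node;
}

static void demo_delete(struct rblist *rblist __maybe_unused,
			struct rb_node *rb_node)
{
	free(container_of(rb_node, struct demo_value, rb_node));
}

struct demo_stat {
	struct rblist value_list;	/* embedded, not heap-allocated */
};

static void demo_stat__init(struct demo_stat *st)
{
	rblist__init(&st->value_list);
	st->value_list.node_cmp = demo_cmp;
	st->value_list.node_new = demo_new;
	st->value_list.node_delete = demo_delete;
}

static void demo_stat__exit(struct demo_stat *st)
{
	rblist__exit(&st->value_list);	/* frees the nodes, not the container */
}
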
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index c7187f067d31..ea070883c593 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -43,7 +43,6 @@
#include "../db-export.h"
#include "../thread-stack.h"
#include "../trace-event.h"
-#include "../machine.h"
#include "../call-path.h"
#include "thread_map.h"
#include "cpumap.h"
@@ -500,6 +499,8 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
PyLong_FromUnsignedLongLong(sample->time));
pydict_set_item_string_decref(dict_sample, "period",
PyLong_FromUnsignedLongLong(sample->period));
+ pydict_set_item_string_decref(dict_sample, "phys_addr",
+ PyLong_FromUnsignedLongLong(sample->phys_addr));
set_sample_read_in_dict(dict_sample, sample, evsel);
pydict_set_item_string_decref(dict, "sample", dict_sample);
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 5c412310f266..c71ced7db152 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -27,7 +27,6 @@
static int perf_session__deliver_event(struct perf_session *session,
union perf_event *event,
- struct perf_sample *sample,
struct perf_tool *tool,
u64 file_offset);
@@ -107,17 +106,10 @@ static void perf_session__set_comm_exec(struct perf_session *session)
static int ordered_events__deliver_event(struct ordered_events *oe,
struct ordered_event *event)
{
- struct perf_sample sample;
struct perf_session *session = container_of(oe, struct perf_session,
ordered_events);
- int ret = perf_evlist__parse_sample(session->evlist, event->event, &sample);
-
- if (ret) {
- pr_err("Can't parse sample, err = %d\n", ret);
- return ret;
- }
- return perf_session__deliver_event(session, event->event, &sample,
+ return perf_session__deliver_event(session, event->event,
session->tool, event->file_offset);
}
@@ -873,9 +865,9 @@ static int process_finished_round(struct perf_tool *tool __maybe_unused,
}
int perf_session__queue_event(struct perf_session *s, union perf_event *event,
- struct perf_sample *sample, u64 file_offset)
+ u64 timestamp, u64 file_offset)
{
- return ordered_events__queue(&s->ordered_events, event, sample, file_offset);
+ return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
}
static void callchain__lbr_callstack_printf(struct perf_sample *sample)
@@ -1328,20 +1320,26 @@ static int machines__deliver_event(struct machines *machines,
static int perf_session__deliver_event(struct perf_session *session,
union perf_event *event,
- struct perf_sample *sample,
struct perf_tool *tool,
u64 file_offset)
{
+ struct perf_sample sample;
int ret;
- ret = auxtrace__process_event(session, event, sample, tool);
+ ret = perf_evlist__parse_sample(session->evlist, event, &sample);
+ if (ret) {
+ pr_err("Can't parse sample, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = auxtrace__process_event(session, event, &sample, tool);
if (ret < 0)
return ret;
if (ret > 0)
return 0;
return machines__deliver_event(&session->machines, session->evlist,
- event, sample, tool, file_offset);
+ event, &sample, tool, file_offset);
}
static s64 perf_session__process_user_event(struct perf_session *session,
@@ -1350,10 +1348,11 @@ static s64 perf_session__process_user_event(struct perf_session *session,
{
struct ordered_events *oe = &session->ordered_events;
struct perf_tool *tool = session->tool;
+ struct perf_sample sample = { .time = 0, };
int fd = perf_data__fd(session->data);
int err;
- dump_event(session->evlist, event, file_offset, NULL);
+ dump_event(session->evlist, event, file_offset, &sample);
/* These events are processed right away */
switch (event->header.type) {
@@ -1495,7 +1494,6 @@ static s64 perf_session__process_event(struct perf_session *session,
{
struct perf_evlist *evlist = session->evlist;
struct perf_tool *tool = session->tool;
- struct perf_sample sample;
int ret;
if (session->header.needs_swap)
@@ -1509,21 +1507,19 @@ static s64 perf_session__process_event(struct perf_session *session,
if (event->header.type >= PERF_RECORD_USER_TYPE_START)
return perf_session__process_user_event(session, event, file_offset);
- /*
- * For all kernel events we get the sample data
- */
- ret = perf_evlist__parse_sample(evlist, event, &sample);
- if (ret)
- return ret;
-
if (tool->ordered_events) {
- ret = perf_session__queue_event(session, event, &sample, file_offset);
+ u64 timestamp = -1ULL;
+
+ ret = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
+ if (ret && ret != -1)
+ return ret;
+
+ ret = perf_session__queue_event(session, event, timestamp, file_offset);
if (ret != -ETIME)
return ret;
}
- return perf_session__deliver_event(session, event, &sample, tool,
- file_offset);
+ return perf_session__deliver_event(session, event, tool, file_offset);
}
void perf_event_header__bswap(struct perf_event_header *hdr)
@@ -1777,7 +1773,8 @@ done:
err = perf_session__flush_thread_stacks(session);
out_err:
free(buf);
- perf_session__warn_about_errors(session);
+ if (!tool->no_warn)
+ perf_session__warn_about_errors(session);
ordered_events__free(&session->ordered_events);
auxtrace__free_events(session);
return err;
@@ -1933,7 +1930,8 @@ out:
err = perf_session__flush_thread_stacks(session);
out_err:
ui_progress__finish();
- perf_session__warn_about_errors(session);
+ if (!tool->no_warn)
+ perf_session__warn_about_errors(session);
/*
* We may be switching perf.data output, make ordered_events
* reusable.
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index da1434a7c120..da40b4b380ca 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -53,7 +53,7 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset,
int perf_session__process_events(struct perf_session *session);
int perf_session__queue_event(struct perf_session *s, union perf_event *event,
- struct perf_sample *sample, u64 file_offset);
+ u64 timestamp, u64 file_offset);
void perf_tool__fill_defaults(struct perf_tool *tool);
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index a00eacdf02ed..2da4d0456a03 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -336,7 +336,7 @@ char *hist_entry__get_srcline(struct hist_entry *he)
return SRCLINE_UNKNOWN;
return get_srcline(map->dso, map__rip_2objdump(map, he->ip),
- he->ms.sym, true, true);
+ he->ms.sym, true, true, he->ip);
}
static int64_t
@@ -380,7 +380,8 @@ sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
map__rip_2objdump(map,
left->branch_info->from.al_addr),
left->branch_info->from.sym,
- true, true);
+ true, true,
+ left->branch_info->from.al_addr);
}
if (!right->branch_info->srcline_from) {
struct map *map = right->branch_info->from.map;
@@ -391,7 +392,8 @@ sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
map__rip_2objdump(map,
right->branch_info->from.al_addr),
right->branch_info->from.sym,
- true, true);
+ true, true,
+ right->branch_info->from.al_addr);
}
return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
}
@@ -423,7 +425,8 @@ sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
map__rip_2objdump(map,
left->branch_info->to.al_addr),
left->branch_info->from.sym,
- true, true);
+ true, true,
+ left->branch_info->to.al_addr);
}
if (!right->branch_info->srcline_to) {
struct map *map = right->branch_info->to.map;
@@ -434,7 +437,8 @@ sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
map__rip_2objdump(map,
right->branch_info->to.al_addr),
right->branch_info->to.sym,
- true, true);
+ true, true,
+ right->branch_info->to.al_addr);
}
return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
}
@@ -465,7 +469,7 @@ static char *hist_entry__get_srcfile(struct hist_entry *e)
return no_srcfile;
sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
- e->ms.sym, false, true, true);
+ e->ms.sym, false, true, true, e->ip);
if (!strcmp(sf, SRCLINE_UNKNOWN))
return no_srcfile;
p = strchr(sf, ':');
@@ -2883,10 +2887,10 @@ static int setup_output_list(struct perf_hpp_list *list, char *str)
tok; tok = strtok_r(NULL, ", ", &tmp)) {
ret = output_field_add(list, tok);
if (ret == -EINVAL) {
- pr_err("Invalid --fields key: `%s'", tok);
+ ui__error("Invalid --fields key: `%s'", tok);
break;
} else if (ret == -ESRCH) {
- pr_err("Unknown --fields key: `%s'", tok);
+ ui__error("Unknown --fields key: `%s'", tok);
break;
}
}
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
index d19f05c56de6..3c21fd059b64 100644
--- a/tools/perf/util/srcline.c
+++ b/tools/perf/util/srcline.c
@@ -496,7 +496,8 @@ out:
#define A2L_FAIL_LIMIT 123
char *__get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
- bool show_sym, bool show_addr, bool unwind_inlines)
+ bool show_sym, bool show_addr, bool unwind_inlines,
+ u64 ip)
{
char *file = NULL;
unsigned line = 0;
@@ -536,7 +537,7 @@ out:
if (sym) {
if (asprintf(&srcline, "%s+%" PRIu64, show_sym ? sym->name : "",
- addr - sym->start) < 0)
+ ip - sym->start) < 0)
return SRCLINE_UNKNOWN;
} else if (asprintf(&srcline, "%s[%" PRIx64 "]", dso->short_name, addr) < 0)
return SRCLINE_UNKNOWN;
@@ -550,9 +551,9 @@ void free_srcline(char *srcline)
}
char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
- bool show_sym, bool show_addr)
+ bool show_sym, bool show_addr, u64 ip)
{
- return __get_srcline(dso, addr, sym, show_sym, show_addr, false);
+ return __get_srcline(dso, addr, sym, show_sym, show_addr, false, ip);
}
struct srcline_node {
diff --git a/tools/perf/util/srcline.h b/tools/perf/util/srcline.h
index 847b7086182c..b2bb5502fd62 100644
--- a/tools/perf/util/srcline.h
+++ b/tools/perf/util/srcline.h
@@ -11,9 +11,10 @@ struct symbol;
extern bool srcline_full_filename;
char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
- bool show_sym, bool show_addr);
+ bool show_sym, bool show_addr, u64 ip);
char *__get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
- bool show_sym, bool show_addr, bool unwind_inlines);
+ bool show_sym, bool show_addr, bool unwind_inlines,
+ u64 ip);
void free_srcline(char *srcline);
/* insert the srcline into the DSO, which will take ownership */
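
The new ip argument keeps the "sym+offset" fallback reporting the original sample address while the lookup itself still uses the objdump-relocated address. A hedged caller sketch (perf-internal types assumed), mirroring the updated map.c and sort.c call sites:

/* Sketch: resolve a source line for a sample address within a map. */
#include <linux/types.h>
#include "util/map.h"
#include "util/srcline.h"
#include "util/symbol.h"

static char *sample_srcline(struct map *map, struct symbol *sym, u64 addr)
{
	/*
	 * Pass the objdump-relocated address for the lookup, but keep the
	 * original 'addr' as ip so the "sym+offset" fallback stays correct.
	 */
	return get_srcline(map->dso, map__rip_2objdump(map, addr),
			   sym, true, true, addr);
}

As in map__fprintf_srcline(), the returned string belongs to the caller and is released with free_srcline().
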
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 855e35cbb1dc..594d14a02b67 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -9,17 +9,6 @@
#include "expr.h"
#include "metricgroup.h"
-enum {
- CTX_BIT_USER = 1 << 0,
- CTX_BIT_KERNEL = 1 << 1,
- CTX_BIT_HV = 1 << 2,
- CTX_BIT_HOST = 1 << 3,
- CTX_BIT_IDLE = 1 << 4,
- CTX_BIT_MAX = 1 << 5,
-};
-
-#define NUM_CTX CTX_BIT_MAX
-
/*
* AGGR_GLOBAL: Use CPU 0
* AGGR_SOCKET: Use first CPU of socket
@@ -27,36 +16,18 @@ enum {
* AGGR_NONE: Use matching CPU
* AGGR_THREAD: Not supported?
*/
-static struct stats runtime_nsecs_stats[MAX_NR_CPUS];
-static struct stats runtime_cycles_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_stalled_cycles_front_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_stalled_cycles_back_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_branches_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_cacherefs_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_l1_dcache_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_l1_icache_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_ll_cache_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_itlb_cache_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_dtlb_cache_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_cycles_in_tx_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_transaction_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_elision_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_topdown_total_slots[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_topdown_slots_issued[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_topdown_slots_retired[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_topdown_fetch_bubbles[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_topdown_recovery_bubbles[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_smi_num_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_aperf_stats[NUM_CTX][MAX_NR_CPUS];
-static struct rblist runtime_saved_values;
static bool have_frontend_stalled;
+struct runtime_stat rt_stat;
struct stats walltime_nsecs_stats;
struct saved_value {
struct rb_node rb_node;
struct perf_evsel *evsel;
+ enum stat_type type;
+ int ctx;
int cpu;
+ struct runtime_stat *stat;
struct stats stats;
};
@@ -69,6 +40,30 @@ static int saved_value_cmp(struct rb_node *rb_node, const void *entry)
if (a->cpu != b->cpu)
return a->cpu - b->cpu;
+
+ /*
+ * Previously the rbtree was used to link generic metrics.
+ * The keys were evsel/cpu. Now the rbtree is extended to support
+ * per-thread shadow stats. For shadow stats case, the keys
+ * are cpu/type/ctx/stat (evsel is NULL). For generic metrics
+ * case, the keys are still evsel/cpu (type/ctx/stat are 0 or NULL).
+ */
+ if (a->type != b->type)
+ return a->type - b->type;
+
+ if (a->ctx != b->ctx)
+ return a->ctx - b->ctx;
+
+ if (a->evsel == NULL && b->evsel == NULL) {
+ if (a->stat == b->stat)
+ return 0;
+
+ if ((char *)a->stat < (char *)b->stat)
+ return -1;
+
+ return 1;
+ }
+
if (a->evsel == b->evsel)
return 0;
if ((char *)a->evsel < (char *)b->evsel)
@@ -87,34 +82,66 @@ static struct rb_node *saved_value_new(struct rblist *rblist __maybe_unused,
return &nd->rb_node;
}
+static void saved_value_delete(struct rblist *rblist __maybe_unused,
+ struct rb_node *rb_node)
+{
+ struct saved_value *v;
+
+ BUG_ON(!rb_node);
+ v = container_of(rb_node, struct saved_value, rb_node);
+ free(v);
+}
+
static struct saved_value *saved_value_lookup(struct perf_evsel *evsel,
int cpu,
- bool create)
+ bool create,
+ enum stat_type type,
+ int ctx,
+ struct runtime_stat *st)
{
+ struct rblist *rblist;
struct rb_node *nd;
struct saved_value dm = {
.cpu = cpu,
.evsel = evsel,
+ .type = type,
+ .ctx = ctx,
+ .stat = st,
};
- nd = rblist__find(&runtime_saved_values, &dm);
+
+ rblist = &st->value_list;
+
+ nd = rblist__find(rblist, &dm);
if (nd)
return container_of(nd, struct saved_value, rb_node);
if (create) {
- rblist__add_node(&runtime_saved_values, &dm);
- nd = rblist__find(&runtime_saved_values, &dm);
+ rblist__add_node(rblist, &dm);
+ nd = rblist__find(rblist, &dm);
if (nd)
return container_of(nd, struct saved_value, rb_node);
}
return NULL;
}
+void runtime_stat__init(struct runtime_stat *st)
+{
+ struct rblist *rblist = &st->value_list;
+
+ rblist__init(rblist);
+ rblist->node_cmp = saved_value_cmp;
+ rblist->node_new = saved_value_new;
+ rblist->node_delete = saved_value_delete;
+}
+
+void runtime_stat__exit(struct runtime_stat *st)
+{
+ rblist__exit(&st->value_list);
+}
+
void perf_stat__init_shadow_stats(void)
{
have_frontend_stalled = pmu_have_event("cpu", "stalled-cycles-frontend");
- rblist__init(&runtime_saved_values);
- runtime_saved_values.node_cmp = saved_value_cmp;
- runtime_saved_values.node_new = saved_value_new;
- /* No delete for now */
+ runtime_stat__init(&rt_stat);
}
static int evsel_context(struct perf_evsel *evsel)
@@ -135,36 +162,13 @@ static int evsel_context(struct perf_evsel *evsel)
return ctx;
}
-void perf_stat__reset_shadow_stats(void)
+static void reset_stat(struct runtime_stat *st)
{
+ struct rblist *rblist;
struct rb_node *pos, *next;
- memset(runtime_nsecs_stats, 0, sizeof(runtime_nsecs_stats));
- memset(runtime_cycles_stats, 0, sizeof(runtime_cycles_stats));
- memset(runtime_stalled_cycles_front_stats, 0, sizeof(runtime_stalled_cycles_front_stats));
- memset(runtime_stalled_cycles_back_stats, 0, sizeof(runtime_stalled_cycles_back_stats));
- memset(runtime_branches_stats, 0, sizeof(runtime_branches_stats));
- memset(runtime_cacherefs_stats, 0, sizeof(runtime_cacherefs_stats));
- memset(runtime_l1_dcache_stats, 0, sizeof(runtime_l1_dcache_stats));
- memset(runtime_l1_icache_stats, 0, sizeof(runtime_l1_icache_stats));
- memset(runtime_ll_cache_stats, 0, sizeof(runtime_ll_cache_stats));
- memset(runtime_itlb_cache_stats, 0, sizeof(runtime_itlb_cache_stats));
- memset(runtime_dtlb_cache_stats, 0, sizeof(runtime_dtlb_cache_stats));
- memset(runtime_cycles_in_tx_stats, 0,
- sizeof(runtime_cycles_in_tx_stats));
- memset(runtime_transaction_stats, 0,
- sizeof(runtime_transaction_stats));
- memset(runtime_elision_stats, 0, sizeof(runtime_elision_stats));
- memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
- memset(runtime_topdown_total_slots, 0, sizeof(runtime_topdown_total_slots));
- memset(runtime_topdown_slots_retired, 0, sizeof(runtime_topdown_slots_retired));
- memset(runtime_topdown_slots_issued, 0, sizeof(runtime_topdown_slots_issued));
- memset(runtime_topdown_fetch_bubbles, 0, sizeof(runtime_topdown_fetch_bubbles));
- memset(runtime_topdown_recovery_bubbles, 0, sizeof(runtime_topdown_recovery_bubbles));
- memset(runtime_smi_num_stats, 0, sizeof(runtime_smi_num_stats));
- memset(runtime_aperf_stats, 0, sizeof(runtime_aperf_stats));
-
- next = rb_first(&runtime_saved_values.entries);
+ rblist = &st->value_list;
+ next = rb_first(&rblist->entries);
while (next) {
pos = next;
next = rb_next(pos);
@@ -174,13 +178,35 @@ void perf_stat__reset_shadow_stats(void)
}
}
+void perf_stat__reset_shadow_stats(void)
+{
+ reset_stat(&rt_stat);
+ memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
+}
+
+void perf_stat__reset_shadow_per_stat(struct runtime_stat *st)
+{
+ reset_stat(st);
+}
+
+static void update_runtime_stat(struct runtime_stat *st,
+ enum stat_type type,
+ int ctx, int cpu, u64 count)
+{
+ struct saved_value *v = saved_value_lookup(NULL, cpu, true,
+ type, ctx, st);
+
+ if (v)
+ update_stats(&v->stats, count);
+}
+
/*
* Update various tracking values we maintain to print
* more semantic information such as miss/hit ratios,
* instruction rates, etc:
*/
void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 count,
- int cpu)
+ int cpu, struct runtime_stat *st)
{
int ctx = evsel_context(counter);
@@ -188,50 +214,58 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 count,
if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK) ||
perf_evsel__match(counter, SOFTWARE, SW_CPU_CLOCK))
- update_stats(&runtime_nsecs_stats[cpu], count);
+ update_runtime_stat(st, STAT_NSECS, 0, cpu, count);
else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
- update_stats(&runtime_cycles_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_CYCLES, ctx, cpu, count);
else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
- update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_CYCLES_IN_TX, ctx, cpu, count);
else if (perf_stat_evsel__is(counter, TRANSACTION_START))
- update_stats(&runtime_transaction_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_TRANSACTION, ctx, cpu, count);
else if (perf_stat_evsel__is(counter, ELISION_START))
- update_stats(&runtime_elision_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_ELISION, ctx, cpu, count);
else if (perf_stat_evsel__is(counter, TOPDOWN_TOTAL_SLOTS))
- update_stats(&runtime_topdown_total_slots[ctx][cpu], count);
+ update_runtime_stat(st, STAT_TOPDOWN_TOTAL_SLOTS,
+ ctx, cpu, count);
else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_ISSUED))
- update_stats(&runtime_topdown_slots_issued[ctx][cpu], count);
+ update_runtime_stat(st, STAT_TOPDOWN_SLOTS_ISSUED,
+ ctx, cpu, count);
else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_RETIRED))
- update_stats(&runtime_topdown_slots_retired[ctx][cpu], count);
+ update_runtime_stat(st, STAT_TOPDOWN_SLOTS_RETIRED,
+ ctx, cpu, count);
else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_BUBBLES))
- update_stats(&runtime_topdown_fetch_bubbles[ctx][cpu], count);
+ update_runtime_stat(st, STAT_TOPDOWN_FETCH_BUBBLES,
+ ctx, cpu, count);
else if (perf_stat_evsel__is(counter, TOPDOWN_RECOVERY_BUBBLES))
- update_stats(&runtime_topdown_recovery_bubbles[ctx][cpu], count);
+ update_runtime_stat(st, STAT_TOPDOWN_RECOVERY_BUBBLES,
+ ctx, cpu, count);
else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
- update_stats(&runtime_stalled_cycles_front_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_STALLED_CYCLES_FRONT,
+ ctx, cpu, count);
else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
- update_stats(&runtime_stalled_cycles_back_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_STALLED_CYCLES_BACK,
+ ctx, cpu, count);
else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
- update_stats(&runtime_branches_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_BRANCHES, ctx, cpu, count);
else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
- update_stats(&runtime_cacherefs_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_CACHEREFS, ctx, cpu, count);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
- update_stats(&runtime_l1_dcache_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_L1_DCACHE, ctx, cpu, count);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
- update_stats(&runtime_ll_cache_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_L1_ICACHE, ctx, cpu, count);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL))
- update_stats(&runtime_ll_cache_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_LL_CACHE, ctx, cpu, count);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
- update_stats(&runtime_dtlb_cache_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_DTLB_CACHE, ctx, cpu, count);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
- update_stats(&runtime_itlb_cache_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_ITLB_CACHE, ctx, cpu, count);
else if (perf_stat_evsel__is(counter, SMI_NUM))
- update_stats(&runtime_smi_num_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_SMI_NUM, ctx, cpu, count);
else if (perf_stat_evsel__is(counter, APERF))
- update_stats(&runtime_aperf_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_APERF, ctx, cpu, count);
if (counter->collect_stat) {
- struct saved_value *v = saved_value_lookup(counter, cpu, true);
+ struct saved_value *v = saved_value_lookup(counter, cpu, true,
+ STAT_NONE, 0, st);
update_stats(&v->stats, count);
}
}
@@ -352,15 +386,40 @@ void perf_stat__collect_metric_expr(struct perf_evlist *evsel_list)
}
}
+static double runtime_stat_avg(struct runtime_stat *st,
+ enum stat_type type, int ctx, int cpu)
+{
+ struct saved_value *v;
+
+ v = saved_value_lookup(NULL, cpu, false, type, ctx, st);
+ if (!v)
+ return 0.0;
+
+ return avg_stats(&v->stats);
+}
+
+static double runtime_stat_n(struct runtime_stat *st,
+ enum stat_type type, int ctx, int cpu)
+{
+ struct saved_value *v;
+
+ v = saved_value_lookup(NULL, cpu, false, type, ctx, st);
+ if (!v)
+ return 0.0;
+
+ return v->stats.n;
+}
+
static void print_stalled_cycles_frontend(int cpu,
struct perf_evsel *evsel, double avg,
- struct perf_stat_output_ctx *out)
+ struct perf_stat_output_ctx *out,
+ struct runtime_stat *st)
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
- total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
if (total)
ratio = avg / total * 100.0;
@@ -376,13 +435,14 @@ static void print_stalled_cycles_frontend(int cpu,
static void print_stalled_cycles_backend(int cpu,
struct perf_evsel *evsel, double avg,
- struct perf_stat_output_ctx *out)
+ struct perf_stat_output_ctx *out,
+ struct runtime_stat *st)
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
- total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
if (total)
ratio = avg / total * 100.0;
@@ -395,13 +455,14 @@ static void print_stalled_cycles_backend(int cpu,
static void print_branch_misses(int cpu,
struct perf_evsel *evsel,
double avg,
- struct perf_stat_output_ctx *out)
+ struct perf_stat_output_ctx *out,
+ struct runtime_stat *st)
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
- total = avg_stats(&runtime_branches_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_BRANCHES, ctx, cpu);
if (total)
ratio = avg / total * 100.0;
@@ -414,13 +475,15 @@ static void print_branch_misses(int cpu,
static void print_l1_dcache_misses(int cpu,
struct perf_evsel *evsel,
double avg,
- struct perf_stat_output_ctx *out)
+ struct perf_stat_output_ctx *out,
+ struct runtime_stat *st)
+
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
- total = avg_stats(&runtime_l1_dcache_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_L1_DCACHE, ctx, cpu);
if (total)
ratio = avg / total * 100.0;
@@ -433,13 +496,15 @@ static void print_l1_dcache_misses(int cpu,
static void print_l1_icache_misses(int cpu,
struct perf_evsel *evsel,
double avg,
- struct perf_stat_output_ctx *out)
+ struct perf_stat_output_ctx *out,
+ struct runtime_stat *st)
+
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
- total = avg_stats(&runtime_l1_icache_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_L1_ICACHE, ctx, cpu);
if (total)
ratio = avg / total * 100.0;
@@ -451,13 +516,14 @@ static void print_l1_icache_misses(int cpu,
static void print_dtlb_cache_misses(int cpu,
struct perf_evsel *evsel,
double avg,
- struct perf_stat_output_ctx *out)
+ struct perf_stat_output_ctx *out,
+ struct runtime_stat *st)
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
- total = avg_stats(&runtime_dtlb_cache_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_DTLB_CACHE, ctx, cpu);
if (total)
ratio = avg / total * 100.0;
@@ -469,13 +535,14 @@ static void print_dtlb_cache_misses(int cpu,
static void print_itlb_cache_misses(int cpu,
struct perf_evsel *evsel,
double avg,
- struct perf_stat_output_ctx *out)
+ struct perf_stat_output_ctx *out,
+ struct runtime_stat *st)
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
- total = avg_stats(&runtime_itlb_cache_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_ITLB_CACHE, ctx, cpu);
if (total)
ratio = avg / total * 100.0;
@@ -487,13 +554,14 @@ static void print_itlb_cache_misses(int cpu,
static void print_ll_cache_misses(int cpu,
struct perf_evsel *evsel,
double avg,
- struct perf_stat_output_ctx *out)
+ struct perf_stat_output_ctx *out,
+ struct runtime_stat *st)
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
- total = avg_stats(&runtime_ll_cache_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_LL_CACHE, ctx, cpu);
if (total)
ratio = avg / total * 100.0;
@@ -551,68 +619,72 @@ static double sanitize_val(double x)
return x;
}
-static double td_total_slots(int ctx, int cpu)
+static double td_total_slots(int ctx, int cpu, struct runtime_stat *st)
{
- return avg_stats(&runtime_topdown_total_slots[ctx][cpu]);
+ return runtime_stat_avg(st, STAT_TOPDOWN_TOTAL_SLOTS, ctx, cpu);
}
-static double td_bad_spec(int ctx, int cpu)
+static double td_bad_spec(int ctx, int cpu, struct runtime_stat *st)
{
double bad_spec = 0;
double total_slots;
double total;
- total = avg_stats(&runtime_topdown_slots_issued[ctx][cpu]) -
- avg_stats(&runtime_topdown_slots_retired[ctx][cpu]) +
- avg_stats(&runtime_topdown_recovery_bubbles[ctx][cpu]);
- total_slots = td_total_slots(ctx, cpu);
+ total = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_ISSUED, ctx, cpu) -
+ runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED, ctx, cpu) +
+ runtime_stat_avg(st, STAT_TOPDOWN_RECOVERY_BUBBLES, ctx, cpu);
+
+ total_slots = td_total_slots(ctx, cpu, st);
if (total_slots)
bad_spec = total / total_slots;
return sanitize_val(bad_spec);
}
-static double td_retiring(int ctx, int cpu)
+static double td_retiring(int ctx, int cpu, struct runtime_stat *st)
{
double retiring = 0;
- double total_slots = td_total_slots(ctx, cpu);
- double ret_slots = avg_stats(&runtime_topdown_slots_retired[ctx][cpu]);
+ double total_slots = td_total_slots(ctx, cpu, st);
+ double ret_slots = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED,
+ ctx, cpu);
if (total_slots)
retiring = ret_slots / total_slots;
return retiring;
}
-static double td_fe_bound(int ctx, int cpu)
+static double td_fe_bound(int ctx, int cpu, struct runtime_stat *st)
{
double fe_bound = 0;
- double total_slots = td_total_slots(ctx, cpu);
- double fetch_bub = avg_stats(&runtime_topdown_fetch_bubbles[ctx][cpu]);
+ double total_slots = td_total_slots(ctx, cpu, st);
+ double fetch_bub = runtime_stat_avg(st, STAT_TOPDOWN_FETCH_BUBBLES,
+ ctx, cpu);
if (total_slots)
fe_bound = fetch_bub / total_slots;
return fe_bound;
}
-static double td_be_bound(int ctx, int cpu)
+static double td_be_bound(int ctx, int cpu, struct runtime_stat *st)
{
- double sum = (td_fe_bound(ctx, cpu) +
- td_bad_spec(ctx, cpu) +
- td_retiring(ctx, cpu));
+ double sum = (td_fe_bound(ctx, cpu, st) +
+ td_bad_spec(ctx, cpu, st) +
+ td_retiring(ctx, cpu, st));
if (sum == 0)
return 0;
return sanitize_val(1.0 - sum);
}
static void print_smi_cost(int cpu, struct perf_evsel *evsel,
- struct perf_stat_output_ctx *out)
+ struct perf_stat_output_ctx *out,
+ struct runtime_stat *st)
{
double smi_num, aperf, cycles, cost = 0.0;
int ctx = evsel_context(evsel);
const char *color = NULL;
- smi_num = avg_stats(&runtime_smi_num_stats[ctx][cpu]);
- aperf = avg_stats(&runtime_aperf_stats[ctx][cpu]);
- cycles = avg_stats(&runtime_cycles_stats[ctx][cpu]);
+ smi_num = runtime_stat_avg(st, STAT_SMI_NUM, ctx, cpu);
+ aperf = runtime_stat_avg(st, STAT_APERF, ctx, cpu);
+ cycles = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
if ((cycles == 0) || (aperf == 0))
return;
@@ -632,7 +704,8 @@ static void generic_metric(const char *metric_expr,
const char *metric_name,
double avg,
int cpu,
- struct perf_stat_output_ctx *out)
+ struct perf_stat_output_ctx *out,
+ struct runtime_stat *st)
{
print_metric_t print_metric = out->print_metric;
struct parse_ctx pctx;
@@ -651,7 +724,8 @@ static void generic_metric(const char *metric_expr,
stats = &walltime_nsecs_stats;
scale = 1e-9;
} else {
- v = saved_value_lookup(metric_events[i], cpu, false);
+ v = saved_value_lookup(metric_events[i], cpu, false,
+ STAT_NONE, 0, st);
if (!v)
break;
stats = &v->stats;
@@ -679,7 +753,8 @@ static void generic_metric(const char *metric_expr,
void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
double avg, int cpu,
struct perf_stat_output_ctx *out,
- struct rblist *metric_events)
+ struct rblist *metric_events,
+ struct runtime_stat *st)
{
void *ctxp = out->ctx;
print_metric_t print_metric = out->print_metric;
@@ -690,7 +765,8 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
int num = 1;
if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
- total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
+
if (total) {
ratio = avg / total;
print_metric(ctxp, NULL, "%7.2f ",
@@ -698,8 +774,13 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
} else {
print_metric(ctxp, NULL, NULL, "insn per cycle", 0);
}
- total = avg_stats(&runtime_stalled_cycles_front_stats[ctx][cpu]);
- total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[ctx][cpu]));
+
+ total = runtime_stat_avg(st, STAT_STALLED_CYCLES_FRONT,
+ ctx, cpu);
+
+ total = max(total, runtime_stat_avg(st,
+ STAT_STALLED_CYCLES_BACK,
+ ctx, cpu));
if (total && avg) {
out->new_line(ctxp);
@@ -712,8 +793,8 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
"stalled cycles per insn", 0);
}
} else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
- if (runtime_branches_stats[ctx][cpu].n != 0)
- print_branch_misses(cpu, evsel, avg, out);
+ if (runtime_stat_n(st, STAT_BRANCHES, ctx, cpu) != 0)
+ print_branch_misses(cpu, evsel, avg, out, st);
else
print_metric(ctxp, NULL, NULL, "of all branches", 0);
} else if (
@@ -721,8 +802,9 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1D |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
- if (runtime_l1_dcache_stats[ctx][cpu].n != 0)
- print_l1_dcache_misses(cpu, evsel, avg, out);
+
+ if (runtime_stat_n(st, STAT_L1_DCACHE, ctx, cpu) != 0)
+ print_l1_dcache_misses(cpu, evsel, avg, out, st);
else
print_metric(ctxp, NULL, NULL, "of all L1-dcache hits", 0);
} else if (
@@ -730,8 +812,9 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1I |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
- if (runtime_l1_icache_stats[ctx][cpu].n != 0)
- print_l1_icache_misses(cpu, evsel, avg, out);
+
+ if (runtime_stat_n(st, STAT_L1_ICACHE, ctx, cpu) != 0)
+ print_l1_icache_misses(cpu, evsel, avg, out, st);
else
print_metric(ctxp, NULL, NULL, "of all L1-icache hits", 0);
} else if (
@@ -739,8 +822,9 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
evsel->attr.config == ( PERF_COUNT_HW_CACHE_DTLB |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
- if (runtime_dtlb_cache_stats[ctx][cpu].n != 0)
- print_dtlb_cache_misses(cpu, evsel, avg, out);
+
+ if (runtime_stat_n(st, STAT_DTLB_CACHE, ctx, cpu) != 0)
+ print_dtlb_cache_misses(cpu, evsel, avg, out, st);
else
print_metric(ctxp, NULL, NULL, "of all dTLB cache hits", 0);
} else if (
@@ -748,8 +832,9 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
evsel->attr.config == ( PERF_COUNT_HW_CACHE_ITLB |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
- if (runtime_itlb_cache_stats[ctx][cpu].n != 0)
- print_itlb_cache_misses(cpu, evsel, avg, out);
+
+ if (runtime_stat_n(st, STAT_ITLB_CACHE, ctx, cpu) != 0)
+ print_itlb_cache_misses(cpu, evsel, avg, out, st);
else
print_metric(ctxp, NULL, NULL, "of all iTLB cache hits", 0);
} else if (
@@ -757,27 +842,28 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
evsel->attr.config == ( PERF_COUNT_HW_CACHE_LL |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
- if (runtime_ll_cache_stats[ctx][cpu].n != 0)
- print_ll_cache_misses(cpu, evsel, avg, out);
+
+ if (runtime_stat_n(st, STAT_LL_CACHE, ctx, cpu) != 0)
+ print_ll_cache_misses(cpu, evsel, avg, out, st);
else
print_metric(ctxp, NULL, NULL, "of all LL-cache hits", 0);
} else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES)) {
- total = avg_stats(&runtime_cacherefs_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_CACHEREFS, ctx, cpu);
if (total)
ratio = avg * 100 / total;
- if (runtime_cacherefs_stats[ctx][cpu].n != 0)
+ if (runtime_stat_n(st, STAT_CACHEREFS, ctx, cpu) != 0)
print_metric(ctxp, NULL, "%8.3f %%",
"of all cache refs", ratio);
else
print_metric(ctxp, NULL, NULL, "of all cache refs", 0);
} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
- print_stalled_cycles_frontend(cpu, evsel, avg, out);
+ print_stalled_cycles_frontend(cpu, evsel, avg, out, st);
} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
- print_stalled_cycles_backend(cpu, evsel, avg, out);
+ print_stalled_cycles_backend(cpu, evsel, avg, out, st);
} else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
- total = avg_stats(&runtime_nsecs_stats[cpu]);
+ total = runtime_stat_avg(st, STAT_NSECS, 0, cpu);
if (total) {
ratio = avg / total;
@@ -786,7 +872,8 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
print_metric(ctxp, NULL, NULL, "Ghz", 0);
}
} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX)) {
- total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
+
if (total)
print_metric(ctxp, NULL,
"%7.2f%%", "transactional cycles",
@@ -795,8 +882,9 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
print_metric(ctxp, NULL, NULL, "transactional cycles",
0);
} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX_CP)) {
- total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
- total2 = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
+ total2 = runtime_stat_avg(st, STAT_CYCLES_IN_TX, ctx, cpu);
+
if (total2 < avg)
total2 = avg;
if (total)
@@ -805,19 +893,21 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
else
print_metric(ctxp, NULL, NULL, "aborted cycles", 0);
} else if (perf_stat_evsel__is(evsel, TRANSACTION_START)) {
- total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_CYCLES_IN_TX,
+ ctx, cpu);
if (avg)
ratio = total / avg;
- if (runtime_cycles_in_tx_stats[ctx][cpu].n != 0)
+ if (runtime_stat_n(st, STAT_CYCLES_IN_TX, ctx, cpu) != 0)
print_metric(ctxp, NULL, "%8.0f",
"cycles / transaction", ratio);
else
print_metric(ctxp, NULL, NULL, "cycles / transaction",
- 0);
+ 0);
} else if (perf_stat_evsel__is(evsel, ELISION_START)) {
- total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_CYCLES_IN_TX,
+ ctx, cpu);
if (avg)
ratio = total / avg;
@@ -831,28 +921,28 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
else
print_metric(ctxp, NULL, NULL, "CPUs utilized", 0);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_BUBBLES)) {
- double fe_bound = td_fe_bound(ctx, cpu);
+ double fe_bound = td_fe_bound(ctx, cpu, st);
if (fe_bound > 0.2)
color = PERF_COLOR_RED;
print_metric(ctxp, color, "%8.1f%%", "frontend bound",
fe_bound * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_RETIRED)) {
- double retiring = td_retiring(ctx, cpu);
+ double retiring = td_retiring(ctx, cpu, st);
if (retiring > 0.7)
color = PERF_COLOR_GREEN;
print_metric(ctxp, color, "%8.1f%%", "retiring",
retiring * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_RECOVERY_BUBBLES)) {
- double bad_spec = td_bad_spec(ctx, cpu);
+ double bad_spec = td_bad_spec(ctx, cpu, st);
if (bad_spec > 0.1)
color = PERF_COLOR_RED;
print_metric(ctxp, color, "%8.1f%%", "bad speculation",
bad_spec * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_ISSUED)) {
- double be_bound = td_be_bound(ctx, cpu);
+ double be_bound = td_be_bound(ctx, cpu, st);
const char *name = "backend bound";
static int have_recovery_bubbles = -1;
@@ -865,19 +955,19 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
if (be_bound > 0.2)
color = PERF_COLOR_RED;
- if (td_total_slots(ctx, cpu) > 0)
+ if (td_total_slots(ctx, cpu, st) > 0)
print_metric(ctxp, color, "%8.1f%%", name,
be_bound * 100.);
else
print_metric(ctxp, NULL, NULL, name, 0);
} else if (evsel->metric_expr) {
generic_metric(evsel->metric_expr, evsel->metric_events, evsel->name,
- evsel->metric_name, avg, cpu, out);
- } else if (runtime_nsecs_stats[cpu].n != 0) {
+ evsel->metric_name, avg, cpu, out, st);
+ } else if (runtime_stat_n(st, STAT_NSECS, 0, cpu) != 0) {
char unit = 'M';
char unit_buf[10];
- total = avg_stats(&runtime_nsecs_stats[cpu]);
+ total = runtime_stat_avg(st, STAT_NSECS, 0, cpu);
if (total)
ratio = 1000.0 * avg / total;
@@ -888,7 +978,7 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
print_metric(ctxp, NULL, "%8.3f", unit_buf, ratio);
} else if (perf_stat_evsel__is(evsel, SMI_NUM)) {
- print_smi_cost(cpu, evsel, out);
+ print_smi_cost(cpu, evsel, out, st);
} else {
num = 0;
}
@@ -901,7 +991,7 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
out->new_line(ctxp);
generic_metric(mexp->metric_expr, mexp->metric_events,
evsel->name, mexp->metric_name,
- avg, cpu, out);
+ avg, cpu, out, st);
}
}
if (num == 0)
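
Every helper above now takes a struct runtime_stat pointer and calls runtime_stat_avg()/runtime_stat_n() instead of reading the old file-scope runtime_*_stats[ctx][cpu] arrays, so the same printing code can be pointed at different containers (the shared rt_stat or a per-thread one). A minimal sketch of that indirection, with a flat array standing in for the rblist-backed struct runtime_stat declared in stat.h below; every name here is hypothetical:

  #include <stdio.h>

  enum demo_stat_type { DEMO_STAT_CYCLES, DEMO_STAT_NSECS, DEMO_STAT_MAX };
  #define DEMO_NUM_CTX 32
  #define DEMO_MAX_CPU 4

  struct demo_stats { double mean; unsigned long n; };

  struct demo_runtime_stat {
      struct demo_stats v[DEMO_STAT_MAX][DEMO_NUM_CTX][DEMO_MAX_CPU];
  };

  static double demo_stat_avg(struct demo_runtime_stat *st, int type, int ctx, int cpu)
  {
      return st->v[type][ctx][cpu].mean;
  }

  static unsigned long demo_stat_n(struct demo_runtime_stat *st, int type, int ctx, int cpu)
  {
      return st->v[type][ctx][cpu].n;
  }

  static void demo_print_ipc(struct demo_runtime_stat *st, int ctx, int cpu, double instructions)
  {
      if (!demo_stat_n(st, DEMO_STAT_CYCLES, ctx, cpu))
          return;
      printf("%7.2f insn per cycle\n",
             instructions / demo_stat_avg(st, DEMO_STAT_CYCLES, ctx, cpu));
  }

  int main(void)
  {
      static struct demo_runtime_stat global, per_thread;

      global.v[DEMO_STAT_CYCLES][0][0] = (struct demo_stats){ 2.0e9, 1 };
      per_thread.v[DEMO_STAT_CYCLES][0][0] = (struct demo_stats){ 5.0e8, 1 };

      demo_print_ipc(&global, 0, 0, 3.0e9);      /* same code ...           */
      demo_print_ipc(&per_thread, 0, 0, 9.0e8);  /* ... different container */
      return 0;
  }

The only point of the sketch is the extra st argument: callers choose the container, the metric code stays unchanged.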
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 151e9efd7286..32235657c1ac 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -278,9 +278,16 @@ process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel
perf_evsel__compute_deltas(evsel, cpu, thread, count);
perf_counts_values__scale(count, config->scale, NULL);
if (config->aggr_mode == AGGR_NONE)
- perf_stat__update_shadow_stats(evsel, count->val, cpu);
- if (config->aggr_mode == AGGR_THREAD)
- perf_stat__update_shadow_stats(evsel, count->val, 0);
+ perf_stat__update_shadow_stats(evsel, count->val, cpu,
+ &rt_stat);
+ if (config->aggr_mode == AGGR_THREAD) {
+ if (config->stats)
+ perf_stat__update_shadow_stats(evsel,
+ count->val, 0, &config->stats[thread]);
+ else
+ perf_stat__update_shadow_stats(evsel,
+ count->val, 0, &rt_stat);
+ }
break;
case AGGR_GLOBAL:
aggr->val += count->val;
@@ -362,7 +369,7 @@ int perf_stat_process_counter(struct perf_stat_config *config,
/*
* Save the full runtime - to allow normalization during printout:
*/
- perf_stat__update_shadow_stats(counter, *count, 0);
+ perf_stat__update_shadow_stats(counter, *count, 0, &rt_stat);
return 0;
}
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index eefca5c981fd..dbc6f7134f61 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -5,6 +5,7 @@
#include <linux/types.h>
#include <stdio.h>
#include "xyarray.h"
+#include "rblist.h"
struct stats
{
@@ -43,11 +44,54 @@ enum aggr_mode {
AGGR_UNSET,
};
+enum {
+ CTX_BIT_USER = 1 << 0,
+ CTX_BIT_KERNEL = 1 << 1,
+ CTX_BIT_HV = 1 << 2,
+ CTX_BIT_HOST = 1 << 3,
+ CTX_BIT_IDLE = 1 << 4,
+ CTX_BIT_MAX = 1 << 5,
+};
+
+#define NUM_CTX CTX_BIT_MAX
+
+enum stat_type {
+ STAT_NONE = 0,
+ STAT_NSECS,
+ STAT_CYCLES,
+ STAT_STALLED_CYCLES_FRONT,
+ STAT_STALLED_CYCLES_BACK,
+ STAT_BRANCHES,
+ STAT_CACHEREFS,
+ STAT_L1_DCACHE,
+ STAT_L1_ICACHE,
+ STAT_LL_CACHE,
+ STAT_ITLB_CACHE,
+ STAT_DTLB_CACHE,
+ STAT_CYCLES_IN_TX,
+ STAT_TRANSACTION,
+ STAT_ELISION,
+ STAT_TOPDOWN_TOTAL_SLOTS,
+ STAT_TOPDOWN_SLOTS_ISSUED,
+ STAT_TOPDOWN_SLOTS_RETIRED,
+ STAT_TOPDOWN_FETCH_BUBBLES,
+ STAT_TOPDOWN_RECOVERY_BUBBLES,
+ STAT_SMI_NUM,
+ STAT_APERF,
+ STAT_MAX
+};
+
+struct runtime_stat {
+ struct rblist value_list;
+};
+
struct perf_stat_config {
enum aggr_mode aggr_mode;
bool scale;
FILE *output;
unsigned int interval;
+ struct runtime_stat *stats;
+ int stats_num;
};
void update_stats(struct stats *stats, u64 val);
@@ -67,6 +111,15 @@ static inline void init_stats(struct stats *stats)
struct perf_evsel;
struct perf_evlist;
+struct perf_aggr_thread_value {
+ struct perf_evsel *counter;
+ int id;
+ double uval;
+ u64 val;
+ u64 run;
+ u64 ena;
+};
+
bool __perf_evsel_stat__is(struct perf_evsel *evsel,
enum perf_stat_evsel_id id);
@@ -75,16 +128,20 @@ bool __perf_evsel_stat__is(struct perf_evsel *evsel,
void perf_stat_evsel_id_init(struct perf_evsel *evsel);
+extern struct runtime_stat rt_stat;
extern struct stats walltime_nsecs_stats;
typedef void (*print_metric_t)(void *ctx, const char *color, const char *unit,
const char *fmt, double val);
typedef void (*new_line_t )(void *ctx);
+void runtime_stat__init(struct runtime_stat *st);
+void runtime_stat__exit(struct runtime_stat *st);
void perf_stat__init_shadow_stats(void);
void perf_stat__reset_shadow_stats(void);
+void perf_stat__reset_shadow_per_stat(struct runtime_stat *st);
void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 count,
- int cpu);
+ int cpu, struct runtime_stat *st);
struct perf_stat_output_ctx {
void *ctx;
print_metric_t print_metric;
@@ -92,11 +149,11 @@ struct perf_stat_output_ctx {
bool force_header;
};
-struct rblist;
void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
double avg, int cpu,
struct perf_stat_output_ctx *out,
- struct rblist *metric_events);
+ struct rblist *metric_events,
+ struct runtime_stat *st);
void perf_stat__collect_metric_expr(struct perf_evlist *);
int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw);
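
The CTX_BIT_* values exported above behave as a bitmask over an event's exclude_* attributes, which is why NUM_CTX equals CTX_BIT_MAX: five bits give 32 possible contexts to index the per-context stats. A hedged sketch of how such a context index is presumably built (perf's real evsel_context() lives in stat-shadow.c; the struct and function below are illustrative only):

  #include <stdio.h>

  /* Illustrative stand-in for the exclude_* bits of perf_event_attr. */
  struct demo_event_attr {
      unsigned int exclude_user   : 1;
      unsigned int exclude_kernel : 1;
      unsigned int exclude_hv     : 1;
      unsigned int exclude_host   : 1;
      unsigned int exclude_idle   : 1;
  };

  enum {
      CTX_BIT_USER   = 1 << 0,
      CTX_BIT_KERNEL = 1 << 1,
      CTX_BIT_HV     = 1 << 2,
      CTX_BIT_HOST   = 1 << 3,
      CTX_BIT_IDLE   = 1 << 4,
      CTX_BIT_MAX    = 1 << 5,
  };

  static int demo_evsel_context(const struct demo_event_attr *attr)
  {
      int ctx = 0;

      if (attr->exclude_kernel)
          ctx |= CTX_BIT_KERNEL;
      if (attr->exclude_user)
          ctx |= CTX_BIT_USER;
      if (attr->exclude_hv)
          ctx |= CTX_BIT_HV;
      if (attr->exclude_host)
          ctx |= CTX_BIT_HOST;
      if (attr->exclude_idle)
          ctx |= CTX_BIT_IDLE;
      return ctx;    /* 0 .. CTX_BIT_MAX - 1, indexes a NUM_CTX-sized table */
  }

  int main(void)
  {
      struct demo_event_attr attr = { .exclude_kernel = 1, .exclude_hv = 1 };

      printf("ctx index: %d\n", demo_evsel_context(&attr));    /* prints 6 */
      return 0;
  }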
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index aaa08ee8c717..d8bfd0c4d2cb 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -396,3 +396,49 @@ out_err_overflow:
free(expr);
return NULL;
}
+
+/* Like strpbrk(), but do not break if the matched character is right after a backslash (escaped) */
+char *strpbrk_esc(char *str, const char *stopset)
+{
+ char *ptr;
+
+ do {
+ ptr = strpbrk(str, stopset);
+ if (ptr == str ||
+ (ptr == str + 1 && *(ptr - 1) != '\\'))
+ break;
+ str = ptr + 1;
+ } while (ptr && *(ptr - 1) == '\\' && *(ptr - 2) != '\\');
+
+ return ptr;
+}
+
+/* Like strdup, but do not copy lone (escaping) backslashes */
+char *strdup_esc(const char *str)
+{
+ char *s, *d, *p, *ret = strdup(str);
+
+ if (!ret)
+ return NULL;
+
+ d = strchr(ret, '\\');
+ if (!d)
+ return ret;
+
+ s = d + 1;
+ do {
+ if (*s == '\0') {
+ *d = '\0';
+ break;
+ }
+ p = strchr(s + 1, '\\');
+ if (p) {
+ memmove(d, s, p - s);
+ d += p - s;
+ s = p + 1;
+ } else
+ memmove(d, s, strlen(s) + 1);
+ } while (p);
+
+ return ret;
+}
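
Together these helpers let callers split user-supplied strings on separator characters while honouring backslash escapes. A small standalone demonstration (strpbrk_esc() is copied from the hunk above so the sketch runs on its own; the sample input is made up):

  #include <stdio.h>
  #include <string.h>

  /* Copied from the patch above so this example is self-contained. */
  static char *strpbrk_esc(char *str, const char *stopset)
  {
      char *ptr;

      do {
          ptr = strpbrk(str, stopset);
          if (ptr == str ||
              (ptr == str + 1 && *(ptr - 1) != '\\'))
              break;
          str = ptr + 1;
      } while (ptr && *(ptr - 1) == '\\' && *(ptr - 2) != '\\');

      return ptr;
  }

  int main(void)
  {
      char buf[] = "probe\\;name;arg";    /* the first ';' is escaped */
      char *sep = strpbrk_esc(buf, ";");

      printf("separator found at offset %ld\n", sep ? (long)(sep - buf) : -1L);
      return 0;
  }

With the escaped input above the split point is the second ';' (offset 11), not the first.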
diff --git a/tools/perf/util/string2.h b/tools/perf/util/string2.h
index ee14ca5451ab..4c68a09b97e8 100644
--- a/tools/perf/util/string2.h
+++ b/tools/perf/util/string2.h
@@ -39,5 +39,7 @@ static inline char *asprintf_expr_not_in_ints(const char *var, size_t nints, int
return asprintf_expr_inout_ints(var, false, nints, ints);
}
+char *strpbrk_esc(char *str, const char *stopset);
+char *strdup_esc(const char *str);
#endif /* PERF_STRING_H */
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 1b67a8639dfe..cc065d4bfafc 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -94,6 +94,11 @@ static int prefix_underscores_count(const char *str)
return tail - str;
}
+const char * __weak arch__normalize_symbol_name(const char *name)
+{
+ return name;
+}
+
int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
{
return strcmp(namea, nameb);
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index a4f0075b4e5c..0563f33c1eb3 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -349,6 +349,7 @@ bool elf__needs_adjust_symbols(GElf_Ehdr ehdr);
void arch__sym_update(struct symbol *s, GElf_Sym *sym);
#endif
+const char *arch__normalize_symbol_name(const char *name);
#define SYMBOL_A 0
#define SYMBOL_B 1
diff --git a/tools/perf/util/syscalltbl.c b/tools/perf/util/syscalltbl.c
index 6eea7cff3d4e..303bdb84ab5a 100644
--- a/tools/perf/util/syscalltbl.c
+++ b/tools/perf/util/syscalltbl.c
@@ -26,6 +26,10 @@
#include <asm/syscalls_64.c>
const int syscalltbl_native_max_id = SYSCALLTBL_x86_64_MAX_ID;
static const char **syscalltbl_native = syscalltbl_x86_64;
+#elif defined(__s390x__)
+#include <asm/syscalls_64.c>
+const int syscalltbl_native_max_id = SYSCALLTBL_S390_64_MAX_ID;
+static const char **syscalltbl_native = syscalltbl_s390_64;
#endif
struct syscall {
diff --git a/tools/perf/util/target.h b/tools/perf/util/target.h
index 446aa7a56f25..6ef01a83b24e 100644
--- a/tools/perf/util/target.h
+++ b/tools/perf/util/target.h
@@ -64,6 +64,11 @@ static inline bool target__none(struct target *target)
return !target__has_task(target) && !target__has_cpu(target);
}
+static inline bool target__has_per_thread(struct target *target)
+{
+ return target->system_wide && target->per_thread;
+}
+
static inline bool target__uses_dummy_map(struct target *target)
{
bool use_dummy = false;
@@ -73,6 +78,8 @@ static inline bool target__uses_dummy_map(struct target *target)
else if (target__has_task(target) ||
(!target__has_cpu(target) && !target->uses_mmap))
use_dummy = true;
+ else if (target__has_per_thread(target))
+ use_dummy = true;
return use_dummy;
}
diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c
index be0d5a736dea..3e1038f6491c 100644
--- a/tools/perf/util/thread_map.c
+++ b/tools/perf/util/thread_map.c
@@ -92,7 +92,7 @@ struct thread_map *thread_map__new_by_tid(pid_t tid)
return threads;
}
-struct thread_map *thread_map__new_by_uid(uid_t uid)
+static struct thread_map *__thread_map__new_all_cpus(uid_t uid)
{
DIR *proc;
int max_threads = 32, items, i;
@@ -113,7 +113,6 @@ struct thread_map *thread_map__new_by_uid(uid_t uid)
while ((dirent = readdir(proc)) != NULL) {
char *end;
bool grow = false;
- struct stat st;
pid_t pid = strtol(dirent->d_name, &end, 10);
if (*end) /* only interested in proper numerical dirents */
@@ -121,11 +120,12 @@ struct thread_map *thread_map__new_by_uid(uid_t uid)
snprintf(path, sizeof(path), "/proc/%s", dirent->d_name);
- if (stat(path, &st) != 0)
- continue;
+ if (uid != UINT_MAX) {
+ struct stat st;
- if (st.st_uid != uid)
- continue;
+ if (stat(path, &st) != 0 || st.st_uid != uid)
+ continue;
+ }
snprintf(path, sizeof(path), "/proc/%d/task", pid);
items = scandir(path, &namelist, filter, NULL);
@@ -178,6 +178,16 @@ out_free_closedir:
goto out_closedir;
}
+struct thread_map *thread_map__new_all_cpus(void)
+{
+ return __thread_map__new_all_cpus(UINT_MAX);
+}
+
+struct thread_map *thread_map__new_by_uid(uid_t uid)
+{
+ return __thread_map__new_all_cpus(uid);
+}
+
struct thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid)
{
if (pid != -1)
@@ -313,7 +323,7 @@ out_free_threads:
}
struct thread_map *thread_map__new_str(const char *pid, const char *tid,
- uid_t uid)
+ uid_t uid, bool per_thread)
{
if (pid)
return thread_map__new_by_pid_str(pid);
@@ -321,6 +331,9 @@ struct thread_map *thread_map__new_str(const char *pid, const char *tid,
if (!tid && uid != UINT_MAX)
return thread_map__new_by_uid(uid);
+ if (per_thread)
+ return thread_map__new_all_cpus();
+
return thread_map__new_by_tid_str(tid);
}
diff --git a/tools/perf/util/thread_map.h b/tools/perf/util/thread_map.h
index f15803985435..0a806b99e73c 100644
--- a/tools/perf/util/thread_map.h
+++ b/tools/perf/util/thread_map.h
@@ -23,6 +23,7 @@ struct thread_map *thread_map__new_dummy(void);
struct thread_map *thread_map__new_by_pid(pid_t pid);
struct thread_map *thread_map__new_by_tid(pid_t tid);
struct thread_map *thread_map__new_by_uid(uid_t uid);
+struct thread_map *thread_map__new_all_cpus(void);
struct thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid);
struct thread_map *thread_map__new_event(struct thread_map_event *event);
@@ -30,7 +31,7 @@ struct thread_map *thread_map__get(struct thread_map *map);
void thread_map__put(struct thread_map *map);
struct thread_map *thread_map__new_str(const char *pid,
- const char *tid, uid_t uid);
+ const char *tid, uid_t uid, bool per_thread);
struct thread_map *thread_map__new_by_tid_str(const char *tid_str);
diff --git a/tools/perf/util/time-utils.c b/tools/perf/util/time-utils.c
index 81927d027417..6193b46050a5 100644
--- a/tools/perf/util/time-utils.c
+++ b/tools/perf/util/time-utils.c
@@ -6,6 +6,7 @@
#include <time.h>
#include <errno.h>
#include <inttypes.h>
+#include <math.h>
#include "perf.h"
#include "debug.h"
@@ -60,11 +61,10 @@ static int parse_timestr_sec_nsec(struct perf_time_interval *ptime,
return 0;
}
-int perf_time__parse_str(struct perf_time_interval *ptime, const char *ostr)
+static int split_start_end(char **start, char **end, const char *ostr, char ch)
{
char *start_str, *end_str;
char *d, *str;
- int rc = 0;
if (ostr == NULL || *ostr == '\0')
return 0;
@@ -74,25 +74,35 @@ int perf_time__parse_str(struct perf_time_interval *ptime, const char *ostr)
if (str == NULL)
return -ENOMEM;
- ptime->start = 0;
- ptime->end = 0;
-
- /* str has the format: <start>,<stop>
- * variations: <start>,
- * ,<stop>
- * ,
- */
start_str = str;
- d = strchr(start_str, ',');
+ d = strchr(start_str, ch);
if (d) {
*d = '\0';
++d;
}
end_str = d;
+ *start = start_str;
+ *end = end_str;
+
+ return 0;
+}
+
+int perf_time__parse_str(struct perf_time_interval *ptime, const char *ostr)
+{
+ char *start_str = NULL, *end_str;
+ int rc;
+
+ rc = split_start_end(&start_str, &end_str, ostr, ',');
+ if (rc || !start_str)
+ return rc;
+
+ ptime->start = 0;
+ ptime->end = 0;
+
rc = parse_timestr_sec_nsec(ptime, start_str, end_str);
- free(str);
+ free(start_str);
/* make sure end time is after start time if it was given */
if (rc == 0 && ptime->end && ptime->end < ptime->start)
@@ -104,6 +114,245 @@ int perf_time__parse_str(struct perf_time_interval *ptime, const char *ostr)
return rc;
}
+static int parse_percent(double *pcnt, char *str)
+{
+ char *c, *endptr;
+ double d;
+
+ c = strchr(str, '%');
+ if (c)
+ *c = '\0';
+ else
+ return -1;
+
+ d = strtod(str, &endptr);
+ if (endptr != str + strlen(str))
+ return -1;
+
+ *pcnt = d / 100.0;
+ return 0;
+}
+
+static int percent_slash_split(char *str, struct perf_time_interval *ptime,
+ u64 start, u64 end)
+{
+ char *p, *end_str;
+ double pcnt, start_pcnt, end_pcnt;
+ u64 total = end - start;
+ int i;
+
+ /*
+ * Example:
+ * 10%/2: select the second 10% slice
+ */
+
+ /* We can modify this string since the original one is copied */
+ p = strchr(str, '/');
+ if (!p)
+ return -1;
+
+ *p = '\0';
+ if (parse_percent(&pcnt, str) < 0)
+ return -1;
+
+ p++;
+ i = (int)strtol(p, &end_str, 10);
+ if (*end_str)
+ return -1;
+
+ if (pcnt <= 0.0)
+ return -1;
+
+ start_pcnt = pcnt * (i - 1);
+ end_pcnt = pcnt * i;
+
+ if (start_pcnt < 0.0 || start_pcnt > 1.0 ||
+ end_pcnt < 0.0 || end_pcnt > 1.0) {
+ return -1;
+ }
+
+ ptime->start = start + round(start_pcnt * total);
+ ptime->end = start + round(end_pcnt * total);
+
+ return 0;
+}
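
In other words, percent_slash_split() turns a token like "10%/2" into the second 10% slice of the session's time window. The arithmetic on its own, as a hedged standalone sketch (the window values are invented; the real function also validates the string and handles errors):

  #include <stdio.h>
  #include <stdint.h>
  #include <math.h>

  int main(void)
  {
      uint64_t start = 1000000, end = 2000000;    /* invented session window */
      uint64_t total = end - start;
      double pcnt = 0.10;                         /* "10%" */
      int i = 2;                                  /* "/2": the second slice */

      uint64_t slice_start = start + (uint64_t)round(pcnt * (i - 1) * total);
      uint64_t slice_end   = start + (uint64_t)round(pcnt * i * total);

      /* 10%/2 selects [start + 10% .. start + 20%] of the window. */
      printf("slice: %llu .. %llu\n",
             (unsigned long long)slice_start, (unsigned long long)slice_end);
      return 0;
  }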
+
+static int percent_dash_split(char *str, struct perf_time_interval *ptime,
+ u64 start, u64 end)
+{
+ char *start_str = NULL, *end_str;
+ double start_pcnt, end_pcnt;
+ u64 total = end - start;
+ int ret;
+
+ /*
+ * Example: 0%-10%
+ */
+
+ ret = split_start_end(&start_str, &end_str, str, '-');
+ if (ret || !start_str)
+ return ret;
+
+ if ((parse_percent(&start_pcnt, start_str) != 0) ||
+ (parse_percent(&end_pcnt, end_str) != 0)) {
+ free(start_str);
+ return -1;
+ }
+
+ free(start_str);
+
+ if (start_pcnt < 0.0 || start_pcnt > 1.0 ||
+ end_pcnt < 0.0 || end_pcnt > 1.0 ||
+ start_pcnt > end_pcnt) {
+ return -1;
+ }
+
+ ptime->start = start + round(start_pcnt * total);
+ ptime->end = start + round(end_pcnt * total);
+
+ return 0;
+}
+
+typedef int (*time_percent_split)(char *, struct perf_time_interval *,
+ u64 start, u64 end);
+
+static int percent_comma_split(struct perf_time_interval *ptime_buf, int num,
+ const char *ostr, u64 start, u64 end,
+ time_percent_split func)
+{
+ char *str, *p1, *p2;
+ int len, ret, i = 0;
+
+ str = strdup(ostr);
+ if (str == NULL)
+ return -ENOMEM;
+
+ len = strlen(str);
+ p1 = str;
+
+ while (p1 < str + len) {
+ if (i >= num) {
+ free(str);
+ return -1;
+ }
+
+ p2 = strchr(p1, ',');
+ if (p2)
+ *p2 = '\0';
+
+ ret = (func)(p1, &ptime_buf[i], start, end);
+ if (ret < 0) {
+ free(str);
+ return -1;
+ }
+
+ pr_debug("start time %d: %" PRIu64 ", ", i, ptime_buf[i].start);
+ pr_debug("end time %d: %" PRIu64 "\n", i, ptime_buf[i].end);
+
+ i++;
+
+ if (p2)
+ p1 = p2 + 1;
+ else
+ break;
+ }
+
+ free(str);
+ return i;
+}
+
+static int one_percent_convert(struct perf_time_interval *ptime_buf,
+ const char *ostr, u64 start, u64 end, char *c)
+{
+ char *str;
+ int len = strlen(ostr), ret;
+
+ /*
+ * c points to '%'.
+ * '%' should be the last character
+ */
+ if (ostr + len - 1 != c)
+ return -1;
+
+ /*
+ * Construct a string like "xx%/1"
+ */
+ str = malloc(len + 3);
+ if (str == NULL)
+ return -ENOMEM;
+
+ memcpy(str, ostr, len);
+ strcpy(str + len, "/1");
+
+ ret = percent_slash_split(str, ptime_buf, start, end);
+ if (ret == 0)
+ ret = 1;
+
+ free(str);
+ return ret;
+}
+
+int perf_time__percent_parse_str(struct perf_time_interval *ptime_buf, int num,
+ const char *ostr, u64 start, u64 end)
+{
+ char *c;
+
+ /*
+ * ostr example:
+ * 10%/2,10%/3: select the second 10% slice and the third 10% slice
+ * 0%-10%,30%-40%: multiple time range
+ * 50%: just one percent
+ */
+
+ memset(ptime_buf, 0, sizeof(*ptime_buf) * num);
+
+ c = strchr(ostr, '/');
+ if (c) {
+ return percent_comma_split(ptime_buf, num, ostr, start,
+ end, percent_slash_split);
+ }
+
+ c = strchr(ostr, '-');
+ if (c) {
+ return percent_comma_split(ptime_buf, num, ostr, start,
+ end, percent_dash_split);
+ }
+
+ c = strchr(ostr, '%');
+ if (c)
+ return one_percent_convert(ptime_buf, ostr, start, end, c);
+
+ return -1;
+}
+
+struct perf_time_interval *perf_time__range_alloc(const char *ostr, int *size)
+{
+ const char *p1, *p2;
+ int i = 1;
+ struct perf_time_interval *ptime;
+
+ /*
+ * At least allocate one time range.
+ */
+ if (!ostr)
+ goto alloc;
+
+ p1 = ostr;
+ while (p1 < ostr + strlen(ostr)) {
+ p2 = strchr(p1, ',');
+ if (!p2)
+ break;
+
+ p1 = p2 + 1;
+ i++;
+ }
+
+alloc:
+ *size = i;
+ ptime = calloc(i, sizeof(*ptime));
+ return ptime;
+}
+
bool perf_time__skip_sample(struct perf_time_interval *ptime, u64 timestamp)
{
/* if time is not set don't drop sample */
@@ -119,6 +368,34 @@ bool perf_time__skip_sample(struct perf_time_interval *ptime, u64 timestamp)
return false;
}
+bool perf_time__ranges_skip_sample(struct perf_time_interval *ptime_buf,
+ int num, u64 timestamp)
+{
+ struct perf_time_interval *ptime;
+ int i;
+
+ if ((timestamp == 0) || (num == 0))
+ return false;
+
+ if (num == 1)
+ return perf_time__skip_sample(&ptime_buf[0], timestamp);
+
+ /*
+ * start/end of multiple time ranges must be valid.
+ */
+ for (i = 0; i < num; i++) {
+ ptime = &ptime_buf[i];
+
+ if (timestamp >= ptime->start &&
+ ((timestamp < ptime->end && i < num - 1) ||
+ (timestamp <= ptime->end && i == num - 1))) {
+ break;
+ }
+ }
+
+ return (i == num) ? true : false;
+}
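
perf_time__ranges_skip_sample() drops a sample unless its timestamp lies inside one of the parsed ranges; note the boundary rule above: the end of every range except the last is exclusive, the last one is inclusive. A minimal standalone sketch of just that check (struct name and values are illustrative):

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  struct demo_interval { uint64_t start, end; };

  static bool demo_skip(const struct demo_interval *r, int num, uint64_t ts)
  {
      int i;

      for (i = 0; i < num; i++) {
          bool last = (i == num - 1);

          if (ts >= r[i].start && (last ? ts <= r[i].end : ts < r[i].end))
              return false;    /* inside a range: keep the sample */
      }
      return true;             /* outside every range: skip it */
  }

  int main(void)
  {
      struct demo_interval ranges[] = { { 100, 200 }, { 300, 400 } };

      printf("%d %d %d\n",
             demo_skip(ranges, 2, 150),   /* 0: kept */
             demo_skip(ranges, 2, 200),   /* 1: non-final range end is exclusive */
             demo_skip(ranges, 2, 400));  /* 0: final range end is inclusive */
      return 0;
  }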
+
int timestamp__scnprintf_usec(u64 timestamp, char *buf, size_t sz)
{
u64 sec = timestamp / NSEC_PER_SEC;
diff --git a/tools/perf/util/time-utils.h b/tools/perf/util/time-utils.h
index 15b475c50ccf..70b177d2b98c 100644
--- a/tools/perf/util/time-utils.h
+++ b/tools/perf/util/time-utils.h
@@ -13,8 +13,16 @@ int parse_nsec_time(const char *str, u64 *ptime);
int perf_time__parse_str(struct perf_time_interval *ptime, const char *ostr);
+int perf_time__percent_parse_str(struct perf_time_interval *ptime_buf, int num,
+ const char *ostr, u64 start, u64 end);
+
+struct perf_time_interval *perf_time__range_alloc(const char *ostr, int *size);
+
bool perf_time__skip_sample(struct perf_time_interval *ptime, u64 timestamp);
+bool perf_time__ranges_skip_sample(struct perf_time_interval *ptime_buf,
+ int num, u64 timestamp);
+
int timestamp__scnprintf_usec(u64 timestamp, char *buf, size_t sz);
int fetch_current_timestamp(char *buf, size_t sz);
diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h
index 2532b558099b..183c91453522 100644
--- a/tools/perf/util/tool.h
+++ b/tools/perf/util/tool.h
@@ -76,6 +76,7 @@ struct perf_tool {
bool ordered_events;
bool ordering_requires_timestamps;
bool namespace_events;
+ bool no_warn;
enum show_feature_header show_feat_hdr;
};
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index 7a42f703e858..af873044d33a 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -631,9 +631,8 @@ static unw_accessors_t accessors = {
static int _unwind__prepare_access(struct thread *thread)
{
- if (callchain_param.record_mode != CALLCHAIN_DWARF)
+ if (!dwarf_callchain_users)
return 0;
-
thread->addr_space = unw_create_addr_space(&accessors, 0);
if (!thread->addr_space) {
pr_err("unwind: Can't create unwind address space.\n");
@@ -646,17 +645,15 @@ static int _unwind__prepare_access(struct thread *thread)
static void _unwind__flush_access(struct thread *thread)
{
- if (callchain_param.record_mode != CALLCHAIN_DWARF)
+ if (!dwarf_callchain_users)
return;
-
unw_flush_cache(thread->addr_space, 0, 0);
}
static void _unwind__finish_access(struct thread *thread)
{
- if (callchain_param.record_mode != CALLCHAIN_DWARF)
+ if (!dwarf_callchain_users)
return;
-
unw_destroy_addr_space(thread->addr_space);
}
diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
index 647a1e6b4c7b..b029a5e9ae49 100644
--- a/tools/perf/util/unwind-libunwind.c
+++ b/tools/perf/util/unwind-libunwind.c
@@ -3,7 +3,7 @@
#include "thread.h"
#include "session.h"
#include "debug.h"
-#include "arch/common.h"
+#include "env.h"
struct unwind_libunwind_ops __weak *local_unwind_libunwind_ops;
struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops;
@@ -39,7 +39,7 @@ int unwind__prepare_access(struct thread *thread, struct map *map,
if (dso_type == DSO__TYPE_UNKNOWN)
return 0;
- arch = normalize_arch(thread->mg->machine->env->arch);
+ arch = perf_env__arch(thread->mg->machine->env);
if (!strcmp(arch, "x86")) {
if (dso_type != DSO__TYPE_64BIT)
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index a789f952b3e9..1019bbc5dbd8 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -210,7 +210,7 @@ static int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64
size -= ret;
off_in += ret;
- off_out -= ret;
+ off_out += ret;
}
munmap(ptr, off_in + size);
@@ -340,35 +340,15 @@ size_t hex_width(u64 v)
return n;
}
-static int hex(char ch)
-{
- if ((ch >= '0') && (ch <= '9'))
- return ch - '0';
- if ((ch >= 'a') && (ch <= 'f'))
- return ch - 'a' + 10;
- if ((ch >= 'A') && (ch <= 'F'))
- return ch - 'A' + 10;
- return -1;
-}
-
/*
* While we find nice hex chars, build a long_val.
* Return number of chars processed.
*/
int hex2u64(const char *ptr, u64 *long_val)
{
- const char *p = ptr;
- *long_val = 0;
-
- while (*p) {
- const int hex_val = hex(*p);
+ char *p;
- if (hex_val < 0)
- break;
-
- *long_val = (*long_val << 4) | hex_val;
- p++;
- }
+ *long_val = strtoull(ptr, &p, 16);
return p - ptr;
}
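
The rewrite delegates hex parsing to strtoull() and, as before, reports how many characters were consumed. A quick standalone check of that behaviour (the input line is invented; note that strtoull() is slightly more permissive than the removed hand-rolled parser, e.g. it also accepts an optional "0x" prefix):

  #include <inttypes.h>
  #include <stdio.h>
  #include <stdlib.h>

  static int demo_hex2u64(const char *ptr, uint64_t *long_val)
  {
      char *p;

      *long_val = strtoull(ptr, &p, 16);
      return p - ptr;
  }

  int main(void)
  {
      uint64_t val;
      int used = demo_hex2u64("ffffffff81000000 T _stext", &val);

      printf("parsed %d chars, value %#" PRIx64 "\n", used, val);
      return 0;
  }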
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 01434509c2e9..9496365da3d7 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -68,4 +68,14 @@ extern bool perf_singlethreaded;
void perf_set_singlethreaded(void);
void perf_set_multithreaded(void);
+#ifndef O_CLOEXEC
+#ifdef __sparc__
+#define O_CLOEXEC 0x400000
+#elif defined(__alpha__) || defined(__hppa__)
+#define O_CLOEXEC 010000000
+#else
+#define O_CLOEXEC 02000000
+#endif
+#endif
+
#endif /* GIT_COMPAT_UTIL_H */
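
The fallback definitions above let the code pass O_CLOEXEC unconditionally even when building against C libraries whose headers predate the flag. A small usage sketch under that assumption (the path is just an example):

  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  /* Same idea as the util.h fallback: provide O_CLOEXEC if the headers lack it. */
  #ifndef O_CLOEXEC
  #define O_CLOEXEC 02000000
  #endif

  int main(void)
  {
      int fd = open("/proc/self/status", O_RDONLY | O_CLOEXEC);

      if (fd < 0) {
          perror("open");
          return 1;
      }
      /* The descriptor will not leak across execve() into child helpers. */
      close(fd);
      return 0;
  }

The literal value only matters on such old systems and, as the patch shows, it is architecture dependent (sparc, alpha and hppa differ).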
diff --git a/tools/power/acpi/common/cmfsize.c b/tools/power/acpi/common/cmfsize.c
index 5b38dc2fec4f..dfa6fee1b915 100644
--- a/tools/power/acpi/common/cmfsize.c
+++ b/tools/power/acpi/common/cmfsize.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/common/getopt.c b/tools/power/acpi/common/getopt.c
index 6e78413bb2cb..f7032c9ab35e 100644
--- a/tools/power/acpi/common/getopt.c
+++ b/tools/power/acpi/common/getopt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
index 52a39ecf5ca1..e7347edfd4f9 100644
--- a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
+++ b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/os_specific/service_layers/osunixdir.c b/tools/power/acpi/os_specific/service_layers/osunixdir.c
index ea14eaeb268f..9b5e61b636d9 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixdir.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixdir.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/os_specific/service_layers/osunixmap.c b/tools/power/acpi/os_specific/service_layers/osunixmap.c
index cf9b5a54df92..8b26924e2602 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixmap.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixmap.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/os_specific/service_layers/osunixxf.c b/tools/power/acpi/os_specific/service_layers/osunixxf.c
index 025c1b07049d..34c044da433c 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixxf.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixxf.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/tools/acpidump/acpidump.h b/tools/power/acpi/tools/acpidump/acpidump.h
index d6aa40fce2b1..3c679607d1c3 100644
--- a/tools/power/acpi/tools/acpidump/acpidump.h
+++ b/tools/power/acpi/tools/acpidump/acpidump.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/tools/acpidump/apdump.c b/tools/power/acpi/tools/acpidump/apdump.c
index 0634449156d8..9ad1712e4cf9 100644
--- a/tools/power/acpi/tools/acpidump/apdump.c
+++ b/tools/power/acpi/tools/acpidump/apdump.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/tools/acpidump/apfiles.c b/tools/power/acpi/tools/acpidump/apfiles.c
index d686e11936c4..856e1b83407b 100644
--- a/tools/power/acpi/tools/acpidump/apfiles.c
+++ b/tools/power/acpi/tools/acpidump/apfiles.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/tools/acpidump/apmain.c b/tools/power/acpi/tools/acpidump/apmain.c
index 22c3b4ee1617..f4ef826800cf 100644
--- a/tools/power/acpi/tools/acpidump/apmain.c
+++ b/tools/power/acpi/tools/acpidump/apmain.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -79,7 +79,7 @@ struct ap_dump_action action_table[AP_MAX_ACTIONS];
u32 current_action = 0;
#define AP_UTILITY_NAME "ACPI Binary Table Dump Utility"
-#define AP_SUPPORTED_OPTIONS "?a:bc:f:hn:o:r:svxz"
+#define AP_SUPPORTED_OPTIONS "?a:bc:f:hn:o:r:sv^xz"
/******************************************************************************
*
@@ -100,6 +100,7 @@ static void ap_display_usage(void)
ACPI_OPTION("-r <Address>", "Dump tables from specified RSDP");
ACPI_OPTION("-s", "Print table summaries only");
ACPI_OPTION("-v", "Display version information");
+ ACPI_OPTION("-vd", "Display build date and time");
ACPI_OPTION("-z", "Verbose mode");
ACPI_USAGE_TEXT("\nTable Options:\n");
@@ -231,10 +232,29 @@ static int ap_do_options(int argc, char **argv)
}
continue;
- case 'v': /* Revision/version */
+ case 'v': /* -v: (Version): signon already emitted, just exit */
- acpi_os_printf(ACPI_COMMON_SIGNON(AP_UTILITY_NAME));
- return (1);
+ switch (acpi_gbl_optarg[0]) {
+ case '^': /* -v: (Version) */
+
+ fprintf(stderr,
+ ACPI_COMMON_SIGNON(AP_UTILITY_NAME));
+ return (1);
+
+ case 'd':
+
+ fprintf(stderr,
+ ACPI_COMMON_SIGNON(AP_UTILITY_NAME));
+ printf(ACPI_COMMON_BUILD_TIME);
+ return (1);
+
+ default:
+
+ printf("Unknown option: -v%s\n",
+ acpi_gbl_optarg);
+ return (-1);
+ }
+ break;
case 'z': /* Verbose mode */
diff --git a/tools/power/cpupower/bench/system.c b/tools/power/cpupower/bench/system.c
index c25a74ae51ba..2bb3eef7d5c1 100644
--- a/tools/power/cpupower/bench/system.c
+++ b/tools/power/cpupower/bench/system.c
@@ -61,7 +61,7 @@ int set_cpufreq_governor(char *governor, unsigned int cpu)
dprintf("set %s as cpufreq governor\n", governor);
- if (cpupower_is_cpu_online(cpu) != 0) {
+ if (cpupower_is_cpu_online(cpu) != 1) {
perror("cpufreq_cpu_exists");
fprintf(stderr, "error: cpu %u does not exist\n", cpu);
return -1;
diff --git a/tools/power/cpupower/lib/cpufreq.h b/tools/power/cpupower/lib/cpufreq.h
index 3b005c39f068..60beaf5ed2ea 100644
--- a/tools/power/cpupower/lib/cpufreq.h
+++ b/tools/power/cpupower/lib/cpufreq.h
@@ -11,10 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __CPUPOWER_CPUFREQ_H__
diff --git a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
index 1b5da0066ebf..5b3205f16217 100644
--- a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
+++ b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
@@ -130,15 +130,18 @@ static struct cpuidle_monitor *cpuidle_register(void)
{
int num;
char *tmp;
+ int this_cpu;
+
+ this_cpu = sched_getcpu();
/* Assume idle state count is the same for all CPUs */
- cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(0);
+ cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(this_cpu);
if (cpuidle_sysfs_monitor.hw_states_num <= 0)
return NULL;
for (num = 0; num < cpuidle_sysfs_monitor.hw_states_num; num++) {
- tmp = cpuidle_state_name(0, num);
+ tmp = cpuidle_state_name(this_cpu, num);
if (tmp == NULL)
continue;
@@ -146,7 +149,7 @@ static struct cpuidle_monitor *cpuidle_register(void)
strncpy(cpuidle_cstates[num].name, tmp, CSTATE_NAME_LEN - 1);
free(tmp);
- tmp = cpuidle_state_desc(0, num);
+ tmp = cpuidle_state_desc(this_cpu, num);
if (tmp == NULL)
continue;
strncpy(cpuidle_cstates[num].desc, tmp, CSTATE_DESC_LEN - 1);
diff --git a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
index 0b24dd9d01ff..29f50d4cfea0 100755
--- a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
+++ b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
@@ -411,6 +411,16 @@ def set_trace_buffer_size():
print('IO error setting trace buffer size ')
quit()
+def free_trace_buffer():
+ """ Free the trace buffer memory """
+
+ try:
+ open('/sys/kernel/debug/tracing/buffer_size_kb'
+ , 'w').write("1")
+ except:
+ print('IO error setting trace buffer size ')
+ quit()
+
def read_trace_data(filename):
""" Read and parse trace data """
@@ -583,4 +593,9 @@ for root, dirs, files in os.walk('.'):
for f in files:
fix_ownership(f)
+clear_trace_file()
+# Free the memory
+if interval:
+ free_trace_buffer()
+
os.chdir('../../')
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index 3fab179b1aba..fcb3ed0be5f8 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -99,5 +99,6 @@ ifneq ($(silent),1)
QUIET_CLEAN = @printf ' CLEAN %s\n' $1;
QUIET_INSTALL = @printf ' INSTALL %s\n' $1;
+ QUIET_UNINST = @printf ' UNINST %s\n' $1;
endif
endif
diff --git a/tools/testing/nvdimm/Kbuild b/tools/testing/nvdimm/Kbuild
index db33b28c5ef3..0392153a0009 100644
--- a/tools/testing/nvdimm/Kbuild
+++ b/tools/testing/nvdimm/Kbuild
@@ -37,10 +37,12 @@ obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem.o
nfit-y := $(ACPI_SRC)/core.o
nfit-$(CONFIG_X86_MCE) += $(ACPI_SRC)/mce.o
+nfit-y += acpi_nfit_test.o
nfit-y += config_check.o
nd_pmem-y := $(NVDIMM_SRC)/pmem.o
nd_pmem-y += pmem-dax.o
+nd_pmem-y += pmem_test.o
nd_pmem-y += config_check.o
nd_btt-y := $(NVDIMM_SRC)/btt.o
@@ -57,6 +59,7 @@ dax-y += config_check.o
device_dax-y := $(DAX_SRC)/device.o
device_dax-y += dax-dev.o
+device_dax-y += device_dax_test.o
device_dax-y += config_check.o
dax_pmem-y := $(DAX_SRC)/pmem.o
@@ -75,6 +78,7 @@ libnvdimm-$(CONFIG_ND_CLAIM) += $(NVDIMM_SRC)/claim.o
libnvdimm-$(CONFIG_BTT) += $(NVDIMM_SRC)/btt_devs.o
libnvdimm-$(CONFIG_NVDIMM_PFN) += $(NVDIMM_SRC)/pfn_devs.o
libnvdimm-$(CONFIG_NVDIMM_DAX) += $(NVDIMM_SRC)/dax_devs.o
+libnvdimm-y += libnvdimm_test.o
libnvdimm-y += config_check.o
obj-m += test/
diff --git a/tools/testing/nvdimm/acpi_nfit_test.c b/tools/testing/nvdimm/acpi_nfit_test.c
new file mode 100644
index 000000000000..43521512e577
--- /dev/null
+++ b/tools/testing/nvdimm/acpi_nfit_test.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+
+#include <linux/module.h>
+#include <linux/printk.h>
+#include "watermark.h"
+
+nfit_test_watermark(acpi_nfit);
diff --git a/tools/testing/nvdimm/device_dax_test.c b/tools/testing/nvdimm/device_dax_test.c
new file mode 100644
index 000000000000..24b17bf42429
--- /dev/null
+++ b/tools/testing/nvdimm/device_dax_test.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+
+#include <linux/module.h>
+#include <linux/printk.h>
+#include "watermark.h"
+
+nfit_test_watermark(device_dax);
diff --git a/tools/testing/nvdimm/libnvdimm_test.c b/tools/testing/nvdimm/libnvdimm_test.c
new file mode 100644
index 000000000000..00ca30b23932
--- /dev/null
+++ b/tools/testing/nvdimm/libnvdimm_test.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+
+#include <linux/module.h>
+#include <linux/printk.h>
+#include "watermark.h"
+
+nfit_test_watermark(libnvdimm);
diff --git a/tools/testing/nvdimm/pmem_test.c b/tools/testing/nvdimm/pmem_test.c
new file mode 100644
index 000000000000..fd38f92275cf
--- /dev/null
+++ b/tools/testing/nvdimm/pmem_test.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+
+#include <linux/module.h>
+#include <linux/printk.h>
+#include "watermark.h"
+
+nfit_test_watermark(pmem);
diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
index e1f75a1914a1..ff9d3a5825e1 100644
--- a/tools/testing/nvdimm/test/iomap.c
+++ b/tools/testing/nvdimm/test/iomap.c
@@ -104,15 +104,14 @@ void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
}
EXPORT_SYMBOL(__wrap_devm_memremap);
-void *__wrap_devm_memremap_pages(struct device *dev, struct resource *res,
- struct percpu_ref *ref, struct vmem_altmap *altmap)
+void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
- resource_size_t offset = res->start;
+ resource_size_t offset = pgmap->res.start;
struct nfit_test_resource *nfit_res = get_nfit_res(offset);
if (nfit_res)
return nfit_res->buf + offset - nfit_res->res.start;
- return devm_memremap_pages(dev, res, ref, altmap);
+ return devm_memremap_pages(dev, pgmap);
}
EXPORT_SYMBOL(__wrap_devm_memremap_pages);
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 7217b2b953b5..620fa78b3b1b 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -27,6 +27,7 @@
#include <nfit.h>
#include <nd.h>
#include "nfit_test.h"
+#include "../watermark.h"
/*
* Generate an NFIT table to describe the following topology:
@@ -137,6 +138,14 @@ static u32 handle[] = {
static unsigned long dimm_fail_cmd_flags[NUM_DCR];
+struct nfit_test_fw {
+ enum intel_fw_update_state state;
+ u32 context;
+ u64 version;
+ u32 size_received;
+ u64 end_time;
+};
+
struct nfit_test {
struct acpi_nfit_desc acpi_desc;
struct platform_device pdev;
@@ -168,8 +177,11 @@ struct nfit_test {
spinlock_t lock;
} ars_state;
struct device *dimm_dev[NUM_DCR];
+ struct nd_intel_smart *smart;
+ struct nd_intel_smart_threshold *smart_threshold;
struct badrange badrange;
struct work_struct work;
+ struct nfit_test_fw *fw;
};
static struct workqueue_struct *nfit_wq;
@@ -181,6 +193,226 @@ static struct nfit_test *to_nfit_test(struct device *dev)
return container_of(pdev, struct nfit_test, pdev);
}
+static int nd_intel_test_get_fw_info(struct nfit_test *t,
+ struct nd_intel_fw_info *nd_cmd, unsigned int buf_len,
+ int idx)
+{
+ struct device *dev = &t->pdev.dev;
+ struct nfit_test_fw *fw = &t->fw[idx];
+
+ dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p, buf_len: %u, idx: %d\n",
+ __func__, t, nd_cmd, buf_len, idx);
+
+ if (buf_len < sizeof(*nd_cmd))
+ return -EINVAL;
+
+ nd_cmd->status = 0;
+ nd_cmd->storage_size = INTEL_FW_STORAGE_SIZE;
+ nd_cmd->max_send_len = INTEL_FW_MAX_SEND_LEN;
+ nd_cmd->query_interval = INTEL_FW_QUERY_INTERVAL;
+ nd_cmd->max_query_time = INTEL_FW_QUERY_MAX_TIME;
+ nd_cmd->update_cap = 0;
+ nd_cmd->fis_version = INTEL_FW_FIS_VERSION;
+ nd_cmd->run_version = 0;
+ nd_cmd->updated_version = fw->version;
+
+ return 0;
+}
+
+static int nd_intel_test_start_update(struct nfit_test *t,
+ struct nd_intel_fw_start *nd_cmd, unsigned int buf_len,
+ int idx)
+{
+ struct device *dev = &t->pdev.dev;
+ struct nfit_test_fw *fw = &t->fw[idx];
+
+ dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
+ __func__, t, nd_cmd, buf_len, idx);
+
+ if (buf_len < sizeof(*nd_cmd))
+ return -EINVAL;
+
+ if (fw->state != FW_STATE_NEW) {
+ /* extended status, FW update in progress */
+ nd_cmd->status = 0x10007;
+ return 0;
+ }
+
+ fw->state = FW_STATE_IN_PROGRESS;
+ fw->context++;
+ fw->size_received = 0;
+ nd_cmd->status = 0;
+ nd_cmd->context = fw->context;
+
+ dev_dbg(dev, "%s: context issued: %#x\n", __func__, nd_cmd->context);
+
+ return 0;
+}
+
+static int nd_intel_test_send_data(struct nfit_test *t,
+ struct nd_intel_fw_send_data *nd_cmd, unsigned int buf_len,
+ int idx)
+{
+ struct device *dev = &t->pdev.dev;
+ struct nfit_test_fw *fw = &t->fw[idx];
+ u32 *status = (u32 *)&nd_cmd->data[nd_cmd->length];
+
+ dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
+ __func__, t, nd_cmd, buf_len, idx);
+
+ if (buf_len < sizeof(*nd_cmd))
+ return -EINVAL;
+
+ dev_dbg(dev, "%s: cmd->status: %#x\n", __func__, *status);
+ dev_dbg(dev, "%s: cmd->data[0]: %#x\n", __func__, nd_cmd->data[0]);
+ dev_dbg(dev, "%s: cmd->data[%u]: %#x\n", __func__, nd_cmd->length-1,
+ nd_cmd->data[nd_cmd->length-1]);
+
+ if (fw->state != FW_STATE_IN_PROGRESS) {
+ dev_dbg(dev, "%s: not in IN_PROGRESS state\n", __func__);
+ *status = 0x5;
+ return 0;
+ }
+
+ if (nd_cmd->context != fw->context) {
+ dev_dbg(dev, "%s: incorrect context: in: %#x correct: %#x\n",
+ __func__, nd_cmd->context, fw->context);
+ *status = 0x10007;
+ return 0;
+ }
+
+ /*
+ * Reject the transfer if offset + length exceeds the firmware
+ * storage size, or if the length exceeds the maximum send length.
+ */
+ if (nd_cmd->offset + nd_cmd->length > INTEL_FW_STORAGE_SIZE ||
+ nd_cmd->length > INTEL_FW_MAX_SEND_LEN) {
+ *status = 0x3;
+ dev_dbg(dev, "%s: buffer boundary violation\n", __func__);
+ return 0;
+ }
+
+ fw->size_received += nd_cmd->length;
+ dev_dbg(dev, "%s: copying %u bytes, %u bytes so far\n",
+ __func__, nd_cmd->length, fw->size_received);
+ *status = 0;
+ return 0;
+}
+
+static int nd_intel_test_finish_fw(struct nfit_test *t,
+ struct nd_intel_fw_finish_update *nd_cmd,
+ unsigned int buf_len, int idx)
+{
+ struct device *dev = &t->pdev.dev;
+ struct nfit_test_fw *fw = &t->fw[idx];
+
+ dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
+ __func__, t, nd_cmd, buf_len, idx);
+
+ if (fw->state == FW_STATE_UPDATED) {
+ /* update already done, need cold boot */
+ nd_cmd->status = 0x20007;
+ return 0;
+ }
+
+ dev_dbg(dev, "%s: context: %#x ctrl_flags: %#x\n",
+ __func__, nd_cmd->context, nd_cmd->ctrl_flags);
+
+ switch (nd_cmd->ctrl_flags) {
+ case 0: /* finish */
+ if (nd_cmd->context != fw->context) {
+ dev_dbg(dev, "%s: incorrect context: in: %#x correct: %#x\n",
+ __func__, nd_cmd->context,
+ fw->context);
+ nd_cmd->status = 0x10007;
+ return 0;
+ }
+ nd_cmd->status = 0;
+ fw->state = FW_STATE_VERIFY;
+ /* set 1 second of time for firmware "update" */
+ fw->end_time = jiffies + HZ;
+ break;
+
+ case 1: /* abort */
+ fw->size_received = 0;
+ /* successfully aborted status */
+ nd_cmd->status = 0x40007;
+ fw->state = FW_STATE_NEW;
+ dev_dbg(dev, "%s: abort successful\n", __func__);
+ break;
+
+ default: /* bad control flag */
+ dev_warn(dev, "%s: unknown control flag: %#x\n",
+ __func__, nd_cmd->ctrl_flags);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int nd_intel_test_finish_query(struct nfit_test *t,
+ struct nd_intel_fw_finish_query *nd_cmd,
+ unsigned int buf_len, int idx)
+{
+ struct device *dev = &t->pdev.dev;
+ struct nfit_test_fw *fw = &t->fw[idx];
+
+ dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
+ __func__, t, nd_cmd, buf_len, idx);
+
+ if (buf_len < sizeof(*nd_cmd))
+ return -EINVAL;
+
+ if (nd_cmd->context != fw->context) {
+ dev_dbg(dev, "%s: incorrect context: in: %#x correct: %#x\n",
+ __func__, nd_cmd->context, fw->context);
+ nd_cmd->status = 0x10007;
+ return 0;
+ }
+
+ dev_dbg(dev, "%s context: %#x\n", __func__, nd_cmd->context);
+
+ switch (fw->state) {
+ case FW_STATE_NEW:
+ nd_cmd->updated_fw_rev = 0;
+ nd_cmd->status = 0;
+ dev_dbg(dev, "%s: new state\n", __func__);
+ break;
+
+ case FW_STATE_IN_PROGRESS:
+ /* sequencing error */
+ nd_cmd->status = 0x40007;
+ nd_cmd->updated_fw_rev = 0;
+ dev_dbg(dev, "%s: sequence error\n", __func__);
+ break;
+
+ case FW_STATE_VERIFY:
+ if (time_is_after_jiffies64(fw->end_time)) {
+ nd_cmd->updated_fw_rev = 0;
+ nd_cmd->status = 0x20007;
+ dev_dbg(dev, "%s: still verifying\n", __func__);
+ break;
+ }
+
+ dev_dbg(dev, "%s: transition out verify\n", __func__);
+ fw->state = FW_STATE_UPDATED;
+ /* we are going to fall through if it's "done" */
+ case FW_STATE_UPDATED:
+ nd_cmd->status = 0;
+ /* bogus test version */
+ fw->version = nd_cmd->updated_fw_rev =
+ INTEL_FW_FAKE_VERSION;
+ dev_dbg(dev, "%s: updated\n", __func__);
+ break;
+
+ default: /* we should never get here */
+ return -EINVAL;
+ }
+
+ return 0;
+}
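
Taken together, the nd_intel_test_* handlers above mock a firmware-update flow: FW_STATE_NEW -> IN_PROGRESS (start), -> VERIFY (finish with ctrl_flags 0), -> UPDATED (query after the fake one-second verify window), with abort returning to NEW. A compressed standalone sketch of those transitions (illustrative only; the send-data steps, context checks and status codes are omitted):

  #include <stdio.h>
  #include <string.h>

  enum fw_state { FW_STATE_NEW, FW_STATE_IN_PROGRESS, FW_STATE_VERIFY, FW_STATE_UPDATED };

  static const char * const names[] = { "NEW", "IN_PROGRESS", "VERIFY", "UPDATED" };

  static void step(enum fw_state *s, const char *op)
  {
      if (!strcmp(op, "start") && *s == FW_STATE_NEW)
          *s = FW_STATE_IN_PROGRESS;    /* nd_intel_test_start_update() */
      else if (!strcmp(op, "finish") && *s == FW_STATE_IN_PROGRESS)
          *s = FW_STATE_VERIFY;         /* nd_intel_test_finish_fw(), ctrl_flags == 0 */
      else if (!strcmp(op, "query") && *s == FW_STATE_VERIFY)
          *s = FW_STATE_UPDATED;        /* nd_intel_test_finish_query(), after end_time */
      else if (!strcmp(op, "abort"))
          *s = FW_STATE_NEW;            /* nd_intel_test_finish_fw(), ctrl_flags == 1 */
      printf("%-6s -> %s\n", op, names[*s]);
  }

  int main(void)
  {
      enum fw_state s = FW_STATE_NEW;

      step(&s, "start");
      step(&s, "finish");
      step(&s, "query");
      return 0;
  }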
+
static int nfit_test_cmd_get_config_size(struct nd_cmd_get_config_size *nd_cmd,
unsigned int buf_len)
{
@@ -440,39 +672,66 @@ static int nfit_test_cmd_translate_spa(struct nvdimm_bus *bus,
return 0;
}
-static int nfit_test_cmd_smart(struct nd_cmd_smart *smart, unsigned int buf_len)
+static int nfit_test_cmd_smart(struct nd_intel_smart *smart, unsigned int buf_len,
+ struct nd_intel_smart *smart_data)
{
- static const struct nd_smart_payload smart_data = {
- .flags = ND_SMART_HEALTH_VALID | ND_SMART_TEMP_VALID
- | ND_SMART_SPARES_VALID | ND_SMART_ALARM_VALID
- | ND_SMART_USED_VALID | ND_SMART_SHUTDOWN_VALID,
- .health = ND_SMART_NON_CRITICAL_HEALTH,
- .temperature = 23 * 16,
- .spares = 75,
- .alarm_flags = ND_SMART_SPARE_TRIP | ND_SMART_TEMP_TRIP,
- .life_used = 5,
- .shutdown_state = 0,
- .vendor_size = 0,
- };
-
if (buf_len < sizeof(*smart))
return -EINVAL;
- memcpy(smart->data, &smart_data, sizeof(smart_data));
+ memcpy(smart, smart_data, sizeof(*smart));
return 0;
}
-static int nfit_test_cmd_smart_threshold(struct nd_cmd_smart_threshold *smart_t,
- unsigned int buf_len)
+static int nfit_test_cmd_smart_threshold(
+ struct nd_intel_smart_threshold *out,
+ unsigned int buf_len,
+ struct nd_intel_smart_threshold *smart_t)
{
- static const struct nd_smart_threshold_payload smart_t_data = {
- .alarm_control = ND_SMART_SPARE_TRIP | ND_SMART_TEMP_TRIP,
- .temperature = 40 * 16,
- .spares = 5,
- };
-
if (buf_len < sizeof(*smart_t))
return -EINVAL;
- memcpy(smart_t->data, &smart_t_data, sizeof(smart_t_data));
+ memcpy(out, smart_t, sizeof(*smart_t));
+ return 0;
+}
+
+static void smart_notify(struct device *bus_dev,
+ struct device *dimm_dev, struct nd_intel_smart *smart,
+ struct nd_intel_smart_threshold *thresh)
+{
+ dev_dbg(dimm_dev, "%s: alarm: %#x spares: %d (%d) mtemp: %d (%d) ctemp: %d (%d)\n",
+ __func__, thresh->alarm_control, thresh->spares,
+ smart->spares, thresh->media_temperature,
+ smart->media_temperature, thresh->ctrl_temperature,
+ smart->ctrl_temperature);
+ if (((thresh->alarm_control & ND_INTEL_SMART_SPARE_TRIP)
+ && smart->spares
+ <= thresh->spares)
+ || ((thresh->alarm_control & ND_INTEL_SMART_TEMP_TRIP)
+ && smart->media_temperature
+ >= thresh->media_temperature)
+ || ((thresh->alarm_control & ND_INTEL_SMART_CTEMP_TRIP)
+ && smart->ctrl_temperature
+ >= thresh->ctrl_temperature)) {
+ device_lock(bus_dev);
+ __acpi_nvdimm_notify(dimm_dev, 0x81);
+ device_unlock(bus_dev);
+ }
+}
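
smart_notify() raises the 0x81 notification whenever an armed alarm crosses its threshold: spares at or below the limit, or a media/controller temperature at or above it. A standalone sketch of just that comparison (the trip-bit values are assumed for the sketch; the sample numbers mirror the smart_init() defaults further down):

  #include <stdbool.h>
  #include <stdio.h>

  #define DEMO_SPARE_TRIP  (1 << 0)    /* bit assignments assumed for illustration */
  #define DEMO_TEMP_TRIP   (1 << 1)
  #define DEMO_CTEMP_TRIP  (1 << 2)

  struct demo_smart     { unsigned spares, media_temperature, ctrl_temperature; };
  struct demo_threshold { unsigned alarm_control, spares, media_temperature, ctrl_temperature; };

  static bool demo_should_notify(const struct demo_smart *s, const struct demo_threshold *t)
  {
      return ((t->alarm_control & DEMO_SPARE_TRIP) && s->spares <= t->spares) ||
             ((t->alarm_control & DEMO_TEMP_TRIP) &&
              s->media_temperature >= t->media_temperature) ||
             ((t->alarm_control & DEMO_CTEMP_TRIP) &&
              s->ctrl_temperature >= t->ctrl_temperature);
  }

  int main(void)
  {
      /* smart_init() defaults: 75 spares vs a trip point of 5, 23C vs 40C (x16 fixed point). */
      struct demo_smart smart = { .spares = 75, .media_temperature = 23 * 16, .ctrl_temperature = 30 * 16 };
      struct demo_threshold th = {
          .alarm_control = DEMO_SPARE_TRIP | DEMO_TEMP_TRIP,
          .spares = 5, .media_temperature = 40 * 16, .ctrl_temperature = 30 * 16,
      };

      printf("notify: %d\n", demo_should_notify(&smart, &th));    /* 0: nothing tripped */

      th.media_temperature = 20 * 16;    /* lower the media temperature trip point */
      printf("notify: %d\n", demo_should_notify(&smart, &th));    /* 1: temperature alarm */
      return 0;
  }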
+
+static int nfit_test_cmd_smart_set_threshold(
+ struct nd_intel_smart_set_threshold *in,
+ unsigned int buf_len,
+ struct nd_intel_smart_threshold *thresh,
+ struct nd_intel_smart *smart,
+ struct device *bus_dev, struct device *dimm_dev)
+{
+ unsigned int size;
+
+ size = sizeof(*in) - 4;
+ if (buf_len < size)
+ return -EINVAL;
+ memcpy(thresh->data, in, size);
+ in->status = 0;
+ smart_notify(bus_dev, dimm_dev, smart, thresh);
+
return 0;
}
@@ -563,6 +822,52 @@ static int nfit_test_cmd_ars_inject_status(struct nfit_test *t,
return 0;
}
+static int nd_intel_test_cmd_set_lss_status(struct nfit_test *t,
+ struct nd_intel_lss *nd_cmd, unsigned int buf_len)
+{
+ struct device *dev = &t->pdev.dev;
+
+ if (buf_len < sizeof(*nd_cmd))
+ return -EINVAL;
+
+ switch (nd_cmd->enable) {
+ case 0:
+ nd_cmd->status = 0;
+ dev_dbg(dev, "%s: Latch System Shutdown Status disabled\n",
+ __func__);
+ break;
+ case 1:
+ nd_cmd->status = 0;
+ dev_dbg(dev, "%s: Latch System Shutdown Status enabled\n",
+ __func__);
+ break;
+ default:
+ dev_warn(dev, "Unknown enable value: %#x\n", nd_cmd->enable);
+ nd_cmd->status = 0x3;
+ break;
+ }
+
+ return 0;
+}
+
+static int get_dimm(struct nfit_mem *nfit_mem, unsigned int func)
+{
+ int i;
+
+ /* lookup per-dimm data */
+ for (i = 0; i < ARRAY_SIZE(handle); i++)
+ if (__to_nfit_memdev(nfit_mem)->device_handle == handle[i])
+ break;
+ if (i >= ARRAY_SIZE(handle))
+ return -ENXIO;
+
+ if ((1 << func) & dimm_fail_cmd_flags[i])
+ return -EIO;
+
+ return i;
+}
+
static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm *nvdimm, unsigned int cmd, void *buf,
unsigned int buf_len, int *cmd_rc)
@@ -591,22 +896,57 @@ static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
func = call_pkg->nd_command;
if (call_pkg->nd_family != nfit_mem->family)
return -ENOTTY;
+
+ i = get_dimm(nfit_mem, func);
+ if (i < 0)
+ return i;
+
+ switch (func) {
+ case ND_INTEL_ENABLE_LSS_STATUS:
+ return nd_intel_test_cmd_set_lss_status(t,
+ buf, buf_len);
+ case ND_INTEL_FW_GET_INFO:
+ return nd_intel_test_get_fw_info(t, buf,
+ buf_len, i - t->dcr_idx);
+ case ND_INTEL_FW_START_UPDATE:
+ return nd_intel_test_start_update(t, buf,
+ buf_len, i - t->dcr_idx);
+ case ND_INTEL_FW_SEND_DATA:
+ return nd_intel_test_send_data(t, buf,
+ buf_len, i - t->dcr_idx);
+ case ND_INTEL_FW_FINISH_UPDATE:
+ return nd_intel_test_finish_fw(t, buf,
+ buf_len, i - t->dcr_idx);
+ case ND_INTEL_FW_FINISH_QUERY:
+ return nd_intel_test_finish_query(t, buf,
+ buf_len, i - t->dcr_idx);
+ case ND_INTEL_SMART:
+ return nfit_test_cmd_smart(buf, buf_len,
+ &t->smart[i - t->dcr_idx]);
+ case ND_INTEL_SMART_THRESHOLD:
+ return nfit_test_cmd_smart_threshold(buf,
+ buf_len,
+ &t->smart_threshold[i -
+ t->dcr_idx]);
+ case ND_INTEL_SMART_SET_THRESHOLD:
+ return nfit_test_cmd_smart_set_threshold(buf,
+ buf_len,
+ &t->smart_threshold[i -
+ t->dcr_idx],
+ &t->smart[i - t->dcr_idx],
+ &t->pdev.dev, t->dimm_dev[i]);
+ default:
+ return -ENOTTY;
+ }
}
if (!test_bit(cmd, &cmd_mask)
|| !test_bit(func, &nfit_mem->dsm_mask))
return -ENOTTY;
- /* lookup label space for the given dimm */
- for (i = 0; i < ARRAY_SIZE(handle); i++)
- if (__to_nfit_memdev(nfit_mem)->device_handle ==
- handle[i])
- break;
- if (i >= ARRAY_SIZE(handle))
- return -ENXIO;
-
- if ((1 << func) & dimm_fail_cmd_flags[i])
- return -EIO;
+ i = get_dimm(nfit_mem, func);
+ if (i < 0)
+ return i;
switch (func) {
case ND_CMD_GET_CONFIG_SIZE:
@@ -620,15 +960,6 @@ static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
rc = nfit_test_cmd_set_config_data(buf, buf_len,
t->label[i - t->dcr_idx]);
break;
- case ND_CMD_SMART:
- rc = nfit_test_cmd_smart(buf, buf_len);
- break;
- case ND_CMD_SMART_THRESHOLD:
- rc = nfit_test_cmd_smart_threshold(buf, buf_len);
- device_lock(&t->pdev.dev);
- __acpi_nvdimm_notify(t->dimm_dev[i], 0x81);
- device_unlock(&t->pdev.dev);
- break;
default:
return -ENOTTY;
}
@@ -872,6 +1203,44 @@ static const struct attribute_group *nfit_test_dimm_attribute_groups[] = {
NULL,
};
+static void smart_init(struct nfit_test *t)
+{
+ int i;
+ const struct nd_intel_smart_threshold smart_t_data = {
+ .alarm_control = ND_INTEL_SMART_SPARE_TRIP
+ | ND_INTEL_SMART_TEMP_TRIP,
+ .media_temperature = 40 * 16,
+ .ctrl_temperature = 30 * 16,
+ .spares = 5,
+ };
+ const struct nd_intel_smart smart_data = {
+ .flags = ND_INTEL_SMART_HEALTH_VALID
+ | ND_INTEL_SMART_SPARES_VALID
+ | ND_INTEL_SMART_ALARM_VALID
+ | ND_INTEL_SMART_USED_VALID
+ | ND_INTEL_SMART_SHUTDOWN_VALID
+ | ND_INTEL_SMART_MTEMP_VALID,
+ .health = ND_INTEL_SMART_NON_CRITICAL_HEALTH,
+ .media_temperature = 23 * 16,
+ .ctrl_temperature = 30 * 16,
+ .pmic_temperature = 40 * 16,
+ .spares = 75,
+ .alarm_flags = ND_INTEL_SMART_SPARE_TRIP
+ | ND_INTEL_SMART_TEMP_TRIP,
+ .ait_status = 1,
+ .life_used = 5,
+ .shutdown_state = 0,
+ .vendor_size = 0,
+ .shutdown_count = 100,
+ };
+
+ for (i = 0; i < t->num_dcr; i++) {
+ memcpy(&t->smart[i], &smart_data, sizeof(smart_data));
+ memcpy(&t->smart_threshold[i], &smart_t_data,
+ sizeof(smart_t_data));
+ }
+}
+
static int nfit_test0_alloc(struct nfit_test *t)
{
size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA
@@ -881,7 +1250,8 @@ static int nfit_test0_alloc(struct nfit_test *t)
window_size) * NUM_DCR
+ sizeof(struct acpi_nfit_data_region) * NUM_BDW
+ (sizeof(struct acpi_nfit_flush_address)
- + sizeof(u64) * NUM_HINTS) * NUM_DCR;
+ + sizeof(u64) * NUM_HINTS) * NUM_DCR
+ + sizeof(struct acpi_nfit_capabilities);
int i;
t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
@@ -939,6 +1309,7 @@ static int nfit_test0_alloc(struct nfit_test *t)
return -ENOMEM;
}
+ smart_init(t);
return ars_state_init(&t->pdev.dev, &t->ars_state);
}
@@ -969,6 +1340,7 @@ static int nfit_test1_alloc(struct nfit_test *t)
if (!t->spa_set[1])
return -ENOMEM;
+ smart_init(t);
return ars_state_init(&t->pdev.dev, &t->ars_state);
}
@@ -993,6 +1365,7 @@ static void nfit_test0_setup(struct nfit_test *t)
struct acpi_nfit_control_region *dcr;
struct acpi_nfit_data_region *bdw;
struct acpi_nfit_flush_address *flush;
+ struct acpi_nfit_capabilities *pcap;
unsigned int offset, i;
/*
@@ -1500,8 +1873,16 @@ static void nfit_test0_setup(struct nfit_test *t)
for (i = 0; i < NUM_HINTS; i++)
flush->hint_address[i] = t->flush_dma[3] + i * sizeof(u64);
+ /* platform capabilities */
+ pcap = nfit_buf + offset + flush_hint_size * 4;
+ pcap->header.type = ACPI_NFIT_TYPE_CAPABILITIES;
+ pcap->header.length = sizeof(*pcap);
+ pcap->highest_capability = 1;
+ pcap->capabilities = ACPI_NFIT_CAPABILITY_CACHE_FLUSH |
+ ACPI_NFIT_CAPABILITY_MEM_FLUSH;
+
if (t->setup_hotplug) {
- offset = offset + flush_hint_size * 4;
+ offset = offset + flush_hint_size * 4 + sizeof(*pcap);
/* dcr-descriptor4: blk */
dcr = nfit_buf + offset;
dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
@@ -1642,17 +2023,24 @@ static void nfit_test0_setup(struct nfit_test *t)
set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en);
set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
- set_bit(ND_CMD_SMART, &acpi_desc->dimm_cmd_force_en);
+ set_bit(ND_INTEL_SMART, &acpi_desc->dimm_cmd_force_en);
+ set_bit(ND_INTEL_SMART_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
+ set_bit(ND_INTEL_SMART_SET_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
set_bit(ND_CMD_CALL, &acpi_desc->bus_cmd_force_en);
- set_bit(ND_CMD_SMART_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
set_bit(NFIT_CMD_TRANSLATE_SPA, &acpi_desc->bus_nfit_cmd_force_en);
set_bit(NFIT_CMD_ARS_INJECT_SET, &acpi_desc->bus_nfit_cmd_force_en);
set_bit(NFIT_CMD_ARS_INJECT_CLEAR, &acpi_desc->bus_nfit_cmd_force_en);
set_bit(NFIT_CMD_ARS_INJECT_GET, &acpi_desc->bus_nfit_cmd_force_en);
+ set_bit(ND_INTEL_FW_GET_INFO, &acpi_desc->dimm_cmd_force_en);
+ set_bit(ND_INTEL_FW_START_UPDATE, &acpi_desc->dimm_cmd_force_en);
+ set_bit(ND_INTEL_FW_SEND_DATA, &acpi_desc->dimm_cmd_force_en);
+ set_bit(ND_INTEL_FW_FINISH_UPDATE, &acpi_desc->dimm_cmd_force_en);
+ set_bit(ND_INTEL_FW_FINISH_QUERY, &acpi_desc->dimm_cmd_force_en);
+ set_bit(ND_INTEL_ENABLE_LSS_STATUS, &acpi_desc->dimm_cmd_force_en);
}
static void nfit_test1_setup(struct nfit_test *t)
@@ -1750,6 +2138,7 @@ static void nfit_test1_setup(struct nfit_test *t)
set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
+ set_bit(ND_INTEL_ENABLE_LSS_STATUS, &acpi_desc->dimm_cmd_force_en);
}
static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
@@ -2054,10 +2443,18 @@ static int nfit_test_probe(struct platform_device *pdev)
sizeof(struct nfit_test_dcr *), GFP_KERNEL);
nfit_test->dcr_dma = devm_kcalloc(dev, num,
sizeof(dma_addr_t), GFP_KERNEL);
+ nfit_test->smart = devm_kcalloc(dev, num,
+ sizeof(struct nd_intel_smart), GFP_KERNEL);
+ nfit_test->smart_threshold = devm_kcalloc(dev, num,
+ sizeof(struct nd_intel_smart_threshold),
+ GFP_KERNEL);
+ nfit_test->fw = devm_kcalloc(dev, num,
+ sizeof(struct nfit_test_fw), GFP_KERNEL);
if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label
&& nfit_test->label_dma && nfit_test->dcr
&& nfit_test->dcr_dma && nfit_test->flush
- && nfit_test->flush_dma)
+ && nfit_test->flush_dma
+ && nfit_test->fw)
/* pass */;
else
return -ENOMEM;
@@ -2159,6 +2556,11 @@ static __init int nfit_test_init(void)
{
int rc, i;
+ pmem_test();
+ libnvdimm_test();
+ acpi_nfit_test();
+ device_dax_test();
+
nfit_test_setup(nfit_test_lookup, nfit_test_evaluate_dsm);
nfit_wq = create_singlethread_workqueue("nfit");
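The smart_notify() helper added above raises an ACPI notification (0x81) on the DIMM device whenever an armed threshold is crossed. A minimal sketch of that trip predicate, assuming the nd_intel_smart structures declared in nfit_test.h below (the helper name here is illustrative only):

static bool nd_intel_smart_tripped(const struct nd_intel_smart *smart,
		const struct nd_intel_smart_threshold *thresh)
{
	/* spares alarm: trip once remaining spare capacity falls to the limit */
	if ((thresh->alarm_control & ND_INTEL_SMART_SPARE_TRIP) &&
			smart->spares <= thresh->spares)
		return true;
	/* media temperature alarm: trip at or above the limit */
	if ((thresh->alarm_control & ND_INTEL_SMART_TEMP_TRIP) &&
			smart->media_temperature >= thresh->media_temperature)
		return true;
	/* controller temperature alarm: trip at or above the limit */
	if ((thresh->alarm_control & ND_INTEL_SMART_CTEMP_TRIP) &&
			smart->ctrl_temperature >= thresh->ctrl_temperature)
		return true;
	return false;
}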
diff --git a/tools/testing/nvdimm/test/nfit_test.h b/tools/testing/nvdimm/test/nfit_test.h
index 113b44675a71..428344519cdf 100644
--- a/tools/testing/nvdimm/test/nfit_test.h
+++ b/tools/testing/nvdimm/test/nfit_test.h
@@ -84,6 +84,140 @@ struct nd_cmd_ars_err_inj_stat {
} __packed record[0];
} __packed;
+#define ND_INTEL_SMART 1
+#define ND_INTEL_SMART_THRESHOLD 2
+#define ND_INTEL_ENABLE_LSS_STATUS 10
+#define ND_INTEL_FW_GET_INFO 12
+#define ND_INTEL_FW_START_UPDATE 13
+#define ND_INTEL_FW_SEND_DATA 14
+#define ND_INTEL_FW_FINISH_UPDATE 15
+#define ND_INTEL_FW_FINISH_QUERY 16
+#define ND_INTEL_SMART_SET_THRESHOLD 17
+
+#define ND_INTEL_SMART_HEALTH_VALID (1 << 0)
+#define ND_INTEL_SMART_SPARES_VALID (1 << 1)
+#define ND_INTEL_SMART_USED_VALID (1 << 2)
+#define ND_INTEL_SMART_MTEMP_VALID (1 << 3)
+#define ND_INTEL_SMART_CTEMP_VALID (1 << 4)
+#define ND_INTEL_SMART_SHUTDOWN_COUNT_VALID (1 << 5)
+#define ND_INTEL_SMART_AIT_STATUS_VALID (1 << 6)
+#define ND_INTEL_SMART_PTEMP_VALID (1 << 7)
+#define ND_INTEL_SMART_ALARM_VALID (1 << 9)
+#define ND_INTEL_SMART_SHUTDOWN_VALID (1 << 10)
+#define ND_INTEL_SMART_VENDOR_VALID (1 << 11)
+#define ND_INTEL_SMART_SPARE_TRIP (1 << 0)
+#define ND_INTEL_SMART_TEMP_TRIP (1 << 1)
+#define ND_INTEL_SMART_CTEMP_TRIP (1 << 2)
+#define ND_INTEL_SMART_NON_CRITICAL_HEALTH (1 << 0)
+#define ND_INTEL_SMART_CRITICAL_HEALTH (1 << 1)
+#define ND_INTEL_SMART_FATAL_HEALTH (1 << 2)
+
+struct nd_intel_smart {
+ __u32 status;
+ union {
+ struct {
+ __u32 flags;
+ __u8 reserved0[4];
+ __u8 health;
+ __u8 spares;
+ __u8 life_used;
+ __u8 alarm_flags;
+ __u16 media_temperature;
+ __u16 ctrl_temperature;
+ __u32 shutdown_count;
+ __u8 ait_status;
+ __u16 pmic_temperature;
+ __u8 reserved1[8];
+ __u8 shutdown_state;
+ __u32 vendor_size;
+ __u8 vendor_data[92];
+ } __packed;
+ __u8 data[128];
+ };
+} __packed;
+
+struct nd_intel_smart_threshold {
+ __u32 status;
+ union {
+ struct {
+ __u16 alarm_control;
+ __u8 spares;
+ __u16 media_temperature;
+ __u16 ctrl_temperature;
+ __u8 reserved[1];
+ } __packed;
+ __u8 data[8];
+ };
+} __packed;
+
+struct nd_intel_smart_set_threshold {
+ __u16 alarm_control;
+ __u8 spares;
+ __u16 media_temperature;
+ __u16 ctrl_temperature;
+ __u32 status;
+} __packed;
+
+#define INTEL_FW_STORAGE_SIZE 0x100000
+#define INTEL_FW_MAX_SEND_LEN 0xFFEC
+#define INTEL_FW_QUERY_INTERVAL 250000
+#define INTEL_FW_QUERY_MAX_TIME 3000000
+#define INTEL_FW_FIS_VERSION 0x0105
+#define INTEL_FW_FAKE_VERSION 0xffffffffabcd
+
+enum intel_fw_update_state {
+ FW_STATE_NEW = 0,
+ FW_STATE_IN_PROGRESS,
+ FW_STATE_VERIFY,
+ FW_STATE_UPDATED,
+};
+
+struct nd_intel_fw_info {
+ __u32 status;
+ __u32 storage_size;
+ __u32 max_send_len;
+ __u32 query_interval;
+ __u32 max_query_time;
+ __u8 update_cap;
+ __u8 reserved[3];
+ __u32 fis_version;
+ __u64 run_version;
+ __u64 updated_version;
+} __packed;
+
+struct nd_intel_fw_start {
+ __u32 status;
+ __u32 context;
+} __packed;
+
+/* this one has the output first because of the variable input data size */
+struct nd_intel_fw_send_data {
+ __u32 context;
+ __u32 offset;
+ __u32 length;
+ __u8 data[0];
+/* this field is not declared due to the variable-sized input data */
+/* __u32 status; */
+} __packed;
+
+struct nd_intel_fw_finish_update {
+ __u8 ctrl_flags;
+ __u8 reserved[3];
+ __u32 context;
+ __u32 status;
+} __packed;
+
+struct nd_intel_fw_finish_query {
+ __u32 context;
+ __u32 status;
+ __u64 updated_fw_rev;
+} __packed;
+
+struct nd_intel_lss {
+ __u8 enable;
+ __u32 status;
+} __packed;
+
union acpi_object;
typedef void *acpi_handle;
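nd_intel_smart above is a union of named fields and a raw data[128] view, so the command handlers can memcpy whole payloads while readers consume individual fields; a field should only be trusted when its *_VALID bit is set in flags. Temperatures are carried in sixteenths of a degree Celsius (smart_init() above seeds 23 * 16, i.e. 23 C). A hedged consumer-side sketch under those assumptions (the helper name is made up for illustration):

static int nd_intel_smart_media_temp(const struct nd_intel_smart *smart)
{
	/* refuse to interpret the field unless the payload marks it valid */
	if (!(smart->flags & ND_INTEL_SMART_MTEMP_VALID))
		return -EINVAL;

	/* media_temperature is encoded in 1/16th degree Celsius units */
	return smart->media_temperature / 16;
}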
diff --git a/tools/testing/nvdimm/watermark.h b/tools/testing/nvdimm/watermark.h
new file mode 100644
index 000000000000..ed0528757bd4
--- /dev/null
+++ b/tools/testing/nvdimm/watermark.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+#ifndef _TEST_NVDIMM_WATERMARK_H_
+#define _TEST_NVDIMM_WATERMARK_H_
+int pmem_test(void);
+int libnvdimm_test(void);
+int acpi_nfit_test(void);
+int device_dax_test(void);
+
+/*
+ * dummy routine for nfit_test to validate it is linking to the properly
+ * mocked module and not the standard one from the base tree.
+ */
+#define nfit_test_watermark(x) \
+int x##_test(void) \
+{ \
+ pr_debug("%s for nfit_test\n", KBUILD_MODNAME); \
+ return 0; \
+} \
+EXPORT_SYMBOL(x##_test)
+#endif /* _TEST_NVDIMM_WATERMARK_H_ */
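nfit_test_init() in the nfit.c diff above calls pmem_test(), libnvdimm_test(), acpi_nfit_test() and device_dax_test(), so nfit_test only links if each mocked module has stamped itself with nfit_test_watermark(). For example, nfit_test_watermark(pmem); placed in the mocked pmem module expands to roughly the following (illustrative expansion of the macro above):

int pmem_test(void)
{
	/* KBUILD_MODNAME identifies which mocked module answered */
	pr_debug("%s for nfit_test\n", KBUILD_MODNAME);
	return 0;
}
EXPORT_SYMBOL(pmem_test);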
diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
index 30cd0b296f1a..44ef9eba5a7a 100644
--- a/tools/testing/radix-tree/idr-test.c
+++ b/tools/testing/radix-tree/idr-test.c
@@ -153,11 +153,12 @@ void idr_nowait_test(void)
idr_destroy(&idr);
}
-void idr_get_next_test(void)
+void idr_get_next_test(int base)
{
unsigned long i;
int nextid;
DEFINE_IDR(idr);
+ idr_init_base(&idr, base);
int indices[] = {4, 7, 9, 15, 65, 128, 1000, 99999, 0};
@@ -207,6 +208,7 @@ void idr_checks(void)
assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i);
}
assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i, GFP_KERNEL) == -ENOSPC);
+ assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i + 10, GFP_KERNEL) == -ENOSPC);
idr_for_each(&idr, item_idr_free, &idr);
idr_destroy(&idr);
@@ -214,6 +216,23 @@ void idr_checks(void)
assert(idr_is_empty(&idr));
+ idr_set_cursor(&idr, INT_MAX - 3UL);
+ for (i = INT_MAX - 3UL; i < INT_MAX + 3UL; i++) {
+ struct item *item;
+ unsigned int id;
+ if (i <= INT_MAX)
+ item = item_create(i, 0);
+ else
+ item = item_create(i - INT_MAX - 1, 0);
+
+ id = idr_alloc_cyclic(&idr, item, 0, 0, GFP_KERNEL);
+ assert(id == item->index);
+ }
+
+ idr_for_each(&idr, item_idr_free, &idr);
+ idr_destroy(&idr);
+ assert(idr_is_empty(&idr));
+
for (i = 1; i < 10000; i++) {
struct item *item = item_create(i, 0);
assert(idr_alloc(&idr, item, 1, 20000, GFP_KERNEL) == i);
@@ -226,7 +245,9 @@ void idr_checks(void)
idr_alloc_test();
idr_null_test();
idr_nowait_test();
- idr_get_next_test();
+ idr_get_next_test(0);
+ idr_get_next_test(1);
+ idr_get_next_test(4);
}
/*
@@ -380,7 +401,7 @@ void ida_check_random(void)
do {
ida_pre_get(&ida, GFP_KERNEL);
err = ida_get_new_above(&ida, bit, &id);
- } while (err == -ENOMEM);
+ } while (err == -EAGAIN);
assert(!err);
assert(id == bit);
}
@@ -489,7 +510,7 @@ static void *ida_random_fn(void *arg)
void ida_thread_tests(void)
{
- pthread_t threads[10];
+ pthread_t threads[20];
int i;
for (i = 0; i < ARRAY_SIZE(threads); i++)
diff --git a/tools/testing/radix-tree/linux/kernel.h b/tools/testing/radix-tree/linux/kernel.h
index c3bc3f364f68..426f32f28547 100644
--- a/tools/testing/radix-tree/linux/kernel.h
+++ b/tools/testing/radix-tree/linux/kernel.h
@@ -17,6 +17,4 @@
#define pr_debug printk
#define pr_cont printk
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
-
#endif /* _KERNEL_H */
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index eaf599dc2137..7442dfb73b7f 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -116,8 +116,15 @@ ifdef INSTALL_PATH
@# Ask all targets to emit their test scripts
echo "#!/bin/sh" > $(ALL_SCRIPT)
- echo "cd \$$(dirname \$$0)" >> $(ALL_SCRIPT)
+ echo "BASE_DIR=\$$(realpath \$$(dirname \$$0))" >> $(ALL_SCRIPT)
+ echo "cd \$$BASE_DIR" >> $(ALL_SCRIPT)
echo "ROOT=\$$PWD" >> $(ALL_SCRIPT)
+ echo "if [ \"\$$1\" = \"--summary\" ]; then" >> $(ALL_SCRIPT)
+ echo " OUTPUT=\$$BASE_DIR/output.log" >> $(ALL_SCRIPT)
+ echo " cat /dev/null > \$$OUTPUT" >> $(ALL_SCRIPT)
+ echo "else" >> $(ALL_SCRIPT)
+ echo " OUTPUT=/dev/stdout" >> $(ALL_SCRIPT)
+ echo "fi" >> $(ALL_SCRIPT)
for TARGET in $(TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 541d9d7fad5a..cc15af2e54fe 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -3,3 +3,11 @@ test_maps
test_lru_map
test_lpm_map
test_tag
+FEATURE-DUMP.libbpf
+fixdep
+test_align
+test_dev_cgroup
+test_progs
+test_tcpbpf_user
+test_verifier_log
+feature
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 333a48655ee0..5c43c187f27c 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+
LIBDIR := ../../../lib
BPFDIR := $(LIBDIR)/bpf
APIDIR := ../../../include/uapi
@@ -10,23 +11,36 @@ ifneq ($(wildcard $(GENHDR)),)
endif
CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
-LDLIBS += -lcap -lelf
+LDLIBS += -lcap -lelf -lrt -lpthread
+# Order corresponds to 'make run_tests' order
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
- test_align test_verifier_log test_dev_cgroup
+ test_align test_verifier_log test_dev_cgroup test_tcpbpf_user
TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \
test_pkt_md_access.o test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o \
- sockmap_verdict_prog.o dev_cgroup.o
+ sockmap_verdict_prog.o dev_cgroup.o sample_ret0.o test_tracepoint.o \
+ test_l4lb_noinline.o test_xdp_noinline.o test_stacktrace_map.o \
+ sample_map_ret0.o test_tcpbpf_kern.o
+
+# Order corresponds to 'make run_tests' order
+TEST_PROGS := test_kmod.sh \
+ test_libbpf.sh \
+ test_xdp_redirect.sh \
+ test_xdp_meta.sh \
+ test_offload.py
-TEST_PROGS := test_kmod.sh test_xdp_redirect.sh test_xdp_meta.sh
+# Compile but not part of 'make run_tests'
+TEST_GEN_PROGS_EXTENDED = test_libbpf_open
include ../lib.mk
-BPFOBJ := $(OUTPUT)/libbpf.a $(OUTPUT)/cgroup_helpers.c
+BPFOBJ := $(OUTPUT)/libbpf.a cgroup_helpers.c
$(TEST_GEN_PROGS): $(BPFOBJ)
+$(TEST_GEN_PROGS_EXTENDED): $(OUTPUT)/libbpf.a
+
.PHONY: force
# force a rebuild of BPFOBJ when its dependencies are updated
@@ -38,7 +52,7 @@ $(BPFOBJ): force
CLANG ?= clang
LLC ?= llc
-PROBE := $(shell llc -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1)
+PROBE := $(shell $(LLC) -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1)
# Let newer LLVM versions transparently probe the kernel for availability
# of full BPF instruction set.
@@ -48,8 +62,13 @@ else
CPU ?= generic
endif
-%.o: %.c
- $(CLANG) -I. -I./include/uapi -I../../../include/uapi \
- -Wno-compare-distinct-pointer-types \
+CLANG_FLAGS = -I. -I./include/uapi -I../../../include/uapi \
+ -Wno-compare-distinct-pointer-types
+
+$(OUTPUT)/test_l4lb_noinline.o: CLANG_FLAGS += -fno-inline
+$(OUTPUT)/test_xdp_noinline.o: CLANG_FLAGS += -fno-inline
+
+$(OUTPUT)/%.o: %.c
+ $(CLANG) $(CLANG_FLAGS) \
-O2 -target bpf -emit-llvm -c $< -o - | \
$(LLC) -march=bpf -mcpu=$(CPU) -filetype=obj -o $@
diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h
index fd9a17fa8a8b..dde2c11d7771 100644
--- a/tools/testing/selftests/bpf/bpf_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_helpers.h
@@ -71,6 +71,8 @@ static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval,
static int (*bpf_getsockopt)(void *ctx, int level, int optname, void *optval,
int optlen) =
(void *) BPF_FUNC_getsockopt;
+static int (*bpf_sock_ops_cb_flags_set)(void *ctx, int flags) =
+ (void *) BPF_FUNC_sock_ops_cb_flags_set;
static int (*bpf_sk_redirect_map)(void *ctx, void *map, int key, int flags) =
(void *) BPF_FUNC_sk_redirect_map;
static int (*bpf_sock_map_update)(void *map, void *key, void *value,
@@ -82,7 +84,8 @@ static int (*bpf_perf_event_read_value)(void *map, unsigned long long flags,
static int (*bpf_perf_prog_read_value)(void *ctx, void *buf,
unsigned int buf_size) =
(void *) BPF_FUNC_perf_prog_read_value;
-
+static int (*bpf_override_return)(void *ctx, unsigned long rc) =
+ (void *) BPF_FUNC_override_return;
/* llvm builtin functions that eBPF C program may use to
* emit BPF_LD_ABS and BPF_LD_IND instructions
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index 52d53ed08769..983dd25d49f4 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -3,3 +3,5 @@ CONFIG_BPF_SYSCALL=y
CONFIG_NET_CLS_BPF=m
CONFIG_BPF_EVENTS=y
CONFIG_TEST_BPF=m
+CONFIG_CGROUP_BPF=y
+CONFIG_NETDEVSIM=m
diff --git a/tools/testing/selftests/bpf/sample_map_ret0.c b/tools/testing/selftests/bpf/sample_map_ret0.c
new file mode 100644
index 000000000000..0756303676ac
--- /dev/null
+++ b/tools/testing/selftests/bpf/sample_map_ret0.c
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+struct bpf_map_def SEC("maps") htab = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(long),
+ .max_entries = 2,
+};
+
+struct bpf_map_def SEC("maps") array = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(long),
+ .max_entries = 2,
+};
+
+/* Sample program which should always load for testing control paths. */
+SEC(".text") int func()
+{
+ __u64 key64 = 0;
+ __u32 key = 0;
+ long *value;
+
+ value = bpf_map_lookup_elem(&htab, &key);
+ if (!value)
+ return 1;
+ value = bpf_map_lookup_elem(&array, &key64);
+ if (!value)
+ return 1;
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/sample_ret0.c b/tools/testing/selftests/bpf/sample_ret0.c
new file mode 100644
index 000000000000..fec99750d6ea
--- /dev/null
+++ b/tools/testing/selftests/bpf/sample_ret0.c
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+
+/* Sample program which should always load for testing control paths. */
+int func()
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/tcp_client.py b/tools/testing/selftests/bpf/tcp_client.py
new file mode 100755
index 000000000000..481dccdf140c
--- /dev/null
+++ b/tools/testing/selftests/bpf/tcp_client.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python2
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+
+import sys, os, os.path, getopt
+import socket, time
+import subprocess
+import select
+
+def read(sock, n):
+ buf = ''
+ while len(buf) < n:
+ rem = n - len(buf)
+ try: s = sock.recv(rem)
+ except (socket.error), e: return ''
+ buf += s
+ return buf
+
+def send(sock, s):
+ total = len(s)
+ count = 0
+ while count < total:
+ try: n = sock.send(s)
+ except (socket.error), e: n = 0
+ if n == 0:
+ return count;
+ count += n
+ return count
+
+
+serverPort = int(sys.argv[1])
+HostName = socket.gethostname()
+
+# create active socket
+sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+try:
+ sock.connect((HostName, serverPort))
+except socket.error as e:
+ sys.exit(1)
+
+buf = ''
+n = 0
+while n < 1000:
+ buf += '+'
+ n += 1
+
+sock.settimeout(1);
+n = send(sock, buf)
+n = read(sock, 500)
+sys.exit(0)
diff --git a/tools/testing/selftests/bpf/tcp_server.py b/tools/testing/selftests/bpf/tcp_server.py
new file mode 100755
index 000000000000..bc454d7d0be2
--- /dev/null
+++ b/tools/testing/selftests/bpf/tcp_server.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python2
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+
+import sys, os, os.path, getopt
+import socket, time
+import subprocess
+import select
+
+def read(sock, n):
+ buf = ''
+ while len(buf) < n:
+ rem = n - len(buf)
+ try: s = sock.recv(rem)
+ except (socket.error), e: return ''
+ buf += s
+ return buf
+
+def send(sock, s):
+ total = len(s)
+ count = 0
+ while count < total:
+ try: n = sock.send(s)
+ except (socket.error), e: n = 0
+ if n == 0:
+ return count;
+ count += n
+ return count
+
+
+SERVER_PORT = 12877
+MAX_PORTS = 2
+
+serverPort = SERVER_PORT
+serverSocket = None
+
+HostName = socket.gethostname()
+
+# create passive socket
+serverSocket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+host = socket.gethostname()
+
+try: serverSocket.bind((host, 0))
+except socket.error as msg:
+ print 'bind fails: ', msg
+
+sn = serverSocket.getsockname()
+serverPort = sn[1]
+
+cmdStr = ("./tcp_client.py %d &") % (serverPort)
+os.system(cmdStr)
+
+buf = ''
+n = 0
+while n < 500:
+ buf += '.'
+ n += 1
+
+serverSocket.listen(MAX_PORTS)
+readList = [serverSocket]
+
+while True:
+ readyRead, readyWrite, inError = \
+ select.select(readList, [], [], 2)
+
+ if len(readyRead) > 0:
+ waitCount = 0
+ for sock in readyRead:
+ if sock == serverSocket:
+ (clientSocket, address) = serverSocket.accept()
+ address = str(address[0])
+ readList.append(clientSocket)
+ else:
+ sock.settimeout(1);
+ s = read(sock, 1000)
+ n = send(sock, buf)
+ sock.close()
+ serverSocket.close()
+ sys.exit(0)
+ else:
+ print 'Select timeout!'
+ sys.exit(1)
diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c
index 8591c89c0828..ff8bd7e3e50c 100644
--- a/tools/testing/selftests/bpf/test_align.c
+++ b/tools/testing/selftests/bpf/test_align.c
@@ -64,11 +64,11 @@ static struct bpf_align_test tests[] = {
.matches = {
{1, "R1=ctx(id=0,off=0,imm=0)"},
{1, "R10=fp0"},
- {1, "R3=inv2"},
- {2, "R3=inv4"},
- {3, "R3=inv8"},
- {4, "R3=inv16"},
- {5, "R3=inv32"},
+ {1, "R3_w=inv2"},
+ {2, "R3_w=inv4"},
+ {3, "R3_w=inv8"},
+ {4, "R3_w=inv16"},
+ {5, "R3_w=inv32"},
},
},
{
@@ -92,17 +92,17 @@ static struct bpf_align_test tests[] = {
.matches = {
{1, "R1=ctx(id=0,off=0,imm=0)"},
{1, "R10=fp0"},
- {1, "R3=inv1"},
- {2, "R3=inv2"},
- {3, "R3=inv4"},
- {4, "R3=inv8"},
- {5, "R3=inv16"},
- {6, "R3=inv1"},
- {7, "R4=inv32"},
- {8, "R4=inv16"},
- {9, "R4=inv8"},
- {10, "R4=inv4"},
- {11, "R4=inv2"},
+ {1, "R3_w=inv1"},
+ {2, "R3_w=inv2"},
+ {3, "R3_w=inv4"},
+ {4, "R3_w=inv8"},
+ {5, "R3_w=inv16"},
+ {6, "R3_w=inv1"},
+ {7, "R4_w=inv32"},
+ {8, "R4_w=inv16"},
+ {9, "R4_w=inv8"},
+ {10, "R4_w=inv4"},
+ {11, "R4_w=inv2"},
},
},
{
@@ -121,12 +121,12 @@ static struct bpf_align_test tests[] = {
.matches = {
{1, "R1=ctx(id=0,off=0,imm=0)"},
{1, "R10=fp0"},
- {1, "R3=inv4"},
- {2, "R3=inv8"},
- {3, "R3=inv10"},
- {4, "R4=inv8"},
- {5, "R4=inv12"},
- {6, "R4=inv14"},
+ {1, "R3_w=inv4"},
+ {2, "R3_w=inv8"},
+ {3, "R3_w=inv10"},
+ {4, "R4_w=inv8"},
+ {5, "R4_w=inv12"},
+ {6, "R4_w=inv14"},
},
},
{
@@ -143,10 +143,10 @@ static struct bpf_align_test tests[] = {
.matches = {
{1, "R1=ctx(id=0,off=0,imm=0)"},
{1, "R10=fp0"},
- {1, "R3=inv7"},
- {2, "R3=inv7"},
- {3, "R3=inv14"},
- {4, "R3=inv56"},
+ {1, "R3_w=inv7"},
+ {2, "R3_w=inv7"},
+ {3, "R3_w=inv14"},
+ {4, "R3_w=inv56"},
},
},
@@ -185,18 +185,18 @@ static struct bpf_align_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
{7, "R0=pkt(id=0,off=8,r=8,imm=0)"},
- {7, "R3=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
- {8, "R3=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
- {9, "R3=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
- {10, "R3=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
- {11, "R3=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
+ {7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+ {8, "R3_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
+ {9, "R3_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {10, "R3_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
+ {11, "R3_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
{18, "R3=pkt_end(id=0,off=0,imm=0)"},
- {18, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
- {19, "R4=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"},
- {20, "R4=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
- {21, "R4=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
- {22, "R4=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
- {23, "R4=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
+ {18, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+ {19, "R4_w=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"},
+ {20, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
+ {21, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
+ {22, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {23, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
},
},
{
@@ -217,16 +217,16 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {7, "R3=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
- {8, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
- {9, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
- {10, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
- {11, "R4=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
- {12, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
- {13, "R4=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
- {14, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
- {15, "R4=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
- {16, "R4=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
+ {7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+ {8, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+ {9, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+ {10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+ {11, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
+ {12, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+ {13, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {14, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+ {15, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
+ {16, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
},
},
{
@@ -257,14 +257,14 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {4, "R5=pkt(id=0,off=0,r=0,imm=0)"},
- {5, "R5=pkt(id=0,off=14,r=0,imm=0)"},
- {6, "R4=pkt(id=0,off=14,r=0,imm=0)"},
+ {4, "R5_w=pkt(id=0,off=0,r=0,imm=0)"},
+ {5, "R5_w=pkt(id=0,off=14,r=0,imm=0)"},
+ {6, "R4_w=pkt(id=0,off=14,r=0,imm=0)"},
{10, "R2=pkt(id=0,off=0,r=18,imm=0)"},
{10, "R5=pkt(id=0,off=14,r=18,imm=0)"},
- {10, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
- {14, "R4=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
- {15, "R4=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
+ {10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+ {14, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
+ {15, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
},
},
{
@@ -320,11 +320,11 @@ static struct bpf_align_test tests[] = {
* alignment of 4.
*/
{8, "R2=pkt(id=0,off=0,r=8,imm=0)"},
- {8, "R6=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
/* Offset is added to packet pointer R5, resulting in
* known fixed offset, and variable offset from R6.
*/
- {11, "R5=pkt(id=1,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {11, "R5_w=pkt(id=1,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
/* At the time the word size load is performed from R5,
* it's total offset is NET_IP_ALIGN + reg->off (0) +
* reg->aux_off (14) which is 16. Then the variable
@@ -336,11 +336,11 @@ static struct bpf_align_test tests[] = {
/* Variable offset is added to R5 packet pointer,
* resulting in auxiliary alignment of 4.
*/
- {18, "R5=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {18, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
/* Constant offset is added to R5, resulting in
* reg->off of 14.
*/
- {19, "R5=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {19, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off
* (14) which is 16. Then the variable offset is 4-byte
@@ -352,18 +352,18 @@ static struct bpf_align_test tests[] = {
/* Constant offset is added to R5 packet pointer,
* resulting in reg->off value of 14.
*/
- {26, "R5=pkt(id=0,off=14,r=8"},
+ {26, "R5_w=pkt(id=0,off=14,r=8"},
/* Variable offset is added to R5, resulting in a
* variable offset of (4n).
*/
- {27, "R5=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {27, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
/* Constant is added to R5 again, setting reg->off to 18. */
- {28, "R5=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {28, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
/* And once more we add a variable; resulting var_off
* is still (4n), fixed offset is not changed.
* Also, we create a new reg->id.
*/
- {29, "R5=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc))"},
+ {29, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc))"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (18)
* which is 20. Then the variable offset is (4n), so
@@ -410,11 +410,11 @@ static struct bpf_align_test tests[] = {
* alignment of 4.
*/
{8, "R2=pkt(id=0,off=0,r=8,imm=0)"},
- {8, "R6=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
/* Adding 14 makes R6 be (4n+2) */
- {9, "R6=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
+ {9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
/* Packet pointer has (4n+2) offset */
- {11, "R5=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
+ {11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
{13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
@@ -426,11 +426,11 @@ static struct bpf_align_test tests[] = {
/* Newly read value in R6 was shifted left by 2, so has
* known alignment of 4.
*/
- {18, "R6=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {18, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
/* Added (4n) to packet pointer's (4n+2) var_off, giving
* another (4n+2).
*/
- {19, "R5=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
+ {19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
{21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
@@ -446,11 +446,9 @@ static struct bpf_align_test tests[] = {
.insns = {
PREP_PKT_POINTERS,
BPF_MOV64_IMM(BPF_REG_0, 0),
- /* ptr & const => unknown & const */
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 0x40),
- /* ptr << const => unknown << const */
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
+ /* (ptr - ptr) << 2 */
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_2),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_5, 2),
/* We have a (4n) value. Let's make a packet offset
* out of it. First add 14, to make it a (4n+2)
@@ -473,20 +471,18 @@ static struct bpf_align_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.matches = {
- {4, "R5=pkt(id=0,off=0,r=0,imm=0)"},
- /* ptr & 0x40 == either 0 or 0x40 */
- {5, "R5=inv(id=0,umax_value=64,var_off=(0x0; 0x40))"},
- /* ptr << 2 == unknown, (4n) */
- {7, "R5=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"},
+ {4, "R5_w=pkt_end(id=0,off=0,imm=0)"},
+ /* (ptr - ptr) << 2 == unknown, (4n) */
+ {6, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"},
/* (4n) + 14 == (4n+2). We blow our bounds, because
* the add could overflow.
*/
- {8, "R5=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
+ {7, "R5=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
/* Checked s>=0 */
- {10, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
+ {9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
/* packet pointer + nonnegative (4n+2) */
- {12, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
- {14, "R4=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
+ {11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
+ {13, "R4=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
/* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
* We checked the bounds, but it might have been able
* to overflow if the packet pointer started in the
@@ -494,7 +490,7 @@ static struct bpf_align_test tests[] = {
* So we did not get a 'range' on R6, and the access
* attempt will fail.
*/
- {16, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
+ {15, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
}
},
{
@@ -530,11 +526,11 @@ static struct bpf_align_test tests[] = {
* alignment of 4.
*/
{7, "R2=pkt(id=0,off=0,r=8,imm=0)"},
- {9, "R6=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {9, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
/* Adding 14 makes R6 be (4n+2) */
- {10, "R6=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
+ {10, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
/* New unknown value in R7 is (4n) */
- {11, "R7=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {11, "R7_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
/* Subtracting it from R6 blows our unsigned bounds */
{12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,var_off=(0x2; 0xfffffffffffffffc))"},
/* Checked s>= 0 */
@@ -583,15 +579,15 @@ static struct bpf_align_test tests[] = {
* alignment of 4.
*/
{7, "R2=pkt(id=0,off=0,r=8,imm=0)"},
- {10, "R6=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"},
+ {10, "R6_w=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"},
/* Adding 14 makes R6 be (4n+2) */
- {11, "R6=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"},
+ {11, "R6_w=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"},
/* Subtracting from packet pointer overflows ubounds */
- {13, "R5=pkt(id=1,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c))"},
+ {13, "R5_w=pkt(id=1,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c))"},
/* New unknown value in R7 is (4n), >= 76 */
- {15, "R7=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"},
+ {15, "R7_w=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"},
/* Adding it to packet pointer gives nice bounds again */
- {16, "R5=pkt(id=2,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
+ {16, "R5_w=pkt(id=2,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
* which is 2. Then the variable offset is (4n+2), so
diff --git a/tools/testing/selftests/bpf/test_dev_cgroup.c b/tools/testing/selftests/bpf/test_dev_cgroup.c
index 02c85d6c89b0..3489cc283433 100644
--- a/tools/testing/selftests/bpf/test_dev_cgroup.c
+++ b/tools/testing/selftests/bpf/test_dev_cgroup.c
@@ -10,6 +10,8 @@
#include <string.h>
#include <errno.h>
#include <assert.h>
+#include <sys/time.h>
+#include <sys/resource.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>
@@ -19,19 +21,23 @@
#define DEV_CGROUP_PROG "./dev_cgroup.o"
-#define TEST_CGROUP "test-bpf-based-device-cgroup/"
+#define TEST_CGROUP "/test-bpf-based-device-cgroup/"
int main(int argc, char **argv)
{
+ struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY };
struct bpf_object *obj;
int error = EXIT_FAILURE;
int prog_fd, cgroup_fd;
__u32 prog_cnt;
+ if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0)
+ perror("Unable to lift memlock rlimit");
+
if (bpf_prog_load(DEV_CGROUP_PROG, BPF_PROG_TYPE_CGROUP_DEVICE,
&obj, &prog_fd)) {
printf("Failed to load DEV_CGROUP program\n");
- goto err;
+ goto out;
}
if (setup_cgroup_environment()) {
@@ -89,5 +95,6 @@ int main(int argc, char **argv)
err:
cleanup_cgroup_environment();
+out:
return error;
}
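The setrlimit() call added above matters because BPF program and map allocations are charged against the caller's RLIMIT_MEMLOCK, so bpf_prog_load() can fail under the small default limit. A stand-alone sketch of the same pattern, with the error path made explicit for illustration:

#include <stdio.h>
#include <sys/resource.h>

static int lift_memlock_limit(void)
{
	struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY };

	/* BPF objects are accounted against the locked-memory limit */
	if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0) {
		perror("setrlimit(RLIMIT_MEMLOCK)");
		return -1;
	}
	return 0;
}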
diff --git a/tools/testing/selftests/bpf/test_kmod.sh b/tools/testing/selftests/bpf/test_kmod.sh
index ed4774d8d6ed..35669ccd4d23 100755
--- a/tools/testing/selftests/bpf/test_kmod.sh
+++ b/tools/testing/selftests/bpf/test_kmod.sh
@@ -10,9 +10,21 @@ test_run()
echo "[ JIT enabled:$1 hardened:$2 ]"
dmesg -C
- insmod $SRC_TREE/lib/test_bpf.ko 2> /dev/null
- if [ $? -ne 0 ]; then
- rc=1
+ if [ -f ${SRC_TREE}/lib/test_bpf.ko ]; then
+ insmod ${SRC_TREE}/lib/test_bpf.ko 2> /dev/null
+ if [ $? -ne 0 ]; then
+ rc=1
+ fi
+ else
+ # Use modprobe dry run to check for missing test_bpf module
+ if ! /sbin/modprobe -q -n test_bpf; then
+ echo "test_bpf: [SKIP]"
+ elif /sbin/modprobe -q test_bpf; then
+ echo "test_bpf: ok"
+ else
+ echo "test_bpf: [FAIL]"
+ rc=1
+ fi
fi
rmmod test_bpf 2> /dev/null
dmesg | grep FAIL
diff --git a/tools/testing/selftests/bpf/test_l4lb_noinline.c b/tools/testing/selftests/bpf/test_l4lb_noinline.c
new file mode 100644
index 000000000000..ba44a14e6dc4
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_l4lb_noinline.c
@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017 Facebook
+#include <stddef.h>
+#include <stdbool.h>
+#include <string.h>
+#include <linux/pkt_cls.h>
+#include <linux/bpf.h>
+#include <linux/in.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include "bpf_helpers.h"
+#include "test_iptunnel_common.h"
+#include "bpf_endian.h"
+
+int _version SEC("version") = 1;
+
+static __u32 rol32(__u32 word, unsigned int shift)
+{
+ return (word << shift) | (word >> ((-shift) & 31));
+}
+
+/* copy-paste of jhash from the kernel sources to make sure llvm
+ * can compile it into a valid sequence of bpf instructions
+ */
+#define __jhash_mix(a, b, c) \
+{ \
+ a -= c; a ^= rol32(c, 4); c += b; \
+ b -= a; b ^= rol32(a, 6); a += c; \
+ c -= b; c ^= rol32(b, 8); b += a; \
+ a -= c; a ^= rol32(c, 16); c += b; \
+ b -= a; b ^= rol32(a, 19); a += c; \
+ c -= b; c ^= rol32(b, 4); b += a; \
+}
+
+#define __jhash_final(a, b, c) \
+{ \
+ c ^= b; c -= rol32(b, 14); \
+ a ^= c; a -= rol32(c, 11); \
+ b ^= a; b -= rol32(a, 25); \
+ c ^= b; c -= rol32(b, 16); \
+ a ^= c; a -= rol32(c, 4); \
+ b ^= a; b -= rol32(a, 14); \
+ c ^= b; c -= rol32(b, 24); \
+}
+
+#define JHASH_INITVAL 0xdeadbeef
+
+typedef unsigned int u32;
+
+static u32 jhash(const void *key, u32 length, u32 initval)
+{
+ u32 a, b, c;
+ const unsigned char *k = key;
+
+ a = b = c = JHASH_INITVAL + length + initval;
+
+ while (length > 12) {
+ a += *(u32 *)(k);
+ b += *(u32 *)(k + 4);
+ c += *(u32 *)(k + 8);
+ __jhash_mix(a, b, c);
+ length -= 12;
+ k += 12;
+ }
+ switch (length) {
+ case 12: c += (u32)k[11]<<24;
+ case 11: c += (u32)k[10]<<16;
+ case 10: c += (u32)k[9]<<8;
+ case 9: c += k[8];
+ case 8: b += (u32)k[7]<<24;
+ case 7: b += (u32)k[6]<<16;
+ case 6: b += (u32)k[5]<<8;
+ case 5: b += k[4];
+ case 4: a += (u32)k[3]<<24;
+ case 3: a += (u32)k[2]<<16;
+ case 2: a += (u32)k[1]<<8;
+ case 1: a += k[0];
+ __jhash_final(a, b, c);
+ case 0: /* Nothing left to add */
+ break;
+ }
+
+ return c;
+}
+
+static u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
+{
+ a += initval;
+ b += initval;
+ c += initval;
+ __jhash_final(a, b, c);
+ return c;
+}
+
+static u32 jhash_2words(u32 a, u32 b, u32 initval)
+{
+ return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
+}
+
+#define PCKT_FRAGMENTED 65343
+#define IPV4_HDR_LEN_NO_OPT 20
+#define IPV4_PLUS_ICMP_HDR 28
+#define IPV6_PLUS_ICMP_HDR 48
+#define RING_SIZE 2
+#define MAX_VIPS 12
+#define MAX_REALS 5
+#define CTL_MAP_SIZE 16
+#define CH_RINGS_SIZE (MAX_VIPS * RING_SIZE)
+#define F_IPV6 (1 << 0)
+#define F_HASH_NO_SRC_PORT (1 << 0)
+#define F_ICMP (1 << 0)
+#define F_SYN_SET (1 << 1)
+
+struct packet_description {
+ union {
+ __be32 src;
+ __be32 srcv6[4];
+ };
+ union {
+ __be32 dst;
+ __be32 dstv6[4];
+ };
+ union {
+ __u32 ports;
+ __u16 port16[2];
+ };
+ __u8 proto;
+ __u8 flags;
+};
+
+struct ctl_value {
+ union {
+ __u64 value;
+ __u32 ifindex;
+ __u8 mac[6];
+ };
+};
+
+struct vip_meta {
+ __u32 flags;
+ __u32 vip_num;
+};
+
+struct real_definition {
+ union {
+ __be32 dst;
+ __be32 dstv6[4];
+ };
+ __u8 flags;
+};
+
+struct vip_stats {
+ __u64 bytes;
+ __u64 pkts;
+};
+
+struct eth_hdr {
+ unsigned char eth_dest[ETH_ALEN];
+ unsigned char eth_source[ETH_ALEN];
+ unsigned short eth_proto;
+};
+
+struct bpf_map_def SEC("maps") vip_map = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(struct vip),
+ .value_size = sizeof(struct vip_meta),
+ .max_entries = MAX_VIPS,
+};
+
+struct bpf_map_def SEC("maps") ch_rings = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(__u32),
+ .max_entries = CH_RINGS_SIZE,
+};
+
+struct bpf_map_def SEC("maps") reals = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct real_definition),
+ .max_entries = MAX_REALS,
+};
+
+struct bpf_map_def SEC("maps") stats = {
+ .type = BPF_MAP_TYPE_PERCPU_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct vip_stats),
+ .max_entries = MAX_VIPS,
+};
+
+struct bpf_map_def SEC("maps") ctl_array = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct ctl_value),
+ .max_entries = CTL_MAP_SIZE,
+};
+
+static __u32 get_packet_hash(struct packet_description *pckt,
+ bool ipv6)
+{
+ if (ipv6)
+ return jhash_2words(jhash(pckt->srcv6, 16, MAX_VIPS),
+ pckt->ports, CH_RINGS_SIZE);
+ else
+ return jhash_2words(pckt->src, pckt->ports, CH_RINGS_SIZE);
+}
+
+static bool get_packet_dst(struct real_definition **real,
+ struct packet_description *pckt,
+ struct vip_meta *vip_info,
+ bool is_ipv6)
+{
+ __u32 hash = get_packet_hash(pckt, is_ipv6);
+ __u32 key = RING_SIZE * vip_info->vip_num + hash % RING_SIZE;
+ __u32 *real_pos;
+
+ if (hash != 0x358459b7 /* jhash of ipv4 packet */ &&
+ hash != 0x2f4bc6bb /* jhash of ipv6 packet */)
+ return 0;
+
+ real_pos = bpf_map_lookup_elem(&ch_rings, &key);
+ if (!real_pos)
+ return false;
+ key = *real_pos;
+ *real = bpf_map_lookup_elem(&reals, &key);
+ if (!(*real))
+ return false;
+ return true;
+}
+
+static int parse_icmpv6(void *data, void *data_end, __u64 off,
+ struct packet_description *pckt)
+{
+ struct icmp6hdr *icmp_hdr;
+ struct ipv6hdr *ip6h;
+
+ icmp_hdr = data + off;
+ if (icmp_hdr + 1 > data_end)
+ return TC_ACT_SHOT;
+ if (icmp_hdr->icmp6_type != ICMPV6_PKT_TOOBIG)
+ return TC_ACT_OK;
+ off += sizeof(struct icmp6hdr);
+ ip6h = data + off;
+ if (ip6h + 1 > data_end)
+ return TC_ACT_SHOT;
+ pckt->proto = ip6h->nexthdr;
+ pckt->flags |= F_ICMP;
+ memcpy(pckt->srcv6, ip6h->daddr.s6_addr32, 16);
+ memcpy(pckt->dstv6, ip6h->saddr.s6_addr32, 16);
+ return TC_ACT_UNSPEC;
+}
+
+static int parse_icmp(void *data, void *data_end, __u64 off,
+ struct packet_description *pckt)
+{
+ struct icmphdr *icmp_hdr;
+ struct iphdr *iph;
+
+ icmp_hdr = data + off;
+ if (icmp_hdr + 1 > data_end)
+ return TC_ACT_SHOT;
+ if (icmp_hdr->type != ICMP_DEST_UNREACH ||
+ icmp_hdr->code != ICMP_FRAG_NEEDED)
+ return TC_ACT_OK;
+ off += sizeof(struct icmphdr);
+ iph = data + off;
+ if (iph + 1 > data_end)
+ return TC_ACT_SHOT;
+ if (iph->ihl != 5)
+ return TC_ACT_SHOT;
+ pckt->proto = iph->protocol;
+ pckt->flags |= F_ICMP;
+ pckt->src = iph->daddr;
+ pckt->dst = iph->saddr;
+ return TC_ACT_UNSPEC;
+}
+
+static bool parse_udp(void *data, __u64 off, void *data_end,
+ struct packet_description *pckt)
+{
+ struct udphdr *udp;
+ udp = data + off;
+
+ if (udp + 1 > data_end)
+ return false;
+
+ if (!(pckt->flags & F_ICMP)) {
+ pckt->port16[0] = udp->source;
+ pckt->port16[1] = udp->dest;
+ } else {
+ pckt->port16[0] = udp->dest;
+ pckt->port16[1] = udp->source;
+ }
+ return true;
+}
+
+static bool parse_tcp(void *data, __u64 off, void *data_end,
+ struct packet_description *pckt)
+{
+ struct tcphdr *tcp;
+
+ tcp = data + off;
+ if (tcp + 1 > data_end)
+ return false;
+
+ if (tcp->syn)
+ pckt->flags |= F_SYN_SET;
+
+ if (!(pckt->flags & F_ICMP)) {
+ pckt->port16[0] = tcp->source;
+ pckt->port16[1] = tcp->dest;
+ } else {
+ pckt->port16[0] = tcp->dest;
+ pckt->port16[1] = tcp->source;
+ }
+ return true;
+}
+
+static int process_packet(void *data, __u64 off, void *data_end,
+ bool is_ipv6, struct __sk_buff *skb)
+{
+ void *pkt_start = (void *)(long)skb->data;
+ struct packet_description pckt = {};
+ struct eth_hdr *eth = pkt_start;
+ struct bpf_tunnel_key tkey = {};
+ struct vip_stats *data_stats;
+ struct real_definition *dst;
+ struct vip_meta *vip_info;
+ struct ctl_value *cval;
+ __u32 v4_intf_pos = 1;
+ __u32 v6_intf_pos = 2;
+ struct ipv6hdr *ip6h;
+ struct vip vip = {};
+ struct iphdr *iph;
+ int tun_flag = 0;
+ __u16 pkt_bytes;
+ __u64 iph_len;
+ __u32 ifindex;
+ __u8 protocol;
+ __u32 vip_num;
+ int action;
+
+ tkey.tunnel_ttl = 64;
+ if (is_ipv6) {
+ ip6h = data + off;
+ if (ip6h + 1 > data_end)
+ return TC_ACT_SHOT;
+
+ iph_len = sizeof(struct ipv6hdr);
+ protocol = ip6h->nexthdr;
+ pckt.proto = protocol;
+ pkt_bytes = bpf_ntohs(ip6h->payload_len);
+ off += iph_len;
+ if (protocol == IPPROTO_FRAGMENT) {
+ return TC_ACT_SHOT;
+ } else if (protocol == IPPROTO_ICMPV6) {
+ action = parse_icmpv6(data, data_end, off, &pckt);
+ if (action >= 0)
+ return action;
+ off += IPV6_PLUS_ICMP_HDR;
+ } else {
+ memcpy(pckt.srcv6, ip6h->saddr.s6_addr32, 16);
+ memcpy(pckt.dstv6, ip6h->daddr.s6_addr32, 16);
+ }
+ } else {
+ iph = data + off;
+ if (iph + 1 > data_end)
+ return TC_ACT_SHOT;
+ if (iph->ihl != 5)
+ return TC_ACT_SHOT;
+
+ protocol = iph->protocol;
+ pckt.proto = protocol;
+ pkt_bytes = bpf_ntohs(iph->tot_len);
+ off += IPV4_HDR_LEN_NO_OPT;
+
+ if (iph->frag_off & PCKT_FRAGMENTED)
+ return TC_ACT_SHOT;
+ if (protocol == IPPROTO_ICMP) {
+ action = parse_icmp(data, data_end, off, &pckt);
+ if (action >= 0)
+ return action;
+ off += IPV4_PLUS_ICMP_HDR;
+ } else {
+ pckt.src = iph->saddr;
+ pckt.dst = iph->daddr;
+ }
+ }
+ protocol = pckt.proto;
+
+ if (protocol == IPPROTO_TCP) {
+ if (!parse_tcp(data, off, data_end, &pckt))
+ return TC_ACT_SHOT;
+ } else if (protocol == IPPROTO_UDP) {
+ if (!parse_udp(data, off, data_end, &pckt))
+ return TC_ACT_SHOT;
+ } else {
+ return TC_ACT_SHOT;
+ }
+
+ if (is_ipv6)
+ memcpy(vip.daddr.v6, pckt.dstv6, 16);
+ else
+ vip.daddr.v4 = pckt.dst;
+
+ vip.dport = pckt.port16[1];
+ vip.protocol = pckt.proto;
+ vip_info = bpf_map_lookup_elem(&vip_map, &vip);
+ if (!vip_info) {
+ vip.dport = 0;
+ vip_info = bpf_map_lookup_elem(&vip_map, &vip);
+ if (!vip_info)
+ return TC_ACT_SHOT;
+ pckt.port16[1] = 0;
+ }
+
+ if (vip_info->flags & F_HASH_NO_SRC_PORT)
+ pckt.port16[0] = 0;
+
+ if (!get_packet_dst(&dst, &pckt, vip_info, is_ipv6))
+ return TC_ACT_SHOT;
+
+ if (dst->flags & F_IPV6) {
+ cval = bpf_map_lookup_elem(&ctl_array, &v6_intf_pos);
+ if (!cval)
+ return TC_ACT_SHOT;
+ ifindex = cval->ifindex;
+ memcpy(tkey.remote_ipv6, dst->dstv6, 16);
+ tun_flag = BPF_F_TUNINFO_IPV6;
+ } else {
+ cval = bpf_map_lookup_elem(&ctl_array, &v4_intf_pos);
+ if (!cval)
+ return TC_ACT_SHOT;
+ ifindex = cval->ifindex;
+ tkey.remote_ipv4 = dst->dst;
+ }
+ vip_num = vip_info->vip_num;
+ data_stats = bpf_map_lookup_elem(&stats, &vip_num);
+ if (!data_stats)
+ return TC_ACT_SHOT;
+ data_stats->pkts++;
+ data_stats->bytes += pkt_bytes;
+ bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), tun_flag);
+ *(u32 *)eth->eth_dest = tkey.remote_ipv4;
+ return bpf_redirect(ifindex, 0);
+}
+
+SEC("l4lb-demo")
+int balancer_ingress(struct __sk_buff *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ struct eth_hdr *eth = data;
+ __u32 eth_proto;
+ __u32 nh_off;
+
+ nh_off = sizeof(struct eth_hdr);
+ if (data + nh_off > data_end)
+ return TC_ACT_SHOT;
+ eth_proto = eth->eth_proto;
+ if (eth_proto == bpf_htons(ETH_P_IP))
+ return process_packet(data, nh_off, data_end, false, ctx);
+ else if (eth_proto == bpf_htons(ETH_P_IPV6))
+ return process_packet(data, nh_off, data_end, true, ctx);
+ else
+ return TC_ACT_SHOT;
+}
+char _license[] SEC("license") = "GPL";
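get_packet_dst() above resolves a backend in two map lookups: the flow hash picks a slot inside the per-VIP ring of ch_rings, and that slot's value indexes the reals map. A plain-C sketch of the same indexing, with ordinary arrays standing in for the BPF maps (array contents and the helper name are illustrative):

#define RING_SIZE 2
#define MAX_VIPS 12
#define MAX_REALS 5

static __u32 ch_rings[MAX_VIPS * RING_SIZE];	/* ring slot -> real index */
static struct real_definition reals[MAX_REALS];	/* backend definitions */

static struct real_definition *pick_real(__u32 vip_num, __u32 hash)
{
	/* each VIP owns RING_SIZE consecutive slots; the hash selects one */
	__u32 slot = RING_SIZE * vip_num + hash % RING_SIZE;

	return &reals[ch_rings[slot]];
}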
diff --git a/tools/testing/selftests/bpf/test_libbpf.sh b/tools/testing/selftests/bpf/test_libbpf.sh
new file mode 100755
index 000000000000..d97dc914cd49
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_libbpf.sh
@@ -0,0 +1,49 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+export TESTNAME=test_libbpf
+
+# Determine selftest success via shell exit code
+exit_handler()
+{
+ if (( $? == 0 )); then
+ echo "selftests: $TESTNAME [PASS]";
+ else
+ echo "$TESTNAME: failed at file $LAST_LOADED" 1>&2
+ echo "selftests: $TESTNAME [FAILED]";
+ fi
+}
+
+libbpf_open_file()
+{
+ LAST_LOADED=$1
+ if [ -n "$VERBOSE" ]; then
+ ./test_libbpf_open $1
+ else
+ ./test_libbpf_open --quiet $1
+ fi
+}
+
+# Exit the script immediately (caught by the trap handler) if any
+# command exits with a non-zero status.
+set -e
+
+# (Use 'trap -l' to list meaning of numbers)
+trap exit_handler 0 2 3 6 9
+
+libbpf_open_file test_l4lb.o
+
+# TODO: fix libbpf to load noinline functions
+# [warning] libbpf: incorrect bpf_call opcode
+#libbpf_open_file test_l4lb_noinline.o
+
+# TODO: fix test_xdp_meta.c to load with libbpf
+# [warning] libbpf: test_xdp_meta.o doesn't provide kernel version
+#libbpf_open_file test_xdp_meta.o
+
+# TODO: fix libbpf to handle .eh_frame
+# [warning] libbpf: relocation failed: no section(10)
+#libbpf_open_file ../../../../samples/bpf/tracex3_kern.o
+
+# Success
+exit 0
diff --git a/tools/testing/selftests/bpf/test_libbpf_open.c b/tools/testing/selftests/bpf/test_libbpf_open.c
new file mode 100644
index 000000000000..8fcd1c076add
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_libbpf_open.c
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018 Jesper Dangaard Brouer, Red Hat Inc.
+ */
+static const char *__doc__ =
+ "Libbpf test program for loading BPF ELF object files";
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdarg.h>
+#include <bpf/libbpf.h>
+#include <getopt.h>
+
+static const struct option long_options[] = {
+ {"help", no_argument, NULL, 'h' },
+ {"debug", no_argument, NULL, 'D' },
+ {"quiet", no_argument, NULL, 'q' },
+ {0, 0, NULL, 0 }
+};
+
+static void usage(char *argv[])
+{
+ int i;
+
+ printf("\nDOCUMENTATION:\n%s\n\n", __doc__);
+ printf(" Usage: %s (options-see-below) BPF_FILE\n", argv[0]);
+ printf(" Listing options:\n");
+ for (i = 0; long_options[i].name != 0; i++) {
+ printf(" --%-12s", long_options[i].name);
+ printf(" short-option: -%c",
+ long_options[i].val);
+ printf("\n");
+ }
+ printf("\n");
+}
+
+#define DEFINE_PRINT_FN(name, enabled) \
+static int libbpf_##name(const char *fmt, ...) \
+{ \
+ va_list args; \
+ int ret; \
+ \
+ va_start(args, fmt); \
+ if (enabled) { \
+ fprintf(stderr, "[" #name "] "); \
+ ret = vfprintf(stderr, fmt, args); \
+ } \
+ va_end(args); \
+ return ret; \
+}
+DEFINE_PRINT_FN(warning, 1)
+DEFINE_PRINT_FN(info, 1)
+DEFINE_PRINT_FN(debug, 1)
+
+#define EXIT_FAIL_LIBBPF EXIT_FAILURE
+#define EXIT_FAIL_OPTION 2
+
+int test_walk_progs(struct bpf_object *obj, bool verbose)
+{
+ struct bpf_program *prog;
+ int cnt = 0;
+
+ bpf_object__for_each_program(prog, obj) {
+ cnt++;
+ if (verbose)
+ printf("Prog (count:%d) section_name: %s\n", cnt,
+ bpf_program__title(prog, false));
+ }
+ return 0;
+}
+
+int test_walk_maps(struct bpf_object *obj, bool verbose)
+{
+ struct bpf_map *map;
+ int cnt = 0;
+
+ bpf_map__for_each(map, obj) {
+ cnt++;
+ if (verbose)
+ printf("Map (count:%d) name: %s\n", cnt,
+ bpf_map__name(map));
+ }
+ return 0;
+}
+
+int test_open_file(char *filename, bool verbose)
+{
+ struct bpf_object *bpfobj = NULL;
+ long err;
+
+ if (verbose)
+ printf("Open BPF ELF-file with libbpf: %s\n", filename);
+
+ /* Load BPF ELF object file and check for errors */
+ bpfobj = bpf_object__open(filename);
+ err = libbpf_get_error(bpfobj);
+ if (err) {
+ char err_buf[128];
+ libbpf_strerror(err, err_buf, sizeof(err_buf));
+ if (verbose)
+ printf("Unable to load eBPF objects in file '%s': %s\n",
+ filename, err_buf);
+ return EXIT_FAIL_LIBBPF;
+ }
+ test_walk_progs(bpfobj, verbose);
+ test_walk_maps(bpfobj, verbose);
+
+ if (verbose)
+ printf("Close BPF ELF-file with libbpf: %s\n",
+ bpf_object__name(bpfobj));
+ bpf_object__close(bpfobj);
+
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ char filename[1024] = { 0 };
+ bool verbose = 1;
+ int longindex = 0;
+ int opt;
+
+ libbpf_set_print(libbpf_warning, libbpf_info, NULL);
+
+ /* Parse commands line args */
+ while ((opt = getopt_long(argc, argv, "hDq",
+ long_options, &longindex)) != -1) {
+ switch (opt) {
+ case 'D':
+ libbpf_set_print(libbpf_warning, libbpf_info,
+ libbpf_debug);
+ break;
+ case 'q': /* Use in scripting mode */
+ verbose = 0;
+ break;
+ case 'h':
+ default:
+ usage(argv);
+ return EXIT_FAIL_OPTION;
+ }
+ }
+ if (optind >= argc) {
+ usage(argv);
+ printf("ERROR: Expected BPF_FILE argument after options\n");
+ return EXIT_FAIL_OPTION;
+ }
+ snprintf(filename, sizeof(filename), "%s", argv[optind]);
+
+ return test_open_file(filename, verbose);
+}
diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c
index f61480641b6e..2be87e9ee28d 100644
--- a/tools/testing/selftests/bpf/test_lpm_map.c
+++ b/tools/testing/selftests/bpf/test_lpm_map.c
@@ -14,6 +14,7 @@
#include <errno.h>
#include <inttypes.h>
#include <linux/bpf.h>
+#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -521,6 +522,218 @@ static void test_lpm_delete(void)
close(map_fd);
}
+static void test_lpm_get_next_key(void)
+{
+ struct bpf_lpm_trie_key *key_p, *next_key_p;
+ size_t key_size;
+ __u32 value = 0;
+ int map_fd;
+
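+	/* Key layout: struct bpf_lpm_trie_key header followed by four bytes
+	 * of IPv4 address data.
+	 */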
+ key_size = sizeof(*key_p) + sizeof(__u32);
+ key_p = alloca(key_size);
+ next_key_p = alloca(key_size);
+
+ map_fd = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE, key_size, sizeof(value),
+ 100, BPF_F_NO_PREALLOC);
+ assert(map_fd >= 0);
+
+ /* empty tree. get_next_key should return ENOENT */
+ assert(bpf_map_get_next_key(map_fd, NULL, key_p) == -1 &&
+ errno == ENOENT);
+
+	/* get and verify the first key; getting the second one should fail. */
+ key_p->prefixlen = 16;
+ inet_pton(AF_INET, "192.168.0.0", key_p->data);
+ assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0);
+
+ memset(key_p, 0, key_size);
+ assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0);
+ assert(key_p->prefixlen == 16 && key_p->data[0] == 192 &&
+ key_p->data[1] == 168);
+
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
+ errno == ENOENT);
+
+	/* no exact match; get_next_key should return the first key in post order. */
+ key_p->prefixlen = 8;
+ assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0);
+ assert(key_p->prefixlen == 16 && key_p->data[0] == 192 &&
+ key_p->data[1] == 168);
+
+ /* add one more element (total two) */
+ key_p->prefixlen = 24;
+ inet_pton(AF_INET, "192.168.0.0", key_p->data);
+ assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0);
+
+ memset(key_p, 0, key_size);
+ assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0);
+ assert(key_p->prefixlen == 24 && key_p->data[0] == 192 &&
+ key_p->data[1] == 168 && key_p->data[2] == 0);
+
+ memset(next_key_p, 0, key_size);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+ assert(next_key_p->prefixlen == 16 && next_key_p->data[0] == 192 &&
+ next_key_p->data[1] == 168);
+
+ memcpy(key_p, next_key_p, key_size);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
+ errno == ENOENT);
+
+ /* Add one more element (total three) */
+ key_p->prefixlen = 24;
+ inet_pton(AF_INET, "192.168.128.0", key_p->data);
+ assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0);
+
+ memset(key_p, 0, key_size);
+ assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0);
+ assert(key_p->prefixlen == 24 && key_p->data[0] == 192 &&
+ key_p->data[1] == 168 && key_p->data[2] == 0);
+
+ memset(next_key_p, 0, key_size);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+ assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 &&
+ next_key_p->data[1] == 168 && next_key_p->data[2] == 128);
+
+ memcpy(key_p, next_key_p, key_size);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+ assert(next_key_p->prefixlen == 16 && next_key_p->data[0] == 192 &&
+ next_key_p->data[1] == 168);
+
+ memcpy(key_p, next_key_p, key_size);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
+ errno == ENOENT);
+
+ /* Add one more element (total four) */
+ key_p->prefixlen = 24;
+ inet_pton(AF_INET, "192.168.1.0", key_p->data);
+ assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0);
+
+ memset(key_p, 0, key_size);
+ assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0);
+ assert(key_p->prefixlen == 24 && key_p->data[0] == 192 &&
+ key_p->data[1] == 168 && key_p->data[2] == 0);
+
+ memset(next_key_p, 0, key_size);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+ assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 &&
+ next_key_p->data[1] == 168 && next_key_p->data[2] == 1);
+
+ memcpy(key_p, next_key_p, key_size);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+ assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 &&
+ next_key_p->data[1] == 168 && next_key_p->data[2] == 128);
+
+ memcpy(key_p, next_key_p, key_size);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+ assert(next_key_p->prefixlen == 16 && next_key_p->data[0] == 192 &&
+ next_key_p->data[1] == 168);
+
+ memcpy(key_p, next_key_p, key_size);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
+ errno == ENOENT);
+
+	/* no exact match; get_next_key should return the first key in post order */
+ key_p->prefixlen = 22;
+ inet_pton(AF_INET, "192.168.1.0", key_p->data);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+ assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 &&
+ next_key_p->data[1] == 168 && next_key_p->data[2] == 0);
+
+ close(map_fd);
+}
+
+#define MAX_TEST_KEYS 4
+struct lpm_mt_test_info {
+ int cmd; /* 0: update, 1: delete, 2: lookup, 3: get_next_key */
+ int iter;
+ int map_fd;
+ struct {
+ __u32 prefixlen;
+ __u32 data;
+ } key[MAX_TEST_KEYS];
+};
+
+static void *lpm_test_command(void *arg)
+{
+ int i, j, ret, iter, key_size;
+ struct lpm_mt_test_info *info = arg;
+ struct bpf_lpm_trie_key *key_p;
+
+ key_size = sizeof(struct bpf_lpm_trie_key) + sizeof(__u32);
+ key_p = alloca(key_size);
+ for (iter = 0; iter < info->iter; iter++)
+ for (i = 0; i < MAX_TEST_KEYS; i++) {
+ /* first half of iterations in forward order,
+ * and second half in backward order.
+ */
+ j = (iter < (info->iter / 2)) ? i : MAX_TEST_KEYS - i - 1;
+ key_p->prefixlen = info->key[j].prefixlen;
+ memcpy(key_p->data, &info->key[j].data, sizeof(__u32));
+ if (info->cmd == 0) {
+ __u32 value = j;
+ /* update must succeed */
+ assert(bpf_map_update_elem(info->map_fd, key_p, &value, 0) == 0);
+ } else if (info->cmd == 1) {
+ ret = bpf_map_delete_elem(info->map_fd, key_p);
+ assert(ret == 0 || errno == ENOENT);
+ } else if (info->cmd == 2) {
+ __u32 value;
+ ret = bpf_map_lookup_elem(info->map_fd, key_p, &value);
+ assert(ret == 0 || errno == ENOENT);
+ } else {
+ struct bpf_lpm_trie_key *next_key_p = alloca(key_size);
+ ret = bpf_map_get_next_key(info->map_fd, key_p, next_key_p);
+ assert(ret == 0 || errno == ENOENT || errno == ENOMEM);
+ }
+ }
+
+	/* Pass successful exit info back to the main thread */
+ pthread_exit((void *)info);
+}
+
+static void setup_lpm_mt_test_info(struct lpm_mt_test_info *info, int map_fd)
+{
+ info->iter = 2000;
+ info->map_fd = map_fd;
+ info->key[0].prefixlen = 16;
+ inet_pton(AF_INET, "192.168.0.0", &info->key[0].data);
+ info->key[1].prefixlen = 24;
+ inet_pton(AF_INET, "192.168.0.0", &info->key[1].data);
+ info->key[2].prefixlen = 24;
+ inet_pton(AF_INET, "192.168.128.0", &info->key[2].data);
+ info->key[3].prefixlen = 24;
+ inet_pton(AF_INET, "192.168.1.0", &info->key[3].data);
+}
+
+static void test_lpm_multi_thread(void)
+{
+ struct lpm_mt_test_info info[4];
+ size_t key_size, value_size;
+ pthread_t thread_id[4];
+ int i, map_fd;
+ void *ret;
+
+ /* create a trie */
+ value_size = sizeof(__u32);
+ key_size = sizeof(struct bpf_lpm_trie_key) + value_size;
+ map_fd = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE, key_size, value_size,
+ 100, BPF_F_NO_PREALLOC);
+
+ /* create 4 threads to test update, delete, lookup and get_next_key */
+ setup_lpm_mt_test_info(&info[0], map_fd);
+ for (i = 0; i < 4; i++) {
+ if (i != 0)
+ memcpy(&info[i], &info[0], sizeof(info[i]));
+ info[i].cmd = i;
+ assert(pthread_create(&thread_id[i], NULL, &lpm_test_command, &info[i]) == 0);
+ }
+
+ for (i = 0; i < 4; i++)
+ assert(pthread_join(thread_id[i], &ret) == 0 && ret == (void *)&info[i]);
+
+ close(map_fd);
+}
+
int main(void)
{
struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY };
@@ -545,6 +758,10 @@ int main(void)
test_lpm_delete();
+ test_lpm_get_next_key();
+
+ test_lpm_multi_thread();
+
printf("test_lpm: OK\n");
return 0;
}
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 040356ecc862..436c4c72414f 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -242,7 +242,7 @@ static void test_hashmap_percpu(int task, void *data)
static void test_hashmap_walk(int task, void *data)
{
- int fd, i, max_entries = 100000;
+ int fd, i, max_entries = 1000;
long long key, value, next_key;
bool next_key_valid = true;
@@ -463,7 +463,7 @@ static void test_devmap(int task, void *data)
#define SOCKMAP_VERDICT_PROG "./sockmap_verdict_prog.o"
static void test_sockmap(int tasks, void *data)
{
- int one = 1, map_fd_rx, map_fd_tx, map_fd_break, s, sc, rc;
+ int one = 1, map_fd_rx = 0, map_fd_tx = 0, map_fd_break, s, sc, rc;
struct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_break;
int ports[] = {50200, 50201, 50202, 50204};
int err, i, fd, udp, sfd[6] = {0xdeadbeef};
@@ -868,9 +868,12 @@ static void test_sockmap(int tasks, void *data)
goto out_sockmap;
}
- /* Test map close sockets */
- for (i = 0; i < 6; i++)
+ /* Test map close sockets and empty maps */
+ for (i = 0; i < 6; i++) {
+ bpf_map_delete_elem(map_fd_tx, &i);
+ bpf_map_delete_elem(map_fd_rx, &i);
close(sfd[i]);
+ }
close(fd);
close(map_fd_rx);
bpf_object__close(obj);
@@ -881,8 +884,13 @@ out:
printf("Failed to create sockmap '%i:%s'!\n", i, strerror(errno));
exit(1);
out_sockmap:
- for (i = 0; i < 6; i++)
+ for (i = 0; i < 6; i++) {
+ if (map_fd_tx)
+ bpf_map_delete_elem(map_fd_tx, &i);
+ if (map_fd_rx)
+ bpf_map_delete_elem(map_fd_rx, &i);
close(sfd[i]);
+ }
close(fd);
exit(1);
}
@@ -931,8 +939,12 @@ static void test_map_large(void)
close(fd);
}
-static void run_parallel(int tasks, void (*fn)(int task, void *data),
- void *data)
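+/* Print which test function is being forked before running it in parallel. */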
+#define run_parallel(N, FN, DATA) \
+ printf("Fork %d tasks to '" #FN "'\n", N); \
+ __run_parallel(N, FN, DATA)
+
+static void __run_parallel(int tasks, void (*fn)(int task, void *data),
+ void *data)
{
pid_t pid[tasks];
int i;
@@ -972,7 +984,7 @@ static void test_map_stress(void)
#define DO_UPDATE 1
#define DO_DELETE 0
-static void do_work(int fn, void *data)
+static void test_update_delete(int fn, void *data)
{
int do_update = ((int *)data)[1];
int fd = ((int *)data)[0];
@@ -1012,7 +1024,7 @@ static void test_map_parallel(void)
*/
data[0] = fd;
data[1] = DO_UPDATE;
- run_parallel(TASKS, do_work, data);
+ run_parallel(TASKS, test_update_delete, data);
/* Check that key=0 is already there. */
assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
@@ -1035,7 +1047,7 @@ static void test_map_parallel(void)
/* Now let's delete all elemenets in parallel. */
data[1] = DO_DELETE;
- run_parallel(TASKS, do_work, data);
+ run_parallel(TASKS, test_update_delete, data);
/* Nothing should be left. */
key = -1;
diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py
new file mode 100755
index 000000000000..e78aad0a68bb
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_offload.py
@@ -0,0 +1,1085 @@
+#!/usr/bin/python3
+
+# Copyright (C) 2017 Netronome Systems, Inc.
+#
+# This software is licensed under the GNU General Public License Version 2,
+# June 1991 as shown in the file COPYING in the top-level directory of this
+# source tree.
+#
+# THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
+# WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+# OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
+# THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+from datetime import datetime
+import argparse
+import json
+import os
+import pprint
+import random
+import string
+import struct
+import subprocess
+import time
+
+logfile = None
+log_level = 1
+skip_extack = False
+bpf_test_dir = os.path.dirname(os.path.realpath(__file__))
+pp = pprint.PrettyPrinter()
+devs = [] # devices we created for clean up
+files = [] # files to be removed
+netns = [] # net namespaces to be removed
+
+def log_get_sec(level=0):
+ return "*" * (log_level + level)
+
+def log_level_inc(add=1):
+ global log_level
+ log_level += add
+
+def log_level_dec(sub=1):
+ global log_level
+ log_level -= sub
+
+def log_level_set(level):
+ global log_level
+ log_level = level
+
+def log(header, data, level=None):
+ """
+ Output to an optional log.
+ """
+ if logfile is None:
+ return
+ if level is not None:
+ log_level_set(level)
+
+ if not isinstance(data, str):
+ data = pp.pformat(data)
+
+ if len(header):
+ logfile.write("\n" + log_get_sec() + " ")
+ logfile.write(header)
+ if len(header) and len(data.strip()):
+ logfile.write("\n")
+ logfile.write(data)
+
+def skip(cond, msg):
+ if not cond:
+ return
+ print("SKIP: " + msg)
+ log("SKIP: " + msg, "", level=1)
+ os.sys.exit(0)
+
+def fail(cond, msg):
+ if not cond:
+ return
+ print("FAIL: " + msg)
+ log("FAIL: " + msg, "", level=1)
+ os.sys.exit(1)
+
+def start_test(msg):
+ log(msg, "", level=1)
+ log_level_inc()
+ print(msg)
+
+def cmd(cmd, shell=True, include_stderr=False, background=False, fail=True):
+ """
+ Run a command in subprocess and return tuple of (retval, stdout);
+ optionally return stderr as well as third value.
+ """
+ proc = subprocess.Popen(cmd, shell=shell, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ if background:
+ msg = "%s START: %s" % (log_get_sec(1),
+ datetime.now().strftime("%H:%M:%S.%f"))
+ log("BKG " + proc.args, msg)
+ return proc
+
+ return cmd_result(proc, include_stderr=include_stderr, fail=fail)
+
+def cmd_result(proc, include_stderr=False, fail=False):
+ stdout, stderr = proc.communicate()
+ stdout = stdout.decode("utf-8")
+ stderr = stderr.decode("utf-8")
+ proc.stdout.close()
+ proc.stderr.close()
+
+ stderr = "\n" + stderr
+ if stderr[-1] == "\n":
+ stderr = stderr[:-1]
+
+ sec = log_get_sec(1)
+ log("CMD " + proc.args,
+ "RETCODE: %d\n%s STDOUT:\n%s%s STDERR:%s\n%s END: %s" %
+ (proc.returncode, sec, stdout, sec, stderr,
+ sec, datetime.now().strftime("%H:%M:%S.%f")))
+
+ if proc.returncode != 0 and fail:
+ if len(stderr) > 0 and stderr[-1] == "\n":
+ stderr = stderr[:-1]
+ raise Exception("Command failed: %s\n%s" % (proc.args, stderr))
+
+ if include_stderr:
+ return proc.returncode, stdout, stderr
+ else:
+ return proc.returncode, stdout
+
+def rm(f):
+ cmd("rm -f %s" % (f))
+ if f in files:
+ files.remove(f)
+
+def tool(name, args, flags, JSON=True, ns="", fail=True, include_stderr=False):
+ params = ""
+ if JSON:
+ params += "%s " % (flags["json"])
+
+ if ns != "":
+ ns = "ip netns exec %s " % (ns)
+
+ if include_stderr:
+ ret, stdout, stderr = cmd(ns + name + " " + params + args,
+ fail=fail, include_stderr=True)
+ else:
+ ret, stdout = cmd(ns + name + " " + params + args,
+ fail=fail, include_stderr=False)
+
+ if JSON and len(stdout.strip()) != 0:
+ out = json.loads(stdout)
+ else:
+ out = stdout
+
+ if include_stderr:
+ return ret, out, stderr
+ else:
+ return ret, out
+
+def bpftool(args, JSON=True, ns="", fail=True):
+ return tool("bpftool", args, {"json":"-p"}, JSON=JSON, ns=ns, fail=fail)
+
+def bpftool_prog_list(expected=None, ns=""):
+ _, progs = bpftool("prog show", JSON=True, ns=ns, fail=True)
+ if expected is not None:
+ if len(progs) != expected:
+ fail(True, "%d BPF programs loaded, expected %d" %
+ (len(progs), expected))
+ return progs
+
+def bpftool_map_list(expected=None, ns=""):
+ _, maps = bpftool("map show", JSON=True, ns=ns, fail=True)
+ if expected is not None:
+ if len(maps) != expected:
+ fail(True, "%d BPF maps loaded, expected %d" %
+ (len(maps), expected))
+ return maps
+
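+# Poll until the number of loaded programs settles at the expected value;
+# unloading can lag behind detaching or device removal.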
+def bpftool_prog_list_wait(expected=0, n_retry=20):
+ for i in range(n_retry):
+ nprogs = len(bpftool_prog_list())
+ if nprogs == expected:
+ return
+ time.sleep(0.05)
+    raise Exception("Timed out waiting for program count to stabilize: want %d, have %d" % (expected, nprogs))
+
+def bpftool_map_list_wait(expected=0, n_retry=20):
+ for i in range(n_retry):
+ nmaps = len(bpftool_map_list())
+ if nmaps == expected:
+ return
+ time.sleep(0.05)
+    raise Exception("Timed out waiting for map count to stabilize: want %d, have %d" % (expected, nmaps))
+
+def ip(args, force=False, JSON=True, ns="", fail=True, include_stderr=False):
+ if force:
+ args = "-force " + args
+ return tool("ip", args, {"json":"-j"}, JSON=JSON, ns=ns,
+ fail=fail, include_stderr=include_stderr)
+
+def tc(args, JSON=True, ns="", fail=True, include_stderr=False):
+ return tool("tc", args, {"json":"-p"}, JSON=JSON, ns=ns,
+ fail=fail, include_stderr=include_stderr)
+
+def ethtool(dev, opt, args, fail=True):
+ return cmd("ethtool %s %s %s" % (opt, dev["ifname"], args), fail=fail)
+
+def bpf_obj(name, sec=".text", path=bpf_test_dir):
+ return "obj %s sec %s" % (os.path.join(path, name), sec)
+
+def bpf_pinned(name):
+ return "pinned %s" % (name)
+
+def bpf_bytecode(bytecode):
+ return "bytecode \"%s\"" % (bytecode)
+
+def mknetns(n_retry=10):
+ for i in range(n_retry):
+ name = ''.join([random.choice(string.ascii_letters) for i in range(8)])
+ ret, _ = ip("netns add %s" % (name), fail=False)
+ if ret == 0:
+ netns.append(name)
+ return name
+ return None
+
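+# Render an integer as the space-separated decimal byte list bpftool expects
+# for map keys/values (e.g. int2str("I", 1) -> "1 0 0 0" on little endian).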
+def int2str(fmt, val):
+ ret = []
+ for b in struct.pack(fmt, val):
+ ret.append(int(b))
+ return " ".join(map(lambda x: str(x), ret))
+
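+# Inverse of int2str(): turn bpftool's list of hex byte strings back into an
+# integer (4- and 8-byte values only).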
+def str2int(strtab):
+ inttab = []
+ for i in strtab:
+ inttab.append(int(i, 16))
+ ba = bytearray(inttab)
+ if len(strtab) == 4:
+ fmt = "I"
+ elif len(strtab) == 8:
+ fmt = "Q"
+ else:
+ raise Exception("String array of len %d can't be unpacked to an int" %
+ (len(strtab)))
+ return struct.unpack(fmt, ba)[0]
+
+class DebugfsDir:
+ """
+ Class for accessing DebugFS directories as a dictionary.
+ """
+
+ def __init__(self, path):
+ self.path = path
+ self._dict = self._debugfs_dir_read(path)
+
+ def __len__(self):
+ return len(self._dict.keys())
+
+ def __getitem__(self, key):
+ if type(key) is int:
+ key = list(self._dict.keys())[key]
+ return self._dict[key]
+
+ def __setitem__(self, key, value):
+ log("DebugFS set %s = %s" % (key, value), "")
+ log_level_inc()
+
+ cmd("echo '%s' > %s/%s" % (value, self.path, key))
+ log_level_dec()
+
+ _, out = cmd('cat %s/%s' % (self.path, key))
+ self._dict[key] = out.strip()
+
+ def _debugfs_dir_read(self, path):
+ dfs = {}
+
+ log("DebugFS state for %s" % (path), "")
+ log_level_inc(add=2)
+
+ _, out = cmd('ls ' + path)
+ for f in out.split():
+ p = os.path.join(path, f)
+ if os.path.isfile(p):
+ _, out = cmd('cat %s/%s' % (path, f))
+ dfs[f] = out.strip()
+ elif os.path.isdir(p):
+ dfs[f] = DebugfsDir(p)
+ else:
+ raise Exception("%s is neither file nor directory" % (p))
+
+ log_level_dec()
+ log("DebugFS state", dfs)
+ log_level_dec()
+
+ return dfs
+
+class NetdevSim:
+ """
+ Class for netdevsim netdevice and its attributes.
+ """
+
+ def __init__(self):
+ self.dev = self._netdevsim_create()
+ devs.append(self)
+
+ self.ns = ""
+
+ self.dfs_dir = '/sys/kernel/debug/netdevsim/%s' % (self.dev['ifname'])
+ self.dfs_refresh()
+
+ def __getitem__(self, key):
+ return self.dev[key]
+
+ def _netdevsim_create(self):
+ _, old = ip("link show")
+ ip("link add sim%d type netdevsim")
+ _, new = ip("link show")
+
+ for dev in new:
+ f = filter(lambda x: x["ifname"] == dev["ifname"], old)
+ if len(list(f)) == 0:
+ return dev
+
+ raise Exception("failed to create netdevsim device")
+
+ def remove(self):
+ devs.remove(self)
+ ip("link del dev %s" % (self.dev["ifname"]), ns=self.ns)
+
+ def dfs_refresh(self):
+ self.dfs = DebugfsDir(self.dfs_dir)
+ return self.dfs
+
+ def dfs_num_bound_progs(self):
+ path = os.path.join(self.dfs_dir, "bpf_bound_progs")
+ _, progs = cmd('ls %s' % (path))
+ return len(progs.split())
+
+ def dfs_get_bound_progs(self, expected):
+ progs = DebugfsDir(os.path.join(self.dfs_dir, "bpf_bound_progs"))
+ if expected is not None:
+ if len(progs) != expected:
+ fail(True, "%d BPF programs bound, expected %d" %
+ (len(progs), expected))
+ return progs
+
+ def wait_for_flush(self, bound=0, total=0, n_retry=20):
+ for i in range(n_retry):
+ nbound = self.dfs_num_bound_progs()
+ nprogs = len(bpftool_prog_list())
+ if nbound == bound and nprogs == total:
+ return
+ time.sleep(0.05)
+        raise Exception("Timed out waiting for program counts to stabilize: want %d/%d, have %d bound, %d loaded" % (bound, total, nbound, nprogs))
+
+ def set_ns(self, ns):
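+        # ip(8) also accepts a PID here; "1" (init) selects the initial netns.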
+ name = "1" if ns == "" else ns
+ ip("link set dev %s netns %s" % (self.dev["ifname"], name), ns=self.ns)
+ self.ns = ns
+
+ def set_mtu(self, mtu, fail=True):
+ return ip("link set dev %s mtu %d" % (self.dev["ifname"], mtu),
+ fail=fail)
+
+ def set_xdp(self, bpf, mode, force=False, JSON=True, verbose=False,
+ fail=True, include_stderr=False):
+ if verbose:
+ bpf += " verbose"
+ return ip("link set dev %s xdp%s %s" % (self.dev["ifname"], mode, bpf),
+ force=force, JSON=JSON,
+ fail=fail, include_stderr=include_stderr)
+
+ def unset_xdp(self, mode, force=False, JSON=True,
+ fail=True, include_stderr=False):
+ return ip("link set dev %s xdp%s off" % (self.dev["ifname"], mode),
+ force=force, JSON=JSON,
+ fail=fail, include_stderr=include_stderr)
+
+ def ip_link_show(self, xdp):
+ _, link = ip("link show dev %s" % (self['ifname']))
+ if len(link) > 1:
+ raise Exception("Multiple objects on ip link show")
+ if len(link) < 1:
+ return {}
+        fail(xdp != ("xdp" in link[0]),
+             "XDP program not reporting in iplink (reported %s, expected %s)" %
+             ("xdp" in link[0], xdp))
+ return link[0]
+
+ def tc_add_ingress(self):
+ tc("qdisc add dev %s ingress" % (self['ifname']))
+
+ def tc_del_ingress(self):
+ tc("qdisc del dev %s ingress" % (self['ifname']))
+
+ def tc_flush_filters(self, bound=0, total=0):
+ self.tc_del_ingress()
+ self.tc_add_ingress()
+ self.wait_for_flush(bound=bound, total=total)
+
+ def tc_show_ingress(self, expected=None):
+ # No JSON support, oh well...
+ flags = ["skip_sw", "skip_hw", "in_hw"]
+ named = ["protocol", "pref", "chain", "handle", "id", "tag"]
+
+ args = "-s filter show dev %s ingress" % (self['ifname'])
+ _, out = tc(args, JSON=False)
+
+ filters = []
+ lines = out.split('\n')
+ for line in lines:
+ words = line.split()
+ if "handle" not in words:
+ continue
+ fltr = {}
+ for flag in flags:
+ fltr[flag] = flag in words
+ for name in named:
+ try:
+ idx = words.index(name)
+ fltr[name] = words[idx + 1]
+ except ValueError:
+ pass
+ filters.append(fltr)
+
+ if expected is not None:
+ fail(len(filters) != expected,
+ "%d ingress filters loaded, expected %d" %
+ (len(filters), expected))
+ return filters
+
+ def cls_filter_op(self, op, qdisc="ingress", prio=None, handle=None,
+ chain=None, cls="", params="",
+ fail=True, include_stderr=False):
+ spec = ""
+ if prio is not None:
+ spec += " prio %d" % (prio)
+ if handle:
+ spec += " handle %s" % (handle)
+ if chain is not None:
+ spec += " chain %d" % (chain)
+
+ return tc("filter {op} dev {dev} {qdisc} {spec} {cls} {params}"\
+ .format(op=op, dev=self['ifname'], qdisc=qdisc, spec=spec,
+ cls=cls, params=params),
+ fail=fail, include_stderr=include_stderr)
+
+ def cls_bpf_add_filter(self, bpf, op="add", prio=None, handle=None,
+ chain=None, da=False, verbose=False,
+ skip_sw=False, skip_hw=False,
+ fail=True, include_stderr=False):
+ cls = "bpf " + bpf
+
+ params = ""
+ if da:
+ params += " da"
+ if verbose:
+ params += " verbose"
+ if skip_sw:
+ params += " skip_sw"
+ if skip_hw:
+ params += " skip_hw"
+
+ return self.cls_filter_op(op=op, prio=prio, handle=handle, cls=cls,
+ chain=chain, params=params,
+ fail=fail, include_stderr=include_stderr)
+
+ def set_ethtool_tc_offloads(self, enable, fail=True):
+ args = "hw-tc-offload %s" % ("on" if enable else "off")
+ return ethtool(self, "-K", args, fail=fail)
+
+################################################################################
+def clean_up():
+ global files, netns, devs
+
+ for dev in devs:
+ dev.remove()
+ for f in files:
+ cmd("rm -f %s" % (f))
+ for ns in netns:
+ cmd("ip netns delete %s" % (ns))
+ files = []
+ netns = []
+
+def pin_prog(file_name, idx=0):
+ progs = bpftool_prog_list(expected=(idx + 1))
+ prog = progs[idx]
+ bpftool("prog pin id %d %s" % (prog["id"], file_name))
+ files.append(file_name)
+
+ return file_name, bpf_pinned(file_name)
+
+def pin_map(file_name, idx=0, expected=1):
+ maps = bpftool_map_list(expected=expected)
+ m = maps[idx]
+ bpftool("map pin id %d %s" % (m["id"], file_name))
+ files.append(file_name)
+
+ return file_name, bpf_pinned(file_name)
+
+def check_dev_info_removed(prog_file=None, map_file=None):
+ bpftool_prog_list(expected=0)
+ ret, err = bpftool("prog show pin %s" % (prog_file), fail=False)
+ fail(ret == 0, "Showing prog with removed device did not fail")
+ fail(err["error"].find("No such device") == -1,
+ "Showing prog with removed device expected ENODEV, error is %s" %
+ (err["error"]))
+
+ bpftool_map_list(expected=0)
+ ret, err = bpftool("map show pin %s" % (map_file), fail=False)
+ fail(ret == 0, "Showing map with removed device did not fail")
+ fail(err["error"].find("No such device") == -1,
+ "Showing map with removed device expected ENODEV, error is %s" %
+ (err["error"]))
+
+def check_dev_info(other_ns, ns, prog_file=None, map_file=None, removed=False):
+ progs = bpftool_prog_list(expected=1, ns=ns)
+ prog = progs[0]
+
+ fail("dev" not in prog.keys(), "Device parameters not reported")
+ dev = prog["dev"]
+ fail("ifindex" not in dev.keys(), "Device parameters not reported")
+ fail("ns_dev" not in dev.keys(), "Device parameters not reported")
+ fail("ns_inode" not in dev.keys(), "Device parameters not reported")
+
+ if not other_ns:
+ fail("ifname" not in dev.keys(), "Ifname not reported")
+ fail(dev["ifname"] != sim["ifname"],
+ "Ifname incorrect %s vs %s" % (dev["ifname"], sim["ifname"]))
+ else:
+ fail("ifname" in dev.keys(), "Ifname is reported for other ns")
+
+ maps = bpftool_map_list(expected=2, ns=ns)
+ for m in maps:
+ fail("dev" not in m.keys(), "Device parameters not reported")
+ fail(dev != m["dev"], "Map's device different than program's")
+
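+# Verify the kernel's extack error message: it should appear verbatim on the
+# second line of the command's stderr (skipped when iproute2 lacks libmnl).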
+def check_extack(output, reference, args):
+ if skip_extack:
+ return
+ lines = output.split("\n")
+ comp = len(lines) >= 2 and lines[1] == reference
+ fail(not comp, "Missing or incorrect netlink extack message")
+
+def check_extack_nsim(output, reference, args):
+ check_extack(output, "Error: netdevsim: " + reference, args)
+
+def check_no_extack(res, needle):
+ fail((res[1] + res[2]).count(needle) or (res[1] + res[2]).count("Warning:"),
+ "Found '%s' in command output, leaky extack?" % (needle))
+
+def check_verifier_log(output, reference):
+ lines = output.split("\n")
+ for l in reversed(lines):
+ if l == reference:
+ return
+ fail(True, "Missing or incorrect message from netdevsim in verifier log")
+
+def test_spurious_extack(sim, obj, skip_hw, needle):
+ res = sim.cls_bpf_add_filter(obj, prio=1, handle=1, skip_hw=skip_hw,
+ include_stderr=True)
+ check_no_extack(res, needle)
+ res = sim.cls_bpf_add_filter(obj, op="replace", prio=1, handle=1,
+ skip_hw=skip_hw, include_stderr=True)
+ check_no_extack(res, needle)
+ res = sim.cls_filter_op(op="delete", prio=1, handle=1, cls="bpf",
+ include_stderr=True)
+ check_no_extack(res, needle)
+
+
+# Parse command line
+parser = argparse.ArgumentParser()
+parser.add_argument("--log", help="output verbose log to given file")
+args = parser.parse_args()
+if args.log:
+ logfile = open(args.log, 'w+')
+ logfile.write("# -*-Org-*-")
+
+log("Prepare...", "", level=1)
+log_level_inc()
+
+# Check permissions
+skip(os.getuid() != 0, "test must be run as root")
+
+# Check tools
+ret, progs = bpftool("prog", fail=False)
+skip(ret != 0, "bpftool not installed")
+# Check no BPF programs are loaded
+skip(len(progs) != 0, "BPF programs already loaded on the system")
+
+# Check netdevsim
+ret, out = cmd("modprobe netdevsim", fail=False)
+skip(ret != 0, "netdevsim module could not be loaded")
+
+# Check debugfs
+_, out = cmd("mount")
+if out.find("/sys/kernel/debug type debugfs") == -1:
+ cmd("mount -t debugfs none /sys/kernel/debug")
+
+# Check samples are compiled
+samples = ["sample_ret0.o", "sample_map_ret0.o"]
+for s in samples:
+ ret, out = cmd("ls %s/%s" % (bpf_test_dir, s), fail=False)
+ skip(ret != 0, "sample %s/%s not found, please compile it" %
+ (bpf_test_dir, s))
+
+# Check if iproute2 is built with libmnl (needed by extack support)
+_, _, err = cmd("tc qdisc delete dev lo handle 0",
+ fail=False, include_stderr=True)
+if err.find("Error: Failed to find qdisc with specified handle.") == -1:
+ print("Warning: no extack message in iproute2 output, libmnl missing?")
+ log("Warning: no extack message in iproute2 output, libmnl missing?", "")
+ skip_extack = True
+
+# Check if net namespaces seem to work
+ns = mknetns()
+skip(ns is None, "Could not create a net namespace")
+cmd("ip netns delete %s" % (ns))
+netns = []
+
+try:
+ obj = bpf_obj("sample_ret0.o")
+ bytecode = bpf_bytecode("1,6 0 0 4294967295,")
+
+ start_test("Test destruction of generic XDP...")
+ sim = NetdevSim()
+ sim.set_xdp(obj, "generic")
+ sim.remove()
+ bpftool_prog_list_wait(expected=0)
+
+ sim = NetdevSim()
+ sim.tc_add_ingress()
+
+ start_test("Test TC non-offloaded...")
+ ret, _ = sim.cls_bpf_add_filter(obj, skip_hw=True, fail=False)
+ fail(ret != 0, "Software TC filter did not load")
+
+ start_test("Test TC non-offloaded isn't getting bound...")
+ ret, _ = sim.cls_bpf_add_filter(obj, fail=False)
+ fail(ret != 0, "Software TC filter did not load")
+ sim.dfs_get_bound_progs(expected=0)
+
+ sim.tc_flush_filters()
+
+ start_test("Test TC offloads are off by default...")
+ ret, _, err = sim.cls_bpf_add_filter(obj, skip_sw=True,
+ fail=False, include_stderr=True)
+ fail(ret == 0, "TC filter loaded without enabling TC offloads")
+ check_extack(err, "Error: TC offload is disabled on net device.", args)
+ sim.wait_for_flush()
+
+ sim.set_ethtool_tc_offloads(True)
+ sim.dfs["bpf_tc_non_bound_accept"] = "Y"
+
+ start_test("Test TC offload by default...")
+ ret, _ = sim.cls_bpf_add_filter(obj, fail=False)
+ fail(ret != 0, "Software TC filter did not load")
+ sim.dfs_get_bound_progs(expected=0)
+ ingress = sim.tc_show_ingress(expected=1)
+ fltr = ingress[0]
+ fail(not fltr["in_hw"], "Filter not offloaded by default")
+
+ sim.tc_flush_filters()
+
+    start_test("Test TC cBPF bytecode tries offload by default...")
+ ret, _ = sim.cls_bpf_add_filter(bytecode, fail=False)
+ fail(ret != 0, "Software TC filter did not load")
+ sim.dfs_get_bound_progs(expected=0)
+ ingress = sim.tc_show_ingress(expected=1)
+ fltr = ingress[0]
+ fail(not fltr["in_hw"], "Bytecode not offloaded by default")
+
+ sim.tc_flush_filters()
+ sim.dfs["bpf_tc_non_bound_accept"] = "N"
+
+ start_test("Test TC cBPF unbound bytecode doesn't offload...")
+ ret, _, err = sim.cls_bpf_add_filter(bytecode, skip_sw=True,
+ fail=False, include_stderr=True)
+ fail(ret == 0, "TC bytecode loaded for offload")
+ check_extack_nsim(err, "netdevsim configured to reject unbound programs.",
+ args)
+ sim.wait_for_flush()
+
+ start_test("Test non-0 chain offload...")
+ ret, _, err = sim.cls_bpf_add_filter(obj, chain=1, prio=1, handle=1,
+ skip_sw=True,
+ fail=False, include_stderr=True)
+ fail(ret == 0, "Offloaded a filter to chain other than 0")
+ check_extack(err, "Error: Driver supports only offload of chain 0.", args)
+ sim.tc_flush_filters()
+
+ start_test("Test TC replace...")
+ sim.cls_bpf_add_filter(obj, prio=1, handle=1)
+ sim.cls_bpf_add_filter(obj, op="replace", prio=1, handle=1)
+ sim.cls_filter_op(op="delete", prio=1, handle=1, cls="bpf")
+
+ sim.cls_bpf_add_filter(obj, prio=1, handle=1, skip_sw=True)
+ sim.cls_bpf_add_filter(obj, op="replace", prio=1, handle=1, skip_sw=True)
+ sim.cls_filter_op(op="delete", prio=1, handle=1, cls="bpf")
+
+ sim.cls_bpf_add_filter(obj, prio=1, handle=1, skip_hw=True)
+ sim.cls_bpf_add_filter(obj, op="replace", prio=1, handle=1, skip_hw=True)
+ sim.cls_filter_op(op="delete", prio=1, handle=1, cls="bpf")
+
+ start_test("Test TC replace bad flags...")
+ for i in range(3):
+ for j in range(3):
+ ret, _ = sim.cls_bpf_add_filter(obj, op="replace", prio=1, handle=1,
+ skip_sw=(j == 1), skip_hw=(j == 2),
+ fail=False)
+ fail(bool(ret) != bool(j),
+ "Software TC incorrect load in replace test, iteration %d" %
+ (j))
+ sim.cls_filter_op(op="delete", prio=1, handle=1, cls="bpf")
+
+ start_test("Test spurious extack from the driver...")
+    test_spurious_extack(sim, obj, False, "netdevsim")
+    test_spurious_extack(sim, obj, True, "netdevsim")
+
+ sim.set_ethtool_tc_offloads(False)
+
+    test_spurious_extack(sim, obj, False, "TC offload is disabled")
+    test_spurious_extack(sim, obj, True, "TC offload is disabled")
+
+ sim.set_ethtool_tc_offloads(True)
+
+ sim.tc_flush_filters()
+
+ start_test("Test TC offloads work...")
+ ret, _, err = sim.cls_bpf_add_filter(obj, verbose=True, skip_sw=True,
+ fail=False, include_stderr=True)
+ fail(ret != 0, "TC filter did not load with TC offloads enabled")
+ check_verifier_log(err, "[netdevsim] Hello from netdevsim!")
+
+ start_test("Test TC offload basics...")
+ dfs = sim.dfs_get_bound_progs(expected=1)
+ progs = bpftool_prog_list(expected=1)
+ ingress = sim.tc_show_ingress(expected=1)
+
+ dprog = dfs[0]
+ prog = progs[0]
+ fltr = ingress[0]
+    fail(fltr["skip_hw"], "TC reports 'skip_hw' on an offloaded filter")
+ fail(not fltr["in_hw"], "TC does not report 'in_hw' for offloaded filter")
+ fail(not fltr["skip_sw"], "TC does not report 'skip_sw' back")
+
+ start_test("Test TC offload is device-bound...")
+ fail(str(prog["id"]) != fltr["id"], "Program IDs don't match")
+ fail(prog["tag"] != fltr["tag"], "Program tags don't match")
+ fail(fltr["id"] != dprog["id"], "Program IDs don't match")
+ fail(dprog["state"] != "xlated", "Offloaded program state not translated")
+ fail(dprog["loaded"] != "Y", "Offloaded program is not loaded")
+
+ start_test("Test disabling TC offloads is rejected while filters installed...")
+ ret, _ = sim.set_ethtool_tc_offloads(False, fail=False)
+ fail(ret == 0, "Driver should refuse to disable TC offloads with filters installed...")
+
+ start_test("Test qdisc removal frees things...")
+ sim.tc_flush_filters()
+ sim.tc_show_ingress(expected=0)
+
+ start_test("Test disabling TC offloads is OK without filters...")
+ ret, _ = sim.set_ethtool_tc_offloads(False, fail=False)
+ fail(ret != 0,
+ "Driver refused to disable TC offloads without filters installed...")
+
+ sim.set_ethtool_tc_offloads(True)
+
+ start_test("Test destroying device gets rid of TC filters...")
+ sim.cls_bpf_add_filter(obj, skip_sw=True)
+ sim.remove()
+ bpftool_prog_list_wait(expected=0)
+
+ sim = NetdevSim()
+ sim.set_ethtool_tc_offloads(True)
+
+ start_test("Test destroying device gets rid of XDP...")
+ sim.set_xdp(obj, "offload")
+ sim.remove()
+ bpftool_prog_list_wait(expected=0)
+
+ sim = NetdevSim()
+ sim.set_ethtool_tc_offloads(True)
+
+ start_test("Test XDP prog reporting...")
+ sim.set_xdp(obj, "drv")
+ ipl = sim.ip_link_show(xdp=True)
+ progs = bpftool_prog_list(expected=1)
+ fail(ipl["xdp"]["prog"]["id"] != progs[0]["id"],
+ "Loaded program has wrong ID")
+
+ start_test("Test XDP prog replace without force...")
+ ret, _ = sim.set_xdp(obj, "drv", fail=False)
+ fail(ret == 0, "Replaced XDP program without -force")
+ sim.wait_for_flush(total=1)
+
+ start_test("Test XDP prog replace with force...")
+ ret, _ = sim.set_xdp(obj, "drv", force=True, fail=False)
+ fail(ret != 0, "Could not replace XDP program with -force")
+ bpftool_prog_list_wait(expected=1)
+ ipl = sim.ip_link_show(xdp=True)
+ progs = bpftool_prog_list(expected=1)
+ fail(ipl["xdp"]["prog"]["id"] != progs[0]["id"],
+ "Loaded program has wrong ID")
+ fail("dev" in progs[0].keys(),
+ "Device parameters reported for non-offloaded program")
+
+ start_test("Test XDP prog replace with bad flags...")
+ ret, _, err = sim.set_xdp(obj, "offload", force=True,
+ fail=False, include_stderr=True)
+    fail(ret == 0, "Replaced XDP program with a program in a different mode")
+ check_extack_nsim(err, "program loaded with different flags.", args)
+ ret, _, err = sim.set_xdp(obj, "", force=True,
+ fail=False, include_stderr=True)
+    fail(ret == 0, "Replaced XDP program with a program in a different mode")
+ check_extack_nsim(err, "program loaded with different flags.", args)
+
+ start_test("Test XDP prog remove with bad flags...")
+ ret, _, err = sim.unset_xdp("offload", force=True,
+ fail=False, include_stderr=True)
+    fail(ret == 0, "Removed program with a bad mode")
+ check_extack_nsim(err, "program loaded with different flags.", args)
+ ret, _, err = sim.unset_xdp("", force=True,
+ fail=False, include_stderr=True)
+    fail(ret == 0, "Removed program with a bad mode")
+ check_extack_nsim(err, "program loaded with different flags.", args)
+
+ start_test("Test MTU restrictions...")
+ ret, _ = sim.set_mtu(9000, fail=False)
+ fail(ret == 0,
+ "Driver should refuse to increase MTU to 9000 with XDP loaded...")
+ sim.unset_xdp("drv")
+ bpftool_prog_list_wait(expected=0)
+ sim.set_mtu(9000)
+ ret, _, err = sim.set_xdp(obj, "drv", fail=False, include_stderr=True)
+ fail(ret == 0, "Driver should refuse to load program with MTU of 9000...")
+ check_extack_nsim(err, "MTU too large w/ XDP enabled.", args)
+ sim.set_mtu(1500)
+
+ sim.wait_for_flush()
+ start_test("Test XDP offload...")
+ _, _, err = sim.set_xdp(obj, "offload", verbose=True, include_stderr=True)
+ ipl = sim.ip_link_show(xdp=True)
+ link_xdp = ipl["xdp"]["prog"]
+ progs = bpftool_prog_list(expected=1)
+ prog = progs[0]
+ fail(link_xdp["id"] != prog["id"], "Loaded program has wrong ID")
+ check_verifier_log(err, "[netdevsim] Hello from netdevsim!")
+
+ start_test("Test XDP offload is device bound...")
+ dfs = sim.dfs_get_bound_progs(expected=1)
+ dprog = dfs[0]
+
+ fail(prog["id"] != link_xdp["id"], "Program IDs don't match")
+ fail(prog["tag"] != link_xdp["tag"], "Program tags don't match")
+ fail(str(link_xdp["id"]) != dprog["id"], "Program IDs don't match")
+ fail(dprog["state"] != "xlated", "Offloaded program state not translated")
+ fail(dprog["loaded"] != "Y", "Offloaded program is not loaded")
+
+ start_test("Test removing XDP program many times...")
+ sim.unset_xdp("offload")
+ sim.unset_xdp("offload")
+ sim.unset_xdp("drv")
+ sim.unset_xdp("drv")
+ sim.unset_xdp("")
+ sim.unset_xdp("")
+ bpftool_prog_list_wait(expected=0)
+
+ start_test("Test attempt to use a program for a wrong device...")
+ sim2 = NetdevSim()
+ sim2.set_xdp(obj, "offload")
+ pin_file, pinned = pin_prog("/sys/fs/bpf/tmp")
+
+ ret, _, err = sim.set_xdp(pinned, "offload",
+ fail=False, include_stderr=True)
+ fail(ret == 0, "Pinned program loaded for a different device accepted")
+ check_extack_nsim(err, "program bound to different dev.", args)
+ sim2.remove()
+ ret, _, err = sim.set_xdp(pinned, "offload",
+ fail=False, include_stderr=True)
+ fail(ret == 0, "Pinned program loaded for a removed device accepted")
+ check_extack_nsim(err, "xdpoffload of non-bound program.", args)
+ rm(pin_file)
+ bpftool_prog_list_wait(expected=0)
+
+ start_test("Test mixing of TC and XDP...")
+ sim.tc_add_ingress()
+ sim.set_xdp(obj, "offload")
+ ret, _, err = sim.cls_bpf_add_filter(obj, skip_sw=True,
+ fail=False, include_stderr=True)
+ fail(ret == 0, "Loading TC when XDP active should fail")
+ check_extack_nsim(err, "driver and netdev offload states mismatch.", args)
+ sim.unset_xdp("offload")
+ sim.wait_for_flush()
+
+ sim.cls_bpf_add_filter(obj, skip_sw=True)
+ ret, _, err = sim.set_xdp(obj, "offload", fail=False, include_stderr=True)
+ fail(ret == 0, "Loading XDP when TC active should fail")
+ check_extack_nsim(err, "TC program is already loaded.", args)
+
+ start_test("Test binding TC from pinned...")
+ pin_file, pinned = pin_prog("/sys/fs/bpf/tmp")
+ sim.tc_flush_filters(bound=1, total=1)
+ sim.cls_bpf_add_filter(pinned, da=True, skip_sw=True)
+ sim.tc_flush_filters(bound=1, total=1)
+
+ start_test("Test binding XDP from pinned...")
+ sim.set_xdp(obj, "offload")
+ pin_file, pinned = pin_prog("/sys/fs/bpf/tmp2", idx=1)
+
+ sim.set_xdp(pinned, "offload", force=True)
+ sim.unset_xdp("offload")
+ sim.set_xdp(pinned, "offload", force=True)
+ sim.unset_xdp("offload")
+
+ start_test("Test offload of wrong type fails...")
+ ret, _ = sim.cls_bpf_add_filter(pinned, da=True, skip_sw=True, fail=False)
+ fail(ret == 0, "Managed to attach XDP program to TC")
+
+ start_test("Test asking for TC offload of two filters...")
+ sim.cls_bpf_add_filter(obj, da=True, skip_sw=True)
+ ret, _, err = sim.cls_bpf_add_filter(obj, da=True, skip_sw=True,
+ fail=False, include_stderr=True)
+ fail(ret == 0, "Managed to offload two TC filters at the same time")
+ check_extack_nsim(err, "driver and netdev offload states mismatch.", args)
+
+ sim.tc_flush_filters(bound=2, total=2)
+
+ start_test("Test if netdev removal waits for translation...")
+ delay_msec = 500
+ sim.dfs["bpf_bind_verifier_delay"] = delay_msec
+ start = time.time()
+ cmd_line = "tc filter add dev %s ingress bpf %s da skip_sw" % \
+ (sim['ifname'], obj)
+ tc_proc = cmd(cmd_line, background=True, fail=False)
+ # Wait for the verifier to start
+ while sim.dfs_num_bound_progs() <= 2:
+ pass
+ sim.remove()
+ end = time.time()
+ ret, _ = cmd_result(tc_proc, fail=False)
+ time_diff = end - start
+ log("Time", "start:\t%s\nend:\t%s\ndiff:\t%s" % (start, end, time_diff))
+
+    fail(ret == 0, "Managed to load TC filter on an unregistering device")
+ delay_sec = delay_msec * 0.001
+ fail(time_diff < delay_sec, "Removal process took %s, expected %s" %
+ (time_diff, delay_sec))
+
+ # Remove all pinned files and reinstantiate the netdev
+ clean_up()
+ bpftool_prog_list_wait(expected=0)
+
+ sim = NetdevSim()
+ map_obj = bpf_obj("sample_map_ret0.o")
+ start_test("Test loading program with maps...")
+ sim.set_xdp(map_obj, "offload", JSON=False) # map fixup msg breaks JSON
+
+ start_test("Test bpftool bound info reporting (own ns)...")
+ check_dev_info(False, "")
+
+ start_test("Test bpftool bound info reporting (other ns)...")
+ ns = mknetns()
+ sim.set_ns(ns)
+ check_dev_info(True, "")
+
+ start_test("Test bpftool bound info reporting (remote ns)...")
+ check_dev_info(False, ns)
+
+ start_test("Test bpftool bound info reporting (back to own ns)...")
+ sim.set_ns("")
+ check_dev_info(False, "")
+
+ prog_file, _ = pin_prog("/sys/fs/bpf/tmp_prog")
+ map_file, _ = pin_map("/sys/fs/bpf/tmp_map", idx=1, expected=2)
+ sim.remove()
+
+ start_test("Test bpftool bound info reporting (removed dev)...")
+ check_dev_info_removed(prog_file=prog_file, map_file=map_file)
+
+ # Remove all pinned files and reinstantiate the netdev
+ clean_up()
+ bpftool_prog_list_wait(expected=0)
+
+ sim = NetdevSim()
+
+ start_test("Test map update (no flags)...")
+ sim.set_xdp(map_obj, "offload", JSON=False) # map fixup msg breaks JSON
+ maps = bpftool_map_list(expected=2)
+ array = maps[0] if maps[0]["type"] == "array" else maps[1]
+ htab = maps[0] if maps[0]["type"] == "hash" else maps[1]
+ for m in maps:
+ for i in range(2):
+ bpftool("map update id %d key %s value %s" %
+ (m["id"], int2str("I", i), int2str("Q", i * 3)))
+
+ for m in maps:
+ ret, _ = bpftool("map update id %d key %s value %s" %
+ (m["id"], int2str("I", 3), int2str("Q", 3 * 3)),
+ fail=False)
+ fail(ret == 0, "added too many entries")
+
+ start_test("Test map update (exists)...")
+ for m in maps:
+ for i in range(2):
+ bpftool("map update id %d key %s value %s exist" %
+ (m["id"], int2str("I", i), int2str("Q", i * 3)))
+
+ for m in maps:
+ ret, err = bpftool("map update id %d key %s value %s exist" %
+ (m["id"], int2str("I", 3), int2str("Q", 3 * 3)),
+ fail=False)
+ fail(ret == 0, "updated non-existing key")
+ fail(err["error"].find("No such file or directory") == -1,
+ "expected ENOENT, error is '%s'" % (err["error"]))
+
+ start_test("Test map update (noexist)...")
+ for m in maps:
+ for i in range(2):
+ ret, err = bpftool("map update id %d key %s value %s noexist" %
+ (m["id"], int2str("I", i), int2str("Q", i * 3)),
+ fail=False)
+ fail(ret == 0, "updated existing key")
+ fail(err["error"].find("File exists") == -1,
+ "expected EEXIST, error is '%s'" % (err["error"]))
+
+ start_test("Test map dump...")
+ for m in maps:
+ _, entries = bpftool("map dump id %d" % (m["id"]))
+ for i in range(2):
+ key = str2int(entries[i]["key"])
+            fail(key != i, "expected key %d, got %d" % (i, key))
+ val = str2int(entries[i]["value"])
+            fail(val != i * 3, "expected value %d, got %d" % (i * 3, val))
+
+ start_test("Test map getnext...")
+ for m in maps:
+ _, entry = bpftool("map getnext id %d" % (m["id"]))
+ key = str2int(entry["next_key"])
+ fail(key != 0, "next key %d, expected %d" % (key, 0))
+ _, entry = bpftool("map getnext id %d key %s" %
+ (m["id"], int2str("I", 0)))
+ key = str2int(entry["next_key"])
+ fail(key != 1, "next key %d, expected %d" % (key, 1))
+ ret, err = bpftool("map getnext id %d key %s" %
+ (m["id"], int2str("I", 1)), fail=False)
+ fail(ret == 0, "got next key past the end of map")
+ fail(err["error"].find("No such file or directory") == -1,
+ "expected ENOENT, error is '%s'" % (err["error"]))
+
+ start_test("Test map delete (htab)...")
+ for i in range(2):
+ bpftool("map delete id %d key %s" % (htab["id"], int2str("I", i)))
+
+ start_test("Test map delete (array)...")
+ for i in range(2):
+ ret, err = bpftool("map delete id %d key %s" %
+ (htab["id"], int2str("I", i)), fail=False)
+ fail(ret == 0, "removed entry from an array")
+ fail(err["error"].find("No such file or directory") == -1,
+ "expected ENOENT, error is '%s'" % (err["error"]))
+
+ start_test("Test map remove...")
+ sim.unset_xdp("offload")
+ bpftool_map_list_wait(expected=0)
+ sim.remove()
+
+ sim = NetdevSim()
+ sim.set_xdp(map_obj, "offload", JSON=False) # map fixup msg breaks JSON
+ sim.remove()
+ bpftool_map_list_wait(expected=0)
+
+ start_test("Test map creation fail path...")
+ sim = NetdevSim()
+ sim.dfs["bpf_map_accept"] = "N"
+ ret, _ = sim.set_xdp(map_obj, "offload", JSON=False, fail=False)
+ fail(ret == 0,
+ "netdevsim didn't refuse to create a map with offload disabled")
+
+ print("%s: OK" % (os.path.basename(__file__)))
+
+finally:
+ log("Clean up...", "", level=1)
+ log_level_inc()
+ clean_up()
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 69427531408d..b549308abd19 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -21,8 +21,10 @@ typedef __u16 __sum16;
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/filter.h>
+#include <linux/perf_event.h>
#include <linux/unistd.h>
+#include <sys/ioctl.h>
#include <sys/wait.h>
#include <sys/resource.h>
#include <sys/types.h>
@@ -167,10 +169,9 @@ out:
#define NUM_ITER 100000
#define VIP_NUM 5
-static void test_l4lb(void)
+static void test_l4lb(const char *file)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
- const char *file = "./test_l4lb.o";
struct vip key = {.protocol = 6};
struct vip_meta {
__u32 flags;
@@ -247,6 +248,95 @@ out:
bpf_object__close(obj);
}
+static void test_l4lb_all(void)
+{
+ const char *file1 = "./test_l4lb.o";
+ const char *file2 = "./test_l4lb_noinline.o";
+
+ test_l4lb(file1);
+ test_l4lb(file2);
+}
+
+static void test_xdp_noinline(void)
+{
+ const char *file = "./test_xdp_noinline.o";
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ struct vip key = {.protocol = 6};
+ struct vip_meta {
+ __u32 flags;
+ __u32 vip_num;
+ } value = {.vip_num = VIP_NUM};
+ __u32 stats_key = VIP_NUM;
+ struct vip_stats {
+ __u64 bytes;
+ __u64 pkts;
+ } stats[nr_cpus];
+ struct real_definition {
+ union {
+ __be32 dst;
+ __be32 dstv6[4];
+ };
+ __u8 flags;
+ } real_def = {.dst = MAGIC_VAL};
+ __u32 ch_key = 11, real_num = 3;
+ __u32 duration, retval, size;
+ int err, i, prog_fd, map_fd;
+ __u64 bytes = 0, pkts = 0;
+ struct bpf_object *obj;
+ char buf[128];
+ u32 *magic = (u32 *)buf;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+ if (err) {
+ error_cnt++;
+ return;
+ }
+
+ map_fd = bpf_find_map(__func__, obj, "vip_map");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_update_elem(map_fd, &key, &value, 0);
+
+ map_fd = bpf_find_map(__func__, obj, "ch_rings");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
+
+ map_fd = bpf_find_map(__func__, obj, "reals");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
+
+ err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
+ buf, &size, &retval, &duration);
+ CHECK(err || errno || retval != 1 || size != 54 ||
+ *magic != MAGIC_VAL, "ipv4",
+ "err %d errno %d retval %d size %d magic %x\n",
+ err, errno, retval, size, *magic);
+
+ err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
+ buf, &size, &retval, &duration);
+ CHECK(err || errno || retval != 1 || size != 74 ||
+ *magic != MAGIC_VAL, "ipv6",
+ "err %d errno %d retval %d size %d magic %x\n",
+ err, errno, retval, size, *magic);
+
+ map_fd = bpf_find_map(__func__, obj, "stats");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_lookup_elem(map_fd, &stats_key, stats);
+ for (i = 0; i < nr_cpus; i++) {
+ bytes += stats[i].bytes;
+ pkts += stats[i].pkts;
+ }
+ if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
+ error_cnt++;
+ printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts);
+ }
+out:
+ bpf_object__close(obj);
+}
+
static void test_tcp_estats(void)
{
const char *file = "./test_tcp_estats.o";
@@ -351,7 +441,7 @@ static void test_bpf_obj_id(void)
info_len != sizeof(struct bpf_map_info) ||
strcmp((char *)map_infos[i].name, expected_map_name),
"get-map-info(fd)",
- "err %d errno %d type %d(%d) info_len %u(%lu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
+ "err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
err, errno,
map_infos[i].type, BPF_MAP_TYPE_ARRAY,
info_len, sizeof(struct bpf_map_info),
@@ -395,7 +485,7 @@ static void test_bpf_obj_id(void)
*(int *)prog_infos[i].map_ids != map_infos[i].id ||
strcmp((char *)prog_infos[i].name, expected_prog_name),
"get-prog-info(fd)",
- "err %d errno %d i %d type %d(%d) info_len %u(%lu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
+ "err %d errno %d i %d type %d(%d) info_len %u(%Zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
err, errno, i,
prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
info_len, sizeof(struct bpf_prog_info),
@@ -463,7 +553,7 @@ static void test_bpf_obj_id(void)
memcmp(&prog_info, &prog_infos[i], info_len) ||
*(int *)prog_info.map_ids != saved_map_id,
"get-prog-info(next_id->fd)",
- "err %d errno %d info_len %u(%lu) memcmp %d map_id %u(%u)\n",
+ "err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n",
err, errno, info_len, sizeof(struct bpf_prog_info),
memcmp(&prog_info, &prog_infos[i], info_len),
*(int *)prog_info.map_ids, saved_map_id);
@@ -509,7 +599,7 @@ static void test_bpf_obj_id(void)
memcmp(&map_info, &map_infos[i], info_len) ||
array_value != array_magic_value,
"check get-map-info(next_id->fd)",
- "err %d errno %d info_len %u(%lu) memcmp %d array_value %llu(%llu)\n",
+ "err %d errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n",
err, errno, info_len, sizeof(struct bpf_map_info),
memcmp(&map_info, &map_infos[i], info_len),
array_value, array_magic_value);
@@ -617,6 +707,262 @@ static void test_obj_name(void)
}
}
+static void test_tp_attach_query(void)
+{
+ const int num_progs = 3;
+ int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
+ __u32 duration = 0, info_len, saved_prog_ids[num_progs];
+ const char *file = "./test_tracepoint.o";
+ struct perf_event_query_bpf *query;
+ struct perf_event_attr attr = {};
+ struct bpf_object *obj[num_progs];
+ struct bpf_prog_info prog_info;
+ char buf[256];
+
+ snprintf(buf, sizeof(buf),
+ "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
+ efd = open(buf, O_RDONLY, 0);
+ if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
+ return;
+ bytes = read(efd, buf, sizeof(buf));
+ close(efd);
+ if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
+ "read", "bytes %d errno %d\n", bytes, errno))
+ return;
+
+ attr.config = strtol(buf, NULL, 0);
+ attr.type = PERF_TYPE_TRACEPOINT;
+ attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
+ attr.sample_period = 1;
+ attr.wakeup_events = 1;
+
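+	/* room for the query header plus one id slot per attached program */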
+ query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
+ for (i = 0; i < num_progs; i++) {
+ err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
+ &prog_fd[i]);
+ if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
+ goto cleanup1;
+
+ bzero(&prog_info, sizeof(prog_info));
+ prog_info.jited_prog_len = 0;
+ prog_info.xlated_prog_len = 0;
+ prog_info.nr_map_ids = 0;
+ info_len = sizeof(prog_info);
+ err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
+ if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
+ err, errno))
+ goto cleanup1;
+ saved_prog_ids[i] = prog_info.id;
+
+ pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+ 0 /* cpu 0 */, -1 /* group id */,
+ 0 /* flags */);
+ if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
+ pmu_fd[i], errno))
+ goto cleanup2;
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
+ if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
+ err, errno))
+ goto cleanup3;
+
+ if (i == 0) {
+ /* check NULL prog array query */
+ query->ids_len = num_progs;
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+ if (CHECK(err || query->prog_cnt != 0,
+ "perf_event_ioc_query_bpf",
+ "err %d errno %d query->prog_cnt %u\n",
+ err, errno, query->prog_cnt))
+ goto cleanup3;
+ }
+
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
+ if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
+ err, errno))
+ goto cleanup3;
+
+ if (i == 1) {
+ /* try to get # of programs only */
+ query->ids_len = 0;
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+ if (CHECK(err || query->prog_cnt != 2,
+ "perf_event_ioc_query_bpf",
+ "err %d errno %d query->prog_cnt %u\n",
+ err, errno, query->prog_cnt))
+ goto cleanup3;
+
+ /* try a few negative tests */
+ /* invalid query pointer */
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
+ (struct perf_event_query_bpf *)0x1);
+ if (CHECK(!err || errno != EFAULT,
+ "perf_event_ioc_query_bpf",
+ "err %d errno %d\n", err, errno))
+ goto cleanup3;
+
+			/* not enough space */
+ query->ids_len = 1;
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+ if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
+ "perf_event_ioc_query_bpf",
+ "err %d errno %d query->prog_cnt %u\n",
+ err, errno, query->prog_cnt))
+ goto cleanup3;
+ }
+
+ query->ids_len = num_progs;
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+ if (CHECK(err || query->prog_cnt != (i + 1),
+ "perf_event_ioc_query_bpf",
+ "err %d errno %d query->prog_cnt %u\n",
+ err, errno, query->prog_cnt))
+ goto cleanup3;
+ for (j = 0; j < i + 1; j++)
+ if (CHECK(saved_prog_ids[j] != query->ids[j],
+ "perf_event_ioc_query_bpf",
+ "#%d saved_prog_id %x query prog_id %x\n",
+ j, saved_prog_ids[j], query->ids[j]))
+ goto cleanup3;
+ }
+
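+	/* Teardown: normal flow falls through and unwinds every entry; an
+	 * error in the setup loop above jumps to the matching cleanup label
+	 * inside this loop, which then keeps unwinding the earlier entries.
+	 */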
+ i = num_progs - 1;
+ for (; i >= 0; i--) {
+ cleanup3:
+ ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
+ cleanup2:
+ close(pmu_fd[i]);
+ cleanup1:
+ bpf_object__close(obj[i]);
+ }
+ free(query);
+}
+
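+/* Walk all keys of map1 and require that each one can be looked up in map2;
+ * returns 0 on success, non-zero otherwise.
+ */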
+static int compare_map_keys(int map1_fd, int map2_fd)
+{
+ __u32 key, next_key;
+ char val_buf[PERF_MAX_STACK_DEPTH * sizeof(__u64)];
+ int err;
+
+ err = bpf_map_get_next_key(map1_fd, NULL, &key);
+ if (err)
+ return err;
+ err = bpf_map_lookup_elem(map2_fd, &key, val_buf);
+ if (err)
+ return err;
+
+ while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
+ err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);
+ if (err)
+ return err;
+
+ key = next_key;
+ }
+ if (errno != ENOENT)
+ return -1;
+
+ return 0;
+}
+
+static void test_stacktrace_map(void)
+{
+ int control_map_fd, stackid_hmap_fd, stackmap_fd;
+ const char *file = "./test_stacktrace_map.o";
+ int bytes, efd, err, pmu_fd, prog_fd;
+ struct perf_event_attr attr = {};
+ __u32 key, val, duration = 0;
+ struct bpf_object *obj;
+ char buf[256];
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
+ if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
+ goto out;
+
+ /* Get the ID for the sched/sched_switch tracepoint */
+ snprintf(buf, sizeof(buf),
+ "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
+ efd = open(buf, O_RDONLY, 0);
+ if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
+ goto close_prog;
+
+ bytes = read(efd, buf, sizeof(buf));
+ close(efd);
+ if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
+ "read", "bytes %d errno %d\n", bytes, errno))
+ goto close_prog;
+
+	/* Open the perf event and attach bpf program */
+ attr.config = strtol(buf, NULL, 0);
+ attr.type = PERF_TYPE_TRACEPOINT;
+ attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
+ attr.sample_period = 1;
+ attr.wakeup_events = 1;
+ pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+ 0 /* cpu 0 */, -1 /* group id */,
+ 0 /* flags */);
+ if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
+ pmu_fd, errno))
+ goto close_prog;
+
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+ if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
+ err, errno))
+ goto close_pmu;
+
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
+ if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
+ err, errno))
+ goto disable_pmu;
+
+ /* find map fds */
+ control_map_fd = bpf_find_map(__func__, obj, "control_map");
+ if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
+ if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
+ if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
+ err, errno))
+ goto disable_pmu;
+
+ /* give the bpf program some time to run */
+ sleep(1);
+
+ /* disable stack trace collection */
+ key = 0;
+ val = 1;
+ bpf_map_update_elem(control_map_fd, &key, &val, 0);
+
+ /* for every element in stackid_hmap, we can find a corresponding one
+ * in stackmap, and vice versa.
+ */
+ err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
+ if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
+ if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
+ "err %d errno %d\n", err, errno))
+ ; /* fall through */
+
+disable_pmu:
+ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+
+close_pmu:
+ close(pmu_fd);
+
+close_prog:
+ bpf_object__close(obj);
+
+out:
+ return;
+}
+
int main(void)
{
struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
@@ -625,11 +971,14 @@ int main(void)
test_pkt_access();
test_xdp();
- test_l4lb();
+ test_l4lb_all();
+ test_xdp_noinline();
test_tcp_estats();
test_bpf_obj_id();
test_pkt_md_access();
test_obj_name();
+ test_tp_attach_query();
+ test_stacktrace_map();
printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
diff --git a/tools/testing/selftests/bpf/test_stacktrace_map.c b/tools/testing/selftests/bpf/test_stacktrace_map.c
new file mode 100644
index 000000000000..76d85c5d08bd
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_stacktrace_map.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Facebook
+
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+#ifndef PERF_MAX_STACK_DEPTH
+#define PERF_MAX_STACK_DEPTH 127
+#endif
+
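+/* single-entry array; userspace sets it to non-zero to stop stack collection */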
+struct bpf_map_def SEC("maps") control_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(__u32),
+ .max_entries = 1,
+};
+
+struct bpf_map_def SEC("maps") stackid_hmap = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(__u32),
+ .max_entries = 10000,
+};
+
+struct bpf_map_def SEC("maps") stackmap = {
+ .type = BPF_MAP_TYPE_STACK_TRACE,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(__u64) * PERF_MAX_STACK_DEPTH,
+ .max_entries = 10000,
+};
+
+/* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */
+struct sched_switch_args {
+ unsigned long long pad;
+ char prev_comm[16];
+ int prev_pid;
+ int prev_prio;
+ long long prev_state;
+ char next_comm[16];
+ int next_pid;
+ int next_prio;
+};
+
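+/* On every sched_switch event, record the current stack in stackmap via
+ * bpf_get_stackid() and mirror the returned stack id into stackid_hmap,
+ * unless collection has been disabled through control_map.
+ */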
+SEC("tracepoint/sched/sched_switch")
+int oncpu(struct sched_switch_args *ctx)
+{
+ __u32 key = 0, val = 0, *value_p;
+
+ value_p = bpf_map_lookup_elem(&control_map, &key);
+ if (value_p && *value_p)
+ return 0; /* skip if non-zero *value_p */
+
+ /* The size of stackmap and stackid_hmap should be the same */
+ key = bpf_get_stackid(ctx, &stackmap, 0);
+ if ((int)key >= 0)
+ bpf_map_update_elem(&stackid_hmap, &key, &val, 0);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */
diff --git a/tools/testing/selftests/bpf/test_tcpbpf.h b/tools/testing/selftests/bpf/test_tcpbpf.h
new file mode 100644
index 000000000000..2fe43289943c
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_tcpbpf.h
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifndef _TEST_TCPBPF_H
+#define _TEST_TCPBPF_H
+
+struct tcpbpf_globals {
+ __u32 event_map;
+ __u32 total_retrans;
+ __u32 data_segs_in;
+ __u32 data_segs_out;
+ __u32 bad_cb_test_rv;
+ __u32 good_cb_test_rv;
+ __u64 bytes_received;
+ __u64 bytes_acked;
+};
+#endif
diff --git a/tools/testing/selftests/bpf/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/test_tcpbpf_kern.c
new file mode 100644
index 000000000000..57119ad57a3f
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_tcpbpf_kern.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stddef.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/ip.h>
+#include <linux/in6.h>
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/tcp.h>
+#include <netinet/in.h>
+#include "bpf_helpers.h"
+#include "bpf_endian.h"
+#include "test_tcpbpf.h"
+
+struct bpf_map_def SEC("maps") global_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct tcpbpf_globals),
+ .max_entries = 2,
+};
+
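+/* Record that callback 'event' has fired by setting the corresponding bit
+ * in the event_map field of the single global_map entry.
+ */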
+static inline void update_event_map(int event)
+{
+ __u32 key = 0;
+ struct tcpbpf_globals g, *gp;
+
+ gp = bpf_map_lookup_elem(&global_map, &key);
+ if (gp == NULL) {
+ struct tcpbpf_globals g = {0};
+
+ g.event_map |= (1 << event);
+ bpf_map_update_elem(&global_map, &key, &g,
+ BPF_ANY);
+ } else {
+ g = *gp;
+ g.event_map |= (1 << event);
+ bpf_map_update_elem(&global_map, &key, &g,
+ BPF_ANY);
+ }
+}
+
+int _version SEC("version") = 1;
+
+SEC("sockops")
+int bpf_testcb(struct bpf_sock_ops *skops)
+{
+ int rv = -1;
+ int bad_call_rv = 0;
+ int good_call_rv = 0;
+ int op;
+ int v = 0;
+
+ op = (int) skops->op;
+
+ update_event_map(op);
+
+ switch (op) {
+ case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
+ /* Test failure to set largest cb flag (assumes not defined) */
+ bad_call_rv = bpf_sock_ops_cb_flags_set(skops, 0x80);
+ /* Set callback */
+ good_call_rv = bpf_sock_ops_cb_flags_set(skops,
+ BPF_SOCK_OPS_STATE_CB_FLAG);
+ /* Update results */
+ {
+ __u32 key = 0;
+ struct tcpbpf_globals g, *gp;
+
+ gp = bpf_map_lookup_elem(&global_map, &key);
+ if (!gp)
+ break;
+ g = *gp;
+ g.bad_cb_test_rv = bad_call_rv;
+ g.good_cb_test_rv = good_call_rv;
+ bpf_map_update_elem(&global_map, &key, &g,
+ BPF_ANY);
+ }
+ break;
+ case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
+ skops->sk_txhash = 0x12345f;
+ v = 0xff;
+ rv = bpf_setsockopt(skops, SOL_IPV6, IPV6_TCLASS, &v,
+ sizeof(v));
+ break;
+ case BPF_SOCK_OPS_RTO_CB:
+ break;
+ case BPF_SOCK_OPS_RETRANS_CB:
+ break;
+ case BPF_SOCK_OPS_STATE_CB:
+ if (skops->args[1] == BPF_TCP_CLOSE) {
+ __u32 key = 0;
+ struct tcpbpf_globals g, *gp;
+
+ gp = bpf_map_lookup_elem(&global_map, &key);
+ if (!gp)
+ break;
+ g = *gp;
+ g.total_retrans = skops->total_retrans;
+ g.data_segs_in = skops->data_segs_in;
+ g.data_segs_out = skops->data_segs_out;
+ g.bytes_received = skops->bytes_received;
+ g.bytes_acked = skops->bytes_acked;
+ bpf_map_update_elem(&global_map, &key, &g,
+ BPF_ANY);
+ }
+ break;
+ default:
+ rv = -1;
+ }
+ skops->reply = rv;
+ return 1;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_tcpbpf_user.c b/tools/testing/selftests/bpf/test_tcpbpf_user.c
new file mode 100644
index 000000000000..95a370f3d378
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_tcpbpf_user.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <errno.h>
+#include <signal.h>
+#include <string.h>
+#include <assert.h>
+#include <linux/perf_event.h>
+#include <linux/ptrace.h>
+#include <linux/bpf.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include "bpf_util.h"
+#include <linux/perf_event.h>
+#include "test_tcpbpf.h"
+
+static int bpf_find_map(const char *test, struct bpf_object *obj,
+ const char *name)
+{
+ struct bpf_map *map;
+
+ map = bpf_object__find_map_by_name(obj, name);
+ if (!map) {
+ printf("%s:FAIL:map '%s' not found\n", test, name);
+ return -1;
+ }
+ return bpf_map__fd(map);
+}
+
+#define SYSTEM(CMD) \
+ do { \
+ if (system(CMD)) { \
+ printf("system(%s) FAILS!\n", CMD); \
+ } \
+ } while (0)
+
+int main(int argc, char **argv)
+{
+ const char *file = "test_tcpbpf_kern.o";
+ struct tcpbpf_globals g = {0};
+ int cg_fd, prog_fd, map_fd;
+ bool debug_flag = false;
+ int error = EXIT_FAILURE;
+ struct bpf_object *obj;
+ char cmd[100], *dir;
+ struct stat buffer;
+ __u32 key = 0;
+ int pid;
+ int rv;
+
+ if (argc > 1 && strcmp(argv[1], "-d") == 0)
+ debug_flag = true;
+
+ dir = "/tmp/cgroupv2/foo";
+
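+ /* make sure a cgroup v2 hierarchy exists and move this process into
+ * it, so that its TCP sockets are covered by the attached sockops
+ * program
+ */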
+ if (stat(dir, &buffer) != 0) {
+ SYSTEM("mkdir -p /tmp/cgroupv2");
+ SYSTEM("mount -t cgroup2 none /tmp/cgroupv2");
+ SYSTEM("mkdir -p /tmp/cgroupv2/foo");
+ }
+ pid = (int) getpid();
+ sprintf(cmd, "echo %d >> /tmp/cgroupv2/foo/cgroup.procs", pid);
+ SYSTEM(cmd);
+
+ cg_fd = open(dir, O_DIRECTORY, O_RDONLY);
+ if (bpf_prog_load(file, BPF_PROG_TYPE_SOCK_OPS, &obj, &prog_fd)) {
+ printf("FAILED: load_bpf_file failed for: %s\n", file);
+ goto err;
+ }
+
+ rv = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_SOCK_OPS, 0);
+ if (rv) {
+ printf("FAILED: bpf_prog_attach: %d (%s)\n",
+ rv, strerror(errno));
+ goto err;
+ }
+
+ SYSTEM("./tcp_server.py");
+
+ map_fd = bpf_find_map(__func__, obj, "global_map");
+ if (map_fd < 0)
+ goto err;
+
+ rv = bpf_map_lookup_elem(map_fd, &key, &g);
+ if (rv != 0) {
+ printf("FAILED: bpf_map_lookup_elem returns %d\n", rv);
+ goto err;
+ }
+
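+ /* expected counters below correspond to the fixed transfer performed
+ * by tcp_server.py above
+ */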
+ if (g.bytes_received != 501 || g.bytes_acked != 1002 ||
+ g.data_segs_in != 1 || g.data_segs_out != 1 ||
+ (g.event_map ^ 0x47e) != 0 || g.bad_cb_test_rv != 0x80 ||
+ g.good_cb_test_rv != 0) {
+ printf("FAILED: Wrong stats\n");
+ if (debug_flag) {
+ printf("\n");
+ printf("bytes_received: %d (expecting 501)\n",
+ (int)g.bytes_received);
+ printf("bytes_acked: %d (expecting 1002)\n",
+ (int)g.bytes_acked);
+ printf("data_segs_in: %d (expecting 1)\n",
+ g.data_segs_in);
+ printf("data_segs_out: %d (expecting 1)\n",
+ g.data_segs_out);
+ printf("event_map: 0x%x (at least 0x47e)\n",
+ g.event_map);
+ printf("bad_cb_test_rv: 0x%x (expecting 0x80)\n",
+ g.bad_cb_test_rv);
+ printf("good_cb_test_rv:0x%x (expecting 0)\n",
+ g.good_cb_test_rv);
+ }
+ goto err;
+ }
+ printf("PASSED!\n");
+ error = 0;
+err:
+ bpf_prog_detach(cg_fd, BPF_CGROUP_SOCK_OPS);
+ return error;
+
+}
diff --git a/tools/testing/selftests/bpf/test_tracepoint.c b/tools/testing/selftests/bpf/test_tracepoint.c
new file mode 100644
index 000000000000..04bf084517e0
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_tracepoint.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017 Facebook
+
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+/* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */
+struct sched_switch_args {
+ unsigned long long pad;
+ char prev_comm[16];
+ int prev_pid;
+ int prev_prio;
+ long long prev_state;
+ char next_comm[16];
+ int next_pid;
+ int next_prio;
+};
+
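+/* minimal sched_switch handler: attaches cleanly and does no work */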
+SEC("tracepoint/sched/sched_switch")
+int oncpu(struct sched_switch_args *ctx)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 3c64f30cf63c..c0f16e93f9bd 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -2,6 +2,7 @@
* Testsuite for eBPF verifier
*
* Copyright (c) 2014 PLUMgrid, http://plumgrid.com
+ * Copyright (c) 2017 Facebook
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -20,6 +21,7 @@
#include <stddef.h>
#include <stdbool.h>
#include <sched.h>
+#include <limits.h>
#include <sys/capability.h>
#include <sys/resource.h>
@@ -28,6 +30,7 @@
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>
+#include <linux/if_ether.h>
#include <bpf/bpf.h>
@@ -48,6 +51,8 @@
#define MAX_INSNS 512
#define MAX_FIXUPS 8
#define MAX_NR_MAPS 4
+#define POINTER_VALUE 0xcafe4all
+#define TEST_DATA_LEN 64
#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
@@ -61,6 +66,7 @@ struct bpf_test {
int fixup_map_in_map[MAX_FIXUPS];
const char *errstr;
const char *errstr_unpriv;
+ uint32_t retval;
enum {
UNDEF,
ACCEPT,
@@ -94,6 +100,326 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.result = ACCEPT,
+ .retval = -3,
+ },
+ {
+ "DIV32 by 0, zero check 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+ },
+ {
+ "DIV32 by 0, zero check 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+ },
+ {
+ "DIV64 by 0, zero check",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+ },
+ {
+ "MOD32 by 0, zero check 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+ },
+ {
+ "MOD32 by 0, zero check 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+ },
+ {
+ "MOD64 by 0, zero check",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+ },
+ {
+ "DIV32 by 0, zero check ok, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 2),
+ BPF_MOV32_IMM(BPF_REG_2, 16),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 8,
+ },
+ {
+ "DIV32 by 0, zero check 1, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+ },
+ {
+ "DIV32 by 0, zero check 2, cls",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+ },
+ {
+ "DIV64 by 0, zero check, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+ },
+ {
+ "MOD32 by 0, zero check ok, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 3),
+ BPF_MOV32_IMM(BPF_REG_2, 5),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 2,
+ },
+ {
+ "MOD32 by 0, zero check 1, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+ },
+ {
+ "MOD32 by 0, zero check 2, cls",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+ },
+ {
+ "MOD64 by 0, zero check 1, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 2),
+ BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 2,
+ },
+ {
+ "MOD64 by 0, zero check 2, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_0, -1),
+ BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = -1,
+ },
+ /* Just make sure that JITs use udiv/umod, as otherwise we get
+ * an exception from INT_MIN/-1 overflow, similarly to the div
+ * by zero case.
+ */
+ {
+ "DIV32 overflow, check 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, -1),
+ BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+ },
+ {
+ "DIV32 overflow, check 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
+ BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+ },
+ {
+ "DIV64 overflow, check 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
+ BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+ },
+ {
+ "DIV64 overflow, check 2",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
+ BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+ },
+ {
+ "MOD32 overflow, check 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, -1),
+ BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = INT_MIN,
+ },
+ {
+ "MOD32 overflow, check 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
+ BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = INT_MIN,
+ },
+ {
+ "MOD64 overflow, check 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+ },
+ {
+ "MOD64 overflow, check 2",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+ },
+ {
+ "xor32 zero extend check",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_2, -1),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
+ BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
+ BPF_MOV32_IMM(BPF_REG_0, 2),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+ },
+ {
+ "empty prog",
+ .insns = {
+ },
+ .errstr = "unknown opcode 00",
+ .result = REJECT,
+ },
+ {
+ "only exit insn",
+ .insns = {
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 !read_ok",
+ .result = REJECT,
},
{
"unreachable",
@@ -209,6 +535,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.result = ACCEPT,
+ .retval = 1,
},
{
"test8 ld_imm64",
@@ -273,11 +600,51 @@ static struct bpf_test tests[] = {
.result = REJECT,
},
{
+ "arsh32 on imm",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "unknown opcode c4",
+ },
+ {
+ "arsh32 on reg",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 5),
+ BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "unknown opcode cc",
+ },
+ {
+ "arsh64 on imm",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ },
+ {
+ "arsh64 on reg",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 5),
+ BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ },
+ {
"no bpf_exit",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
},
- .errstr = "jump out of range",
+ .errstr = "not an exit",
.result = REJECT,
},
{
@@ -367,7 +734,7 @@ static struct bpf_test tests[] = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
- .errstr = "BPF_CALL uses reserved",
+ .errstr = "unknown opcode 8d",
.result = REJECT,
},
{
@@ -422,9 +789,7 @@ static struct bpf_test tests[] = {
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "R1 subtraction from stack pointer",
- .result_unpriv = REJECT,
- .errstr = "R1 invalid mem access",
+ .errstr = "R1 subtraction from stack pointer",
.result = REJECT,
},
{
@@ -478,6 +843,7 @@ static struct bpf_test tests[] = {
.errstr_unpriv = "R0 leaks addr",
.result = ACCEPT,
.result_unpriv = REJECT,
+ .retval = POINTER_VALUE,
},
{
"check valid spill/fill, skb mark",
@@ -558,7 +924,7 @@ static struct bpf_test tests[] = {
BPF_RAW_INSN(0, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
- .errstr = "invalid BPF_LD_IMM",
+ .errstr = "unknown opcode 00",
.result = REJECT,
},
{
@@ -576,7 +942,7 @@ static struct bpf_test tests[] = {
BPF_RAW_INSN(-1, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
- .errstr = "invalid BPF_ALU opcode f0",
+ .errstr = "unknown opcode ff",
.result = REJECT,
},
{
@@ -585,7 +951,7 @@ static struct bpf_test tests[] = {
BPF_RAW_INSN(-1, -1, -1, -1, -1),
BPF_EXIT_INSN(),
},
- .errstr = "invalid BPF_ALU opcode f0",
+ .errstr = "unknown opcode ff",
.result = REJECT,
},
{
@@ -606,7 +972,6 @@ static struct bpf_test tests[] = {
},
.errstr = "misaligned stack access",
.result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
"invalid map_fd for function call",
@@ -765,6 +1130,7 @@ static struct bpf_test tests[] = {
.errstr_unpriv = "R1 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
+ .retval = -ENOENT,
},
{
"jump test 4",
@@ -1785,6 +2151,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.result = ACCEPT,
+ .retval = 0xfaceb00c,
},
{
"PTR_TO_STACK store/load - bad alignment on off",
@@ -1797,7 +2164,6 @@ static struct bpf_test tests[] = {
},
.result = REJECT,
.errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
"PTR_TO_STACK store/load - bad alignment on reg",
@@ -1810,7 +2176,6 @@ static struct bpf_test tests[] = {
},
.result = REJECT,
.errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
"PTR_TO_STACK store/load - out of bounds low",
@@ -1845,6 +2210,7 @@ static struct bpf_test tests[] = {
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R0 leaks addr",
+ .retval = POINTER_VALUE,
},
{
"unpriv: add const to pointer",
@@ -1862,9 +2228,8 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R1 pointer += pointer",
+ .result = REJECT,
+ .errstr = "R1 pointer += pointer",
},
{
"unpriv: neg pointer",
@@ -2019,6 +2384,7 @@ static struct bpf_test tests[] = {
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_hash_recalc),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
@@ -2559,6 +2925,29 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
+ "context stores via ST",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "BPF_ST stores into R1 context is not allowed",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "context stores via XADD",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
+ BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "BPF_XADD stores into R1 context is not allowed",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
"direct packet access: test1",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
@@ -2592,7 +2981,8 @@ static struct bpf_test tests[] = {
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
@@ -2782,6 +3172,7 @@ static struct bpf_test tests[] = {
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 1,
},
{
"direct packet access: test12 (and, good access)",
@@ -2806,6 +3197,7 @@ static struct bpf_test tests[] = {
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 1,
},
{
"direct packet access: test13 (branches, good access)",
@@ -2836,6 +3228,7 @@ static struct bpf_test tests[] = {
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 1,
},
{
"direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
@@ -2859,6 +3252,7 @@ static struct bpf_test tests[] = {
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 1,
},
{
"direct packet access: test15 (spill with xadd)",
@@ -2899,7 +3293,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .errstr = "invalid access to packet",
+ .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
@@ -3145,6 +3539,7 @@ static struct bpf_test tests[] = {
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 1,
},
{
"direct packet access: test28 (marking on <=, bad access)",
@@ -3885,9 +4280,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3, 11 },
- .errstr_unpriv = "R0 pointer += pointer",
- .errstr = "R0 invalid mem access 'inv'",
- .result_unpriv = REJECT,
+ .errstr = "R0 pointer += pointer",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
@@ -3928,7 +4321,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 4 },
- .errstr = "R4 invalid mem access",
+ .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS
},
@@ -3949,7 +4342,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 4 },
- .errstr = "R4 invalid mem access",
+ .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS
},
@@ -3970,7 +4363,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 4 },
- .errstr = "R4 invalid mem access",
+ .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS
},
@@ -4279,7 +4672,8 @@ static struct bpf_test tests[] = {
.fixup_map1 = { 2 },
.errstr_unpriv = "R2 leaks addr into mem",
.result_unpriv = REJECT,
- .result = ACCEPT,
+ .result = REJECT,
+ .errstr = "BPF_XADD stores into R1 context is not allowed",
},
{
"leak pointer into ctx 2",
@@ -4293,7 +4687,8 @@ static struct bpf_test tests[] = {
},
.errstr_unpriv = "R10 leaks addr into mem",
.result_unpriv = REJECT,
- .result = ACCEPT,
+ .result = REJECT,
+ .errstr = "BPF_XADD stores into R1 context is not allowed",
},
{
"leak pointer into ctx 3",
@@ -5195,10 +5590,8 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
- .errstr_unpriv = "R0 bitwise operator &= on pointer",
- .errstr = "invalid mem access 'inv'",
+ .errstr = "R0 bitwise operator &= on pointer",
.result = REJECT,
- .result_unpriv = REJECT,
},
{
"map element value illegal alu op, 2",
@@ -5214,10 +5607,8 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
- .errstr_unpriv = "R0 32-bit pointer arithmetic prohibited",
- .errstr = "invalid mem access 'inv'",
+ .errstr = "R0 32-bit pointer arithmetic prohibited",
.result = REJECT,
- .result_unpriv = REJECT,
},
{
"map element value illegal alu op, 3",
@@ -5233,10 +5624,8 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
- .errstr_unpriv = "R0 pointer arithmetic with /= operator",
- .errstr = "invalid mem access 'inv'",
+ .errstr = "R0 pointer arithmetic with /= operator",
.result = REJECT,
- .result_unpriv = REJECT,
},
{
"map element value illegal alu op, 4",
@@ -5648,7 +6037,7 @@ static struct bpf_test tests[] = {
"helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
@@ -5770,6 +6159,7 @@ static struct bpf_test tests[] = {
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 0 /* csum_diff of 64-byte packet */,
},
{
"helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
@@ -5883,7 +6273,7 @@ static struct bpf_test tests[] = {
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
@@ -6019,8 +6409,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map_in_map = { 3 },
- .errstr = "R1 type=inv expected=map_ptr",
- .errstr_unpriv = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
+ .errstr = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
.result = REJECT,
},
{
@@ -6117,6 +6506,31 @@ static struct bpf_test tests[] = {
.result = ACCEPT,
},
{
+ "ld_abs: tests on r6 and skb data reload helper",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_skb_vlan_push),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 42 /* ultimate return value */,
+ },
+ {
"ld_ind: check calling conv, r1",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
@@ -6186,6 +6600,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.result = ACCEPT,
+ .retval = 1,
},
{
"check bpf_perf_event_data->sample_period byte load permitted",
@@ -6300,7 +6715,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
- .errstr = "R0 min value is negative",
+ .errstr = "unbounded min value",
.result = REJECT,
},
{
@@ -6324,7 +6739,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
- .errstr = "R0 min value is negative",
+ .errstr = "unbounded min value",
.result = REJECT,
},
{
@@ -6350,7 +6765,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
- .errstr = "R8 invalid mem access 'inv'",
+ .errstr = "unbounded min value",
.result = REJECT,
},
{
@@ -6375,7 +6790,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
- .errstr = "R8 invalid mem access 'inv'",
+ .errstr = "unbounded min value",
.result = REJECT,
},
{
@@ -6423,7 +6838,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
- .errstr = "R0 min value is negative",
+ .errstr = "unbounded min value",
.result = REJECT,
},
{
@@ -6494,7 +6909,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
- .errstr = "R0 min value is negative",
+ .errstr = "unbounded min value",
.result = REJECT,
},
{
@@ -6545,7 +6960,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
- .errstr = "R0 min value is negative",
+ .errstr = "unbounded min value",
.result = REJECT,
},
{
@@ -6572,7 +6987,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
- .errstr = "R0 min value is negative",
+ .errstr = "unbounded min value",
.result = REJECT,
},
{
@@ -6598,7 +7013,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
- .errstr = "R0 min value is negative",
+ .errstr = "unbounded min value",
.result = REJECT,
},
{
@@ -6627,7 +7042,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
- .errstr = "R0 min value is negative",
+ .errstr = "unbounded min value",
.result = REJECT,
},
{
@@ -6657,7 +7072,7 @@ static struct bpf_test tests[] = {
BPF_JMP_IMM(BPF_JA, 0, 0, -7),
},
.fixup_map1 = { 4 },
- .errstr = "R0 min value is negative",
+ .errstr = "R0 invalid mem access 'inv'",
.result = REJECT,
},
{
@@ -6685,8 +7100,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
- .errstr_unpriv = "R0 pointer comparison prohibited",
- .errstr = "R0 min value is negative",
+ .errstr = "unbounded min value",
.result = REJECT,
.result_unpriv = REJECT,
},
@@ -6742,6 +7156,464 @@ static struct bpf_test tests[] = {
.result = REJECT,
},
{
+ "bounds check based on zero-extended MOV",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ /* r2 = 0x0000'0000'ffff'ffff */
+ BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
+ /* r2 = 0 */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
+ /* no-op */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ /* access at offset 0 */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .result = ACCEPT
+ },
+ {
+ "bounds check based on sign-extended MOV. test1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ /* r2 = 0xffff'ffff'ffff'ffff */
+ BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
+ /* r2 = 0xffff'ffff */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
+ /* r0 = <oob pointer> */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ /* access to OOB pointer */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .errstr = "map_value pointer and 4294967295",
+ .result = REJECT
+ },
+ {
+ "bounds check based on sign-extended MOV. test2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ /* r2 = 0xffff'ffff'ffff'ffff */
+ BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
+ /* r2 = 0xfff'ffff */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
+ /* r0 = <oob pointer> */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ /* access to OOB pointer */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .errstr = "R0 min value is outside of the array range",
+ .result = REJECT
+ },
+ {
+ "bounds check based on reg_off + var_off + insn_off. test1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 4 },
+ .errstr = "value_size=8 off=1073741825",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "bounds check based on reg_off + var_off + insn_off. test2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 4 },
+ .errstr = "value 1073741823",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "bounds check after truncation of non-boundary-crossing range",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ /* r1 = [0x00, 0xff] */
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ /* r2 = 0x10'0000'0000 */
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
+ /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+ /* r1 = [0x00, 0xff] */
+ BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
+ /* r1 = 0 */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+ /* no-op */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* access at offset 0 */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .result = ACCEPT
+ },
+ {
+ "bounds check after truncation of boundary-crossing range (1)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ /* r1 = [0x00, 0xff] */
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = [0xffff'ff80, 0x1'0000'007f] */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = [0xffff'ff80, 0xffff'ffff] or
+ * [0x0000'0000, 0x0000'007f]
+ */
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = [0x00, 0xff] or
+ * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
+ */
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = 0 or
+ * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
+ */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+ /* no-op or OOB pointer computation */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* potentially OOB access */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ /* not actually fully unbounded, but the bound is very high */
+ .errstr = "R0 unbounded memory access",
+ .result = REJECT
+ },
+ {
+ "bounds check after truncation of boundary-crossing range (2)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ /* r1 = [0x00, 0xff] */
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = [0xffff'ff80, 0x1'0000'007f] */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = [0xffff'ff80, 0xffff'ffff] or
+ * [0x0000'0000, 0x0000'007f]
+ * difference to previous test: truncation via MOV32
+ * instead of ALU32.
+ */
+ BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = [0x00, 0xff] or
+ * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
+ */
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = 0 or
+ * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
+ */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+ /* no-op or OOB pointer computation */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* potentially OOB access */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ /* not actually fully unbounded, but the bound is very high */
+ .errstr = "R0 unbounded memory access",
+ .result = REJECT
+ },
+ {
+ "bounds check after wrapping 32-bit addition",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ /* r1 = 0x7fff'ffff */
+ BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
+ /* r1 = 0xffff'fffe */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+ /* r1 = 0 */
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
+ /* no-op */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* access at offset 0 */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .result = ACCEPT
+ },
+ {
+ "bounds check after shift with oversized count operand",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_IMM(BPF_REG_2, 32),
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ /* r1 = (u32)1 << (u32)32 = ? */
+ BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
+ /* r1 = [0x0000, 0xffff] */
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
+ /* computes unknown pointer, potentially OOB */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* potentially OOB access */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .errstr = "R0 max value is outside of the array range",
+ .result = REJECT
+ },
+ {
+ "bounds check after right shift of maybe-negative number",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ /* r1 = [0x00, 0xff] */
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ /* r1 = [-0x01, 0xfe] */
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
+ /* r1 = 0 or 0xff'ffff'ffff'ffff */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+ /* r1 = 0 or 0xffff'ffff'ffff */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+ /* computes unknown pointer, potentially OOB */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* potentially OOB access */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .errstr = "R0 unbounded memory access",
+ .result = REJECT
+ },
+ {
+ "bounds check map access with off+size signed 32bit overflow. test1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_JMP_A(0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .errstr = "map_value pointer and 2147483646",
+ .result = REJECT
+ },
+ {
+ "bounds check map access with off+size signed 32bit overflow. test2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_JMP_A(0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .errstr = "pointer offset 1073741822",
+ .result = REJECT
+ },
+ {
+ "bounds check map access with off+size signed 32bit overflow. test3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
+ BPF_JMP_A(0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .errstr = "pointer offset -1073741822",
+ .result = REJECT
+ },
+ {
+ "bounds check map access with off+size signed 32bit overflow. test4",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_1, 1000000),
+ BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
+ BPF_JMP_A(0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .errstr = "map_value pointer and 1000000000000",
+ .result = REJECT
+ },
+ {
+ "pointer/scalar confusion in state equality check (way 1)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_JMP_A(1),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+ BPF_JMP_A(0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .result = ACCEPT,
+ .retval = POINTER_VALUE,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 leaks addr as return value"
+ },
+ {
+ "pointer/scalar confusion in state equality check (way 2)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+ BPF_JMP_A(1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .result = ACCEPT,
+ .retval = POINTER_VALUE,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 leaks addr as return value"
+ },
+ {
"variable-offset ctx access",
.insns = {
/* Get an unknown value */
@@ -6783,6 +7655,71 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_LWT_IN,
},
{
+ "indirect variable-offset stack access",
+ .insns = {
+ /* Fill the top 8 bytes of the stack */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ /* Get an unknown value */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+ /* Make it small and 4-byte aligned */
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
+ /* add it to fp. We now have either fp-4 or fp-8, but
+ * we don't know which
+ */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
+ /* dereference it indirectly */
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 5 },
+ .errstr = "variable stack read R2",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+ },
+ {
+ "direct stack access with 32-bit wraparound. test1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_EXIT_INSN()
+ },
+ .errstr = "fp pointer and 2147483647",
+ .result = REJECT
+ },
+ {
+ "direct stack access with 32-bit wraparound. test2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_EXIT_INSN()
+ },
+ .errstr = "fp pointer and 1073741823",
+ .result = REJECT
+ },
+ {
+ "direct stack access with 32-bit wraparound. test3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_EXIT_INSN()
+ },
+ .errstr = "fp pointer offset 1073741822",
+ .result = REJECT
+ },
+ {
"liveness pruning and write screening",
.insns = {
/* Get an unknown value */
@@ -6839,10 +7776,24 @@ static struct bpf_test tests[] = {
},
BPF_EXIT_INSN(),
},
- .errstr = "BPF_END uses reserved fields",
+ .errstr = "unknown opcode d7",
.result = REJECT,
},
{
+ "XDP, using ifindex from netdev",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, ingress_ifindex)),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .retval = 1,
+ },
+ {
"meta access, test1",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
@@ -7104,6 +8055,20 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
+ "pkt_end - pkt_start is allowed",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = TEST_DATA_LEN,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
"XDP pkt read, pkt_end mangling, bad access 1",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
@@ -7118,7 +8083,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .errstr = "R1 offset is outside of the packet",
+ .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
},
@@ -7137,7 +8102,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .errstr = "R1 offset is outside of the packet",
+ .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
},
@@ -8026,6 +8991,128 @@ static struct bpf_test tests[] = {
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
+ "check deducing bounds from const, 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 tried to subtract pointer from scalar",
+ },
+ {
+ "check deducing bounds from const, 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 1,
+ },
+ {
+ "check deducing bounds from const, 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 tried to subtract pointer from scalar",
+ },
+ {
+ "check deducing bounds from const, 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ },
+ {
+ "check deducing bounds from const, 5",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 tried to subtract pointer from scalar",
+ },
+ {
+ "check deducing bounds from const, 6",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 tried to subtract pointer from scalar",
+ },
+ {
+ "check deducing bounds from const, 7",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, ~0),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "dereference of modified ctx ptr",
+ },
+ {
+ "check deducing bounds from const, 8",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, ~0),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "dereference of modified ctx ptr",
+ },
+ {
+ "check deducing bounds from const, 9",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 tried to subtract pointer from scalar",
+ },
+ {
+ "check deducing bounds from const, 10",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
+ /* Marks reg as unknown. */
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
+ },
+ {
"bpf_exit with invalid return code. test1",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
@@ -8097,6 +9184,1959 @@ static struct bpf_test tests[] = {
.result = REJECT,
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
+ {
+ "calls: basic sanity",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .result = ACCEPT,
+ },
+ {
+ "calls: not on unpriviledged",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 1,
+ },
+ {
+ "calls: div by 0 in subprog",
+ .insns = {
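+ /* the subprog below divides by zero on purpose; BPF
+ * defines the runtime result of x / 0 as 0 instead of
+ * faulting, so the program is still accepted and
+ * returns 1
+ */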
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(BPF_REG_2, 0),
+ BPF_MOV32_IMM(BPF_REG_3, 1),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+ },
+ {
+ "calls: multiple ret types in subprog 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "R0 invalid mem access 'inv'",
+ },
+ {
+ "calls: multiple ret types in subprog 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
+ offsetof(struct __sk_buff, data)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map1 = { 16 },
+ .result = REJECT,
+ .errstr = "R0 min value is outside of the array range",
+ },
+ {
+ "calls: overlapping caller/callee",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "last insn is not an exit or jmp",
+ .result = REJECT,
+ },
+ {
+ "calls: wrong recursive calls",
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 4),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "jump out of range",
+ .result = REJECT,
+ },
+ {
+ "calls: wrong src reg",
+ .insns = {
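+ /* src_reg == 2 is neither 0 (helper call) nor 1
+ * (BPF_PSEUDO_CALL), so the verifier flags the insn
+ * as using reserved fields
+ */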
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "BPF_CALL uses reserved fields",
+ .result = REJECT,
+ },
+ {
+ "calls: wrong off value",
+ .insns = {
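+ /* the off field of a BPF_CALL insn must be zero;
+ * off=-1 trips the same reserved-field check as a
+ * bad src_reg
+ */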
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "BPF_CALL uses reserved fields",
+ .result = REJECT,
+ },
+ {
+ "calls: jump back loop",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge from insn 0 to 0",
+ .result = REJECT,
+ },
+ {
+ "calls: conditional call",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "jump out of range",
+ .result = REJECT,
+ },
+ {
+ "calls: conditional call 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .result = ACCEPT,
+ },
+ {
+ "calls: conditional call 3",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -6),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -6),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge from insn",
+ .result = REJECT,
+ },
+ {
+ "calls: conditional call 4",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .result = ACCEPT,
+ },
+ {
+ "calls: conditional call 5",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -6),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge from insn",
+ .result = REJECT,
+ },
+ {
+ "calls: conditional call 6",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge from insn",
+ .result = REJECT,
+ },
+ {
+ "calls: using r0 returned by callee",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .result = ACCEPT,
+ },
+ {
+ "calls: using uninit r0 from callee",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "!read_ok",
+ .result = REJECT,
+ },
+ {
+ "calls: callee is using r1",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_ACT,
+ .result = ACCEPT,
+ .retval = TEST_DATA_LEN,
+ },
+ {
+ "calls: callee using args1",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = POINTER_VALUE,
+ },
+ {
+ "calls: callee using wrong args2",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "R2 !read_ok",
+ .result = REJECT,
+ },
+ {
+ "calls: callee using two args",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+ offsetof(struct __sk_buff, len)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
+ offsetof(struct __sk_buff, len)),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
+ },
+ {
+ "calls: callee changing pkt pointers",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ /* clear_all_pkt_pointers() has to walk all frames
+ * to make sure that pkt pointers in the caller
+ * are cleared when the callee calls a helper that
+ * adjusts the packet size
+ */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_xdp_adjust_head),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R6 invalid mem access 'inv'",
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "calls: two calls with args",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = TEST_DATA_LEN + TEST_DATA_LEN,
+ },
+ {
+ "calls: calls with stack arith",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 42,
+ },
+ {
+ "calls: calls with misaligned stack access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+ .errstr = "misaligned stack access",
+ .result = REJECT,
+ },
+ {
+ "calls: calls control flow, jump test",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 43),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 43,
+ },
+ {
+ "calls: calls control flow, jump test 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 43),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "jump out of range from insn 1 to 4",
+ .result = REJECT,
+ },
+ {
+ "calls: two calls with bad jump",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "jump out of range from insn 11 to 9",
+ .result = REJECT,
+ },
+ {
+ "calls: recursive call. test1",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge",
+ .result = REJECT,
+ },
+ {
+ "calls: recursive call. test2",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge",
+ .result = REJECT,
+ },
+ {
+ "calls: unreachable code",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "unreachable insn 6",
+ .result = REJECT,
+ },
+ {
+ "calls: invalid call",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "invalid destination",
+ .result = REJECT,
+ },
+ {
+ "calls: invalid call 2",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "invalid destination",
+ .result = REJECT,
+ },
+ {
+ "calls: jumping across function bodies. test1",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "jump out of range",
+ .result = REJECT,
+ },
+ {
+ "calls: jumping across function bodies. test2",
+ .insns = {
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "jump out of range",
+ .result = REJECT,
+ },
+ {
+ "calls: call without exit",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "not an exit",
+ .result = REJECT,
+ },
+ {
+ "calls: call into middle of ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "last insn",
+ .result = REJECT,
+ },
+ {
+ "calls: call into middle of other call",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "last insn",
+ .result = REJECT,
+ },
+ {
+ "calls: ld_abs with changing ctx data in callee",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_skb_vlan_push),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
+ .result = REJECT,
+ },
+ {
+ "calls: two calls with bad fallthrough",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "not an exit",
+ .result = REJECT,
+ },
+ {
+ "calls: two calls with stack read",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = ACCEPT,
+ },
+ {
+ "calls: two calls with stack write",
+ .insns = {
+ /* main prog */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
+ /* write into stack frame of main prog */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* read from stack frame of main prog */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = ACCEPT,
+ },
+ {
+ "calls: stack overflow using two frames (pre-call access)",
+ .insns = {
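+ /* each frame touches fp[-300], so the two frames need
+ * roughly 600 bytes of stack combined, which is above
+ * the 512-byte limit shared by the whole call chain
+ */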
+ /* prog 1 */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* prog 2 */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .errstr = "combined stack size",
+ .result = REJECT,
+ },
+ {
+ "calls: stack overflow using two frames (post-call access)",
+ .insns = {
+ /* prog 1 */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_EXIT_INSN(),
+
+ /* prog 2 */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .errstr = "combined stack size",
+ .result = REJECT,
+ },
+ {
+ "calls: stack depth check using three frames. test1",
+ .insns = {
+ /* main */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+ BPF_EXIT_INSN(),
+ /* B */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ /* stack_main=32, stack_A=256, stack_B=64
+ * and max(main+A, main+A+B) < 512
+ */
+ .result = ACCEPT,
+ },
+ {
+ "calls: stack depth check using three frames. test2",
+ .insns = {
+ /* main */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+ BPF_EXIT_INSN(),
+ /* B */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ /* stack_main=32, stack_A=64, stack_B=256
+ * and max(main+A, main+A+B) < 512
+ */
+ .result = ACCEPT,
+ },
+ {
+ "calls: stack depth check using three frames. test3",
+ .insns = {
+ /* main */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -3),
+ /* B */
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ /* stack_main=64, stack_A=224, stack_B=256
+ * and max(main+A, main+A+B) > 512
+ */
+ .errstr = "combined stack",
+ .result = REJECT,
+ },
+ {
+ "calls: stack depth check using three frames. test4",
+ /* void main(void) {
+ * func1(0);
+ * func1(1);
+ * func2(1);
+ * }
+ * void func1(int alloc_or_recurse) {
+ * if (alloc_or_recurse) {
+ * frame_pointer[-300] = 1;
+ * } else {
+ * func2(alloc_or_recurse);
+ * }
+ * }
+ * void func2(int alloc_or_recurse) {
+ * if (alloc_or_recurse) {
+ * frame_pointer[-300] = 1;
+ * }
+ * }
+ */
+ .insns = {
+ /* main */
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
+ BPF_EXIT_INSN(),
+ /* B */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = REJECT,
+ .errstr = "combined stack",
+ },
+ {
+ "calls: stack depth check using three frames. test5",
+ .insns = {
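+ /* main plus subprogs A..H forms a nine-frame call
+ * chain, which exceeds the verifier's call depth limit
+ * of eight frames
+ */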
+ /* main */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
+ BPF_EXIT_INSN(),
+ /* B */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
+ BPF_EXIT_INSN(),
+ /* C */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
+ BPF_EXIT_INSN(),
+ /* D */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
+ BPF_EXIT_INSN(),
+ /* E */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
+ BPF_EXIT_INSN(),
+ /* F */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
+ BPF_EXIT_INSN(),
+ /* G */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
+ BPF_EXIT_INSN(),
+ /* H */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .errstr = "call stack",
+ .result = REJECT,
+ },
+ {
+ "calls: spill into caller stack frame",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .errstr = "cannot spill",
+ .result = REJECT,
+ },
+ {
+ "calls: write into caller stack frame",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = ACCEPT,
+ .retval = 42,
+ },
+ {
+ "calls: write into callee stack frame",
+ .insns = {
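+ /* the callee returns a pointer into its own stack
+ * frame, which would be dead once it returns, so the
+ * verifier refuses to pass it back to the caller
+ */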
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .errstr = "cannot return stack pointer",
+ .result = REJECT,
+ },
+ {
+ "calls: two calls with stack write and void return",
+ .insns = {
+ /* main prog */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* write into stack frame of main prog */
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
+ BPF_EXIT_INSN(), /* void return */
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = ACCEPT,
+ },
+ {
+ "calls: ambiguous return value",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "allowed for root only",
+ .result_unpriv = REJECT,
+ .errstr = "R0 !read_ok",
+ .result = REJECT,
+ },
+ {
+ "calls: two calls that return map_value",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ /* fetch second map_value_ptr from the stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ /* call 3rd function twice */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* first time with fp-8 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ /* second time with fp-16 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ /* lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr into stack frame of main prog */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(), /* return 0 */
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .fixup_map1 = { 23 },
+ .result = ACCEPT,
+ },
+ {
+ "calls: two calls that return map_value with bool condition",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ /* call 3rd function twice */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* first time with fp-8 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ /* second time with fp-16 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ /* fetch second map_value_ptr from the stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ /* lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(), /* return 0 */
+ /* write map_value_ptr into stack frame of main prog */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(), /* return 1 */
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .fixup_map1 = { 23 },
+ .result = ACCEPT,
+ },
+ {
+ "calls: two calls that return map_value with incorrect bool check",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ /* call 3rd function twice */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* first time with fp-8 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ /* second time with fp-16 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ /* fetch second map_value_ptr from the stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ /* lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(), /* return 0 */
+ /* write map_value_ptr into stack frame of main prog */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(), /* return 1 */
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .fixup_map1 = { 23 },
+ .result = REJECT,
+ .errstr = "invalid read from stack off -16+0 size 8",
+ },
+ {
+ "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* 1st lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+
+ /* 2nd lookup from map */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_9, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-16 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+
+ /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* if arg2 == 1 do *arg1 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+ /* if arg4 == 1 do *arg3 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ /* 8-byte write at offset 2 runs past the end of the 8-byte map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map1 = { 12, 22 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=8 off=2 size=8",
+ },
+ {
+ "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* 1st lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+
+ /* 2nd lookup from map */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_9, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-16 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+
+ /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* if arg2 == 1 do *arg1 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+ /* if arg4 == 1 do *arg3 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map1 = { 12, 22 },
+ .result = ACCEPT,
+ },
+ {
+ "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* 1st lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+
+ /* 2nd lookup from map */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_9, 0), /* 26 */
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-16 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+
+ /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), /* 34 */
+ BPF_JMP_IMM(BPF_JA, 0, 0, -30),
+
+ /* subprog 2 */
+ /* if arg2 == 1 do *arg1 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+ /* if arg4 == 1 do *arg3 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ /* 8-byte write at offset 2 runs past the end of the 8-byte map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -8),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map1 = { 12, 22 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=8 off=2 size=8",
+ },
+ {
+ "calls: two calls that receive map_value_ptr_or_null via arg. test1",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* 1st lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+
+ /* 2nd lookup from map */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_9, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+
+ /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* if arg2 == 1 do *arg1 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+ /* if arg4 == 1 do *arg3 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map1 = { 12, 22 },
+ .result = ACCEPT,
+ },
+ {
+ "calls: two calls that receive map_value_ptr_or_null via arg. test2",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* 1st lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+
+ /* 2nd lookup from map */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_9, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+
+ /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* if arg2 == 1 do *arg1 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+ /* if arg4 == 0 do *arg3 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map1 = { 12, 22 },
+ .result = REJECT,
+ .errstr = "R0 invalid mem access 'inv'",
+ },
+ {
+ "calls: pkt_ptr spill into caller stack",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ /* spill unchecked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ /* now the pkt range is verified, read pkt_ptr from stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = POINTER_VALUE,
+ },
+ {
+ "calls: pkt_ptr spill into caller stack 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ /* The spill marking is still kept, but it is not safe in all cases. */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ /* spill unchecked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ /* now the pkt range is verified, read pkt_ptr from stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ },
+ {
+ "calls: pkt_ptr spill into caller stack 3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ /* Marking is still kept and safe here. */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ /* spill unchecked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* now the pkt range is verified, read pkt_ptr from stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+ },
+ {
+ "calls: pkt_ptr spill into caller stack 4",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ /* Check marking propagated. */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ /* spill unchecked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+ },
+ {
+ "calls: pkt_ptr spill into caller stack 5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ /* spill checked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "same insn cannot be used with different",
+ .result = REJECT,
+ },
+ {
+ "calls: pkt_ptr spill into caller stack 6",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ /* spill checked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "R4 invalid mem access",
+ .result = REJECT,
+ },
+ {
+ "calls: pkt_ptr spill into caller stack 7",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ /* spill checked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "R4 invalid mem access",
+ .result = REJECT,
+ },
+ {
+ "calls: pkt_ptr spill into caller stack 8",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ /* spill checked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ },
+ {
+ "calls: pkt_ptr spill into caller stack 9",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ /* spill unchecked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ },
+ {
+ "calls: caller stack init to zero or map_value_or_null",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ /* fetch map_value_or_null or const_zero from stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ /* store into map_value */
+ BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ /* if (ctx == 0) return; */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
+ /* else bpf_map_lookup() and *(fp - 8) = r0 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 13 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "calls: stack init to zero and pruning",
+ .insns = {
+			/* first make allocated_stack 16 bytes */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+ /* now fork the execution such that the false branch
+			 * of JGT insn will be verified second and it skips zero
+ * init of fp-8 stack slot. If stack liveness marking
+ * is missing live_read marks from call map_lookup
+ * processing then pruning will incorrectly assume
+ * that fp-8 stack slot was unused in the fall-through
+ * branch and will accept the program incorrectly
+ */
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 6 },
+ .errstr = "invalid indirect read from stack off -8+0 size 8",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "search pruning: all branches should be verified (nop operation)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_A(1),
+ BPF_MOV64_IMM(BPF_REG_4, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_6, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .errstr = "R6 invalid mem access 'inv'",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "search pruning: all branches should be verified (invalid stack access)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
+ BPF_JMP_A(1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .errstr = "invalid read from stack off -16+0 size 8",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
};
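The last few entries above ("calls: stack init to zero and pruning" and the two
"search pruning" cases) all exercise stack-slot liveness during state pruning.
For readers who do not parse raw BPF macros, a rough C analogue of the rejected
pattern is sketched below; it is illustrative only and not part of the patch,
and hypothetical_lookup() merely stands in for bpf_map_lookup_elem().

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-in for bpf_map_lookup_elem(); illustration only */
static long hypothetical_lookup(const void *key)
{
	return *(const uint64_t *)key ? 1 : 0;
}

static long prune_example(uint64_t r1)
{
	uint64_t slot16 = 0;	/* fp-16: makes allocated_stack 16 bytes */
	uint64_t slot8;		/* fp-8: written on only one branch      */

	if (r1 <= 2)
		slot8 = 0;	/* the r1 > 2 path skips this zero-init  */
	(void)slot16;
	/* passing &slot8 to the lookup is an indirect stack read; on the
	 * r1 > 2 path the slot is uninitialized, which the verifier must
	 * still flag even after pruning the other branch
	 */
	return hypothetical_lookup(&slot8);
}

int main(void)
{
	printf("%ld\n", prune_example(1));	/* defined path, for illustration */
	return 0;
}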
static int probe_filter_length(const struct bpf_insn *fp)
@@ -8208,10 +11248,12 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
int fd_prog, expected_ret, reject_from_alignment;
struct bpf_insn *prog = test->insns;
int prog_len = probe_filter_length(prog);
+ char data_in[TEST_DATA_LEN] = {};
int prog_type = test->prog_type;
int map_fds[MAX_NR_MAPS];
const char *expected_err;
- int i;
+ uint32_t retval;
+ int i, err;
for (i = 0; i < MAX_NR_MAPS; i++)
map_fds[i] = -1;
@@ -8254,6 +11296,19 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
}
}
+ if (fd_prog >= 0) {
+ err = bpf_prog_test_run(fd_prog, 1, data_in, sizeof(data_in),
+ NULL, NULL, &retval, NULL);
+ if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
+ printf("Unexpected bpf_prog_test_run error\n");
+ goto fail_log;
+ }
+ if (!err && retval != test->retval &&
+ test->retval != POINTER_VALUE) {
+ printf("FAIL retval %d != %d\n", retval, test->retval);
+ goto fail_log;
+ }
+ }
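The block added above runs each accepted program once through bpf_prog_test_run().
As a standalone sketch of the same call pattern (assumptions: libbpf's bpf/bpf.h
provides bpf_prog_test_run() with this signature, and prog_fd comes from an
earlier, hypothetical bpf_prog_load()):

#include <errno.h>
#include <stdio.h>
#include <stdint.h>
#include <bpf/bpf.h>		/* bpf_prog_test_run(); assumes libbpf headers */

#define TEST_DATA_LEN 64	/* same buffer size the selftest uses */

/* prog_fd is a hypothetical, already-loaded BPF program fd */
static int run_once(int prog_fd, uint32_t expected)
{
	char data_in[TEST_DATA_LEN] = {};
	uint32_t retval = 0;
	int err;

	err = bpf_prog_test_run(prog_fd, 1, data_in, sizeof(data_in),
				NULL, NULL, &retval, NULL);
	/* ENOTSUPP (524) and EPERM mean "cannot run here", not a failure */
	if (err && errno != 524 && errno != EPERM)
		return -1;
	if (!err && retval != expected) {
		fprintf(stderr, "retval %u != %u\n", retval, expected);
		return -1;
	}
	return 0;
}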
(*passes)++;
printf("OK%s\n", reject_from_alignment ?
" (NOTE: reject due to unknown alignment)" : "");
diff --git a/tools/testing/selftests/bpf/test_verifier_log.c b/tools/testing/selftests/bpf/test_verifier_log.c
index 3cc0b561489e..e9626cf5607a 100644
--- a/tools/testing/selftests/bpf/test_verifier_log.c
+++ b/tools/testing/selftests/bpf/test_verifier_log.c
@@ -3,6 +3,8 @@
#include <stdio.h>
#include <string.h>
#include <unistd.h>
+#include <sys/time.h>
+#include <sys/resource.h>
#include <linux/bpf.h>
#include <linux/filter.h>
@@ -131,11 +133,16 @@ static void test_log_bad(char *log, size_t log_len, int log_level)
int main(int argc, char **argv)
{
+ struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY };
char full_log[LOG_SIZE];
char log[LOG_SIZE];
size_t want_len;
int i;
+ /* allow unlimited locked memory to have more consistent error code */
+ if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0)
+ perror("Unable to lift memlock rlimit");
+
memset(log, 1, LOG_SIZE);
/* Test incorrect attr */
diff --git a/tools/testing/selftests/bpf/test_xdp_meta.sh b/tools/testing/selftests/bpf/test_xdp_meta.sh
index 307aa856cee3..637fcf4fe4e3 100755
--- a/tools/testing/selftests/bpf/test_xdp_meta.sh
+++ b/tools/testing/selftests/bpf/test_xdp_meta.sh
@@ -9,6 +9,7 @@ cleanup()
fi
set +e
+ ip link del veth1 2> /dev/null
ip netns del ns1 2> /dev/null
ip netns del ns2 2> /dev/null
}
diff --git a/tools/testing/selftests/bpf/test_xdp_noinline.c b/tools/testing/selftests/bpf/test_xdp_noinline.c
new file mode 100644
index 000000000000..5e4aac74f9d0
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_xdp_noinline.c
@@ -0,0 +1,833 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017 Facebook
+#include <stddef.h>
+#include <stdbool.h>
+#include <string.h>
+#include <linux/pkt_cls.h>
+#include <linux/bpf.h>
+#include <linux/in.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include "bpf_helpers.h"
+
+#define bpf_printk(fmt, ...) \
+({ \
+ char ____fmt[] = fmt; \
+ bpf_trace_printk(____fmt, sizeof(____fmt), \
+ ##__VA_ARGS__); \
+})
+
+static __u32 rol32(__u32 word, unsigned int shift)
+{
+ return (word << shift) | (word >> ((-shift) & 31));
+}
+
+/* copy-paste of jhash from kernel sources to make sure llvm
+ * can compile it into a valid sequence of bpf instructions
+ */
+#define __jhash_mix(a, b, c) \
+{ \
+ a -= c; a ^= rol32(c, 4); c += b; \
+ b -= a; b ^= rol32(a, 6); a += c; \
+ c -= b; c ^= rol32(b, 8); b += a; \
+ a -= c; a ^= rol32(c, 16); c += b; \
+ b -= a; b ^= rol32(a, 19); a += c; \
+ c -= b; c ^= rol32(b, 4); b += a; \
+}
+
+#define __jhash_final(a, b, c) \
+{ \
+ c ^= b; c -= rol32(b, 14); \
+ a ^= c; a -= rol32(c, 11); \
+ b ^= a; b -= rol32(a, 25); \
+ c ^= b; c -= rol32(b, 16); \
+ a ^= c; a -= rol32(c, 4); \
+ b ^= a; b -= rol32(a, 14); \
+ c ^= b; c -= rol32(b, 24); \
+}
+
+#define JHASH_INITVAL 0xdeadbeef
+
+typedef unsigned int u32;
+
+static __attribute__ ((noinline))
+u32 jhash(const void *key, u32 length, u32 initval)
+{
+ u32 a, b, c;
+ const unsigned char *k = key;
+
+ a = b = c = JHASH_INITVAL + length + initval;
+
+ while (length > 12) {
+ a += *(u32 *)(k);
+ b += *(u32 *)(k + 4);
+ c += *(u32 *)(k + 8);
+ __jhash_mix(a, b, c);
+ length -= 12;
+ k += 12;
+ }
+ switch (length) {
+ case 12: c += (u32)k[11]<<24;
+ case 11: c += (u32)k[10]<<16;
+ case 10: c += (u32)k[9]<<8;
+ case 9: c += k[8];
+ case 8: b += (u32)k[7]<<24;
+ case 7: b += (u32)k[6]<<16;
+ case 6: b += (u32)k[5]<<8;
+ case 5: b += k[4];
+ case 4: a += (u32)k[3]<<24;
+ case 3: a += (u32)k[2]<<16;
+ case 2: a += (u32)k[1]<<8;
+ case 1: a += k[0];
+ __jhash_final(a, b, c);
+ case 0: /* Nothing left to add */
+ break;
+ }
+
+ return c;
+}
+
+static __attribute__ ((noinline))
+u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
+{
+ a += initval;
+ b += initval;
+ c += initval;
+ __jhash_final(a, b, c);
+ return c;
+}
+
+static __attribute__ ((noinline))
+u32 jhash_2words(u32 a, u32 b, u32 initval)
+{
+ return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
+}
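The jhash copy above exists so LLVM can lower the hashing into plain BPF
instructions. A small host-side sketch (not part of the patch; it assumes the
rol32/jhash/jhash_2words definitions above are compiled in the same unit)
showing how a 16-byte address and a ports word are combined, mirroring
get_packet_hash() further down:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint8_t srcv6[16];
	uint32_t ports = (443u << 16) | 12345u;	/* hypothetical src/dst ports */
	uint32_t h;

	memset(srcv6, 0xab, sizeof(srcv6));
	/* the 16-byte address is hashed first, then mixed with the ports
	 * word, matching get_packet_hash(pckt, true) in the program below */
	h = jhash_2words(jhash(srcv6, 16, 12), ports, 24);
	printf("hash = 0x%08x\n", h);
	return 0;
}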
+
+struct flow_key {
+ union {
+ __be32 src;
+ __be32 srcv6[4];
+ };
+ union {
+ __be32 dst;
+ __be32 dstv6[4];
+ };
+ union {
+ __u32 ports;
+ __u16 port16[2];
+ };
+ __u8 proto;
+};
+
+struct packet_description {
+ struct flow_key flow;
+ __u8 flags;
+};
+
+struct ctl_value {
+ union {
+ __u64 value;
+ __u32 ifindex;
+ __u8 mac[6];
+ };
+};
+
+struct vip_definition {
+ union {
+ __be32 vip;
+ __be32 vipv6[4];
+ };
+ __u16 port;
+ __u16 family;
+ __u8 proto;
+};
+
+struct vip_meta {
+ __u32 flags;
+ __u32 vip_num;
+};
+
+struct real_pos_lru {
+ __u32 pos;
+ __u64 atime;
+};
+
+struct real_definition {
+ union {
+ __be32 dst;
+ __be32 dstv6[4];
+ };
+ __u8 flags;
+};
+
+struct lb_stats {
+ __u64 v2;
+ __u64 v1;
+};
+
+struct bpf_map_def __attribute__ ((section("maps"), used)) vip_map = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(struct vip_definition),
+ .value_size = sizeof(struct vip_meta),
+ .max_entries = 512,
+ .map_flags = 0,
+};
+
+struct bpf_map_def __attribute__ ((section("maps"), used)) lru_cache = {
+ .type = BPF_MAP_TYPE_LRU_HASH,
+ .key_size = sizeof(struct flow_key),
+ .value_size = sizeof(struct real_pos_lru),
+ .max_entries = 300,
+ .map_flags = 1U << 1,
+};
+
+struct bpf_map_def __attribute__ ((section("maps"), used)) ch_rings = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(__u32),
+ .max_entries = 12 * 655,
+ .map_flags = 0,
+};
+
+struct bpf_map_def __attribute__ ((section("maps"), used)) reals = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct real_definition),
+ .max_entries = 40,
+ .map_flags = 0,
+};
+
+struct bpf_map_def __attribute__ ((section("maps"), used)) stats = {
+ .type = BPF_MAP_TYPE_PERCPU_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct lb_stats),
+ .max_entries = 515,
+ .map_flags = 0,
+};
+
+struct bpf_map_def __attribute__ ((section("maps"), used)) ctl_array = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct ctl_value),
+ .max_entries = 16,
+ .map_flags = 0,
+};
+
+struct eth_hdr {
+ unsigned char eth_dest[6];
+ unsigned char eth_source[6];
+ unsigned short eth_proto;
+};
+
+static inline __u64 calc_offset(bool is_ipv6, bool is_icmp)
+{
+ __u64 off = sizeof(struct eth_hdr);
+ if (is_ipv6) {
+ off += sizeof(struct ipv6hdr);
+ if (is_icmp)
+ off += sizeof(struct icmp6hdr) + sizeof(struct ipv6hdr);
+ } else {
+ off += sizeof(struct iphdr);
+ if (is_icmp)
+ off += sizeof(struct icmphdr) + sizeof(struct iphdr);
+ }
+ return off;
+}
+
+static __attribute__ ((noinline))
+bool parse_udp(void *data, void *data_end,
+ bool is_ipv6, struct packet_description *pckt)
+{
+
+ bool is_icmp = !((pckt->flags & (1 << 0)) == 0);
+ __u64 off = calc_offset(is_ipv6, is_icmp);
+ struct udphdr *udp;
+ udp = data + off;
+
+ if (udp + 1 > data_end)
+ return 0;
+ if (!is_icmp) {
+ pckt->flow.port16[0] = udp->source;
+ pckt->flow.port16[1] = udp->dest;
+ } else {
+ pckt->flow.port16[0] = udp->dest;
+ pckt->flow.port16[1] = udp->source;
+ }
+ return 1;
+}
+
+static __attribute__ ((noinline))
+bool parse_tcp(void *data, void *data_end,
+ bool is_ipv6, struct packet_description *pckt)
+{
+
+ bool is_icmp = !((pckt->flags & (1 << 0)) == 0);
+ __u64 off = calc_offset(is_ipv6, is_icmp);
+ struct tcphdr *tcp;
+
+ tcp = data + off;
+ if (tcp + 1 > data_end)
+ return 0;
+ if (tcp->syn)
+ pckt->flags |= (1 << 1);
+ if (!is_icmp) {
+ pckt->flow.port16[0] = tcp->source;
+ pckt->flow.port16[1] = tcp->dest;
+ } else {
+ pckt->flow.port16[0] = tcp->dest;
+ pckt->flow.port16[1] = tcp->source;
+ }
+ return 1;
+}
+
+static __attribute__ ((noinline))
+bool encap_v6(struct xdp_md *xdp, struct ctl_value *cval,
+ struct packet_description *pckt,
+ struct real_definition *dst, __u32 pkt_bytes)
+{
+ struct eth_hdr *new_eth;
+ struct eth_hdr *old_eth;
+ struct ipv6hdr *ip6h;
+ __u32 ip_suffix;
+ void *data_end;
+ void *data;
+
+ if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))
+ return 0;
+ data = (void *)(long)xdp->data;
+ data_end = (void *)(long)xdp->data_end;
+ new_eth = data;
+ ip6h = data + sizeof(struct eth_hdr);
+ old_eth = data + sizeof(struct ipv6hdr);
+ if (new_eth + 1 > data_end ||
+ old_eth + 1 > data_end || ip6h + 1 > data_end)
+ return 0;
+ memcpy(new_eth->eth_dest, cval->mac, 6);
+ memcpy(new_eth->eth_source, old_eth->eth_dest, 6);
+ new_eth->eth_proto = 56710;
+ ip6h->version = 6;
+ ip6h->priority = 0;
+ memset(ip6h->flow_lbl, 0, sizeof(ip6h->flow_lbl));
+
+ ip6h->nexthdr = IPPROTO_IPV6;
+ ip_suffix = pckt->flow.srcv6[3] ^ pckt->flow.port16[0];
+ ip6h->payload_len =
+ __builtin_bswap16(pkt_bytes + sizeof(struct ipv6hdr));
+ ip6h->hop_limit = 4;
+
+ ip6h->saddr.in6_u.u6_addr32[0] = 1;
+ ip6h->saddr.in6_u.u6_addr32[1] = 2;
+ ip6h->saddr.in6_u.u6_addr32[2] = 3;
+ ip6h->saddr.in6_u.u6_addr32[3] = ip_suffix;
+ memcpy(ip6h->daddr.in6_u.u6_addr32, dst->dstv6, 16);
+ return 1;
+}
+
+static __attribute__ ((noinline))
+bool encap_v4(struct xdp_md *xdp, struct ctl_value *cval,
+ struct packet_description *pckt,
+ struct real_definition *dst, __u32 pkt_bytes)
+{
+
+ __u32 ip_suffix = __builtin_bswap16(pckt->flow.port16[0]);
+ struct eth_hdr *new_eth;
+ struct eth_hdr *old_eth;
+ __u16 *next_iph_u16;
+ struct iphdr *iph;
+ __u32 csum = 0;
+ void *data_end;
+ void *data;
+
+ ip_suffix <<= 15;
+ ip_suffix ^= pckt->flow.src;
+ if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))
+ return 0;
+ data = (void *)(long)xdp->data;
+ data_end = (void *)(long)xdp->data_end;
+ new_eth = data;
+ iph = data + sizeof(struct eth_hdr);
+ old_eth = data + sizeof(struct iphdr);
+ if (new_eth + 1 > data_end ||
+ old_eth + 1 > data_end || iph + 1 > data_end)
+ return 0;
+ memcpy(new_eth->eth_dest, cval->mac, 6);
+ memcpy(new_eth->eth_source, old_eth->eth_dest, 6);
+ new_eth->eth_proto = 8;
+ iph->version = 4;
+ iph->ihl = 5;
+ iph->frag_off = 0;
+ iph->protocol = IPPROTO_IPIP;
+ iph->check = 0;
+ iph->tos = 1;
+ iph->tot_len = __builtin_bswap16(pkt_bytes + sizeof(struct iphdr));
+ /* don't update iph->daddr, since it will overwrite old eth_proto
+ * and multiple iterations of bpf_prog_run() will fail
+ */
+
+ iph->saddr = ((0xFFFF0000 & ip_suffix) | 4268) ^ dst->dst;
+ iph->ttl = 4;
+
+ next_iph_u16 = (__u16 *) iph;
+#pragma clang loop unroll(full)
+ for (int i = 0; i < sizeof(struct iphdr) >> 1; i++)
+ csum += *next_iph_u16++;
+ iph->check = ~((csum & 0xffff) + (csum >> 16));
+ if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct iphdr)))
+ return 0;
+ return 1;
+}
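encap_v4() above (and send_icmp_reply() further down) recompute the IPv4 header
checksum by summing the header as 16-bit words and folding the carries once. A
standalone host-side sketch of that step (illustrative only; assumes a 20-byte
header with no options and mirrors the single fold used in the program):

#include <stdio.h>
#include <stdint.h>

/* one's-complement sum over a 20-byte IPv4 header, carries folded once */
static uint16_t iph_checksum(const void *hdr)
{
	const uint16_t *p = hdr;
	uint32_t csum = 0;
	int i;

	for (i = 0; i < 20 / 2; i++)
		csum += p[i];
	return ~((csum & 0xffff) + (csum >> 16));
}

int main(void)
{
	/* hypothetical header bytes; checksum field (offsets 10..11) zeroed */
	uint8_t iph[20] = { 0x45, 0x00, 0x00, 0x54, 0x00, 0x00, 0x40, 0x00,
			    0x40, 0x01, 0x00, 0x00, 0xc0, 0xa8, 0x00, 0x01,
			    0xc0, 0xa8, 0x00, 0xc7 };

	printf("checksum = 0x%04x\n", iph_checksum(iph));
	return 0;
}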
+
+static __attribute__ ((noinline))
+bool decap_v6(struct xdp_md *xdp, void **data, void **data_end, bool inner_v4)
+{
+ struct eth_hdr *new_eth;
+ struct eth_hdr *old_eth;
+
+ old_eth = *data;
+ new_eth = *data + sizeof(struct ipv6hdr);
+ memcpy(new_eth->eth_source, old_eth->eth_source, 6);
+ memcpy(new_eth->eth_dest, old_eth->eth_dest, 6);
+ if (inner_v4)
+ new_eth->eth_proto = 8;
+ else
+ new_eth->eth_proto = 56710;
+ if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct ipv6hdr)))
+ return 0;
+ *data = (void *)(long)xdp->data;
+ *data_end = (void *)(long)xdp->data_end;
+ return 1;
+}
+
+static __attribute__ ((noinline))
+bool decap_v4(struct xdp_md *xdp, void **data, void **data_end)
+{
+ struct eth_hdr *new_eth;
+ struct eth_hdr *old_eth;
+
+ old_eth = *data;
+ new_eth = *data + sizeof(struct iphdr);
+ memcpy(new_eth->eth_source, old_eth->eth_source, 6);
+ memcpy(new_eth->eth_dest, old_eth->eth_dest, 6);
+ new_eth->eth_proto = 8;
+ if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct iphdr)))
+ return 0;
+ *data = (void *)(long)xdp->data;
+ *data_end = (void *)(long)xdp->data_end;
+ return 1;
+}
+
+static __attribute__ ((noinline))
+int swap_mac_and_send(void *data, void *data_end)
+{
+ unsigned char tmp_mac[6];
+ struct eth_hdr *eth;
+
+ eth = data;
+ memcpy(tmp_mac, eth->eth_source, 6);
+ memcpy(eth->eth_source, eth->eth_dest, 6);
+ memcpy(eth->eth_dest, tmp_mac, 6);
+ return XDP_TX;
+}
+
+static __attribute__ ((noinline))
+int send_icmp_reply(void *data, void *data_end)
+{
+ struct icmphdr *icmp_hdr;
+ __u16 *next_iph_u16;
+ __u32 tmp_addr = 0;
+ struct iphdr *iph;
+ __u32 csum1 = 0;
+ __u32 csum = 0;
+ __u64 off = 0;
+
+ if (data + sizeof(struct eth_hdr)
+ + sizeof(struct iphdr) + sizeof(struct icmphdr) > data_end)
+ return XDP_DROP;
+ off += sizeof(struct eth_hdr);
+ iph = data + off;
+ off += sizeof(struct iphdr);
+ icmp_hdr = data + off;
+ icmp_hdr->type = 0;
+ icmp_hdr->checksum += 0x0007;
+ iph->ttl = 4;
+ tmp_addr = iph->daddr;
+ iph->daddr = iph->saddr;
+ iph->saddr = tmp_addr;
+ iph->check = 0;
+ next_iph_u16 = (__u16 *) iph;
+#pragma clang loop unroll(full)
+ for (int i = 0; i < sizeof(struct iphdr) >> 1; i++)
+ csum += *next_iph_u16++;
+ iph->check = ~((csum & 0xffff) + (csum >> 16));
+ return swap_mac_and_send(data, data_end);
+}
+
+static __attribute__ ((noinline))
+int send_icmp6_reply(void *data, void *data_end)
+{
+ struct icmp6hdr *icmp_hdr;
+ struct ipv6hdr *ip6h;
+ __be32 tmp_addr[4];
+ __u64 off = 0;
+
+ if (data + sizeof(struct eth_hdr)
+ + sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr) > data_end)
+ return XDP_DROP;
+ off += sizeof(struct eth_hdr);
+ ip6h = data + off;
+ off += sizeof(struct ipv6hdr);
+ icmp_hdr = data + off;
+ icmp_hdr->icmp6_type = 129;
+ icmp_hdr->icmp6_cksum -= 0x0001;
+ ip6h->hop_limit = 4;
+ memcpy(tmp_addr, ip6h->saddr.in6_u.u6_addr32, 16);
+ memcpy(ip6h->saddr.in6_u.u6_addr32, ip6h->daddr.in6_u.u6_addr32, 16);
+ memcpy(ip6h->daddr.in6_u.u6_addr32, tmp_addr, 16);
+ return swap_mac_and_send(data, data_end);
+}
+
+static __attribute__ ((noinline))
+int parse_icmpv6(void *data, void *data_end, __u64 off,
+ struct packet_description *pckt)
+{
+ struct icmp6hdr *icmp_hdr;
+ struct ipv6hdr *ip6h;
+
+ icmp_hdr = data + off;
+ if (icmp_hdr + 1 > data_end)
+ return XDP_DROP;
+ if (icmp_hdr->icmp6_type == 128)
+ return send_icmp6_reply(data, data_end);
+ if (icmp_hdr->icmp6_type != 3)
+ return XDP_PASS;
+ off += sizeof(struct icmp6hdr);
+ ip6h = data + off;
+ if (ip6h + 1 > data_end)
+ return XDP_DROP;
+ pckt->flow.proto = ip6h->nexthdr;
+ pckt->flags |= (1 << 0);
+ memcpy(pckt->flow.srcv6, ip6h->daddr.in6_u.u6_addr32, 16);
+ memcpy(pckt->flow.dstv6, ip6h->saddr.in6_u.u6_addr32, 16);
+ return -1;
+}
+
+static __attribute__ ((noinline))
+int parse_icmp(void *data, void *data_end, __u64 off,
+ struct packet_description *pckt)
+{
+ struct icmphdr *icmp_hdr;
+ struct iphdr *iph;
+
+ icmp_hdr = data + off;
+ if (icmp_hdr + 1 > data_end)
+ return XDP_DROP;
+ if (icmp_hdr->type == 8)
+ return send_icmp_reply(data, data_end);
+ if ((icmp_hdr->type != 3) || (icmp_hdr->code != 4))
+ return XDP_PASS;
+ off += sizeof(struct icmphdr);
+ iph = data + off;
+ if (iph + 1 > data_end)
+ return XDP_DROP;
+ if (iph->ihl != 5)
+ return XDP_DROP;
+ pckt->flow.proto = iph->protocol;
+ pckt->flags |= (1 << 0);
+ pckt->flow.src = iph->daddr;
+ pckt->flow.dst = iph->saddr;
+ return -1;
+}
+
+static __attribute__ ((noinline))
+__u32 get_packet_hash(struct packet_description *pckt,
+ bool hash_16bytes)
+{
+ if (hash_16bytes)
+ return jhash_2words(jhash(pckt->flow.srcv6, 16, 12),
+ pckt->flow.ports, 24);
+ else
+ return jhash_2words(pckt->flow.src, pckt->flow.ports,
+ 24);
+}
+
+__attribute__ ((noinline))
+static bool get_packet_dst(struct real_definition **real,
+ struct packet_description *pckt,
+ struct vip_meta *vip_info,
+ bool is_ipv6, void *lru_map)
+{
+ struct real_pos_lru new_dst_lru = { };
+ bool hash_16bytes = is_ipv6;
+ __u32 *real_pos, hash, key;
+ __u64 cur_time;
+
+ if (vip_info->flags & (1 << 2))
+ hash_16bytes = 1;
+ if (vip_info->flags & (1 << 3)) {
+ pckt->flow.port16[0] = pckt->flow.port16[1];
+ memset(pckt->flow.srcv6, 0, 16);
+ }
+ hash = get_packet_hash(pckt, hash_16bytes);
+ if (hash != 0x358459b7 /* jhash of ipv4 packet */ &&
+ hash != 0x2f4bc6bb /* jhash of ipv6 packet */)
+ return 0;
+ key = 2 * vip_info->vip_num + hash % 2;
+ real_pos = bpf_map_lookup_elem(&ch_rings, &key);
+ if (!real_pos)
+ return 0;
+ key = *real_pos;
+ *real = bpf_map_lookup_elem(&reals, &key);
+ if (!(*real))
+ return 0;
+ if (!(vip_info->flags & (1 << 1))) {
+ __u32 conn_rate_key = 512 + 2;
+ struct lb_stats *conn_rate_stats =
+ bpf_map_lookup_elem(&stats, &conn_rate_key);
+
+ if (!conn_rate_stats)
+ return 1;
+ cur_time = bpf_ktime_get_ns();
+ if ((cur_time - conn_rate_stats->v2) >> 32 > 0xffFFFF) {
+ conn_rate_stats->v1 = 1;
+ conn_rate_stats->v2 = cur_time;
+ } else {
+ conn_rate_stats->v1 += 1;
+ if (conn_rate_stats->v1 >= 1)
+ return 1;
+ }
+ if (pckt->flow.proto == IPPROTO_UDP)
+ new_dst_lru.atime = cur_time;
+ new_dst_lru.pos = key;
+ bpf_map_update_elem(lru_map, &pckt->flow, &new_dst_lru, 0);
+ }
+ return 1;
+}
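get_packet_dst() above maps a flow to a backend through two array lookups: the
flow hash selects one of two ring slots reserved for the VIP, and the ring slot
holds an index into the reals table. A plain-array sketch of that indexing
(illustrative only; pick_real() is hypothetical, the sizes mirror the map
definitions above, and the bounds are clamped here only to keep the sketch safe):

#include <stdio.h>
#include <stdint.h>

#define CH_RING_SLOTS	(12 * 655)	/* matches ch_rings max_entries */
#define NR_REALS	40		/* matches reals max_entries    */

static uint32_t ch_ring[CH_RING_SLOTS];	/* position -> index into reals[] */
static uint32_t reals[NR_REALS];	/* backend addresses (simplified) */

static uint32_t pick_real(uint32_t vip_num, uint32_t flow_hash)
{
	uint32_t key = 2 * vip_num + flow_hash % 2;	/* two slots per VIP */
	uint32_t pos = ch_ring[key % CH_RING_SLOTS];

	return reals[pos % NR_REALS];
}

int main(void)
{
	ch_ring[0] = 7;
	ch_ring[1] = 9;
	reals[7] = 0x0a000001;	/* hypothetical backend 10.0.0.1 */
	reals[9] = 0x0a000002;	/* hypothetical backend 10.0.0.2 */
	printf("real = 0x%08x\n", pick_real(0, 0x358459b7));
	return 0;
}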
+
+__attribute__ ((noinline))
+static void connection_table_lookup(struct real_definition **real,
+ struct packet_description *pckt,
+ void *lru_map)
+{
+
+ struct real_pos_lru *dst_lru;
+ __u64 cur_time;
+ __u32 key;
+
+ dst_lru = bpf_map_lookup_elem(lru_map, &pckt->flow);
+ if (!dst_lru)
+ return;
+ if (pckt->flow.proto == IPPROTO_UDP) {
+ cur_time = bpf_ktime_get_ns();
+ if (cur_time - dst_lru->atime > 300000)
+ return;
+ dst_lru->atime = cur_time;
+ }
+ key = dst_lru->pos;
+ *real = bpf_map_lookup_elem(&reals, &key);
+}
+
+/* don't believe your eyes!
+ * the function below has 6 arguments whereas bpf and llvm allow a maximum
+ * of 5, but since it's _static_, llvm can optimize one argument away
+ */
+__attribute__ ((noinline))
+static int process_l3_headers_v6(struct packet_description *pckt,
+ __u8 *protocol, __u64 off,
+ __u16 *pkt_bytes, void *data,
+ void *data_end)
+{
+ struct ipv6hdr *ip6h;
+ __u64 iph_len;
+ int action;
+
+ ip6h = data + off;
+ if (ip6h + 1 > data_end)
+ return XDP_DROP;
+ iph_len = sizeof(struct ipv6hdr);
+ *protocol = ip6h->nexthdr;
+ pckt->flow.proto = *protocol;
+ *pkt_bytes = __builtin_bswap16(ip6h->payload_len);
+ off += iph_len;
+ if (*protocol == 45) {
+ return XDP_DROP;
+ } else if (*protocol == 59) {
+ action = parse_icmpv6(data, data_end, off, pckt);
+ if (action >= 0)
+ return action;
+ } else {
+ memcpy(pckt->flow.srcv6, ip6h->saddr.in6_u.u6_addr32, 16);
+ memcpy(pckt->flow.dstv6, ip6h->daddr.in6_u.u6_addr32, 16);
+ }
+ return -1;
+}
+
+__attribute__ ((noinline))
+static int process_l3_headers_v4(struct packet_description *pckt,
+ __u8 *protocol, __u64 off,
+ __u16 *pkt_bytes, void *data,
+ void *data_end)
+{
+ struct iphdr *iph;
+ __u64 iph_len;
+ int action;
+
+ iph = data + off;
+ if (iph + 1 > data_end)
+ return XDP_DROP;
+ if (iph->ihl != 5)
+ return XDP_DROP;
+ *protocol = iph->protocol;
+ pckt->flow.proto = *protocol;
+ *pkt_bytes = __builtin_bswap16(iph->tot_len);
+ off += 20;
+ if (iph->frag_off & 65343)
+ return XDP_DROP;
+ if (*protocol == IPPROTO_ICMP) {
+ action = parse_icmp(data, data_end, off, pckt);
+ if (action >= 0)
+ return action;
+ } else {
+ pckt->flow.src = iph->saddr;
+ pckt->flow.dst = iph->daddr;
+ }
+ return -1;
+}
+
+__attribute__ ((noinline))
+static int process_packet(void *data, __u64 off, void *data_end,
+ bool is_ipv6, struct xdp_md *xdp)
+{
+
+ struct real_definition *dst = NULL;
+ struct packet_description pckt = { };
+ struct vip_definition vip = { };
+ struct lb_stats *data_stats;
+ struct eth_hdr *eth = data;
+ void *lru_map = &lru_cache;
+ struct vip_meta *vip_info;
+ __u32 lru_stats_key = 513;
+ __u32 mac_addr_pos = 0;
+ __u32 stats_key = 512;
+ struct ctl_value *cval;
+ __u16 pkt_bytes;
+ __u64 iph_len;
+ __u8 protocol;
+ __u32 vip_num;
+ int action;
+
+ if (is_ipv6)
+ action = process_l3_headers_v6(&pckt, &protocol, off,
+ &pkt_bytes, data, data_end);
+ else
+ action = process_l3_headers_v4(&pckt, &protocol, off,
+ &pkt_bytes, data, data_end);
+ if (action >= 0)
+ return action;
+ protocol = pckt.flow.proto;
+ if (protocol == IPPROTO_TCP) {
+ if (!parse_tcp(data, data_end, is_ipv6, &pckt))
+ return XDP_DROP;
+ } else if (protocol == IPPROTO_UDP) {
+ if (!parse_udp(data, data_end, is_ipv6, &pckt))
+ return XDP_DROP;
+ } else {
+ return XDP_TX;
+ }
+
+ if (is_ipv6)
+ memcpy(vip.vipv6, pckt.flow.dstv6, 16);
+ else
+ vip.vip = pckt.flow.dst;
+ vip.port = pckt.flow.port16[1];
+ vip.proto = pckt.flow.proto;
+ vip_info = bpf_map_lookup_elem(&vip_map, &vip);
+ if (!vip_info) {
+ vip.port = 0;
+ vip_info = bpf_map_lookup_elem(&vip_map, &vip);
+ if (!vip_info)
+ return XDP_PASS;
+ if (!(vip_info->flags & (1 << 4)))
+ pckt.flow.port16[1] = 0;
+ }
+ if (data_end - data > 1400)
+ return XDP_DROP;
+ data_stats = bpf_map_lookup_elem(&stats, &stats_key);
+ if (!data_stats)
+ return XDP_DROP;
+ data_stats->v1 += 1;
+ if (!dst) {
+ if (vip_info->flags & (1 << 0))
+ pckt.flow.port16[0] = 0;
+ if (!(pckt.flags & (1 << 1)) && !(vip_info->flags & (1 << 1)))
+ connection_table_lookup(&dst, &pckt, lru_map);
+ if (dst)
+ goto out;
+ if (pckt.flow.proto == IPPROTO_TCP) {
+ struct lb_stats *lru_stats =
+ bpf_map_lookup_elem(&stats, &lru_stats_key);
+
+ if (!lru_stats)
+ return XDP_DROP;
+ if (pckt.flags & (1 << 1))
+ lru_stats->v1 += 1;
+ else
+ lru_stats->v2 += 1;
+ }
+ if (!get_packet_dst(&dst, &pckt, vip_info, is_ipv6, lru_map))
+ return XDP_DROP;
+ data_stats->v2 += 1;
+ }
+out:
+ cval = bpf_map_lookup_elem(&ctl_array, &mac_addr_pos);
+ if (!cval)
+ return XDP_DROP;
+ if (dst->flags & (1 << 0)) {
+ if (!encap_v6(xdp, cval, &pckt, dst, pkt_bytes))
+ return XDP_DROP;
+ } else {
+ if (!encap_v4(xdp, cval, &pckt, dst, pkt_bytes))
+ return XDP_DROP;
+ }
+ vip_num = vip_info->vip_num;
+ data_stats = bpf_map_lookup_elem(&stats, &vip_num);
+ if (!data_stats)
+ return XDP_DROP;
+ data_stats->v1 += 1;
+ data_stats->v2 += pkt_bytes;
+
+ data = (void *)(long)xdp->data;
+ data_end = (void *)(long)xdp->data_end;
+ if (data + 4 > data_end)
+ return XDP_DROP;
+ *(u32 *)data = dst->dst;
+ return XDP_DROP;
+}
+
+__attribute__ ((section("xdp-test"), used))
+int balancer_ingress(struct xdp_md *ctx)
+{
+ void *data = (void *)(long)ctx->data;
+ void *data_end = (void *)(long)ctx->data_end;
+ struct eth_hdr *eth = data;
+ __u32 eth_proto;
+ __u32 nh_off;
+
+ nh_off = sizeof(struct eth_hdr);
+ if (data + nh_off > data_end)
+ return XDP_DROP;
+ eth_proto = eth->eth_proto;
+ if (eth_proto == 8)
+ return process_packet(data, nh_off, data_end, 0, ctx);
+ else if (eth_proto == 56710)
+ return process_packet(data, nh_off, data_end, 1, ctx);
+ else
+ return XDP_DROP;
+}
+
+char _license[] __attribute__ ((section("license"), used)) = "GPL";
+int _version __attribute__ ((section("version"), used)) = 1;
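balancer_ingress() above compares eth_proto against the literals 8 and 56710;
these are just ETH_P_IP (0x0800) and ETH_P_IPV6 (0x86DD) already in network
byte order on a little-endian host, the same constants encap_v4()/encap_v6()
store back into the rewritten header. A tiny host-side check (illustrative,
not part of the patch):

#include <stdio.h>
#include <arpa/inet.h>		/* htons() */
#include <linux/if_ether.h>	/* ETH_P_IP, ETH_P_IPV6 */

int main(void)
{
	/* on little-endian hosts these print 8 and 56710 */
	printf("htons(ETH_P_IP)   = %u\n", (unsigned)htons(ETH_P_IP));
	printf("htons(ETH_P_IPV6) = %u\n", (unsigned)htons(ETH_P_IPV6));
	return 0;
}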
diff --git a/tools/testing/selftests/bpf/test_xdp_redirect.sh b/tools/testing/selftests/bpf/test_xdp_redirect.sh
index 344a3656dea6..c4b17e08d431 100755
--- a/tools/testing/selftests/bpf/test_xdp_redirect.sh
+++ b/tools/testing/selftests/bpf/test_xdp_redirect.sh
@@ -19,6 +19,8 @@ cleanup()
fi
set +e
+ ip link del veth1 2> /dev/null
+ ip link del veth2 2> /dev/null
ip netns del ns1 2> /dev/null
ip netns del ns2 2> /dev/null
}
diff --git a/tools/testing/selftests/firmware/fw_fallback.sh b/tools/testing/selftests/firmware/fw_fallback.sh
index 34a42c68ebfb..722cad91df74 100755
--- a/tools/testing/selftests/firmware/fw_fallback.sh
+++ b/tools/testing/selftests/firmware/fw_fallback.sh
@@ -175,96 +175,118 @@ trap "test_finish" EXIT
echo "ABCD0123" >"$FW"
NAME=$(basename "$FW")
-DEVPATH="$DIR"/"nope-$NAME"/loading
-
-# Test failure when doing nothing (timeout works).
-echo -n 2 >/sys/class/firmware/timeout
-echo -n "nope-$NAME" >"$DIR"/trigger_request 2>/dev/null &
-
-# Give the kernel some time to load the loading file, must be less
-# than the timeout above.
-sleep 1
-if [ ! -f $DEVPATH ]; then
- echo "$0: fallback mechanism immediately cancelled"
- echo ""
- echo "The file never appeared: $DEVPATH"
- echo ""
- echo "This might be a distribution udev rule setup by your distribution"
- echo "to immediately cancel all fallback requests, this must be"
- echo "removed before running these tests. To confirm look for"
- echo "a firmware rule like /lib/udev/rules.d/50-firmware.rules"
- echo "and see if you have something like this:"
- echo ""
- echo "SUBSYSTEM==\"firmware\", ACTION==\"add\", ATTR{loading}=\"-1\""
- echo ""
- echo "If you do remove this file or comment out this line before"
- echo "proceeding with these tests."
- exit 1
-fi
-
-if diff -q "$FW" /dev/test_firmware >/dev/null ; then
- echo "$0: firmware was not expected to match" >&2
- exit 1
-else
- echo "$0: timeout works"
-fi
-
-# Put timeout high enough for us to do work but not so long that failures
-# slow down this test too much.
-echo 4 >/sys/class/firmware/timeout
+test_sysfs_timeout()
+{
+ DEVPATH="$DIR"/"nope-$NAME"/loading
+
+ # Test failure when doing nothing (timeout works).
+ echo -n 2 >/sys/class/firmware/timeout
+ echo -n "nope-$NAME" >"$DIR"/trigger_request 2>/dev/null &
+
+ # Give the kernel some time to load the loading file, must be less
+ # than the timeout above.
+ sleep 1
+ if [ ! -f $DEVPATH ]; then
+ echo "$0: fallback mechanism immediately cancelled"
+ echo ""
+ echo "The file never appeared: $DEVPATH"
+ echo ""
+ echo "This might be a distribution udev rule setup by your distribution"
+ echo "to immediately cancel all fallback requests, this must be"
+ echo "removed before running these tests. To confirm look for"
+ echo "a firmware rule like /lib/udev/rules.d/50-firmware.rules"
+ echo "and see if you have something like this:"
+ echo ""
+ echo "SUBSYSTEM==\"firmware\", ACTION==\"add\", ATTR{loading}=\"-1\""
+ echo ""
+ echo "If you do remove this file or comment out this line before"
+ echo "proceeding with these tests."
+ exit 1
+ fi
-# Load this script instead of the desired firmware.
-load_fw "$NAME" "$0"
-if diff -q "$FW" /dev/test_firmware >/dev/null ; then
- echo "$0: firmware was not expected to match" >&2
- exit 1
-else
- echo "$0: firmware comparison works"
-fi
+ if diff -q "$FW" /dev/test_firmware >/dev/null ; then
+ echo "$0: firmware was not expected to match" >&2
+ exit 1
+ else
+ echo "$0: timeout works"
+ fi
+}
-# Do a proper load, which should work correctly.
-load_fw "$NAME" "$FW"
-if ! diff -q "$FW" /dev/test_firmware >/dev/null ; then
- echo "$0: firmware was not loaded" >&2
- exit 1
-else
- echo "$0: fallback mechanism works"
-fi
+run_sysfs_main_tests()
+{
+	test_sysfs_timeout
+ # Put timeout high enough for us to do work but not so long that failures
+ # slow down this test too much.
+ echo 4 >/sys/class/firmware/timeout
-load_fw_cancel "nope-$NAME" "$FW"
-if diff -q "$FW" /dev/test_firmware >/dev/null ; then
- echo "$0: firmware was expected to be cancelled" >&2
- exit 1
-else
- echo "$0: cancelling fallback mechanism works"
-fi
+ # Load this script instead of the desired firmware.
+ load_fw "$NAME" "$0"
+ if diff -q "$FW" /dev/test_firmware >/dev/null ; then
+ echo "$0: firmware was not expected to match" >&2
+ exit 1
+ else
+ echo "$0: firmware comparison works"
+ fi
-if load_fw_custom "$NAME" "$FW" ; then
+ # Do a proper load, which should work correctly.
+ load_fw "$NAME" "$FW"
if ! diff -q "$FW" /dev/test_firmware >/dev/null ; then
echo "$0: firmware was not loaded" >&2
exit 1
else
- echo "$0: custom fallback loading mechanism works"
+ echo "$0: fallback mechanism works"
fi
-fi
-if load_fw_custom_cancel "nope-$NAME" "$FW" ; then
+ load_fw_cancel "nope-$NAME" "$FW"
if diff -q "$FW" /dev/test_firmware >/dev/null ; then
echo "$0: firmware was expected to be cancelled" >&2
exit 1
else
- echo "$0: cancelling custom fallback mechanism works"
+ echo "$0: cancelling fallback mechanism works"
fi
-fi
-set +e
-load_fw_fallback_with_child "nope-signal-$NAME" "$FW"
-if [ "$?" -eq 0 ]; then
- echo "$0: SIGCHLD on sync ignored as expected" >&2
-else
- echo "$0: error - sync firmware request cancelled due to SIGCHLD" >&2
- exit 1
-fi
-set -e
+ set +e
+ load_fw_fallback_with_child "nope-signal-$NAME" "$FW"
+ if [ "$?" -eq 0 ]; then
+ echo "$0: SIGCHLD on sync ignored as expected" >&2
+ else
+ echo "$0: error - sync firmware request cancelled due to SIGCHLD" >&2
+ exit 1
+ fi
+ set -e
+}
+
+run_sysfs_custom_load_tests()
+{
+ if load_fw_custom "$NAME" "$FW" ; then
+ if ! diff -q "$FW" /dev/test_firmware >/dev/null ; then
+ echo "$0: firmware was not loaded" >&2
+ exit 1
+ else
+ echo "$0: custom fallback loading mechanism works"
+ fi
+ fi
+
+ if load_fw_custom "$NAME" "$FW" ; then
+ if ! diff -q "$FW" /dev/test_firmware >/dev/null ; then
+ echo "$0: firmware was not loaded" >&2
+ exit 1
+ else
+ echo "$0: custom fallback loading mechanism works"
+ fi
+ fi
+
+ if load_fw_custom_cancel "nope-$NAME" "$FW" ; then
+ if diff -q "$FW" /dev/test_firmware >/dev/null ; then
+ echo "$0: firmware was expected to be cancelled" >&2
+ exit 1
+ else
+ echo "$0: cancelling custom fallback mechanism works"
+ fi
+ fi
+}
+
+run_sysfs_main_tests
+run_sysfs_custom_load_tests
exit 0
diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh
index b1f20fef36c7..f9508e1a4058 100755
--- a/tools/testing/selftests/firmware/fw_filesystem.sh
+++ b/tools/testing/selftests/firmware/fw_filesystem.sh
@@ -45,7 +45,10 @@ test_finish()
if [ "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then
echo "$OLD_TIMEOUT" >/sys/class/firmware/timeout
fi
- echo -n "$OLD_PATH" >/sys/module/firmware_class/parameters/path
+ if [ "$OLD_FWPATH" = "" ]; then
+ OLD_FWPATH=" "
+ fi
+ echo -n "$OLD_FWPATH" >/sys/module/firmware_class/parameters/path
rm -f "$FW"
rmdir "$FWPATH"
}
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
index 589d52b211b7..27a54a17da65 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
@@ -29,6 +29,12 @@ ftrace_filter_check '*schedule*' '^.*schedule.*$'
# filter by *, end match
ftrace_filter_check 'schedule*' '^schedule.*$'
+# filter by *mid*end
+ftrace_filter_check '*aw*lock' '.*aw.*lock$'
+
+# filter by start*mid*
+ftrace_filter_check 'mutex*try*' '^mutex.*try.*'
+
# Advanced full-glob matching feature is recently supported.
# Skip the tests if we are sure the kernel does not support it.
if grep -q 'accepts: .* glob-matching-pattern' README ; then
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc
index 0f3f92622e33..68e7a48f5828 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc
@@ -128,6 +128,43 @@ if check_set_ftrace_filter "$FUNC1" "$FUNC2" ; then
fail "Expected $FUNC1 and $FUNC2"
fi
+test_actual() { # Compares $TMPDIR/expected with set_ftrace_filter
+ cat set_ftrace_filter | grep -v '#' | cut -d' ' -f1 | cut -d':' -f1 | sort -u > $TMPDIR/actual
+ DIFF=`diff $TMPDIR/actual $TMPDIR/expected`
+ test -z "$DIFF"
+}
+
+# Set traceoff trigger for all functions with "lock" in their name
+cat available_filter_functions | cut -d' ' -f1 | grep 'lock' | sort -u > $TMPDIR/expected
+echo '*lock*:traceoff' > set_ftrace_filter
+test_actual
+
+# now remove all entries that have 'try' in them and end with lock
+grep -v 'try.*lock$' $TMPDIR/expected > $TMPDIR/expected2
+mv $TMPDIR/expected2 $TMPDIR/expected
+echo '!*try*lock:traceoff' >> set_ftrace_filter
+test_actual
+
+# remove all that start with "m" and end with "lock"
+grep -v '^m.*lock$' $TMPDIR/expected > $TMPDIR/expected2
+mv $TMPDIR/expected2 $TMPDIR/expected
+echo '!m*lock:traceoff' >> set_ftrace_filter
+test_actual
+
+# remove all that start with "c" and have "unlock"
+grep -v '^c.*unlock' $TMPDIR/expected > $TMPDIR/expected2
+mv $TMPDIR/expected2 $TMPDIR/expected
+echo '!c*unlock*:traceoff' >> set_ftrace_filter
+test_actual
+
+# clear all the rest
+> $TMPDIR/expected
+echo '!*:traceoff' >> set_ftrace_filter
+test_actual
+
+rm $TMPDIR/expected
+rm $TMPDIR/actual
+
do_reset
exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/functions b/tools/testing/selftests/ftrace/test.d/functions
index f2019b37370d..df3dd7fe5f9b 100644
--- a/tools/testing/selftests/ftrace/test.d/functions
+++ b/tools/testing/selftests/ftrace/test.d/functions
@@ -37,17 +37,21 @@ reset_ftrace_filter() { # reset all triggers in set_ftrace_filter
if [ "$tr" = "" ]; then
continue
fi
+ if ! grep -q "$t" set_ftrace_filter; then
+ continue;
+ fi
+ name=`echo $t | cut -d: -f1 | cut -d' ' -f1`
if [ $tr = "enable_event" -o $tr = "disable_event" ]; then
- tr=`echo $t | cut -d: -f1-4`
+ tr=`echo $t | cut -d: -f2-4`
limit=`echo $t | cut -d: -f5`
else
- tr=`echo $t | cut -d: -f1-2`
+ tr=`echo $t | cut -d: -f2`
limit=`echo $t | cut -d: -f3`
fi
if [ "$limit" != "unlimited" ]; then
tr="$tr:$limit"
fi
- echo "!$tr" > set_ftrace_filter
+ echo "!$name:$tr" > set_ftrace_filter
done
}
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc b/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc
index bb16cf91f1b5..ce361b9d62cf 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc
@@ -12,9 +12,24 @@ case `uname -m` in
*) OFFS=0;;
esac
-echo "Setup up to 256 kprobes"
-grep t /proc/kallsyms | cut -f3 -d" " | grep -v .*\\..* | \
-head -n 256 | while read i; do echo p ${i}+${OFFS} ; done > kprobe_events ||:
+if [ -d events/kprobes ]; then
+ echo 0 > events/kprobes/enable
+ echo > kprobe_events
+fi
+
+N=0
+echo "Set up kprobes on the first available 256 text symbols"
+grep -i " t " /proc/kallsyms | cut -f3 -d" " | grep -v .*\\..* | \
+while read i; do
+ echo p ${i}+${OFFS} >> kprobe_events && N=$((N+1)) ||:
+ test $N -eq 256 && break
+done
+
+L=`wc -l < kprobe_events`
+if [ $L -ne 256 ]; then
+  echo "The number of kprobes events ($L) is not 256"
+ exit_fail
+fi
echo 1 > events/kprobes/enable
echo 0 > events/kprobes/enable
diff --git a/tools/testing/selftests/gen_kselftest_tar.sh b/tools/testing/selftests/gen_kselftest_tar.sh
index 17d5bd0c0936..a27e2eec3586 100755
--- a/tools/testing/selftests/gen_kselftest_tar.sh
+++ b/tools/testing/selftests/gen_kselftest_tar.sh
@@ -1,13 +1,11 @@
#!/bin/bash
#
+# SPDX-License-Identifier: GPL-2.0
# gen_kselftest_tar
# Generate kselftest tarball
# Author: Shuah Khan <shuahkh@osg.samsung.com>
# Copyright (C) 2015 Samsung Electronics Co., Ltd.
-# This software may be freely redistributed under the terms of the GNU
-# General Public License (GPLv2).
-
# main
main()
{
diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h
index 1ae565ed9bf0..1a52b03962a3 100644
--- a/tools/testing/selftests/kselftest.h
+++ b/tools/testing/selftests/kselftest.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* kselftest.h: kselftest framework return codes to include from
* selftests.
@@ -5,7 +6,6 @@
* Copyright (c) 2014 Shuah Khan <shuahkh@osg.samsung.com>
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
*
- * This file is released under the GPLv2.
*/
#ifndef __KSELFTEST_H
#define __KSELFTEST_H
diff --git a/tools/testing/selftests/kselftest_install.sh b/tools/testing/selftests/kselftest_install.sh
index 1555fbdb08da..ec304463883c 100755
--- a/tools/testing/selftests/kselftest_install.sh
+++ b/tools/testing/selftests/kselftest_install.sh
@@ -1,13 +1,11 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
#
# Kselftest Install
# Install kselftest tests
# Author: Shuah Khan <shuahkh@osg.samsung.com>
# Copyright (C) 2015 Samsung Electronics Co., Ltd.
-# This software may be freely redistributed under the terms of the GNU
-# General Public License (GPLv2).
-
install_loc=`pwd`
main()
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 5bef05d6ba39..7de482a0519d 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -77,7 +77,7 @@ endif
define EMIT_TESTS
@for TEST in $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_PROGS); do \
BASENAME_TEST=`basename $$TEST`; \
- echo "(./$$BASENAME_TEST > /tmp/$$BASENAME_TEST 2>&1 && echo \"selftests: $$BASENAME_TEST [PASS]\") || echo \"selftests: $$BASENAME_TEST [FAIL]\""; \
+ echo "(./$$BASENAME_TEST >> \$$OUTPUT 2>&1 && echo \"selftests: $$BASENAME_TEST [PASS]\") || echo \"selftests: $$BASENAME_TEST [FAIL]\""; \
done;
endef
diff --git a/tools/testing/selftests/media_tests/Makefile b/tools/testing/selftests/media_tests/Makefile
index be5bd4ffb895..c82cec2497de 100644
--- a/tools/testing/selftests/media_tests/Makefile
+++ b/tools/testing/selftests/media_tests/Makefile
@@ -1,8 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-TEST_PROGS := media_device_test media_device_open video_device_test
-all: $(TEST_PROGS)
+TEST_GEN_PROGS := media_device_test media_device_open video_device_test
+all: $(TEST_GEN_PROGS)
include ../lib.mk
-
-clean:
- rm -fr media_device_test media_device_open video_device_test
diff --git a/tools/testing/selftests/media_tests/media_device_open.c b/tools/testing/selftests/media_tests/media_device_open.c
index 44343c091a20..a5ce5434bafd 100644
--- a/tools/testing/selftests/media_tests/media_device_open.c
+++ b/tools/testing/selftests/media_tests/media_device_open.c
@@ -1,10 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+
/*
* media_device_open.c - Media Controller Device Open Test
*
* Copyright (c) 2016 Shuah Khan <shuahkh@osg.samsung.com>
* Copyright (c) 2016 Samsung Electronics Co., Ltd.
*
- * This file is released under the GPLv2.
*/
/*
diff --git a/tools/testing/selftests/media_tests/media_device_test.c b/tools/testing/selftests/media_tests/media_device_test.c
index 5d49943e77d0..421a367e4bb3 100644
--- a/tools/testing/selftests/media_tests/media_device_test.c
+++ b/tools/testing/selftests/media_tests/media_device_test.c
@@ -1,10 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+
/*
* media_device_test.c - Media Controller Device ioctl loop Test
*
* Copyright (c) 2016 Shuah Khan <shuahkh@osg.samsung.com>
* Copyright (c) 2016 Samsung Electronics Co., Ltd.
*
- * This file is released under the GPLv2.
*/
/*
diff --git a/tools/testing/selftests/media_tests/video_device_test.c b/tools/testing/selftests/media_tests/video_device_test.c
index 66d419c28653..0f6aef2e2593 100644
--- a/tools/testing/selftests/media_tests/video_device_test.c
+++ b/tools/testing/selftests/media_tests/video_device_test.c
@@ -1,10 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+
/*
* video_device_test - Video Device Test
*
* Copyright (c) 2016 Shuah Khan <shuahkh@osg.samsung.com>
* Copyright (c) 2016 Samsung Electronics Co., Ltd.
*
- * This file is released under the GPLv2.
*/
/*
diff --git a/tools/testing/selftests/membarrier/membarrier_test.c b/tools/testing/selftests/membarrier/membarrier_test.c
index 9e674d9514d1..22bffd55a523 100644
--- a/tools/testing/selftests/membarrier/membarrier_test.c
+++ b/tools/testing/selftests/membarrier/membarrier_test.c
@@ -16,49 +16,210 @@ static int sys_membarrier(int cmd, int flags)
static int test_membarrier_cmd_fail(void)
{
int cmd = -1, flags = 0;
+ const char *test_name = "sys membarrier invalid command";
if (sys_membarrier(cmd, flags) != -1) {
ksft_exit_fail_msg(
- "sys membarrier invalid command test: command = %d, flags = %d. Should fail, but passed\n",
- cmd, flags);
+ "%s test: command = %d, flags = %d. Should fail, but passed\n",
+ test_name, cmd, flags);
+ }
+ if (errno != EINVAL) {
+ ksft_exit_fail_msg(
+ "%s test: flags = %d. Should return (%d: \"%s\"), but returned (%d: \"%s\").\n",
+ test_name, flags, EINVAL, strerror(EINVAL),
+ errno, strerror(errno));
}
ksft_test_result_pass(
- "sys membarrier invalid command test: command = %d, flags = %d. Failed as expected\n",
- cmd, flags);
+ "%s test: command = %d, flags = %d, errno = %d. Failed as expected\n",
+ test_name, cmd, flags, errno);
return 0;
}
static int test_membarrier_flags_fail(void)
{
int cmd = MEMBARRIER_CMD_QUERY, flags = 1;
+ const char *test_name = "sys membarrier MEMBARRIER_CMD_QUERY invalid flags";
+
+ if (sys_membarrier(cmd, flags) != -1) {
+ ksft_exit_fail_msg(
+ "%s test: flags = %d. Should fail, but passed\n",
+ test_name, flags);
+ }
+ if (errno != EINVAL) {
+ ksft_exit_fail_msg(
+ "%s test: flags = %d. Should return (%d: \"%s\"), but returned (%d: \"%s\").\n",
+ test_name, flags, EINVAL, strerror(EINVAL),
+ errno, strerror(errno));
+ }
+
+ ksft_test_result_pass(
+ "%s test: flags = %d, errno = %d. Failed as expected\n",
+ test_name, flags, errno);
+ return 0;
+}
+
+static int test_membarrier_global_success(void)
+{
+ int cmd = MEMBARRIER_CMD_GLOBAL, flags = 0;
+ const char *test_name = "sys membarrier MEMBARRIER_CMD_GLOBAL";
+
+ if (sys_membarrier(cmd, flags) != 0) {
+ ksft_exit_fail_msg(
+ "%s test: flags = %d, errno = %d\n",
+ test_name, flags, errno);
+ }
+
+ ksft_test_result_pass(
+ "%s test: flags = %d\n", test_name, flags);
+ return 0;
+}
+
+static int test_membarrier_private_expedited_fail(void)
+{
+ int cmd = MEMBARRIER_CMD_PRIVATE_EXPEDITED, flags = 0;
+ const char *test_name = "sys membarrier MEMBARRIER_CMD_PRIVATE_EXPEDITED not registered failure";
+
+ if (sys_membarrier(cmd, flags) != -1) {
+ ksft_exit_fail_msg(
+ "%s test: flags = %d. Should fail, but passed\n",
+ test_name, flags);
+ }
+ if (errno != EPERM) {
+ ksft_exit_fail_msg(
+ "%s test: flags = %d. Should return (%d: \"%s\"), but returned (%d: \"%s\").\n",
+ test_name, flags, EPERM, strerror(EPERM),
+ errno, strerror(errno));
+ }
+
+ ksft_test_result_pass(
+ "%s test: flags = %d, errno = %d\n",
+ test_name, flags, errno);
+ return 0;
+}
+
+static int test_membarrier_register_private_expedited_success(void)
+{
+ int cmd = MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, flags = 0;
+ const char *test_name = "sys membarrier MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED";
+
+ if (sys_membarrier(cmd, flags) != 0) {
+ ksft_exit_fail_msg(
+ "%s test: flags = %d, errno = %d\n",
+ test_name, flags, errno);
+ }
+
+ ksft_test_result_pass(
+ "%s test: flags = %d\n",
+ test_name, flags);
+ return 0;
+}
+
+static int test_membarrier_private_expedited_success(void)
+{
+ int cmd = MEMBARRIER_CMD_PRIVATE_EXPEDITED, flags = 0;
+ const char *test_name = "sys membarrier MEMBARRIER_CMD_PRIVATE_EXPEDITED";
+
+ if (sys_membarrier(cmd, flags) != 0) {
+ ksft_exit_fail_msg(
+ "%s test: flags = %d, errno = %d\n",
+ test_name, flags, errno);
+ }
+
+ ksft_test_result_pass(
+ "%s test: flags = %d\n",
+ test_name, flags);
+ return 0;
+}
+
+static int test_membarrier_private_expedited_sync_core_fail(void)
+{
+ int cmd = MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE, flags = 0;
+ const char *test_name = "sys membarrier MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE not registered failure";
if (sys_membarrier(cmd, flags) != -1) {
ksft_exit_fail_msg(
- "sys membarrier MEMBARRIER_CMD_QUERY invalid flags test: flags = %d. Should fail, but passed\n",
- flags);
+ "%s test: flags = %d. Should fail, but passed\n",
+ test_name, flags);
+ }
+ if (errno != EPERM) {
+ ksft_exit_fail_msg(
+ "%s test: flags = %d. Should return (%d: \"%s\"), but returned (%d: \"%s\").\n",
+ test_name, flags, EPERM, strerror(EPERM),
+ errno, strerror(errno));
+ }
+
+ ksft_test_result_pass(
+ "%s test: flags = %d, errno = %d\n",
+ test_name, flags, errno);
+ return 0;
+}
+
+static int test_membarrier_register_private_expedited_sync_core_success(void)
+{
+ int cmd = MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE, flags = 0;
+ const char *test_name = "sys membarrier MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE";
+
+ if (sys_membarrier(cmd, flags) != 0) {
+ ksft_exit_fail_msg(
+ "%s test: flags = %d, errno = %d\n",
+ test_name, flags, errno);
+ }
+
+ ksft_test_result_pass(
+ "%s test: flags = %d\n",
+ test_name, flags);
+ return 0;
+}
+
+static int test_membarrier_private_expedited_sync_core_success(void)
+{
+	int cmd = MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE, flags = 0;
+ const char *test_name = "sys membarrier MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE";
+
+ if (sys_membarrier(cmd, flags) != 0) {
+ ksft_exit_fail_msg(
+ "%s test: flags = %d, errno = %d\n",
+ test_name, flags, errno);
}
ksft_test_result_pass(
- "sys membarrier MEMBARRIER_CMD_QUERY invalid flags test: flags = %d. Failed as expected\n",
- flags);
+ "%s test: flags = %d\n",
+ test_name, flags);
return 0;
}
-static int test_membarrier_success(void)
+static int test_membarrier_register_global_expedited_success(void)
{
- int cmd = MEMBARRIER_CMD_SHARED, flags = 0;
- const char *test_name = "sys membarrier MEMBARRIER_CMD_SHARED\n";
+ int cmd = MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED, flags = 0;
+ const char *test_name = "sys membarrier MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED";
if (sys_membarrier(cmd, flags) != 0) {
ksft_exit_fail_msg(
- "sys membarrier MEMBARRIER_CMD_SHARED test: flags = %d\n",
- flags);
+ "%s test: flags = %d, errno = %d\n",
+ test_name, flags, errno);
}
ksft_test_result_pass(
- "sys membarrier MEMBARRIER_CMD_SHARED test: flags = %d\n",
- flags);
+ "%s test: flags = %d\n",
+ test_name, flags);
+ return 0;
+}
+
+static int test_membarrier_global_expedited_success(void)
+{
+ int cmd = MEMBARRIER_CMD_GLOBAL_EXPEDITED, flags = 0;
+ const char *test_name = "sys membarrier MEMBARRIER_CMD_GLOBAL_EXPEDITED";
+
+ if (sys_membarrier(cmd, flags) != 0) {
+ ksft_exit_fail_msg(
+ "%s test: flags = %d, errno = %d\n",
+ test_name, flags, errno);
+ }
+
+ ksft_test_result_pass(
+ "%s test: flags = %d\n",
+ test_name, flags);
return 0;
}
@@ -72,7 +233,45 @@ static int test_membarrier(void)
status = test_membarrier_flags_fail();
if (status)
return status;
- status = test_membarrier_success();
+ status = test_membarrier_global_success();
+ if (status)
+ return status;
+ status = test_membarrier_private_expedited_fail();
+ if (status)
+ return status;
+ status = test_membarrier_register_private_expedited_success();
+ if (status)
+ return status;
+ status = test_membarrier_private_expedited_success();
+ if (status)
+ return status;
+ status = sys_membarrier(MEMBARRIER_CMD_QUERY, 0);
+ if (status < 0) {
+ ksft_test_result_fail("sys_membarrier() failed\n");
+ return status;
+ }
+ if (status & MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE) {
+ status = test_membarrier_private_expedited_sync_core_fail();
+ if (status)
+ return status;
+ status = test_membarrier_register_private_expedited_sync_core_success();
+ if (status)
+ return status;
+ status = test_membarrier_private_expedited_sync_core_success();
+ if (status)
+ return status;
+ }
+ /*
+ * It is valid to send a global membarrier from a non-registered
+ * process.
+ */
+ status = test_membarrier_global_expedited_success();
+ if (status)
+ return status;
+ status = test_membarrier_register_global_expedited_success();
+ if (status)
+ return status;
+ status = test_membarrier_global_expedited_success();
if (status)
return status;
return 0;
@@ -94,8 +293,10 @@ static int test_membarrier_query(void)
}
ksft_exit_fail_msg("sys_membarrier() failed\n");
}
- if (!(ret & MEMBARRIER_CMD_SHARED))
+ if (!(ret & MEMBARRIER_CMD_GLOBAL)) {
+ ksft_test_result_fail("sys_membarrier() CMD_GLOBAL query failed\n");
ksft_exit_fail_msg("sys_membarrier is not supported.\n");
+ }
ksft_test_result_pass("sys_membarrier available\n");
return 0;
@@ -108,5 +309,5 @@ int main(int argc, char **argv)
test_membarrier_query();
test_membarrier();
- ksft_exit_pass();
+ return ksft_exit_pass();
}
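The tests above all go through the file's sys_membarrier() wrapper around the
raw syscall. A minimal standalone usage sketch (not part of the patch; assumes
<linux/membarrier.h> provides the command constants and that __NR_membarrier
is defined by the toolchain):

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/membarrier.h>

static int sys_membarrier(int cmd, int flags)
{
	return syscall(__NR_membarrier, cmd, flags);
}

int main(void)
{
	int mask = sys_membarrier(MEMBARRIER_CMD_QUERY, 0);

	if (mask < 0) {
		fprintf(stderr, "membarrier: %s\n", strerror(errno));
		return 1;
	}
	/* registration first, then the expedited barrier, as the tests above do */
	if ((mask & MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED) &&
	    sys_membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0) == 0)
		printf("private expedited: %d\n",
		       sys_membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0));
	return 0;
}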
diff --git a/tools/testing/selftests/memfd/Makefile b/tools/testing/selftests/memfd/Makefile
index 3926a0409dda..a5276a91dfbf 100644
--- a/tools/testing/selftests/memfd/Makefile
+++ b/tools/testing/selftests/memfd/Makefile
@@ -12,3 +12,8 @@ fuse_mnt.o: CFLAGS += $(shell pkg-config fuse --cflags)
include ../lib.mk
$(OUTPUT)/fuse_mnt: LDLIBS += $(shell pkg-config fuse --libs)
+
+$(OUTPUT)/memfd_test: memfd_test.c common.o
+$(OUTPUT)/fuse_test: fuse_test.c common.o
+
+EXTRA_CLEAN = common.o
diff --git a/tools/testing/selftests/memfd/common.c b/tools/testing/selftests/memfd/common.c
new file mode 100644
index 000000000000..8eb3d75f6e60
--- /dev/null
+++ b/tools/testing/selftests/memfd/common.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#define __EXPORTED_HEADERS__
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <linux/fcntl.h>
+#include <linux/memfd.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+
+#include "common.h"
+
+int hugetlbfs_test = 0;
+
+/*
+ * Copied from mlock2-tests.c
+ */
+unsigned long default_huge_page_size(void)
+{
+ unsigned long hps = 0;
+ char *line = NULL;
+ size_t linelen = 0;
+ FILE *f = fopen("/proc/meminfo", "r");
+
+ if (!f)
+ return 0;
+ while (getline(&line, &linelen, f) > 0) {
+ if (sscanf(line, "Hugepagesize: %lu kB", &hps) == 1) {
+ hps <<= 10;
+ break;
+ }
+ }
+
+ free(line);
+ fclose(f);
+ return hps;
+}
+
+int sys_memfd_create(const char *name, unsigned int flags)
+{
+ if (hugetlbfs_test)
+ flags |= MFD_HUGETLB;
+
+ return syscall(__NR_memfd_create, name, flags);
+}
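common.c now centralizes the memfd_create() wrapper and the huge-page-size
probe shared by memfd_test and fuse_test. A minimal caller sketch (not part of
the patch; it only assumes the two helpers declared in common.h above and the
MFD_* flags from <linux/memfd.h>):

#include <stdio.h>
#include <unistd.h>
#include <linux/memfd.h>	/* MFD_CLOEXEC, MFD_ALLOW_SEALING */
#include "common.h"

int main(void)
{
	int fd;

	/* hugetlbfs_test stays 0 here, so no MFD_HUGETLB flag is added */
	fd = sys_memfd_create("example", MFD_CLOEXEC | MFD_ALLOW_SEALING);
	if (fd < 0) {
		perror("memfd_create");
		return 1;
	}
	if (ftruncate(fd, 8192) < 0) {	/* same default size the tests use */
		perror("ftruncate");
		return 1;
	}
	printf("default huge page size: %lu\n", default_huge_page_size());
	close(fd);
	return 0;
}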
diff --git a/tools/testing/selftests/memfd/common.h b/tools/testing/selftests/memfd/common.h
new file mode 100644
index 000000000000..522d2c630bd8
--- /dev/null
+++ b/tools/testing/selftests/memfd/common.h
@@ -0,0 +1,9 @@
+#ifndef COMMON_H_
+#define COMMON_H_
+
+extern int hugetlbfs_test;
+
+unsigned long default_huge_page_size(void);
+int sys_memfd_create(const char *name, unsigned int flags);
+
+#endif
diff --git a/tools/testing/selftests/memfd/fuse_test.c b/tools/testing/selftests/memfd/fuse_test.c
index 1ccb7a3eb14b..b018e835737d 100644
--- a/tools/testing/selftests/memfd/fuse_test.c
+++ b/tools/testing/selftests/memfd/fuse_test.c
@@ -33,14 +33,12 @@
#include <sys/wait.h>
#include <unistd.h>
+#include "common.h"
+
#define MFD_DEF_SIZE 8192
#define STACK_SIZE 65536
-static int sys_memfd_create(const char *name,
- unsigned int flags)
-{
- return syscall(__NR_memfd_create, name, flags);
-}
+static size_t mfd_def_size = MFD_DEF_SIZE;
static int mfd_assert_new(const char *name, loff_t sz, unsigned int flags)
{
@@ -127,7 +125,7 @@ static void *mfd_assert_mmap_shared(int fd)
void *p;
p = mmap(NULL,
- MFD_DEF_SIZE,
+ mfd_def_size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
fd,
@@ -145,7 +143,7 @@ static void *mfd_assert_mmap_private(int fd)
void *p;
p = mmap(NULL,
- MFD_DEF_SIZE,
+ mfd_def_size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE,
fd,
@@ -178,7 +176,7 @@ static int sealing_thread_fn(void *arg)
usleep(200000);
/* unmount mapping before sealing to avoid i_mmap_writable failures */
- munmap(global_p, MFD_DEF_SIZE);
+ munmap(global_p, mfd_def_size);
/* Try sealing the global file; expect EBUSY or success. Current
* kernels will never succeed, but in the future, kernels might
@@ -228,7 +226,7 @@ static void join_sealing_thread(pid_t pid)
int main(int argc, char **argv)
{
- static const char zero[MFD_DEF_SIZE];
+ char *zero;
int fd, mfd, r;
void *p;
int was_sealed;
@@ -239,6 +237,25 @@ int main(int argc, char **argv)
abort();
}
+ if (argc >= 3) {
+ if (!strcmp(argv[2], "hugetlbfs")) {
+ unsigned long hpage_size = default_huge_page_size();
+
+ if (!hpage_size) {
+ printf("Unable to determine huge page size\n");
+ abort();
+ }
+
+ hugetlbfs_test = 1;
+ mfd_def_size = hpage_size * 2;
+ } else {
+ printf("Unknown option: %s\n", argv[2]);
+ abort();
+ }
+ }
+
+ zero = calloc(sizeof(*zero), mfd_def_size);
+
/* open FUSE memfd file for GUP testing */
printf("opening: %s\n", argv[1]);
fd = open(argv[1], O_RDONLY | O_CLOEXEC);
@@ -249,7 +266,7 @@ int main(int argc, char **argv)
/* create new memfd-object */
mfd = mfd_assert_new("kern_memfd_fuse",
- MFD_DEF_SIZE,
+ mfd_def_size,
MFD_CLOEXEC | MFD_ALLOW_SEALING);
/* mmap memfd-object for writing */
@@ -268,7 +285,7 @@ int main(int argc, char **argv)
* This guarantees that the receive-buffer is pinned for 1s until the
* data is written into it. The racing ADD_SEALS should thus fail as
* the pages are still pinned. */
- r = read(fd, p, MFD_DEF_SIZE);
+ r = read(fd, p, mfd_def_size);
if (r < 0) {
printf("read() failed: %m\n");
abort();
@@ -295,10 +312,10 @@ int main(int argc, char **argv)
* enough to avoid any in-flight writes. */
p = mfd_assert_mmap_private(mfd);
- if (was_sealed && memcmp(p, zero, MFD_DEF_SIZE)) {
+ if (was_sealed && memcmp(p, zero, mfd_def_size)) {
printf("memfd sealed during read() but data not discarded\n");
abort();
- } else if (!was_sealed && !memcmp(p, zero, MFD_DEF_SIZE)) {
+ } else if (!was_sealed && !memcmp(p, zero, mfd_def_size)) {
printf("memfd sealed after read() but data discarded\n");
abort();
}
@@ -307,6 +324,7 @@ int main(int argc, char **argv)
close(fd);
printf("fuse: DONE\n");
+ free(zero);
return 0;
}
diff --git a/tools/testing/selftests/memfd/memfd_test.c b/tools/testing/selftests/memfd/memfd_test.c
index 132a54f74e88..10baa1652fc2 100644
--- a/tools/testing/selftests/memfd/memfd_test.c
+++ b/tools/testing/selftests/memfd/memfd_test.c
@@ -19,7 +19,10 @@
#include <sys/wait.h>
#include <unistd.h>
+#include "common.h"
+
#define MEMFD_STR "memfd:"
+#define MEMFD_HUGE_STR "memfd-hugetlb:"
#define SHARED_FT_STR "(shared file-table)"
#define MFD_DEF_SIZE 8192
@@ -28,41 +31,8 @@
/*
* Default is not to test hugetlbfs
*/
-static int hugetlbfs_test;
static size_t mfd_def_size = MFD_DEF_SIZE;
-
-/*
- * Copied from mlock2-tests.c
- */
-static unsigned long default_huge_page_size(void)
-{
- unsigned long hps = 0;
- char *line = NULL;
- size_t linelen = 0;
- FILE *f = fopen("/proc/meminfo", "r");
-
- if (!f)
- return 0;
- while (getline(&line, &linelen, f) > 0) {
- if (sscanf(line, "Hugepagesize: %lu kB", &hps) == 1) {
- hps <<= 10;
- break;
- }
- }
-
- free(line);
- fclose(f);
- return hps;
-}
-
-static int sys_memfd_create(const char *name,
- unsigned int flags)
-{
- if (hugetlbfs_test)
- flags |= MFD_HUGETLB;
-
- return syscall(__NR_memfd_create, name, flags);
-}
+static const char *memfd_str = MEMFD_STR;
static int mfd_assert_new(const char *name, loff_t sz, unsigned int flags)
{
@@ -513,6 +483,10 @@ static void mfd_assert_grow_write(int fd)
static char *buf;
ssize_t l;
+ /* hugetlbfs does not support write */
+ if (hugetlbfs_test)
+ return;
+
buf = malloc(mfd_def_size * 8);
if (!buf) {
printf("malloc(%zu) failed: %m\n", mfd_def_size * 8);
@@ -533,6 +507,10 @@ static void mfd_fail_grow_write(int fd)
static char *buf;
ssize_t l;
+ /* hugetlbfs does not support write */
+ if (hugetlbfs_test)
+ return;
+
buf = malloc(mfd_def_size * 8);
if (!buf) {
printf("malloc(%zu) failed: %m\n", mfd_def_size * 8);
@@ -598,7 +576,7 @@ static void test_create(void)
char buf[2048];
int fd;
- printf("%s CREATE\n", MEMFD_STR);
+ printf("%s CREATE\n", memfd_str);
/* test NULL name */
mfd_fail_new(NULL, 0);
@@ -627,18 +605,13 @@ static void test_create(void)
fd = mfd_assert_new("", 0, MFD_CLOEXEC);
close(fd);
- if (!hugetlbfs_test) {
- /* verify MFD_ALLOW_SEALING is allowed */
- fd = mfd_assert_new("", 0, MFD_ALLOW_SEALING);
- close(fd);
-
- /* verify MFD_ALLOW_SEALING | MFD_CLOEXEC is allowed */
- fd = mfd_assert_new("", 0, MFD_ALLOW_SEALING | MFD_CLOEXEC);
- close(fd);
- } else {
- /* sealing is not supported on hugetlbfs */
- mfd_fail_new("", MFD_ALLOW_SEALING);
- }
+ /* verify MFD_ALLOW_SEALING is allowed */
+ fd = mfd_assert_new("", 0, MFD_ALLOW_SEALING);
+ close(fd);
+
+ /* verify MFD_ALLOW_SEALING | MFD_CLOEXEC is allowed */
+ fd = mfd_assert_new("", 0, MFD_ALLOW_SEALING | MFD_CLOEXEC);
+ close(fd);
}
/*
@@ -649,11 +622,7 @@ static void test_basic(void)
{
int fd;
- /* hugetlbfs does not contain sealing support */
- if (hugetlbfs_test)
- return;
-
- printf("%s BASIC\n", MEMFD_STR);
+ printf("%s BASIC\n", memfd_str);
fd = mfd_assert_new("kern_memfd_basic",
mfd_def_size,
@@ -698,28 +667,6 @@ static void test_basic(void)
}
/*
- * hugetlbfs doesn't support seals or write, so just verify grow and shrink
- * on a hugetlbfs file created via memfd_create.
- */
-static void test_hugetlbfs_grow_shrink(void)
-{
- int fd;
-
- printf("%s HUGETLBFS-GROW-SHRINK\n", MEMFD_STR);
-
- fd = mfd_assert_new("kern_memfd_seal_write",
- mfd_def_size,
- MFD_CLOEXEC);
-
- mfd_assert_read(fd);
- mfd_assert_write(fd);
- mfd_assert_shrink(fd);
- mfd_assert_grow(fd);
-
- close(fd);
-}
-
-/*
* Test SEAL_WRITE
* Test whether SEAL_WRITE actually prevents modifications.
*/
@@ -727,14 +674,7 @@ static void test_seal_write(void)
{
int fd;
- /*
- * hugetlbfs does not contain sealing or write support. Just test
- * basic grow and shrink via test_hugetlbfs_grow_shrink.
- */
- if (hugetlbfs_test)
- return test_hugetlbfs_grow_shrink();
-
- printf("%s SEAL-WRITE\n", MEMFD_STR);
+ printf("%s SEAL-WRITE\n", memfd_str);
fd = mfd_assert_new("kern_memfd_seal_write",
mfd_def_size,
@@ -760,11 +700,7 @@ static void test_seal_shrink(void)
{
int fd;
- /* hugetlbfs does not contain sealing support */
- if (hugetlbfs_test)
- return;
-
- printf("%s SEAL-SHRINK\n", MEMFD_STR);
+ printf("%s SEAL-SHRINK\n", memfd_str);
fd = mfd_assert_new("kern_memfd_seal_shrink",
mfd_def_size,
@@ -790,11 +726,7 @@ static void test_seal_grow(void)
{
int fd;
- /* hugetlbfs does not contain sealing support */
- if (hugetlbfs_test)
- return;
-
- printf("%s SEAL-GROW\n", MEMFD_STR);
+ printf("%s SEAL-GROW\n", memfd_str);
fd = mfd_assert_new("kern_memfd_seal_grow",
mfd_def_size,
@@ -820,11 +752,7 @@ static void test_seal_resize(void)
{
int fd;
- /* hugetlbfs does not contain sealing support */
- if (hugetlbfs_test)
- return;
-
- printf("%s SEAL-RESIZE\n", MEMFD_STR);
+ printf("%s SEAL-RESIZE\n", memfd_str);
fd = mfd_assert_new("kern_memfd_seal_resize",
mfd_def_size,
@@ -843,32 +771,6 @@ static void test_seal_resize(void)
}
/*
- * hugetlbfs does not support seals. Basic test to dup the memfd created
- * fd and perform some basic operations on it.
- */
-static void hugetlbfs_dup(char *b_suffix)
-{
- int fd, fd2;
-
- printf("%s HUGETLBFS-DUP %s\n", MEMFD_STR, b_suffix);
-
- fd = mfd_assert_new("kern_memfd_share_dup",
- mfd_def_size,
- MFD_CLOEXEC);
-
- fd2 = mfd_assert_dup(fd);
-
- mfd_assert_read(fd);
- mfd_assert_write(fd);
-
- mfd_assert_shrink(fd2);
- mfd_assert_grow(fd2);
-
- close(fd2);
- close(fd);
-}
-
-/*
* Test sharing via dup()
* Test that seals are shared between dupped FDs and they're all equal.
*/
@@ -876,16 +778,7 @@ static void test_share_dup(char *banner, char *b_suffix)
{
int fd, fd2;
- /*
- * hugetlbfs does not contain sealing support. Perform some
- * basic testing on dup'ed fd instead via hugetlbfs_dup.
- */
- if (hugetlbfs_test) {
- hugetlbfs_dup(b_suffix);
- return;
- }
-
- printf("%s %s %s\n", MEMFD_STR, banner, b_suffix);
+ printf("%s %s %s\n", memfd_str, banner, b_suffix);
fd = mfd_assert_new("kern_memfd_share_dup",
mfd_def_size,
@@ -927,11 +820,7 @@ static void test_share_mmap(char *banner, char *b_suffix)
int fd;
void *p;
- /* hugetlbfs does not contain sealing support */
- if (hugetlbfs_test)
- return;
-
- printf("%s %s %s\n", MEMFD_STR, banner, b_suffix);
+ printf("%s %s %s\n", memfd_str, banner, b_suffix);
fd = mfd_assert_new("kern_memfd_share_mmap",
mfd_def_size,
@@ -956,32 +845,6 @@ static void test_share_mmap(char *banner, char *b_suffix)
}
/*
- * Basic test to make sure we can open the hugetlbfs fd via /proc and
- * perform some simple operations on it.
- */
-static void hugetlbfs_proc_open(char *b_suffix)
-{
- int fd, fd2;
-
- printf("%s HUGETLBFS-PROC-OPEN %s\n", MEMFD_STR, b_suffix);
-
- fd = mfd_assert_new("kern_memfd_share_open",
- mfd_def_size,
- MFD_CLOEXEC);
-
- fd2 = mfd_assert_open(fd, O_RDWR, 0);
-
- mfd_assert_read(fd);
- mfd_assert_write(fd);
-
- mfd_assert_shrink(fd2);
- mfd_assert_grow(fd2);
-
- close(fd2);
- close(fd);
-}
-
-/*
* Test sealing with open(/proc/self/fd/%d)
* Via /proc we can get access to a separate file-context for the same memfd.
* This is *not* like dup(), but like a real separate open(). Make sure the
@@ -991,16 +854,7 @@ static void test_share_open(char *banner, char *b_suffix)
{
int fd, fd2;
- /*
- * hugetlbfs does not contain sealing support. So test basic
- * functionality of using /proc fd via hugetlbfs_proc_open
- */
- if (hugetlbfs_test) {
- hugetlbfs_proc_open(b_suffix);
- return;
- }
-
- printf("%s %s %s\n", MEMFD_STR, banner, b_suffix);
+ printf("%s %s %s\n", memfd_str, banner, b_suffix);
fd = mfd_assert_new("kern_memfd_share_open",
mfd_def_size,
@@ -1043,11 +897,7 @@ static void test_share_fork(char *banner, char *b_suffix)
int fd;
pid_t pid;
- /* hugetlbfs does not contain sealing support */
- if (hugetlbfs_test)
- return;
-
- printf("%s %s %s\n", MEMFD_STR, banner, b_suffix);
+ printf("%s %s %s\n", memfd_str, banner, b_suffix);
fd = mfd_assert_new("kern_memfd_share_fork",
mfd_def_size,
@@ -1083,7 +933,11 @@ int main(int argc, char **argv)
}
hugetlbfs_test = 1;
+ memfd_str = MEMFD_HUGE_STR;
mfd_def_size = hpage_size * 2;
+ } else {
+ printf("Unknown option: %s\n", argv[1]);
+ abort();
}
}
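
For reference, a sketch (not part of the patch) of what the new hugetlbfs path boils down to: a hugetlb-backed memfd whose size is two default huge pages, with the page size read from /proc/meminfo as the now-shared helper does. MFD_HUGETLB needs Linux 4.14 or later, and combining it with MFD_ALLOW_SEALING only works on kernels that already contain the sealing support this test update targets.

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/memfd.h>

/* same /proc/meminfo parsing as the helper the tests now share */
static unsigned long default_huge_page_size(void)
{
	unsigned long hps = 0;
	char *line = NULL;
	size_t len = 0;
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return 0;
	while (getline(&line, &len, f) > 0)
		if (sscanf(line, "Hugepagesize: %lu kB", &hps) == 1) {
			hps <<= 10;	/* kB -> bytes */
			break;
		}
	free(line);
	fclose(f);
	return hps;
}

int main(void)
{
	unsigned long sz = 2 * default_huge_page_size();
	int fd = syscall(__NR_memfd_create, "huge-demo",
			 MFD_CLOEXEC | MFD_HUGETLB | MFD_ALLOW_SEALING);

	if (!sz || fd < 0 || ftruncate(fd, sz) < 0) {
		perror("hugetlb memfd");
		return 1;
	}
	printf("hugetlb memfd of %lu bytes\n", sz);
	close(fd);
	return 0;
}
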
diff --git a/tools/testing/selftests/memfd/run_fuse_test.sh b/tools/testing/selftests/memfd/run_fuse_test.sh
index 407df68dfe27..22e572e2d66a 100755
--- a/tools/testing/selftests/memfd/run_fuse_test.sh
+++ b/tools/testing/selftests/memfd/run_fuse_test.sh
@@ -10,6 +10,6 @@ set -e
mkdir mnt
./fuse_mnt ./mnt
-./fuse_test ./mnt/memfd
+./fuse_test ./mnt/memfd $@
fusermount -u ./mnt
rmdir ./mnt
diff --git a/tools/testing/selftests/memfd/run_tests.sh b/tools/testing/selftests/memfd/run_tests.sh
index daabb350697c..c2d41ed81b24 100755
--- a/tools/testing/selftests/memfd/run_tests.sh
+++ b/tools/testing/selftests/memfd/run_tests.sh
@@ -60,6 +60,7 @@ fi
# Run the hugetlbfs test
#
./memfd_test hugetlbfs
+./run_fuse_test.sh hugetlbfs
#
# Give back any huge pages allocated for the test
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 500c74db746c..d7c30d366935 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -5,6 +5,7 @@ CFLAGS = -Wall -Wl,--no-as-needed -O2 -g
CFLAGS += -I../../../../usr/include/
TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh
+TEST_PROGS += fib_tests.sh
TEST_GEN_FILES = socket
TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy
TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
index e57b4ac40e72..7177bea1fdfa 100644
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@ -1,3 +1,4 @@
CONFIG_USER_NS=y
CONFIG_BPF_SYSCALL=y
CONFIG_TEST_BPF=m
+CONFIG_NUMA=y
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
new file mode 100755
index 000000000000..a9154eefb2e2
--- /dev/null
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -0,0 +1,429 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# This test is for checking IPv4 and IPv6 FIB behavior in response to
+# different events.
+
+ret=0
+
+check_err()
+{
+ if [ $ret -eq 0 ]; then
+ ret=$1
+ fi
+}
+
+check_fail()
+{
+ if [ $1 -eq 0 ]; then
+ ret=1
+ fi
+}
+
+netns_create()
+{
+ local testns=$1
+
+ ip netns add $testns
+ ip netns exec $testns ip link set dev lo up
+}
+
+fib_unreg_unicast_test()
+{
+ ret=0
+
+ netns_create "testns"
+
+ ip netns exec testns ip link add dummy0 type dummy
+ ip netns exec testns ip link set dev dummy0 up
+
+ ip netns exec testns ip address add 198.51.100.1/24 dev dummy0
+ ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0
+
+ ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null
+ check_err $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null
+ check_err $?
+
+ ip netns exec testns ip link del dev dummy0
+ check_err $?
+
+ ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null
+ check_fail $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null
+ check_fail $?
+
+ ip netns del testns
+
+ if [ $ret -ne 0 ]; then
+ echo "FAIL: unicast route test"
+ return 1
+ fi
+ echo "PASS: unicast route test"
+}
+
+fib_unreg_multipath_test()
+{
+ ret=0
+
+ netns_create "testns"
+
+ ip netns exec testns ip link add dummy0 type dummy
+ ip netns exec testns ip link set dev dummy0 up
+
+ ip netns exec testns ip link add dummy1 type dummy
+ ip netns exec testns ip link set dev dummy1 up
+
+ ip netns exec testns ip address add 198.51.100.1/24 dev dummy0
+ ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0
+
+ ip netns exec testns ip address add 192.0.2.1/24 dev dummy1
+ ip netns exec testns ip -6 address add 2001:db8:2::1/64 dev dummy1
+
+ ip netns exec testns ip route add 203.0.113.0/24 \
+ nexthop via 198.51.100.2 dev dummy0 \
+ nexthop via 192.0.2.2 dev dummy1
+ ip netns exec testns ip -6 route add 2001:db8:3::/64 \
+ nexthop via 2001:db8:1::2 dev dummy0 \
+ nexthop via 2001:db8:2::2 dev dummy1
+
+ ip netns exec testns ip route get fibmatch 203.0.113.1 &> /dev/null
+ check_err $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 &> /dev/null
+ check_err $?
+
+ ip netns exec testns ip link del dev dummy0
+ check_err $?
+
+ ip netns exec testns ip route get fibmatch 203.0.113.1 &> /dev/null
+ check_fail $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 &> /dev/null
+ # In IPv6 we do not flush the entire multipath route.
+ check_err $?
+
+ ip netns exec testns ip link del dev dummy1
+
+ ip netns del testns
+
+ if [ $ret -ne 0 ]; then
+ echo "FAIL: multipath route test"
+ return 1
+ fi
+ echo "PASS: multipath route test"
+}
+
+fib_unreg_test()
+{
+ echo "Running netdev unregister tests"
+
+ fib_unreg_unicast_test
+ fib_unreg_multipath_test
+}
+
+fib_down_unicast_test()
+{
+ ret=0
+
+ netns_create "testns"
+
+ ip netns exec testns ip link add dummy0 type dummy
+ ip netns exec testns ip link set dev dummy0 up
+
+ ip netns exec testns ip address add 198.51.100.1/24 dev dummy0
+ ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0
+
+ ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null
+ check_err $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null
+ check_err $?
+
+ ip netns exec testns ip link set dev dummy0 down
+ check_err $?
+
+ ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null
+ check_fail $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null
+ check_fail $?
+
+ ip netns exec testns ip link del dev dummy0
+
+ ip netns del testns
+
+ if [ $ret -ne 0 ]; then
+ echo "FAIL: unicast route test"
+ return 1
+ fi
+ echo "PASS: unicast route test"
+}
+
+fib_down_multipath_test_do()
+{
+ local down_dev=$1
+ local up_dev=$2
+
+ ip netns exec testns ip route get fibmatch 203.0.113.1 \
+ oif $down_dev &> /dev/null
+ check_fail $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 \
+ oif $down_dev &> /dev/null
+ check_fail $?
+
+ ip netns exec testns ip route get fibmatch 203.0.113.1 \
+ oif $up_dev &> /dev/null
+ check_err $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 \
+ oif $up_dev &> /dev/null
+ check_err $?
+
+ ip netns exec testns ip route get fibmatch 203.0.113.1 | \
+ grep $down_dev | grep -q "dead linkdown"
+ check_err $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 | \
+ grep $down_dev | grep -q "dead linkdown"
+ check_err $?
+
+ ip netns exec testns ip route get fibmatch 203.0.113.1 | \
+ grep $up_dev | grep -q "dead linkdown"
+ check_fail $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 | \
+ grep $up_dev | grep -q "dead linkdown"
+ check_fail $?
+}
+
+fib_down_multipath_test()
+{
+ ret=0
+
+ netns_create "testns"
+
+ ip netns exec testns ip link add dummy0 type dummy
+ ip netns exec testns ip link set dev dummy0 up
+
+ ip netns exec testns ip link add dummy1 type dummy
+ ip netns exec testns ip link set dev dummy1 up
+
+ ip netns exec testns ip address add 198.51.100.1/24 dev dummy0
+ ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0
+
+ ip netns exec testns ip address add 192.0.2.1/24 dev dummy1
+ ip netns exec testns ip -6 address add 2001:db8:2::1/64 dev dummy1
+
+ ip netns exec testns ip route add 203.0.113.0/24 \
+ nexthop via 198.51.100.2 dev dummy0 \
+ nexthop via 192.0.2.2 dev dummy1
+ ip netns exec testns ip -6 route add 2001:db8:3::/64 \
+ nexthop via 2001:db8:1::2 dev dummy0 \
+ nexthop via 2001:db8:2::2 dev dummy1
+
+ ip netns exec testns ip route get fibmatch 203.0.113.1 &> /dev/null
+ check_err $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 &> /dev/null
+ check_err $?
+
+ ip netns exec testns ip link set dev dummy0 down
+ check_err $?
+
+ fib_down_multipath_test_do "dummy0" "dummy1"
+
+ ip netns exec testns ip link set dev dummy0 up
+ check_err $?
+ ip netns exec testns ip link set dev dummy1 down
+ check_err $?
+
+ fib_down_multipath_test_do "dummy1" "dummy0"
+
+ ip netns exec testns ip link set dev dummy0 down
+ check_err $?
+
+ ip netns exec testns ip route get fibmatch 203.0.113.1 &> /dev/null
+ check_fail $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 &> /dev/null
+ check_fail $?
+
+ ip netns exec testns ip link del dev dummy1
+ ip netns exec testns ip link del dev dummy0
+
+ ip netns del testns
+
+ if [ $ret -ne 0 ]; then
+ echo "FAIL: multipath route test"
+ return 1
+ fi
+ echo "PASS: multipath route test"
+}
+
+fib_down_test()
+{
+ echo "Running netdev down tests"
+
+ fib_down_unicast_test
+ fib_down_multipath_test
+}
+
+fib_carrier_local_test()
+{
+ ret=0
+
+ # Local routes should not be affected when carrier changes.
+ netns_create "testns"
+
+ ip netns exec testns ip link add dummy0 type dummy
+ ip netns exec testns ip link set dev dummy0 up
+
+ ip netns exec testns ip link set dev dummy0 carrier on
+
+ ip netns exec testns ip address add 198.51.100.1/24 dev dummy0
+ ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0
+
+ ip netns exec testns ip route get fibmatch 198.51.100.1 &> /dev/null
+ check_err $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:1::1 &> /dev/null
+ check_err $?
+
+ ip netns exec testns ip route get fibmatch 198.51.100.1 | \
+ grep -q "linkdown"
+ check_fail $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:1::1 | \
+ grep -q "linkdown"
+ check_fail $?
+
+ ip netns exec testns ip link set dev dummy0 carrier off
+
+ ip netns exec testns ip route get fibmatch 198.51.100.1 &> /dev/null
+ check_err $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:1::1 &> /dev/null
+ check_err $?
+
+ ip netns exec testns ip route get fibmatch 198.51.100.1 | \
+ grep -q "linkdown"
+ check_fail $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:1::1 | \
+ grep -q "linkdown"
+ check_fail $?
+
+ ip netns exec testns ip address add 192.0.2.1/24 dev dummy0
+ ip netns exec testns ip -6 address add 2001:db8:2::1/64 dev dummy0
+
+ ip netns exec testns ip route get fibmatch 192.0.2.1 &> /dev/null
+ check_err $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:2::1 &> /dev/null
+ check_err $?
+
+ ip netns exec testns ip route get fibmatch 192.0.2.1 | \
+ grep -q "linkdown"
+ check_fail $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:2::1 | \
+ grep -q "linkdown"
+ check_fail $?
+
+ ip netns exec testns ip link del dev dummy0
+
+ ip netns del testns
+
+ if [ $ret -ne 0 ]; then
+ echo "FAIL: local route carrier test"
+ return 1
+ fi
+ echo "PASS: local route carrier test"
+}
+
+fib_carrier_unicast_test()
+{
+ ret=0
+
+ netns_create "testns"
+
+ ip netns exec testns ip link add dummy0 type dummy
+ ip netns exec testns ip link set dev dummy0 up
+
+ ip netns exec testns ip link set dev dummy0 carrier on
+
+ ip netns exec testns ip address add 198.51.100.1/24 dev dummy0
+ ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0
+
+ ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null
+ check_err $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null
+ check_err $?
+
+ ip netns exec testns ip route get fibmatch 198.51.100.2 | \
+ grep -q "linkdown"
+ check_fail $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 | \
+ grep -q "linkdown"
+ check_fail $?
+
+ ip netns exec testns ip link set dev dummy0 carrier off
+
+ ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null
+ check_err $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null
+ check_err $?
+
+ ip netns exec testns ip route get fibmatch 198.51.100.2 | \
+ grep -q "linkdown"
+ check_err $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 | \
+ grep -q "linkdown"
+ check_err $?
+
+ ip netns exec testns ip address add 192.0.2.1/24 dev dummy0
+ ip netns exec testns ip -6 address add 2001:db8:2::1/64 dev dummy0
+
+ ip netns exec testns ip route get fibmatch 192.0.2.2 &> /dev/null
+ check_err $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:2::2 &> /dev/null
+ check_err $?
+
+ ip netns exec testns ip route get fibmatch 192.0.2.2 | \
+ grep -q "linkdown"
+ check_err $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:2::2 | \
+ grep -q "linkdown"
+ check_err $?
+
+ ip netns exec testns ip link del dev dummy0
+
+ ip netns del testns
+
+ if [ $ret -ne 0 ]; then
+ echo "FAIL: unicast route carrier test"
+ return 1
+ fi
+ echo "PASS: unicast route carrier test"
+}
+
+fib_carrier_test()
+{
+ echo "Running netdev carrier change tests"
+
+ fib_carrier_local_test
+ fib_carrier_unicast_test
+}
+
+fib_test()
+{
+ fib_unreg_test
+ fib_down_test
+ fib_carrier_test
+}
+
+if [ "$(id -u)" -ne 0 ];then
+ echo "SKIP: Need root privileges"
+ exit 0
+fi
+
+if [ ! -x "$(command -v ip)" ]; then
+ echo "SKIP: Could not run test without ip tool"
+ exit 0
+fi
+
+ip route help 2>&1 | grep -q fibmatch
+if [ $? -ne 0 ]; then
+ echo "SKIP: iproute2 too old, missing fibmatch"
+ exit 0
+fi
+
+fib_test
+
+exit $ret
diff --git a/tools/testing/selftests/net/msg_zerocopy.c b/tools/testing/selftests/net/msg_zerocopy.c
index 3ab6ec403905..e11fe84de0fd 100644
--- a/tools/testing/selftests/net/msg_zerocopy.c
+++ b/tools/testing/selftests/net/msg_zerocopy.c
@@ -259,22 +259,28 @@ static int setup_ip6h(struct ipv6hdr *ip6h, uint16_t payload_len)
return sizeof(*ip6h);
}
-static void setup_sockaddr(int domain, const char *str_addr, void *sockaddr)
+
+static void setup_sockaddr(int domain, const char *str_addr,
+ struct sockaddr_storage *sockaddr)
{
struct sockaddr_in6 *addr6 = (void *) sockaddr;
struct sockaddr_in *addr4 = (void *) sockaddr;
switch (domain) {
case PF_INET:
+ memset(addr4, 0, sizeof(*addr4));
addr4->sin_family = AF_INET;
addr4->sin_port = htons(cfg_port);
- if (inet_pton(AF_INET, str_addr, &(addr4->sin_addr)) != 1)
+ if (str_addr &&
+ inet_pton(AF_INET, str_addr, &(addr4->sin_addr)) != 1)
error(1, 0, "ipv4 parse error: %s", str_addr);
break;
case PF_INET6:
+ memset(addr6, 0, sizeof(*addr6));
addr6->sin6_family = AF_INET6;
addr6->sin6_port = htons(cfg_port);
- if (inet_pton(AF_INET6, str_addr, &(addr6->sin6_addr)) != 1)
+ if (str_addr &&
+ inet_pton(AF_INET6, str_addr, &(addr6->sin6_addr)) != 1)
error(1, 0, "ipv6 parse error: %s", str_addr);
break;
default:
@@ -603,6 +609,7 @@ static void parse_opts(int argc, char **argv)
sizeof(struct tcphdr) -
40 /* max tcp options */;
int c;
+ char *daddr = NULL, *saddr = NULL;
cfg_payload_len = max_payload_len;
@@ -627,7 +634,7 @@ static void parse_opts(int argc, char **argv)
cfg_cpu = strtol(optarg, NULL, 0);
break;
case 'D':
- setup_sockaddr(cfg_family, optarg, &cfg_dst_addr);
+ daddr = optarg;
break;
case 'i':
cfg_ifindex = if_nametoindex(optarg);
@@ -638,7 +645,7 @@ static void parse_opts(int argc, char **argv)
cfg_cork_mixed = true;
break;
case 'p':
- cfg_port = htons(strtoul(optarg, NULL, 0));
+ cfg_port = strtoul(optarg, NULL, 0);
break;
case 'r':
cfg_rx = true;
@@ -647,7 +654,7 @@ static void parse_opts(int argc, char **argv)
cfg_payload_len = strtoul(optarg, NULL, 0);
break;
case 'S':
- setup_sockaddr(cfg_family, optarg, &cfg_src_addr);
+ saddr = optarg;
break;
case 't':
cfg_runtime_ms = 200 + strtoul(optarg, NULL, 10) * 1000;
@@ -660,6 +667,8 @@ static void parse_opts(int argc, char **argv)
break;
}
}
+ setup_sockaddr(cfg_family, daddr, &cfg_dst_addr);
+ setup_sockaddr(cfg_family, saddr, &cfg_src_addr);
if (cfg_payload_len > max_payload_len)
error(1, 0, "-s: payload exceeds max (%d)", max_payload_len);
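
The reworked option handling above defers address parsing until the family is final and keeps cfg_port in host byte order, converting it exactly once inside setup_sockaddr(). A rough, self-contained illustration of that pattern; the names here are illustrative, not the test's own:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static void fill_sockaddr(int family, const char *str, uint16_t port,
			  struct sockaddr_storage *ss)
{
	struct sockaddr_in *a4 = (struct sockaddr_in *)ss;
	struct sockaddr_in6 *a6 = (struct sockaddr_in6 *)ss;

	memset(ss, 0, sizeof(*ss));	/* zero first, as the patch now does */
	if (family == AF_INET) {
		a4->sin_family = AF_INET;
		a4->sin_port = htons(port);	/* single host-to-network conversion */
		if (str && inet_pton(AF_INET, str, &a4->sin_addr) != 1)
			fprintf(stderr, "bad ipv4 address: %s\n", str);
	} else {
		a6->sin6_family = AF_INET6;
		a6->sin6_port = htons(port);
		if (str && inet_pton(AF_INET6, str, &a6->sin6_addr) != 1)
			fprintf(stderr, "bad ipv6 address: %s\n", str);
	}
}

int main(void)
{
	struct sockaddr_storage dst;

	/* called only after all options are parsed, so -4/-6 may come last */
	fill_sockaddr(AF_INET6, "::1", 9000, &dst);
	return 0;
}
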
diff --git a/tools/testing/selftests/net/reuseport_bpf.c b/tools/testing/selftests/net/reuseport_bpf.c
index 4a8217448f20..cad14cd0ea92 100644
--- a/tools/testing/selftests/net/reuseport_bpf.c
+++ b/tools/testing/selftests/net/reuseport_bpf.c
@@ -21,6 +21,7 @@
#include <sys/epoll.h>
#include <sys/types.h>
#include <sys/socket.h>
+#include <sys/resource.h>
#include <unistd.h>
#ifndef ARRAY_SIZE
@@ -190,11 +191,14 @@ static void send_from(struct test_params p, uint16_t sport, char *buf,
struct sockaddr * const saddr = new_any_sockaddr(p.send_family, sport);
struct sockaddr * const daddr =
new_loopback_sockaddr(p.send_family, p.recv_port);
- const int fd = socket(p.send_family, p.protocol, 0);
+ const int fd = socket(p.send_family, p.protocol, 0), one = 1;
if (fd < 0)
error(1, errno, "failed to create send socket");
+ if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)))
+ error(1, errno, "failed to set reuseaddr");
+
if (bind(fd, saddr, sockaddr_size()))
error(1, errno, "failed to bind send socket");
@@ -433,6 +437,21 @@ void enable_fastopen(void)
}
}
+static struct rlimit rlim_old, rlim_new;
+
+static __attribute__((constructor)) void main_ctor(void)
+{
+ getrlimit(RLIMIT_MEMLOCK, &rlim_old);
+ rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20);
+ rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20);
+ setrlimit(RLIMIT_MEMLOCK, &rlim_new);
+}
+
+static __attribute__((destructor)) void main_dtor(void)
+{
+ setrlimit(RLIMIT_MEMLOCK, &rlim_old);
+}
+
int main(void)
{
fprintf(stderr, "---- IPv4 UDP ----\n");
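
The constructor/destructor pair added above raises RLIMIT_MEMLOCK for the whole test run, so BPF program and map allocations do not fail on small default limits, and restores it on exit. A stand-alone sketch of the same pattern; note that raising rlim_max beyond the current hard limit additionally requires privilege:

#include <stdio.h>
#include <sys/resource.h>

static struct rlimit saved;

static __attribute__((constructor)) void memlock_bump(void)
{
	struct rlimit r;

	getrlimit(RLIMIT_MEMLOCK, &saved);
	r = saved;
	r.rlim_cur += 1UL << 20;	/* +1 MiB of lockable memory */
	r.rlim_max += 1UL << 20;
	if (setrlimit(RLIMIT_MEMLOCK, &r))
		perror("setrlimit");	/* may need CAP_SYS_RESOURCE */
}

static __attribute__((destructor)) void memlock_restore(void)
{
	setrlimit(RLIMIT_MEMLOCK, &saved);
}

int main(void)
{
	struct rlimit r;

	getrlimit(RLIMIT_MEMLOCK, &r);
	printf("RLIMIT_MEMLOCK now %llu\n", (unsigned long long)r.rlim_cur);
	return 0;
}
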
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
index 5215493166c9..a622eeecc3a6 100755
--- a/tools/testing/selftests/net/rtnetlink.sh
+++ b/tools/testing/selftests/net/rtnetlink.sh
@@ -502,6 +502,231 @@ kci_test_macsec()
echo "PASS: macsec"
}
+kci_test_gretap()
+{
+ testns="testns"
+ DEV_NS=gretap00
+ ret=0
+
+ ip netns add "$testns"
+ if [ $? -ne 0 ]; then
+ echo "SKIP gretap tests: cannot add net namespace $testns"
+ return 1
+ fi
+
+ ip link help gretap 2>&1 | grep -q "^Usage:"
+ if [ $? -ne 0 ];then
+ echo "SKIP: gretap: iproute2 too old"
+ return 1
+ fi
+
+ # test native tunnel
+ ip netns exec "$testns" ip link add dev "$DEV_NS" type gretap seq \
+ key 102 local 172.16.1.100 remote 172.16.1.200
+ check_err $?
+
+ ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+ check_err $?
+
+ ip netns exec "$testns" ip link set dev $DEV_NS up
+ check_err $?
+
+ ip netns exec "$testns" ip link del "$DEV_NS"
+ check_err $?
+
+ # test external mode
+ ip netns exec "$testns" ip link add dev "$DEV_NS" type gretap external
+ check_err $?
+
+ ip netns exec "$testns" ip link del "$DEV_NS"
+ check_err $?
+
+ if [ $ret -ne 0 ]; then
+ echo "FAIL: gretap"
+ return 1
+ fi
+ echo "PASS: gretap"
+
+ ip netns del "$testns"
+}
+
+kci_test_ip6gretap()
+{
+ testns="testns"
+ DEV_NS=ip6gretap00
+ ret=0
+
+ ip netns add "$testns"
+ if [ $? -ne 0 ]; then
+ echo "SKIP ip6gretap tests: cannot add net namespace $testns"
+ return 1
+ fi
+
+ ip link help ip6gretap 2>&1 | grep -q "^Usage:"
+ if [ $? -ne 0 ];then
+ echo "SKIP: ip6gretap: iproute2 too old"
+ return 1
+ fi
+
+ # test native tunnel
+ ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6gretap seq \
+ key 102 local fc00:100::1 remote fc00:100::2
+ check_err $?
+
+ ip netns exec "$testns" ip addr add dev "$DEV_NS" fc00:200::1/96
+ check_err $?
+
+ ip netns exec "$testns" ip link set dev $DEV_NS up
+ check_err $?
+
+ ip netns exec "$testns" ip link del "$DEV_NS"
+ check_err $?
+
+ # test external mode
+ ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6gretap external
+ check_err $?
+
+ ip netns exec "$testns" ip link del "$DEV_NS"
+ check_err $?
+
+ if [ $ret -ne 0 ]; then
+ echo "FAIL: ip6gretap"
+ return 1
+ fi
+ echo "PASS: ip6gretap"
+
+ ip netns del "$testns"
+}
+
+kci_test_erspan()
+{
+ testns="testns"
+ DEV_NS=erspan00
+ ret=0
+
+ ip link help erspan 2>&1 | grep -q "^Usage:"
+ if [ $? -ne 0 ];then
+ echo "SKIP: erspan: iproute2 too old"
+ return 1
+ fi
+
+ ip netns add "$testns"
+ if [ $? -ne 0 ]; then
+ echo "SKIP erspan tests: cannot add net namespace $testns"
+ return 1
+ fi
+
+ # test native tunnel erspan v1
+ ip netns exec "$testns" ip link add dev "$DEV_NS" type erspan seq \
+ key 102 local 172.16.1.100 remote 172.16.1.200 \
+ erspan_ver 1 erspan 488
+ check_err $?
+
+ ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+ check_err $?
+
+ ip netns exec "$testns" ip link set dev $DEV_NS up
+ check_err $?
+
+ ip netns exec "$testns" ip link del "$DEV_NS"
+ check_err $?
+
+ # test native tunnel erspan v2
+ ip netns exec "$testns" ip link add dev "$DEV_NS" type erspan seq \
+ key 102 local 172.16.1.100 remote 172.16.1.200 \
+ erspan_ver 2 erspan_dir ingress erspan_hwid 7
+ check_err $?
+
+ ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+ check_err $?
+
+ ip netns exec "$testns" ip link set dev $DEV_NS up
+ check_err $?
+
+ ip netns exec "$testns" ip link del "$DEV_NS"
+ check_err $?
+
+ # test external mode
+ ip netns exec "$testns" ip link add dev "$DEV_NS" type erspan external
+ check_err $?
+
+ ip netns exec "$testns" ip link del "$DEV_NS"
+ check_err $?
+
+ if [ $ret -ne 0 ]; then
+ echo "FAIL: erspan"
+ return 1
+ fi
+ echo "PASS: erspan"
+
+ ip netns del "$testns"
+}
+
+kci_test_ip6erspan()
+{
+ testns="testns"
+ DEV_NS=ip6erspan00
+ ret=0
+
+ ip link help ip6erspan 2>&1 | grep -q "^Usage:"
+ if [ $? -ne 0 ];then
+ echo "SKIP: ip6erspan: iproute2 too old"
+ return 1
+ fi
+
+ ip netns add "$testns"
+ if [ $? -ne 0 ]; then
+ echo "SKIP ip6erspan tests: cannot add net namespace $testns"
+ return 1
+ fi
+
+ # test native tunnel ip6erspan v1
+ ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6erspan seq \
+ key 102 local fc00:100::1 remote fc00:100::2 \
+ erspan_ver 1 erspan 488
+ check_err $?
+
+ ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+ check_err $?
+
+ ip netns exec "$testns" ip link set dev $DEV_NS up
+ check_err $?
+
+ ip netns exec "$testns" ip link del "$DEV_NS"
+ check_err $?
+
+ # test native tunnel ip6erspan v2
+ ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6erspan seq \
+ key 102 local fc00:100::1 remote fc00:100::2 \
+ erspan_ver 2 erspan_dir ingress erspan_hwid 7
+ check_err $?
+
+ ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+ check_err $?
+
+ ip netns exec "$testns" ip link set dev $DEV_NS up
+ check_err $?
+
+ ip netns exec "$testns" ip link del "$DEV_NS"
+ check_err $?
+
+ # test external mode
+ ip netns exec "$testns" ip link add dev "$DEV_NS" \
+ type ip6erspan external
+ check_err $?
+
+ ip netns exec "$testns" ip link del "$DEV_NS"
+ check_err $?
+
+ if [ $ret -ne 0 ]; then
+ echo "FAIL: ip6erspan"
+ return 1
+ fi
+ echo "PASS: ip6erspan"
+
+ ip netns del "$testns"
+}
+
kci_test_rtnl()
{
kci_add_dummy
@@ -514,6 +739,10 @@ kci_test_rtnl()
kci_test_route_get
kci_test_tc
kci_test_gre
+ kci_test_gretap
+ kci_test_ip6gretap
+ kci_test_erspan
+ kci_test_ip6erspan
kci_test_bridge
kci_test_addrlabel
kci_test_ifalias
diff --git a/tools/testing/selftests/nsfs/pidns.c b/tools/testing/selftests/nsfs/pidns.c
index 1182d4e437a2..e0d86e1668c0 100644
--- a/tools/testing/selftests/nsfs/pidns.c
+++ b/tools/testing/selftests/nsfs/pidns.c
@@ -70,7 +70,7 @@ int main(int argc, char *argv[])
return pr_err("NS_GET_PARENT returned a wrong namespace");
if (ioctl(pns, NS_GET_PARENT) >= 0 || errno != EPERM)
- return pr_err("Don't get EPERM");;
+ return pr_err("Don't get EPERM");
}
kill(pid, SIGKILL);
diff --git a/tools/testing/selftests/ntb/ntb_test.sh b/tools/testing/selftests/ntb/ntb_test.sh
index 5fc7ad359e21..08cbfbbc7029 100755
--- a/tools/testing/selftests/ntb/ntb_test.sh
+++ b/tools/testing/selftests/ntb/ntb_test.sh
@@ -18,7 +18,6 @@ LIST_DEVS=FALSE
DEBUGFS=${DEBUGFS-/sys/kernel/debug}
-DB_BITMASK=0x7FFF
PERF_RUN_ORDER=32
MAX_MW_SIZE=0
RUN_DMA_TESTS=
@@ -39,15 +38,17 @@ function show_help()
echo "be highly recommended."
echo
echo "Options:"
- echo " -b BITMASK doorbell clear bitmask for ntb_tool"
echo " -C don't cleanup ntb modules on exit"
- echo " -d run dma tests"
echo " -h show this help message"
echo " -l list available local and remote PCI ids"
echo " -r REMOTE_HOST specify the remote's hostname to connect"
- echo " to for the test (using ssh)"
- echo " -p NUM ntb_perf run order (default: $PERF_RUN_ORDER)"
- echo " -w max_mw_size maxmium memory window size"
+ echo " to for the test (using ssh)"
+ echo " -m MW_SIZE memory window size for ntb_tool"
+ echo " (default: $MW_SIZE)"
+ echo " -d run dma tests for ntb_perf"
+ echo " -p ORDER total data order for ntb_perf"
+ echo " (default: $PERF_RUN_ORDER)"
+ echo " -w MAX_MW_SIZE maxmium memory window size for ntb_perf"
echo
}
@@ -56,7 +57,6 @@ function parse_args()
OPTIND=0
while getopts "b:Cdhlm:r:p:w:" opt; do
case "$opt" in
- b) DB_BITMASK=${OPTARG} ;;
C) DONT_CLEANUP=1 ;;
d) RUN_DMA_TESTS=1 ;;
h) show_help; exit 0 ;;
@@ -87,7 +87,7 @@ set -e
function _modprobe()
{
- modprobe "$@"
+ modprobe "$@"
if [[ "$REMOTE_HOST" != "" ]]; then
ssh "$REMOTE_HOST" modprobe "$@"
@@ -127,15 +127,70 @@ function write_file()
fi
}
+function check_file()
+{
+ split_remote $1
+
+ if [[ "$REMOTE" != "" ]]; then
+ ssh "$REMOTE" "[[ -e ${VPATH} ]]"
+ else
+ [[ -e ${VPATH} ]]
+ fi
+}
+
+function subdirname()
+{
+ echo $(basename $(dirname $1)) 2> /dev/null
+}
+
+function find_pidx()
+{
+ PORT=$1
+ PPATH=$2
+
+ for ((i = 0; i < 64; i++)); do
+ PEER_DIR="$PPATH/peer$i"
+
+ check_file ${PEER_DIR} || break
+
+ PEER_PORT=$(read_file "${PEER_DIR}/port")
+ if [[ ${PORT} -eq $PEER_PORT ]]; then
+ echo $i
+ return 0
+ fi
+ done
+
+ return 1
+}
+
+function port_test()
+{
+ LOC=$1
+ REM=$2
+
+ echo "Running port tests on: $(basename $LOC) / $(basename $REM)"
+
+ LOCAL_PORT=$(read_file "$LOC/port")
+ REMOTE_PORT=$(read_file "$REM/port")
+
+ LOCAL_PIDX=$(find_pidx ${REMOTE_PORT} "$LOC")
+ REMOTE_PIDX=$(find_pidx ${LOCAL_PORT} "$REM")
+
+ echo "Local port ${LOCAL_PORT} with index ${REMOTE_PIDX} on remote host"
+ echo "Peer port ${REMOTE_PORT} with index ${LOCAL_PIDX} on local host"
+
+ echo " Passed"
+}
+
function link_test()
{
LOC=$1
REM=$2
EXP=0
- echo "Running link tests on: $(basename $LOC) / $(basename $REM)"
+ echo "Running link tests on: $(subdirname $LOC) / $(subdirname $REM)"
- if ! write_file "N" "$LOC/link" 2> /dev/null; then
+ if ! write_file "N" "$LOC/../link" 2> /dev/null; then
echo " Unsupported"
return
fi
@@ -143,12 +198,11 @@ function link_test()
write_file "N" "$LOC/link_event"
if [[ $(read_file "$REM/link") != "N" ]]; then
- echo "Expected remote link to be down in $REM/link" >&2
+ echo "Expected link to be down in $REM/link" >&2
exit -1
fi
- write_file "Y" "$LOC/link"
- write_file "Y" "$LOC/link_event"
+ write_file "Y" "$LOC/../link"
echo " Passed"
}
@@ -161,58 +215,136 @@ function doorbell_test()
echo "Running db tests on: $(basename $LOC) / $(basename $REM)"
- write_file "c $DB_BITMASK" "$REM/db"
+ DB_VALID_MASK=$(read_file "$LOC/db_valid_mask")
+
+ write_file "c $DB_VALID_MASK" "$REM/db"
- for ((i=1; i <= 8; i++)); do
- let DB=$(read_file "$REM/db") || true
- if [[ "$DB" != "$EXP" ]]; then
+ for ((i = 0; i < 64; i++)); do
+ DB=$(read_file "$REM/db")
+ if [[ "$DB" -ne "$EXP" ]]; then
echo "Doorbell doesn't match expected value $EXP " \
"in $REM/db" >&2
exit -1
fi
- let "MASK=1 << ($i-1)" || true
- let "EXP=$EXP | $MASK" || true
+ let "MASK = (1 << $i) & $DB_VALID_MASK" || true
+ let "EXP = $EXP | $MASK" || true
+
write_file "s $MASK" "$LOC/peer_db"
done
+ write_file "c $DB_VALID_MASK" "$REM/db_mask"
+ write_file $DB_VALID_MASK "$REM/db_event"
+ write_file "s $DB_VALID_MASK" "$REM/db_mask"
+
+ write_file "c $DB_VALID_MASK" "$REM/db"
+
echo " Passed"
}
-function read_spad()
+function get_files_count()
{
- VPATH=$1
- IDX=$2
+ NAME=$1
+ LOC=$2
+
+ split_remote $LOC
- ROW=($(read_file "$VPATH" | grep -e "^$IDX"))
- let VAL=${ROW[1]} || true
- echo $VAL
+ if [[ "$REMOTE" == "" ]]; then
+ echo $(ls -1 "$LOC"/${NAME}* 2>/dev/null | wc -l)
+ else
+ echo $(ssh "$REMOTE" "ls -1 \"$VPATH\"/${NAME}* | \
+ wc -l" 2> /dev/null)
+ fi
}
function scratchpad_test()
{
LOC=$1
REM=$2
- CNT=$(read_file "$LOC/spad" | wc -l)
- echo "Running spad tests on: $(basename $LOC) / $(basename $REM)"
+ echo "Running spad tests on: $(subdirname $LOC) / $(subdirname $REM)"
+
+ CNT=$(get_files_count "spad" "$LOC")
+
+ if [[ $CNT -eq 0 ]]; then
+ echo " Unsupported"
+ return
+ fi
for ((i = 0; i < $CNT; i++)); do
VAL=$RANDOM
- write_file "$i $VAL" "$LOC/peer_spad"
- RVAL=$(read_spad "$REM/spad" $i)
+ write_file "$VAL" "$LOC/spad$i"
+ RVAL=$(read_file "$REM/../spad$i")
- if [[ "$VAL" != "$RVAL" ]]; then
- echo "Scratchpad doesn't match expected value $VAL " \
- "in $REM/spad, got $RVAL" >&2
+ if [[ "$VAL" -ne "$RVAL" ]]; then
+ echo "Scratchpad $i value $RVAL doesn't match $VAL" >&2
exit -1
fi
+ done
+
+ echo " Passed"
+}
+
+function message_test()
+{
+ LOC=$1
+ REM=$2
+
+ echo "Running msg tests on: $(subdirname $LOC) / $(subdirname $REM)"
+
+ CNT=$(get_files_count "msg" "$LOC")
+ if [[ $CNT -eq 0 ]]; then
+ echo " Unsupported"
+ return
+ fi
+
+ MSG_OUTBITS_MASK=$(read_file "$LOC/../msg_inbits")
+ MSG_INBITS_MASK=$(read_file "$REM/../msg_inbits")
+
+ write_file "c $MSG_OUTBITS_MASK" "$LOC/../msg_sts"
+ write_file "c $MSG_INBITS_MASK" "$REM/../msg_sts"
+
+ for ((i = 0; i < $CNT; i++)); do
+ VAL=$RANDOM
+ write_file "$VAL" "$LOC/msg$i"
+ RVAL=$(read_file "$REM/../msg$i")
+
+ if [[ "$VAL" -ne "${RVAL%%<-*}" ]]; then
+ echo "Message $i value $RVAL doesn't match $VAL" >&2
+ exit -1
+ fi
done
echo " Passed"
}
+function get_number()
+{
+ KEY=$1
+
+ sed -n "s/^\(${KEY}\)[ \t]*\(0x[0-9a-fA-F]*\)\(\[p\]\)\?$/\2/p"
+}
+
+function mw_alloc()
+{
+ IDX=$1
+ LOC=$2
+ REM=$3
+
+ write_file $MW_SIZE "$LOC/mw_trans$IDX"
+
+ INB_MW=$(read_file "$LOC/mw_trans$IDX")
+ MW_ALIGNED_SIZE=$(echo "$INB_MW" | get_number "Window Size")
+ MW_DMA_ADDR=$(echo "$INB_MW" | get_number "DMA Address")
+
+ write_file "$MW_DMA_ADDR:$(($MW_ALIGNED_SIZE))" "$REM/peer_mw_trans$IDX"
+
+ if [[ $MW_SIZE -ne $MW_ALIGNED_SIZE ]]; then
+ echo "MW $IDX size aligned to $MW_ALIGNED_SIZE"
+ fi
+}
+
function write_mw()
{
split_remote $2
@@ -225,17 +357,15 @@ function write_mw()
fi
}
-function mw_test()
+function mw_check()
{
IDX=$1
LOC=$2
REM=$3
- echo "Running $IDX tests on: $(basename $LOC) / $(basename $REM)"
+ write_mw "$LOC/mw$IDX"
- write_mw "$LOC/$IDX"
-
- split_remote "$LOC/$IDX"
+ split_remote "$LOC/mw$IDX"
if [[ "$REMOTE" == "" ]]; then
A=$VPATH
else
@@ -243,7 +373,7 @@ function mw_test()
ssh "$REMOTE" cat "$VPATH" > "$A"
fi
- split_remote "$REM/peer_$IDX"
+ split_remote "$REM/peer_mw$IDX"
if [[ "$REMOTE" == "" ]]; then
B=$VPATH
else
@@ -251,7 +381,7 @@ function mw_test()
ssh "$REMOTE" cat "$VPATH" > "$B"
fi
- cmp -n $MW_SIZE "$A" "$B"
+ cmp -n $MW_ALIGNED_SIZE "$A" "$B"
if [[ $? != 0 ]]; then
echo "Memory window $MW did not match!" >&2
fi
@@ -263,8 +393,39 @@ function mw_test()
if [[ "$B" == "/tmp/*" ]]; then
rm "$B"
fi
+}
+
+function mw_free()
+{
+ IDX=$1
+ LOC=$2
+ REM=$3
+
+ write_file "$MW_DMA_ADDR:0" "$REM/peer_mw_trans$IDX"
+
+ write_file 0 "$LOC/mw_trans$IDX"
+}
+
+function mw_test()
+{
+ LOC=$1
+ REM=$2
+
+ CNT=$(get_files_count "mw_trans" "$LOC")
+
+ for ((i = 0; i < $CNT; i++)); do
+ echo "Running mw$i tests on: $(subdirname $LOC) / " \
+ "$(subdirname $REM)"
+
+ mw_alloc $i $LOC $REM
+
+ mw_check $i $LOC $REM
+
+ mw_free $i $LOC $REM
+
+ echo " Passed"
+ done
- echo " Passed"
}
function pingpong_test()
@@ -274,13 +435,13 @@ function pingpong_test()
echo "Running ping pong tests on: $(basename $LOC) / $(basename $REM)"
- LOC_START=$(read_file $LOC/count)
- REM_START=$(read_file $REM/count)
+ LOC_START=$(read_file "$LOC/count")
+ REM_START=$(read_file "$REM/count")
sleep 7
- LOC_END=$(read_file $LOC/count)
- REM_END=$(read_file $REM/count)
+ LOC_END=$(read_file "$LOC/count")
+ REM_END=$(read_file "$REM/count")
if [[ $LOC_START == $LOC_END ]] || [[ $REM_START == $REM_END ]]; then
echo "Ping pong counter not incrementing!" >&2
@@ -300,19 +461,19 @@ function perf_test()
WITH="without"
fi
- _modprobe ntb_perf run_order=$PERF_RUN_ORDER \
+ _modprobe ntb_perf total_order=$PERF_RUN_ORDER \
max_mw_size=$MAX_MW_SIZE use_dma=$USE_DMA
echo "Running local perf test $WITH DMA"
- write_file "" $LOCAL_PERF/run
+ write_file "$LOCAL_PIDX" "$LOCAL_PERF/run"
echo -n " "
- read_file $LOCAL_PERF/run
+ read_file "$LOCAL_PERF/run"
echo " Passed"
echo "Running remote perf test $WITH DMA"
- write_file "" $REMOTE_PERF/run
+ write_file "$REMOTE_PIDX" "$REMOTE_PERF/run"
echo -n " "
- read_file $REMOTE_PERF/run
+ read_file "$REMOTE_PERF/run"
echo " Passed"
_modprobe -r ntb_perf
@@ -320,48 +481,44 @@ function perf_test()
function ntb_tool_tests()
{
- LOCAL_TOOL=$DEBUGFS/ntb_tool/$LOCAL_DEV
- REMOTE_TOOL=$REMOTE_HOST:$DEBUGFS/ntb_tool/$REMOTE_DEV
+ LOCAL_TOOL="$DEBUGFS/ntb_tool/$LOCAL_DEV"
+ REMOTE_TOOL="$REMOTE_HOST:$DEBUGFS/ntb_tool/$REMOTE_DEV"
echo "Starting ntb_tool tests..."
_modprobe ntb_tool
- write_file Y $LOCAL_TOOL/link_event
- write_file Y $REMOTE_TOOL/link_event
+ port_test "$LOCAL_TOOL" "$REMOTE_TOOL"
- link_test $LOCAL_TOOL $REMOTE_TOOL
- link_test $REMOTE_TOOL $LOCAL_TOOL
+ LOCAL_PEER_TOOL="$LOCAL_TOOL/peer$LOCAL_PIDX"
+ REMOTE_PEER_TOOL="$REMOTE_TOOL/peer$REMOTE_PIDX"
+
+ link_test "$LOCAL_PEER_TOOL" "$REMOTE_PEER_TOOL"
+ link_test "$REMOTE_PEER_TOOL" "$LOCAL_PEER_TOOL"
#Ensure the link is up on both sides before continuing
- write_file Y $LOCAL_TOOL/link_event
- write_file Y $REMOTE_TOOL/link_event
+ write_file "Y" "$LOCAL_PEER_TOOL/link_event"
+ write_file "Y" "$REMOTE_PEER_TOOL/link_event"
- for PEER_TRANS in $(ls $LOCAL_TOOL/peer_trans*); do
- PT=$(basename $PEER_TRANS)
- write_file $MW_SIZE $LOCAL_TOOL/$PT
- write_file $MW_SIZE $REMOTE_TOOL/$PT
- done
+ doorbell_test "$LOCAL_TOOL" "$REMOTE_TOOL"
+ doorbell_test "$REMOTE_TOOL" "$LOCAL_TOOL"
- doorbell_test $LOCAL_TOOL $REMOTE_TOOL
- doorbell_test $REMOTE_TOOL $LOCAL_TOOL
- scratchpad_test $LOCAL_TOOL $REMOTE_TOOL
- scratchpad_test $REMOTE_TOOL $LOCAL_TOOL
+ scratchpad_test "$LOCAL_PEER_TOOL" "$REMOTE_PEER_TOOL"
+ scratchpad_test "$REMOTE_PEER_TOOL" "$LOCAL_PEER_TOOL"
- for MW in $(ls $LOCAL_TOOL/mw*); do
- MW=$(basename $MW)
+ message_test "$LOCAL_PEER_TOOL" "$REMOTE_PEER_TOOL"
+ message_test "$REMOTE_PEER_TOOL" "$LOCAL_PEER_TOOL"
- mw_test $MW $LOCAL_TOOL $REMOTE_TOOL
- mw_test $MW $REMOTE_TOOL $LOCAL_TOOL
- done
+ mw_test "$LOCAL_PEER_TOOL" "$REMOTE_PEER_TOOL"
+ mw_test "$REMOTE_PEER_TOOL" "$LOCAL_PEER_TOOL"
_modprobe -r ntb_tool
}
function ntb_pingpong_tests()
{
- LOCAL_PP=$DEBUGFS/ntb_pingpong/$LOCAL_DEV
- REMOTE_PP=$REMOTE_HOST:$DEBUGFS/ntb_pingpong/$REMOTE_DEV
+ LOCAL_PP="$DEBUGFS/ntb_pingpong/$LOCAL_DEV"
+ REMOTE_PP="$REMOTE_HOST:$DEBUGFS/ntb_pingpong/$REMOTE_DEV"
echo "Starting ntb_pingpong tests..."
@@ -374,8 +531,8 @@ function ntb_pingpong_tests()
function ntb_perf_tests()
{
- LOCAL_PERF=$DEBUGFS/ntb_perf/$LOCAL_DEV
- REMOTE_PERF=$REMOTE_HOST:$DEBUGFS/ntb_perf/$REMOTE_DEV
+ LOCAL_PERF="$DEBUGFS/ntb_perf/$LOCAL_DEV"
+ REMOTE_PERF="$REMOTE_HOST:$DEBUGFS/ntb_perf/$REMOTE_DEV"
echo "Starting ntb_perf tests..."
diff --git a/tools/testing/selftests/powerpc/alignment/Makefile b/tools/testing/selftests/powerpc/alignment/Makefile
index 16b22004e75f..083a48a008b4 100644
--- a/tools/testing/selftests/powerpc/alignment/Makefile
+++ b/tools/testing/selftests/powerpc/alignment/Makefile
@@ -1,4 +1,5 @@
-TEST_GEN_PROGS := copy_unaligned copy_first_unaligned paste_unaligned paste_last_unaligned
+TEST_GEN_PROGS := copy_unaligned copy_first_unaligned paste_unaligned \
+ paste_last_unaligned alignment_handler
include ../../lib.mk
diff --git a/tools/testing/selftests/powerpc/alignment/alignment_handler.c b/tools/testing/selftests/powerpc/alignment/alignment_handler.c
new file mode 100644
index 000000000000..0f2698f9fd6d
--- /dev/null
+++ b/tools/testing/selftests/powerpc/alignment/alignment_handler.c
@@ -0,0 +1,491 @@
+/*
+ * Test the powerpc alignment handler on POWER8/POWER9
+ *
+ * Copyright (C) 2017 IBM Corporation (Michael Neuling, Andrew Donnellan)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * This selftest exercises the powerpc alignment fault handler.
+ *
+ * We create two sets of source and destination buffers, one in regular memory,
+ * the other cache-inhibited (we use /dev/fb0 for this).
+ *
+ * We initialise the source buffers, then use whichever set of load/store
+ * instructions is under test to copy bytes from the source buffers to the
+ * destination buffers. For the regular buffers, these instructions will
+ * execute normally. For the cache-inhibited buffers, these instructions
+ * will trap and cause an alignment fault, and the alignment fault handler
+ * will emulate the particular instruction under test. We then compare the
+ * destination buffers to ensure that the native and emulated cases give the
+ * same result.
+ *
+ * TODO:
+ * - Any FIXMEs below
+ * - Test VSX regs < 32 and > 32
+ * - Test all loads and stores
+ * - Check update forms do update register
+ * - Test alignment faults over page boundary
+ *
+ * Some old binutils may not support all the instructions.
+ */
+
+
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <getopt.h>
+#include <setjmp.h>
+#include <signal.h>
+
+#include "utils.h"
+
+int bufsize;
+int debug;
+int testing;
+volatile int gotsig;
+
+void sighandler(int sig, siginfo_t *info, void *ctx)
+{
+ ucontext_t *ucp = ctx;
+
+ if (!testing) {
+ signal(sig, SIG_DFL);
+ kill(0, sig);
+ }
+ gotsig = sig;
+#ifdef __powerpc64__
+ ucp->uc_mcontext.gp_regs[PT_NIP] += 4;
+#else
+ ucp->uc_mcontext.uc_regs->gregs[PT_NIP] += 4;
+#endif
+}
+
+#define XFORM(reg, n) " " #reg " ,%"#n",%2 ;"
+#define DFORM(reg, n) " " #reg " ,0(%"#n") ;"
+
+#define TEST(name, ld_op, st_op, form, ld_reg, st_reg) \
+ void test_##name(char *s, char *d) \
+ { \
+ asm volatile( \
+ #ld_op form(ld_reg, 0) \
+ #st_op form(st_reg, 1) \
+ :: "r"(s), "r"(d), "r"(0) \
+ : "memory", "vs0", "vs32", "r31"); \
+ } \
+ rc |= do_test(#name, test_##name)
+
+#define LOAD_VSX_XFORM_TEST(op) TEST(op, op, stxvd2x, XFORM, 32, 32)
+#define STORE_VSX_XFORM_TEST(op) TEST(op, lxvd2x, op, XFORM, 32, 32)
+#define LOAD_VSX_DFORM_TEST(op) TEST(op, op, stxv, DFORM, 32, 32)
+#define STORE_VSX_DFORM_TEST(op) TEST(op, lxv, op, DFORM, 32, 32)
+#define LOAD_VMX_XFORM_TEST(op) TEST(op, op, stxvd2x, XFORM, 0, 32)
+#define STORE_VMX_XFORM_TEST(op) TEST(op, lxvd2x, op, XFORM, 32, 0)
+#define LOAD_VMX_DFORM_TEST(op) TEST(op, op, stxv, DFORM, 0, 32)
+#define STORE_VMX_DFORM_TEST(op) TEST(op, lxv, op, DFORM, 32, 0)
+
+#define LOAD_XFORM_TEST(op) TEST(op, op, stdx, XFORM, 31, 31)
+#define STORE_XFORM_TEST(op) TEST(op, ldx, op, XFORM, 31, 31)
+#define LOAD_DFORM_TEST(op) TEST(op, op, std, DFORM, 31, 31)
+#define STORE_DFORM_TEST(op) TEST(op, ld, op, DFORM, 31, 31)
+
+#define LOAD_FLOAT_DFORM_TEST(op) TEST(op, op, stfd, DFORM, 0, 0)
+#define STORE_FLOAT_DFORM_TEST(op) TEST(op, lfd, op, DFORM, 0, 0)
+#define LOAD_FLOAT_XFORM_TEST(op) TEST(op, op, stfdx, XFORM, 0, 0)
+#define STORE_FLOAT_XFORM_TEST(op) TEST(op, lfdx, op, XFORM, 0, 0)
+
+
+/* FIXME: Unimplemented tests: */
+// STORE_DFORM_TEST(stq) /* FIXME: need two registers for quad */
+// STORE_DFORM_TEST(stswi) /* FIXME: string instruction */
+
+// STORE_XFORM_TEST(stwat) /* AMO can't emulate or run on CI */
+// STORE_XFORM_TEST(stdat) /* ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ */
+
+
+/* preload byte by byte */
+void preload_data(void *dst, int offset, int width)
+{
+ char *c = dst;
+ int i;
+
+ c += offset;
+
+ for (i = 0 ; i < width ; i++)
+ c[i] = i;
+}
+
+int test_memcpy(void *dst, void *src, int size, int offset,
+ void (*test_func)(char *, char *))
+{
+ char *s, *d;
+
+ s = src;
+ s += offset;
+ d = dst;
+ d += offset;
+
+ assert(size == 16);
+ gotsig = 0;
+ testing = 1;
+
+ test_func(s, d); /* run the actual test */
+
+ testing = 0;
+ if (gotsig) {
+ if (debug)
+ printf(" Got signal %i\n", gotsig);
+ return 1;
+ }
+ return 0;
+}
+
+void dumpdata(char *s1, char *s2, int n, char *test_name)
+{
+ int i;
+
+ printf(" %s: unexpected result:\n", test_name);
+ printf(" mem:");
+ for (i = 0; i < n; i++)
+ printf(" %02x", s1[i]);
+ printf("\n");
+ printf(" ci: ");
+ for (i = 0; i < n; i++)
+ printf(" %02x", s2[i]);
+ printf("\n");
+}
+
+int test_memcmp(void *s1, void *s2, int n, int offset, char *test_name)
+{
+ char *s1c, *s2c;
+
+ s1c = s1;
+ s1c += offset;
+ s2c = s2;
+ s2c += offset;
+
+ if (memcmp(s1c, s2c, n)) {
+ if (debug) {
+ printf("\n Compare failed. Offset:%i length:%i\n",
+ offset, n);
+ dumpdata(s1c, s2c, n, test_name);
+ }
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Do two memcpy tests using the same instructions: one goes through
+ * cacheable memory, the other through the cache-inhibited mapping.
+ */
+int do_test(char *test_name, void (*test_func)(char *, char *))
+{
+ int offset, width, fd, rc = 0, r;
+ void *mem0, *mem1, *ci0, *ci1;
+
+ printf("\tDoing %s:\t", test_name);
+
+ fd = open("/dev/fb0", O_RDWR);
+ if (fd < 0) {
+ printf("\n");
+ perror("Can't open /dev/fb0");
+ SKIP_IF(1);
+ }
+
+ ci0 = mmap(NULL, bufsize, PROT_WRITE, MAP_SHARED,
+ fd, 0x0);
+ ci1 = mmap(NULL, bufsize, PROT_WRITE, MAP_SHARED,
+ fd, bufsize);
+ if ((ci0 == MAP_FAILED) || (ci1 == MAP_FAILED)) {
+ printf("\n");
+ perror("mmap failed");
+ SKIP_IF(1);
+ }
+
+ rc = posix_memalign(&mem0, bufsize, bufsize);
+ if (rc) {
+ printf("\n");
+ return rc;
+ }
+
+ rc = posix_memalign(&mem1, bufsize, bufsize);
+ if (rc) {
+ printf("\n");
+ free(mem0);
+ return rc;
+ }
+
+ /* offset = 0 no alignment fault, so skip */
+ for (offset = 1; offset < 16; offset++) {
+ width = 16; /* vsx == 16 bytes */
+ r = 0;
+
+ /* load pattern into memory byte by byte */
+ preload_data(ci0, offset, width);
+ preload_data(mem0, offset, width); // FIXME: remove??
+ memcpy(ci0, mem0, bufsize);
+ memcpy(ci1, mem1, bufsize); /* initialise output to the same */
+
+ /* sanity check */
+ test_memcmp(mem0, ci0, width, offset, test_name);
+
+ r |= test_memcpy(ci1, ci0, width, offset, test_func);
+ r |= test_memcpy(mem1, mem0, width, offset, test_func);
+ if (r && !debug) {
+ printf("FAILED: Got signal");
+ break;
+ }
+
+ r |= test_memcmp(mem1, ci1, width, offset, test_name);
+ rc |= r;
+ if (r && !debug) {
+ printf("FAILED: Wrong Data");
+ break;
+ }
+ }
+ if (!r)
+ printf("PASSED");
+ printf("\n");
+
+ munmap(ci0, bufsize);
+ munmap(ci1, bufsize);
+ free(mem0);
+ free(mem1);
+
+ return rc;
+}
+
+int test_alignment_handler_vsx_206(void)
+{
+ int rc = 0;
+
+ printf("VSX: 2.06B\n");
+ LOAD_VSX_XFORM_TEST(lxvd2x);
+ LOAD_VSX_XFORM_TEST(lxvw4x);
+ LOAD_VSX_XFORM_TEST(lxsdx);
+ LOAD_VSX_XFORM_TEST(lxvdsx);
+ STORE_VSX_XFORM_TEST(stxvd2x);
+ STORE_VSX_XFORM_TEST(stxvw4x);
+ STORE_VSX_XFORM_TEST(stxsdx);
+ return rc;
+}
+
+int test_alignment_handler_vsx_207(void)
+{
+ int rc = 0;
+
+ printf("VSX: 2.07B\n");
+ LOAD_VSX_XFORM_TEST(lxsspx);
+ LOAD_VSX_XFORM_TEST(lxsiwax);
+ LOAD_VSX_XFORM_TEST(lxsiwzx);
+ STORE_VSX_XFORM_TEST(stxsspx);
+ STORE_VSX_XFORM_TEST(stxsiwx);
+ return rc;
+}
+
+int test_alignment_handler_vsx_300(void)
+{
+ int rc = 0;
+
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_00));
+ printf("VSX: 3.00B\n");
+ LOAD_VMX_DFORM_TEST(lxsd);
+ LOAD_VSX_XFORM_TEST(lxsibzx);
+ LOAD_VSX_XFORM_TEST(lxsihzx);
+ LOAD_VMX_DFORM_TEST(lxssp);
+ LOAD_VSX_DFORM_TEST(lxv);
+ LOAD_VSX_XFORM_TEST(lxvb16x);
+ LOAD_VSX_XFORM_TEST(lxvh8x);
+ LOAD_VSX_XFORM_TEST(lxvx);
+ LOAD_VSX_XFORM_TEST(lxvwsx);
+ LOAD_VSX_XFORM_TEST(lxvl);
+ LOAD_VSX_XFORM_TEST(lxvll);
+ STORE_VMX_DFORM_TEST(stxsd);
+ STORE_VSX_XFORM_TEST(stxsibx);
+ STORE_VSX_XFORM_TEST(stxsihx);
+ STORE_VMX_DFORM_TEST(stxssp);
+ STORE_VSX_DFORM_TEST(stxv);
+ STORE_VSX_XFORM_TEST(stxvb16x);
+ STORE_VSX_XFORM_TEST(stxvh8x);
+ STORE_VSX_XFORM_TEST(stxvx);
+ STORE_VSX_XFORM_TEST(stxvl);
+ STORE_VSX_XFORM_TEST(stxvll);
+ return rc;
+}
+
+int test_alignment_handler_integer(void)
+{
+ int rc = 0;
+
+ printf("Integer\n");
+ LOAD_DFORM_TEST(lbz);
+ LOAD_DFORM_TEST(lbzu);
+ LOAD_XFORM_TEST(lbzx);
+ LOAD_XFORM_TEST(lbzux);
+ LOAD_DFORM_TEST(lhz);
+ LOAD_DFORM_TEST(lhzu);
+ LOAD_XFORM_TEST(lhzx);
+ LOAD_XFORM_TEST(lhzux);
+ LOAD_DFORM_TEST(lha);
+ LOAD_DFORM_TEST(lhau);
+ LOAD_XFORM_TEST(lhax);
+ LOAD_XFORM_TEST(lhaux);
+ LOAD_XFORM_TEST(lhbrx);
+ LOAD_DFORM_TEST(lwz);
+ LOAD_DFORM_TEST(lwzu);
+ LOAD_XFORM_TEST(lwzx);
+ LOAD_XFORM_TEST(lwzux);
+ LOAD_DFORM_TEST(lwa);
+ LOAD_XFORM_TEST(lwax);
+ LOAD_XFORM_TEST(lwaux);
+ LOAD_XFORM_TEST(lwbrx);
+ LOAD_DFORM_TEST(ld);
+ LOAD_DFORM_TEST(ldu);
+ LOAD_XFORM_TEST(ldx);
+ LOAD_XFORM_TEST(ldux);
+ LOAD_XFORM_TEST(ldbrx);
+ LOAD_DFORM_TEST(lmw);
+ STORE_DFORM_TEST(stb);
+ STORE_XFORM_TEST(stbx);
+ STORE_DFORM_TEST(stbu);
+ STORE_XFORM_TEST(stbux);
+ STORE_DFORM_TEST(sth);
+ STORE_XFORM_TEST(sthx);
+ STORE_DFORM_TEST(sthu);
+ STORE_XFORM_TEST(sthux);
+ STORE_XFORM_TEST(sthbrx);
+ STORE_DFORM_TEST(stw);
+ STORE_XFORM_TEST(stwx);
+ STORE_DFORM_TEST(stwu);
+ STORE_XFORM_TEST(stwux);
+ STORE_XFORM_TEST(stwbrx);
+ STORE_DFORM_TEST(std);
+ STORE_XFORM_TEST(stdx);
+ STORE_DFORM_TEST(stdu);
+ STORE_XFORM_TEST(stdux);
+ STORE_XFORM_TEST(stdbrx);
+ STORE_DFORM_TEST(stmw);
+ return rc;
+}
+
+int test_alignment_handler_vmx(void)
+{
+ int rc = 0;
+
+ printf("VMX\n");
+ LOAD_VMX_XFORM_TEST(lvx);
+
+ /*
+ * FIXME: These loads only load part of the register, so our
+ * testing method doesn't work. Also they don't take alignment
+ * faults, so it's kinda pointless anyway
+ *
+ LOAD_VMX_XFORM_TEST(lvebx)
+ LOAD_VMX_XFORM_TEST(lvehx)
+ LOAD_VMX_XFORM_TEST(lvewx)
+ LOAD_VMX_XFORM_TEST(lvxl)
+ */
+ STORE_VMX_XFORM_TEST(stvx);
+ STORE_VMX_XFORM_TEST(stvebx);
+ STORE_VMX_XFORM_TEST(stvehx);
+ STORE_VMX_XFORM_TEST(stvewx);
+ STORE_VMX_XFORM_TEST(stvxl);
+ return rc;
+}
+
+int test_alignment_handler_fp(void)
+{
+ int rc = 0;
+
+ printf("Floating point\n");
+ LOAD_FLOAT_DFORM_TEST(lfd);
+ LOAD_FLOAT_XFORM_TEST(lfdx);
+ LOAD_FLOAT_DFORM_TEST(lfdp);
+ LOAD_FLOAT_XFORM_TEST(lfdpx);
+ LOAD_FLOAT_DFORM_TEST(lfdu);
+ LOAD_FLOAT_XFORM_TEST(lfdux);
+ LOAD_FLOAT_DFORM_TEST(lfs);
+ LOAD_FLOAT_XFORM_TEST(lfsx);
+ LOAD_FLOAT_DFORM_TEST(lfsu);
+ LOAD_FLOAT_XFORM_TEST(lfsux);
+ LOAD_FLOAT_XFORM_TEST(lfiwzx);
+ LOAD_FLOAT_XFORM_TEST(lfiwax);
+ STORE_FLOAT_DFORM_TEST(stfd);
+ STORE_FLOAT_XFORM_TEST(stfdx);
+ STORE_FLOAT_DFORM_TEST(stfdp);
+ STORE_FLOAT_XFORM_TEST(stfdpx);
+ STORE_FLOAT_DFORM_TEST(stfdu);
+ STORE_FLOAT_XFORM_TEST(stfdux);
+ STORE_FLOAT_DFORM_TEST(stfs);
+ STORE_FLOAT_XFORM_TEST(stfsx);
+ STORE_FLOAT_DFORM_TEST(stfsu);
+ STORE_FLOAT_XFORM_TEST(stfsux);
+ STORE_FLOAT_XFORM_TEST(stfiwx);
+
+ return rc;
+}
+
+void usage(char *prog)
+{
+ printf("Usage: %s [options]\n", prog);
+ printf(" -d Enable debug error output\n");
+ printf("\n");
+ printf("This test requires a POWER8 or POWER9 CPU and a usable ");
+ printf("framebuffer at /dev/fb0.\n");
+}
+
+int main(int argc, char *argv[])
+{
+
+ struct sigaction sa;
+ int rc = 0;
+ int option = 0;
+
+ while ((option = getopt(argc, argv, "d")) != -1) {
+ switch (option) {
+ case 'd':
+ debug++;
+ break;
+ default:
+ usage(argv[0]);
+ exit(1);
+ }
+ }
+
+ bufsize = getpagesize();
+
+ sa.sa_sigaction = sighandler;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_SIGINFO;
+ if (sigaction(SIGSEGV, &sa, NULL) == -1
+ || sigaction(SIGBUS, &sa, NULL) == -1
+ || sigaction(SIGILL, &sa, NULL) == -1) {
+ perror("sigaction");
+ exit(1);
+ }
+
+ rc |= test_harness(test_alignment_handler_vsx_206,
+ "test_alignment_handler_vsx_206");
+ rc |= test_harness(test_alignment_handler_vsx_207,
+ "test_alignment_handler_vsx_207");
+ rc |= test_harness(test_alignment_handler_vsx_300,
+ "test_alignment_handler_vsx_300");
+ rc |= test_harness(test_alignment_handler_integer,
+ "test_alignment_handler_integer");
+ rc |= test_harness(test_alignment_handler_vmx,
+ "test_alignment_handler_vmx");
+ rc |= test_harness(test_alignment_handler_fp,
+ "test_alignment_handler_fp");
+ return rc;
+}
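
To make the TEST() machinery above easier to follow, this is roughly what LOAD_XFORM_TEST(lwzx) expands to inside one of the test_alignment_handler_*() functions. It is a hand-reconstructed fragment (powerpc64 only, and relying on GCC's nested-function extension just as the original macro does), not compiler output: the instruction under test performs the load, a known-good store writes the result back out, and do_test() then compares the cached and cache-inhibited copies.

	void test_lwzx(char *s, char *d)
	{
		asm volatile(
			"lwzx 31 ,%0,%2 ;"	/* load under test: r31 = *(s + 0) */
			"stdx 31 ,%1,%2 ;"	/* known-good store: *(d + 0) = r31 */
			:: "r"(s), "r"(d), "r"(0)
			: "memory", "vs0", "vs32", "r31");
	}
	rc |= do_test("lwzx", test_lwzx);
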
diff --git a/tools/testing/selftests/powerpc/benchmarks/mmap_bench.c b/tools/testing/selftests/powerpc/benchmarks/mmap_bench.c
index 8d084a2d6e74..7a0a462a2272 100644
--- a/tools/testing/selftests/powerpc/benchmarks/mmap_bench.c
+++ b/tools/testing/selftests/powerpc/benchmarks/mmap_bench.c
@@ -7,17 +7,34 @@
#include <stdlib.h>
#include <sys/mman.h>
#include <time.h>
+#include <getopt.h>
#include "utils.h"
#define ITERATIONS 5000000
-#define MEMSIZE (128 * 1024 * 1024)
+#define MEMSIZE (1UL << 27)
+#define PAGE_SIZE (1UL << 16)
+#define CHUNK_COUNT (MEMSIZE/PAGE_SIZE)
+
+static int pg_fault;
+static int iterations = ITERATIONS;
+
+static struct option options[] = {
+ { "pgfault", no_argument, &pg_fault, 1 },
+ { "iterations", required_argument, 0, 'i' },
+ { 0, },
+};
+
+static void usage(void)
+{
+ printf("mmap_bench <--pgfault> <--iterations count>\n");
+}
int test_mmap(void)
{
struct timespec ts_start, ts_end;
- unsigned long i = ITERATIONS;
+ unsigned long i = iterations;
clock_gettime(CLOCK_MONOTONIC, &ts_start);
@@ -25,6 +42,11 @@ int test_mmap(void)
char *c = mmap(NULL, MEMSIZE, PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
FAIL_IF(c == MAP_FAILED);
+ if (pg_fault) {
+ int count;
+ for (count = 0; count < CHUNK_COUNT; count++)
+ c[count << 16] = 'c';
+ }
munmap(c, MEMSIZE);
}
@@ -35,7 +57,32 @@ int test_mmap(void)
return 0;
}
-int main(void)
+int main(int argc, char *argv[])
{
+ signed char c;
+ while (1) {
+ int option_index = 0;
+
+ c = getopt_long(argc, argv, "", options, &option_index);
+
+ if (c == -1)
+ break;
+
+ switch (c) {
+ case 0:
+ if (options[option_index].flag != 0)
+ break;
+
+ usage();
+ exit(1);
+ break;
+ case 'i':
+ iterations = atoi(optarg);
+ break;
+ default:
+ usage();
+ exit(1);
+ }
+ }
return test_harness(test_mmap, "mmap_bench");
}
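
A stand-alone sketch of the getopt_long() pattern the benchmark now uses, where --pgfault merely sets a flag through the option table (val 0) and --iterations carries a value. This is illustrative only and independent of the benchmark itself:

#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>

static int pg_fault;

int main(int argc, char *argv[])
{
	static struct option opts[] = {
		{ "pgfault",    no_argument,       &pg_fault, 1   },
		{ "iterations", required_argument, 0,         'i' },
		{ 0, },
	};
	int iterations = 5000000;
	int c;

	while ((c = getopt_long(argc, argv, "", opts, NULL)) != -1) {
		switch (c) {
		case 0:		/* a flag option such as --pgfault; nothing more to do */
			break;
		case 'i':
			iterations = atoi(optarg);
			break;
		default:
			fprintf(stderr, "usage: %s [--pgfault] [--iterations N]\n",
				argv[0]);
			return 1;
		}
	}
	printf("pgfault=%d iterations=%d\n", pg_fault, iterations);
	return 0;
}
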
diff --git a/tools/testing/selftests/powerpc/mm/.gitignore b/tools/testing/selftests/powerpc/mm/.gitignore
index e715a3f2fbf4..7d7c42ed6de9 100644
--- a/tools/testing/selftests/powerpc/mm/.gitignore
+++ b/tools/testing/selftests/powerpc/mm/.gitignore
@@ -1,4 +1,5 @@
hugetlb_vs_thp_test
subpage_prot
tempfile
-prot_sao \ No newline at end of file
+prot_sao
+segv_errors \ No newline at end of file
diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile
index bf315bcbe663..8ebbe96d80a8 100644
--- a/tools/testing/selftests/powerpc/mm/Makefile
+++ b/tools/testing/selftests/powerpc/mm/Makefile
@@ -2,7 +2,7 @@
noarg:
$(MAKE) -C ../
-TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao
+TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors
TEST_GEN_FILES := tempfile
include ../../lib.mk
diff --git a/tools/testing/selftests/powerpc/mm/segv_errors.c b/tools/testing/selftests/powerpc/mm/segv_errors.c
new file mode 100644
index 000000000000..06ae76ee3ea1
--- /dev/null
+++ b/tools/testing/selftests/powerpc/mm/segv_errors.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2017 John Sperbeck
+ *
+ * Test that an access to a mapped but inaccessible area causes a SEGV and
+ * reports si_code == SEGV_ACCERR.
+ */
+
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <signal.h>
+#include <sys/mman.h>
+#include <assert.h>
+#include <ucontext.h>
+
+#include "utils.h"
+
+static bool faulted;
+static int si_code;
+
+static void segv_handler(int n, siginfo_t *info, void *ctxt_v)
+{
+ ucontext_t *ctxt = (ucontext_t *)ctxt_v;
+ struct pt_regs *regs = ctxt->uc_mcontext.regs;
+
+ faulted = true;
+ si_code = info->si_code;
+ regs->nip += 4;
+}
+
+int test_segv_errors(void)
+{
+ struct sigaction act = {
+ .sa_sigaction = segv_handler,
+ .sa_flags = SA_SIGINFO,
+ };
+ char c, *p = NULL;
+
+ p = mmap(NULL, getpagesize(), 0, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+ FAIL_IF(p == MAP_FAILED);
+
+ FAIL_IF(sigaction(SIGSEGV, &act, NULL) != 0);
+
+ faulted = false;
+ si_code = 0;
+
+ /*
+ * We just need a compiler barrier, but mb() works and has the nice
+ * property of being easy to spot in the disassembly.
+ */
+ mb();
+ c = *p;
+ mb();
+
+ FAIL_IF(!faulted);
+ FAIL_IF(si_code != SEGV_ACCERR);
+
+ faulted = false;
+ si_code = 0;
+
+ mb();
+ *p = c;
+ mb();
+
+ FAIL_IF(!faulted);
+ FAIL_IF(si_code != SEGV_ACCERR);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(test_segv_errors, "segv_errors");
+}
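A side note on the mb() calls bracketing the faulting accesses above: as the in-code comment says, only a compiler barrier is strictly needed, so the compiler neither elides nor reorders the load and store of *p; mb() provides that and is easy to spot in the disassembly. A minimal compiler-only barrier would look like this (illustrative sketch, not the selftests' mb() definition):

/* Illustrative compiler barrier: an empty asm with a "memory" clobber
 * keeps the compiler from caching or reordering memory accesses across
 * this point; no instruction is emitted.
 */
#define barrier() asm volatile("" : : : "memory")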
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c
index 0df3c23b7888..277dade1b382 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c
@@ -79,8 +79,8 @@ trans:
: [res] "=r" (result), [texasr] "=r" (texasr)
: [fp_load] "r" (fp_load), [fp_load_ckpt] "r" (fp_load_ckpt),
[sprn_texasr] "i" (SPRN_TEXASR)
- : "memory", "r0", "r1", "r2", "r3", "r4",
- "r8", "r9", "r10", "r11"
+ : "memory", "r0", "r1", "r3", "r4",
+ "r7", "r8", "r9", "r10", "r11"
);
if (result) {
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c
index 94e57cb89769..51427a2465f6 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c
@@ -76,8 +76,7 @@ trans:
: [tfhar] "=r" (tfhar), [res] "=r" (result),
[texasr] "=r" (texasr), [cptr1] "=r" (cptr1)
: [sprn_texasr] "i" (SPRN_TEXASR)
- : "memory", "r0", "r1", "r2", "r3", "r4",
- "r8", "r9", "r10", "r11", "r31"
+ : "memory", "r0", "r8", "r31"
);
/* There are 2 32bit instructions before tbegin. */
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c
index b4081e2b22d5..17c23cabac3e 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c
@@ -67,7 +67,7 @@ trans:
: [res] "=r" (result), [texasr] "=r" (texasr)
: [fp_load] "r" (fp_load), [fp_load_ckpt] "r" (fp_load_ckpt),
[sprn_texasr] "i" (SPRN_TEXASR), [cptr1] "r" (&cptr[1])
- : "memory", "r0", "r1", "r2", "r3", "r4",
+ : "memory", "r0", "r1", "r3", "r4",
"r7", "r8", "r9", "r10", "r11"
);
diff --git a/tools/testing/selftests/powerpc/tm/.gitignore b/tools/testing/selftests/powerpc/tm/.gitignore
index 241a4a4ee0e4..bb90d4b79524 100644
--- a/tools/testing/selftests/powerpc/tm/.gitignore
+++ b/tools/testing/selftests/powerpc/tm/.gitignore
@@ -13,3 +13,4 @@ tm-signal-context-chk-vmx
tm-signal-context-chk-vsx
tm-vmx-unavail
tm-unavailable
+tm-trap
diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile
index 8ed6f8c57230..a23453943ad2 100644
--- a/tools/testing/selftests/powerpc/tm/Makefile
+++ b/tools/testing/selftests/powerpc/tm/Makefile
@@ -3,7 +3,7 @@ SIGNAL_CONTEXT_CHK_TESTS := tm-signal-context-chk-gpr tm-signal-context-chk-fpu
tm-signal-context-chk-vmx tm-signal-context-chk-vsx
TEST_GEN_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack \
- tm-vmxcopy tm-fork tm-tar tm-tmspr tm-vmx-unavail tm-unavailable \
+ tm-vmxcopy tm-fork tm-tar tm-tmspr tm-vmx-unavail tm-unavailable tm-trap \
$(SIGNAL_CONTEXT_CHK_TESTS)
include ../../lib.mk
@@ -18,6 +18,7 @@ $(OUTPUT)/tm-tmspr: CFLAGS += -pthread
$(OUTPUT)/tm-vmx-unavail: CFLAGS += -pthread -m64
$(OUTPUT)/tm-resched-dscr: ../pmu/lib.o
$(OUTPUT)/tm-unavailable: CFLAGS += -O0 -pthread -m64 -Wno-error=uninitialized -mvsx
+$(OUTPUT)/tm-trap: CFLAGS += -O0 -pthread -m64
SIGNAL_CONTEXT_CHK_TESTS := $(patsubst %,$(OUTPUT)/%,$(SIGNAL_CONTEXT_CHK_TESTS))
$(SIGNAL_CONTEXT_CHK_TESTS): tm-signal.S
diff --git a/tools/testing/selftests/powerpc/tm/tm-trap.c b/tools/testing/selftests/powerpc/tm/tm-trap.c
new file mode 100644
index 000000000000..5d92c23ee6cb
--- /dev/null
+++ b/tools/testing/selftests/powerpc/tm/tm-trap.c
@@ -0,0 +1,329 @@
+/*
+ * Copyright 2017, Gustavo Romero, IBM Corp.
+ * Licensed under GPLv2.
+ *
+ * Check if thread endianness is flipped inadvertently to BE on trap
+ * caught in TM whilst MSR.FP and MSR.VEC are zero (i.e. just after
+ * load_fp and load_vec overflowed).
+ *
+ * The issue can be checked on LE machines simply by zeroing load_fp
+ * and load_vec and then causing a trap in TM. Since the endianness
+ * changes to BE on return from the signal handler, a 'nop' is
+ * treated as an illegal instruction in the following sequence:
+ * tbegin.
+ * beq 1f
+ * trap
+ * tend.
+ * 1: nop
+ *
+ * However, although the issue is also present on BE machines, it's a
+ * bit trickier to check there because the MSR.LE bit gets flipped to
+ * zero, i.e. to BE, which is already the native endianness on those
+ * machines, so nothing notably critical happens,
+ * i.e. no illegal instruction is observed immediately after returning
+ * from the signal handler (as it happens on LE machines). Thus to test
+ * it on BE machines LE endianness is forced after a first trap and then
+ * the endianness is verified on subsequent traps to determine if the
+ * endianness "flipped back" to the native endianness (BE).
+ */
+
+#define _GNU_SOURCE
+#include <error.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <htmintrin.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdbool.h>
+
+#include "tm.h"
+#include "utils.h"
+
+#define pr_error(error_code, format, ...) \
+ error_at_line(1, error_code, __FILE__, __LINE__, format, ##__VA_ARGS__)
+
+#define MSR_LE 1UL
+#define LE 1UL
+
+pthread_t t0_ping;
+pthread_t t1_pong;
+
+int exit_from_pong;
+
+int trap_event;
+int le;
+
+bool success;
+
+void trap_signal_handler(int signo, siginfo_t *si, void *uc)
+{
+ ucontext_t *ucp = uc;
+ uint64_t thread_endianness;
+
+ /* Get thread endianness: extract bit LE from MSR */
+ thread_endianness = MSR_LE & ucp->uc_mcontext.gp_regs[PT_MSR];
+
+ /***
+ * Little-Endian Machine
+ */
+
+ if (le) {
+ /* First trap event */
+ if (trap_event == 0) {
+ /* Do nothing. It is on return from this trap
+ * event that the bug flips the endianness, so just
+ * let the process return from the signal handler and
+ * check on the second trap event whether the
+ * endianness was flipped or not.
+ */
+ }
+ /* Second trap event */
+ else if (trap_event == 1) {
+ /*
+ * Since the trap was caught in TM on the first trap event,
+ * if the endianness was still LE (not flipped inadvertently)
+ * after returning from the signal handler, instruction
+ * (1) is executed (basically a 'nop'), as it's located
+ * at the address of tbegin. +4 (the rollback addr). As (1) on
+ * LE endianness does in effect nothing, instruction (2)
+ * is then executed again as 'trap', generating a second
+ * trap event (note that in that case 'trap' is caught
+ * not in transactional mode). On the other hand, if after
+ * the return from the signal handler the endianness was
+ * inadvertently flipped, instruction (1) is treated as a
+ * branch instruction, i.e. b .+8, hence instructions (3)
+ * and (4) are executed (tbegin.; trap;) and we end up
+ * similarly in the trap signal handler, but now in TM mode.
+ * Either way, it's now possible to check the MSR LE bit
+ * once in the trap handler to verify if endianness was
+ * flipped or not after the return from the second trap
+ * event. If endianness is flipped, the bug is present.
+ * Finally, getting a trap in TM mode or not is just
+ * worth noting because it affects the math to determine
+ * the offset added to the NIP on return: the NIP for a
+ * trap caught in TM is the rollback address, i.e. the
+ * next instruction after 'tbegin.', whilst the NIP for
+ * a trap caught in non-transactional mode is the very
+ * same address of the 'trap' instruction that generated
+ * the trap event.
+ */
+
+ if (thread_endianness == LE) {
+ /* Go to 'success', i.e. instruction (6) */
+ ucp->uc_mcontext.gp_regs[PT_NIP] += 16;
+ } else {
+ /*
+ * Thread endianness is BE, so it flipped
+ * inadvertently. Thus we flip back to LE and
+ * set NIP to go to 'failure', instruction (5).
+ */
+ ucp->uc_mcontext.gp_regs[PT_MSR] |= 1UL;
+ ucp->uc_mcontext.gp_regs[PT_NIP] += 4;
+ }
+ }
+ }
+
+ /***
+ * Big-Endian Machine
+ */
+
+ else {
+ /* First trap event */
+ if (trap_event == 0) {
+ /*
+ * Force thread endianness to be LE. Instructions (1),
+ * (3), and (4) will be executed, generating a second
+ * trap in TM mode.
+ */
+ ucp->uc_mcontext.gp_regs[PT_MSR] |= 1UL;
+ }
+ /* Second trap event */
+ else if (trap_event == 1) {
+ /*
+ * Do nothing. If the bug is present, on return from this
+ * second trap event the endianness will flip back
+ * "automatically" to BE, otherwise the thread endianness
+ * will continue to be LE, just as it was set above.
+ */
+ }
+ /* A third trap event */
+ else {
+ /*
+ * Getting here means that after returning from the second
+ * trap event, instruction (4) (trap) was executed as LE,
+ * generating a third trap event. In that case the
+ * endianness is still LE as set on return from the first
+ * trap event, hence no bug. Otherwise, the endianness
+ * flipped back to BE on return from the second trap
+ * event, instruction (4) was executed as 'tdi' (so
+ * basically a 'nop') and the branch to 'failure' in
+ * instruction (5) was taken to indicate failure and we
+ * never get here.
+ */
+
+ /*
+ * Flip back to BE and go to instruction (6), i.e. go to
+ * 'success'.
+ */
+ ucp->uc_mcontext.gp_regs[PT_MSR] &= ~1UL;
+ ucp->uc_mcontext.gp_regs[PT_NIP] += 8;
+ }
+ }
+
+ trap_event++;
+}
+
+void usr1_signal_handler(int signo, siginfo_t *si, void *not_used)
+{
+ /* Got a USR1 signal from ping(), so just tell pong() to exit */
+ exit_from_pong = 1;
+}
+
+void *ping(void *not_used)
+{
+ uint64_t i;
+
+ trap_event = 0;
+
+ /*
+ * Wait a number of context switches so that load_fp and load_vec
+ * overflow and MSR_[FP|VEC|V] is 0.
+ */
+ for (i = 0; i < 1024*1024*512; i++)
+ ;
+
+ asm goto(
+ /*
+ * [NA] means "Native Endianness", i.e. it tells how an
+ * instruction is executed in the machine's native endianness (in
+ * other words, native endianness matches kernel endianness).
+ * [OP] means "Opposite Endianness", i.e. on a BE machine, it
+ * tells how an instruction is executed as an LE instruction;
+ * conversely, on an LE machine, it tells how an instruction is
+ * executed as a BE instruction. When [NA] is omitted, it means
+ * that the native interpretation of a given instruction is not
+ * relevant for the test. Likewise when [OP] is omitted.
+ */
+
+ " tbegin. ;" /* (0) tbegin. [NA] */
+ " tdi 0, 0, 0x48;" /* (1) nop [NA]; b (3) [OP] */
+ " trap ;" /* (2) trap [NA] */
+ ".long 0x1D05007C;" /* (3) tbegin. [OP] */
+ ".long 0x0800E07F;" /* (4) trap [OP]; nop [NA] */
+ " b %l[failure] ;" /* (5) b [NA]; MSR.LE flipped (bug) */
+ " b %l[success] ;" /* (6) b [NA]; MSR.LE did not flip (ok)*/
+
+ : : : : failure, success);
+
+failure:
+ success = false;
+ goto exit_from_ping;
+
+success:
+ success = true;
+
+exit_from_ping:
+ /* Tell pong() to exit before leaving */
+ pthread_kill(t1_pong, SIGUSR1);
+ return NULL;
+}
+
+void *pong(void *not_used)
+{
+ while (!exit_from_pong)
+ /*
+ * Induce context switches on the ping() thread
+ * until ping() finishes its job and signals
+ * us to exit from this loop.
+ */
+ sched_yield();
+
+ return NULL;
+}
+
+int tm_trap_test(void)
+{
+ uint16_t k = 1;
+
+ int rc;
+
+ pthread_attr_t attr;
+ cpu_set_t cpuset;
+
+ struct sigaction trap_sa;
+
+ trap_sa.sa_flags = SA_SIGINFO;
+ trap_sa.sa_sigaction = trap_signal_handler;
+ sigaction(SIGTRAP, &trap_sa, NULL);
+
+ struct sigaction usr1_sa;
+
+ usr1_sa.sa_flags = SA_SIGINFO;
+ usr1_sa.sa_sigaction = usr1_signal_handler;
+ sigaction(SIGUSR1, &usr1_sa, NULL);
+
+ /* Set only CPU 0 in the mask. Both threads will be bound to cpu 0. */
+ CPU_ZERO(&cpuset);
+ CPU_SET(0, &cpuset);
+
+ /* Init pthread attribute */
+ rc = pthread_attr_init(&attr);
+ if (rc)
+ pr_error(rc, "pthread_attr_init()");
+
+ /*
+ * Bind both the ping() and pong() threads to CPU 0 so they ping-pong,
+ * speeding up context switches on the ping() thread and hence the
+ * load_fp and load_vec overflow.
+ */
+ rc = pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset);
+ if (rc)
+ pr_error(rc, "pthread_attr_setaffinity()");
+
+ /* Figure out the machine endianness */
+ le = (int) *(uint8_t *)&k;
+
+ printf("%s machine detected. Checking if endianness flips %s",
+ le ? "Little-Endian" : "Big-Endian",
+ "inadvertently on trap in TM... ");
+
+ rc = fflush(0);
+ if (rc)
+ pr_error(rc, "fflush()");
+
+ /* Launch ping() */
+ rc = pthread_create(&t0_ping, &attr, ping, NULL);
+ if (rc)
+ pr_error(rc, "pthread_create()");
+
+ exit_from_pong = 0;
+
+ /* Launch pong() */
+ rc = pthread_create(&t1_pong, &attr, pong, NULL);
+ if (rc)
+ pr_error(rc, "pthread_create()");
+
+ rc = pthread_join(t0_ping, NULL);
+ if (rc)
+ pr_error(rc, "pthread_join()");
+
+ rc = pthread_join(t1_pong, NULL);
+ if (rc)
+ pr_error(rc, "pthread_join()");
+
+ if (success) {
+ printf("no.\n"); /* no, endianness did not flip inadvertently */
+ return EXIT_SUCCESS;
+ }
+
+ printf("yes!\n"); /* yes, endianness did flip inadvertently */
+ return EXIT_FAILURE;
+}
+
+int main(int argc, char **argv)
+{
+ return test_harness(tm_trap_test, "tm_trap_test");
+}
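For reference on the .long values in ping() above: the opposite-endianness words are simply the byte-swapped native encodings of tbegin. (0x7c00051d) and trap, i.e. tw 31,0,0 (0x7fe00008). A small sketch of that relationship (the PPC_INST_* values mirror the kernel's ppc-opcode.h but are defined locally here; illustrative only):

/* Illustrative check that the opposite-endian opcodes used in ping()
 * are the byte-swapped native encodings.
 */
#include <assert.h>

#define PPC_INST_TBEGIN 0x7c00051dU /* tbegin.   in native endianness */
#define PPC_INST_TRAP   0x7fe00008U /* tw 31,0,0 in native endianness */

int main(void)
{
	assert(__builtin_bswap32(PPC_INST_TBEGIN) == 0x1d05007cU); /* (3) */
	assert(__builtin_bswap32(PPC_INST_TRAP)   == 0x0800e07fU); /* (4) */
	return 0;
}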
diff --git a/tools/testing/selftests/powerpc/tm/tm-unavailable.c b/tools/testing/selftests/powerpc/tm/tm-unavailable.c
index 96c37f84ce54..e6a0fad2bfd0 100644
--- a/tools/testing/selftests/powerpc/tm/tm-unavailable.c
+++ b/tools/testing/selftests/powerpc/tm/tm-unavailable.c
@@ -15,6 +15,7 @@
*/
#define _GNU_SOURCE
+#include <error.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
@@ -33,6 +34,11 @@
#define VSX_UNA_EXCEPTION 2
#define NUM_EXCEPTIONS 3
+#define err_at_line(status, errnum, format, ...) \
+ error_at_line(status, errnum, __FILE__, __LINE__, format, ##__VA_ARGS__)
+
+#define pr_warn(code, format, ...) err_at_line(0, code, format, ##__VA_ARGS__)
+#define pr_err(code, format, ...) err_at_line(1, code, format, ##__VA_ARGS__)
struct Flags {
int touch_fp;
@@ -303,10 +309,19 @@ void test_fp_vec(int fp, int vec, pthread_attr_t *attr)
* checking if the failure cause is the one we expect.
*/
do {
+ int rc;
+
/* Bind 'ping' to CPU 0, as specified in 'attr'. */
- pthread_create(&t0, attr, ping, (void *) &flags);
- pthread_setname_np(t0, "ping");
- pthread_join(t0, &ret_value);
+ rc = pthread_create(&t0, attr, ping, (void *) &flags);
+ if (rc)
+ pr_err(rc, "pthread_create()");
+ rc = pthread_setname_np(t0, "ping");
+ if (rc)
+ pr_warn(rc, "pthread_setname_np");
+ rc = pthread_join(t0, &ret_value);
+ if (rc)
+ pr_err(rc, "pthread_join");
+
retries--;
} while (ret_value != NULL && retries);
@@ -320,7 +335,7 @@ void test_fp_vec(int fp, int vec, pthread_attr_t *attr)
int main(int argc, char **argv)
{
- int exception; /* FP = 0, VEC = 1, VSX = 2 */
+ int rc, exception; /* FP = 0, VEC = 1, VSX = 2 */
pthread_t t1;
pthread_attr_t attr;
cpu_set_t cpuset;
@@ -330,13 +345,23 @@ int main(int argc, char **argv)
CPU_SET(0, &cpuset);
/* Init pthread attribute. */
- pthread_attr_init(&attr);
+ rc = pthread_attr_init(&attr);
+ if (rc)
+ pr_err(rc, "pthread_attr_init()");
/* Set CPU 0 mask into the pthread attribute. */
- pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset);
-
- pthread_create(&t1, &attr /* Bind 'pong' to CPU 0 */, pong, NULL);
- pthread_setname_np(t1, "pong"); /* Name it for systemtap convenience */
+ rc = pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset);
+ if (rc)
+ pr_err(rc, "pthread_attr_setaffinity_np()");
+
+ rc = pthread_create(&t1, &attr /* Bind 'pong' to CPU 0 */, pong, NULL);
+ if (rc)
+ pr_err(rc, "pthread_create()");
+
+ /* Name it for systemtap convenience */
+ rc = pthread_setname_np(t1, "pong");
+ if (rc)
+ pr_warn(rc, "pthread_setname_np()");
flags.result = 0;
diff --git a/tools/testing/selftests/ptp/testptp.c b/tools/testing/selftests/ptp/testptp.c
index 5d2eae16f7ee..a5d8f0ab0da0 100644
--- a/tools/testing/selftests/ptp/testptp.c
+++ b/tools/testing/selftests/ptp/testptp.c
@@ -60,9 +60,7 @@ static int clock_adjtime(clockid_t id, struct timex *tx)
static clockid_t get_clockid(int fd)
{
#define CLOCKFD 3
-#define FD_TO_CLOCKID(fd) ((~(clockid_t) (fd) << 3) | CLOCKFD)
-
- return FD_TO_CLOCKID(fd);
+ return (((unsigned int) ~fd) << 3) | CLOCKFD;
}
static void handle_alarm(int s)
diff --git a/tools/testing/selftests/rcutorture/bin/config2frag.sh b/tools/testing/selftests/rcutorture/bin/config2frag.sh
deleted file mode 100755
index 56f51ae13d73..000000000000
--- a/tools/testing/selftests/rcutorture/bin/config2frag.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-# Usage: config2frag.sh < .config > configfrag
-#
-# Converts the "# CONFIG_XXX is not set" to "CONFIG_XXX=n" so that the
-# resulting file becomes a legitimate Kconfig fragment.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
-# Copyright (C) IBM Corporation, 2013
-#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-
-LANG=C sed -e 's/^# CONFIG_\([a-zA-Z0-9_]*\) is not set$/CONFIG_\1=n/'
diff --git a/tools/testing/selftests/rcutorture/bin/configinit.sh b/tools/testing/selftests/rcutorture/bin/configinit.sh
index 51f66a7ce876..c15f270e121d 100755
--- a/tools/testing/selftests/rcutorture/bin/configinit.sh
+++ b/tools/testing/selftests/rcutorture/bin/configinit.sh
@@ -51,7 +51,7 @@ then
mkdir $builddir
fi
else
- echo Bad build directory: \"$builddir\"
+ echo Bad build directory: \"$buildloc\"
exit 2
fi
fi
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-build.sh b/tools/testing/selftests/rcutorture/bin/kvm-build.sh
index fb66d0173638..34d126734cde 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-build.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-build.sh
@@ -29,11 +29,6 @@ then
exit 1
fi
builddir=${2}
-if test -z "$builddir" -o ! -d "$builddir" -o ! -w "$builddir"
-then
- echo "kvm-build.sh :$builddir: Not a writable directory, cannot build into it"
- exit 1
-fi
T=${TMPDIR-/tmp}/test-linux.sh.$$
trap 'rm -rf $T' 0
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh
index 43f764098e50..2de92f43ee8c 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh
@@ -23,7 +23,7 @@
# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
i="$1"
-if test -d $i
+if test -d "$i" -a -r "$i"
then
:
else
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
index 559e01ac86be..c2e1bb6d0cba 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
@@ -23,14 +23,14 @@
# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
i="$1"
-if test -d $i
+if test -d "$i" -a -r "$i"
then
:
else
echo Unreadable results directory: $i
exit 1
fi
-. tools/testing/selftests/rcutorture/bin/functions.sh
+. functions.sh
configfile=`echo $i | sed -e 's/^.*\///'`
ngps=`grep ver: $i/console.log 2> /dev/null | tail -1 | sed -e 's/^.* ver: //' -e 's/ .*$//'`
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh
index f79b0e9e84fc..963f71289d22 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh
@@ -26,7 +26,7 @@
# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
i="$1"
-. tools/testing/selftests/rcutorture/bin/functions.sh
+. functions.sh
if test "`grep -c 'rcu_exp_grace_period.*start' < $i/console.log`" -lt 100
then
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh
index 8f3121afc716..ccebf772fa1e 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh
@@ -23,7 +23,7 @@
# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
i="$1"
-if test -d $i
+if test -d "$i" -a -r "$i"
then
:
else
@@ -31,7 +31,7 @@ else
exit 1
fi
PATH=`pwd`/tools/testing/selftests/rcutorture/bin:$PATH; export PATH
-. tools/testing/selftests/rcutorture/bin/functions.sh
+. functions.sh
if kvm-recheck-rcuperf-ftrace.sh $i
then
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
index f659346d3358..f7e988f369dd 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
@@ -25,7 +25,7 @@
# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
PATH=`pwd`/tools/testing/selftests/rcutorture/bin:$PATH; export PATH
-. tools/testing/selftests/rcutorture/bin/functions.sh
+. functions.sh
for rd in "$@"
do
firsttime=1
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
index ab14b97c942c..1b78a12740e5 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
@@ -42,7 +42,7 @@ T=${TMPDIR-/tmp}/kvm-test-1-run.sh.$$
trap 'rm -rf $T' 0
mkdir $T
-. $KVM/bin/functions.sh
+. functions.sh
. $CONFIGFRAG/ver_functions.sh
config_template=${1}
@@ -154,9 +154,7 @@ cpu_count=`configfrag_boot_cpus "$boot_args" "$config_template" "$cpu_count"`
vcpus=`identify_qemu_vcpus`
if test $cpu_count -gt $vcpus
then
- echo CPU count limited from $cpu_count to $vcpus
- touch $resdir/Warnings
- echo CPU count limited from $cpu_count to $vcpus >> $resdir/Warnings
+ echo CPU count limited from $cpu_count to $vcpus | tee -a $resdir/Warnings
cpu_count=$vcpus
fi
qemu_args="`specify_qemu_cpus "$QEMU" "$qemu_args" "$cpu_count"`"
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
index ccd49e958fd2..7d1f607f0f76 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
@@ -1,8 +1,7 @@
#!/bin/bash
#
# Run a series of 14 tests under KVM. These are not particularly
-# well-selected or well-tuned, but are the current set. Run from the
-# top level of the source tree.
+# well-selected or well-tuned, but are the current set.
#
# Edit the definitions below to set the locations of the various directories,
# as well as the test duration.
@@ -34,6 +33,8 @@ T=${TMPDIR-/tmp}/kvm.sh.$$
trap 'rm -rf $T' 0
mkdir $T
+cd `dirname $scriptname`/../../../../../
+
dur=$((30*60))
dryrun=""
KVM="`pwd`/tools/testing/selftests/rcutorture"; export KVM
@@ -70,7 +71,7 @@ usage () {
echo " --kmake-arg kernel-make-arguments"
echo " --mac nn:nn:nn:nn:nn:nn"
echo " --no-initrd"
- echo " --qemu-args qemu-system-..."
+ echo " --qemu-args qemu-arguments"
echo " --qemu-cmd qemu-system-..."
echo " --results absolute-pathname"
echo " --torture rcu"
@@ -150,7 +151,7 @@ do
TORTURE_INITRD=""; export TORTURE_INITRD
;;
--qemu-args|--qemu-arg)
- checkarg --qemu-args "-qemu args" $# "$2" '^-' '^error'
+ checkarg --qemu-args "(qemu arguments)" $# "$2" '^-' '^error'
TORTURE_QEMU_ARG="$2"
shift
;;
@@ -238,7 +239,6 @@ BEGIN {
}
END {
- alldone = 0;
batch = 0;
nc = -1;
@@ -331,8 +331,7 @@ awk < $T/cfgcpu.pack \
# Dump out the scripting required to run one test batch.
function dump(first, pastlast, batchnum)
{
- print "echo ----Start batch " batchnum ": `date`";
- print "echo ----Start batch " batchnum ": `date` >> " rd "/log";
+ print "echo ----Start batch " batchnum ": `date` | tee -a " rd "log";
print "needqemurun="
jn=1
for (j = first; j < pastlast; j++) {
@@ -349,21 +348,18 @@ function dump(first, pastlast, batchnum)
ovf = "-ovf";
else
ovf = "";
- print "echo ", cfr[jn], cpusr[jn] ovf ": Starting build. `date`";
- print "echo ", cfr[jn], cpusr[jn] ovf ": Starting build. `date` >> " rd "/log";
+ print "echo ", cfr[jn], cpusr[jn] ovf ": Starting build. `date` | tee -a " rd "log";
print "rm -f " builddir ".*";
print "touch " builddir ".wait";
print "mkdir " builddir " > /dev/null 2>&1 || :";
print "mkdir " rd cfr[jn] " || :";
print "kvm-test-1-run.sh " CONFIGDIR cf[j], builddir, rd cfr[jn], dur " \"" TORTURE_QEMU_ARG "\" \"" TORTURE_BOOTARGS "\" > " rd cfr[jn] "/kvm-test-1-run.sh.out 2>&1 &"
- print "echo ", cfr[jn], cpusr[jn] ovf ": Waiting for build to complete. `date`";
- print "echo ", cfr[jn], cpusr[jn] ovf ": Waiting for build to complete. `date` >> " rd "/log";
+ print "echo ", cfr[jn], cpusr[jn] ovf ": Waiting for build to complete. `date` | tee -a " rd "log";
print "while test -f " builddir ".wait"
print "do"
print "\tsleep 1"
print "done"
- print "echo ", cfr[jn], cpusr[jn] ovf ": Build complete. `date`";
- print "echo ", cfr[jn], cpusr[jn] ovf ": Build complete. `date` >> " rd "/log";
+ print "echo ", cfr[jn], cpusr[jn] ovf ": Build complete. `date` | tee -a " rd "log";
jn++;
}
for (j = 1; j < jn; j++) {
@@ -371,8 +367,7 @@ function dump(first, pastlast, batchnum)
print "rm -f " builddir ".ready"
print "if test -f \"" rd cfr[j] "/builtkernel\""
print "then"
- print "\techo ----", cfr[j], cpusr[j] ovf ": Kernel present. `date`";
- print "\techo ----", cfr[j], cpusr[j] ovf ": Kernel present. `date` >> " rd "/log";
+ print "\techo ----", cfr[j], cpusr[j] ovf ": Kernel present. `date` | tee -a " rd "log";
print "\tneedqemurun=1"
print "fi"
}
@@ -386,31 +381,26 @@ function dump(first, pastlast, batchnum)
njitter = ja[1];
if (TORTURE_BUILDONLY && njitter != 0) {
njitter = 0;
- print "echo Build-only run, so suppressing jitter >> " rd "/log"
+ print "echo Build-only run, so suppressing jitter | tee -a " rd "log"
}
if (TORTURE_BUILDONLY) {
print "needqemurun="
}
print "if test -n \"$needqemurun\""
print "then"
- print "\techo ---- Starting kernels. `date`";
- print "\techo ---- Starting kernels. `date` >> " rd "/log";
+ print "\techo ---- Starting kernels. `date` | tee -a " rd "log";
for (j = 0; j < njitter; j++)
print "\tjitter.sh " j " " dur " " ja[2] " " ja[3] "&"
print "\twait"
- print "\techo ---- All kernel runs complete. `date`";
- print "\techo ---- All kernel runs complete. `date` >> " rd "/log";
+ print "\techo ---- All kernel runs complete. `date` | tee -a " rd "log";
print "else"
print "\twait"
- print "\techo ---- No kernel runs. `date`";
- print "\techo ---- No kernel runs. `date` >> " rd "/log";
+ print "\techo ---- No kernel runs. `date` | tee -a " rd "log";
print "fi"
for (j = 1; j < jn; j++) {
builddir=KVM "/b" j
- print "echo ----", cfr[j], cpusr[j] ovf ": Build/run results:";
- print "echo ----", cfr[j], cpusr[j] ovf ": Build/run results: >> " rd "/log";
- print "cat " rd cfr[j] "/kvm-test-1-run.sh.out";
- print "cat " rd cfr[j] "/kvm-test-1-run.sh.out >> " rd "/log";
+ print "echo ----", cfr[j], cpusr[j] ovf ": Build/run results: | tee -a " rd "log";
+ print "cat " rd cfr[j] "/kvm-test-1-run.sh.out | tee -a " rd "log";
}
}
diff --git a/tools/testing/selftests/rcutorture/bin/parse-torture.sh b/tools/testing/selftests/rcutorture/bin/parse-torture.sh
index f12c38909b00..5987e50cfeb4 100755
--- a/tools/testing/selftests/rcutorture/bin/parse-torture.sh
+++ b/tools/testing/selftests/rcutorture/bin/parse-torture.sh
@@ -55,7 +55,7 @@ then
exit
fi
-grep --binary-files=text 'torture:.*ver:' $file | grep --binary-files=text -v '(null)' | sed -e 's/^(initramfs)[^]]*] //' -e 's/^\[[^]]*] //' |
+grep --binary-files=text 'torture:.*ver:' $file | egrep --binary-files=text -v '\(null\)|rtc: 000000000* ' | sed -e 's/^(initramfs)[^]]*] //' -e 's/^\[[^]]*] //' |
awk '
BEGIN {
ver = 0;
diff --git a/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh
index 252aae618984..80eb646e1319 100644
--- a/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh
+++ b/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh
@@ -38,6 +38,5 @@ per_version_boot_params () {
echo $1 `locktorture_param_onoff "$1" "$2"` \
locktorture.stat_interval=15 \
locktorture.shutdown_secs=$3 \
- locktorture.torture_runnable=1 \
locktorture.verbose=1
}
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
index ffb85ed786fa..24ec91041957 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
+++ b/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
@@ -51,7 +51,6 @@ per_version_boot_params () {
`rcutorture_param_n_barrier_cbs "$1"` \
rcutorture.stat_interval=15 \
rcutorture.shutdown_secs=$3 \
- rcutorture.torture_runnable=1 \
rcutorture.test_no_idle_hz=1 \
rcutorture.verbose=1
}
diff --git a/tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh
index 34f2a1b35ee5..b9603115d7c7 100644
--- a/tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh
+++ b/tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh
@@ -46,7 +46,6 @@ rcuperf_param_nwriters () {
per_version_boot_params () {
echo $1 `rcuperf_param_nreaders "$1"` \
`rcuperf_param_nwriters "$1"` \
- rcuperf.perf_runnable=1 \
rcuperf.shutdown=1 \
rcuperf.verbose=1
}
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 92db48825dc1..5df609950a66 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -1726,7 +1726,7 @@ void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee,
if (nr == __NR_getpid)
change_syscall(_metadata, tracee, __NR_getppid);
- if (nr == __NR_open)
+ if (nr == __NR_openat)
change_syscall(_metadata, tracee, -1);
}
@@ -1801,7 +1801,7 @@ TEST_F(TRACE_syscall, ptrace_syscall_dropped)
true);
/* Tracer should skip the open syscall, resulting in EPERM. */
- EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_open));
+ EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_openat));
}
TEST_F(TRACE_syscall, syscall_allowed)
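The switch from __NR_open to __NR_openat above reflects that modern C libraries implement open() via the openat syscall (and some newer architectures only wire up openat), so the tracer has to intercept openat for the test to observe anything. A hedged equivalence sketch (illustrative only):

/* Illustrative only: open(path, flags) behaves like
 * openat(AT_FDCWD, path, flags), which is the syscall the tracer
 * now rewrites.
 */
#include <fcntl.h>

int open_via_openat(const char *path, int flags)
{
	return openat(AT_FDCWD, path, flags);
}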
diff --git a/tools/testing/selftests/vm/.gitignore b/tools/testing/selftests/vm/.gitignore
index 1ca2ee4d15b9..63c94d776e89 100644
--- a/tools/testing/selftests/vm/.gitignore
+++ b/tools/testing/selftests/vm/.gitignore
@@ -10,3 +10,4 @@ userfaultfd
mlock-intersect-test
mlock-random-test
virtual_address_range
+gup_benchmark
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index 7f45806bd863..fdefa2295ddc 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -8,17 +8,18 @@ endif
CFLAGS = -Wall -I ../../../../usr/include $(EXTRA_CFLAGS)
LDLIBS = -lrt
TEST_GEN_FILES = compaction_test
+TEST_GEN_FILES += gup_benchmark
TEST_GEN_FILES += hugepage-mmap
TEST_GEN_FILES += hugepage-shm
TEST_GEN_FILES += map_hugetlb
+TEST_GEN_FILES += mlock-random-test
TEST_GEN_FILES += mlock2-tests
TEST_GEN_FILES += on-fault-limit
TEST_GEN_FILES += thuge-gen
TEST_GEN_FILES += transhuge-stress
TEST_GEN_FILES += userfaultfd
-TEST_GEN_FILES += mlock-random-test
+TEST_GEN_FILES += va_128TBswitch
TEST_GEN_FILES += virtual_address_range
-TEST_GEN_FILES += gup_benchmark
TEST_PROGS := run_vmtests
diff --git a/tools/testing/selftests/vm/compaction_test.c b/tools/testing/selftests/vm/compaction_test.c
index a65b016d4c13..1097f04e4d80 100644
--- a/tools/testing/selftests/vm/compaction_test.c
+++ b/tools/testing/selftests/vm/compaction_test.c
@@ -137,6 +137,8 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
printf("No of huge pages allocated = %d\n",
(atoi(nr_hugepages)));
+ lseek(fd, 0, SEEK_SET);
+
if (write(fd, initial_nr_hugepages, strlen(initial_nr_hugepages))
!= strlen(initial_nr_hugepages)) {
perror("Failed to write value to /proc/sys/vm/nr_hugepages\n");
diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests
index cc826326de87..d2561895a021 100755
--- a/tools/testing/selftests/vm/run_vmtests
+++ b/tools/testing/selftests/vm/run_vmtests
@@ -177,4 +177,15 @@ else
echo "[PASS]"
fi
+echo "-----------------------------"
+echo "running virtual address 128TB switch test"
+echo "-----------------------------"
+./va_128TBswitch
+if [ $? -ne 0 ]; then
+ echo "[FAIL]"
+ exitcode=1
+else
+ echo "[PASS]"
+fi
+
exit $exitcode
diff --git a/tools/testing/selftests/vm/va_128TBswitch.c b/tools/testing/selftests/vm/va_128TBswitch.c
new file mode 100644
index 000000000000..e7fe734c374f
--- /dev/null
+++ b/tools/testing/selftests/vm/va_128TBswitch.c
@@ -0,0 +1,297 @@
+/*
+ *
+ * Authors: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+ * Authors: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+
+#include <stdio.h>
+#include <sys/mman.h>
+#include <string.h>
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+
+#ifdef __powerpc64__
+#define PAGE_SIZE (64 << 10)
+/*
+ * This will work with 16M and 2M hugepage size
+ */
+#define HUGETLB_SIZE (16 << 20)
+#else
+#define PAGE_SIZE (4 << 10)
+#define HUGETLB_SIZE (2 << 20)
+#endif
+
+/*
+ * >= 128TB is the hint addr value we use to select the
+ * large address space.
+ */
+#define ADDR_SWITCH_HINT (1UL << 47)
+#define LOW_ADDR ((void *) (1UL << 30))
+#define HIGH_ADDR ((void *) (1UL << 48))
+
+struct testcase {
+ void *addr;
+ unsigned long size;
+ unsigned long flags;
+ const char *msg;
+ unsigned int low_addr_required:1;
+ unsigned int keep_mapped:1;
+};
+
+static struct testcase testcases[] = {
+ {
+ /*
+ * If stack is moved, we could possibly allocate
+ * this at the requested address.
+ */
+ .addr = ((void *)(ADDR_SWITCH_HINT - PAGE_SIZE)),
+ .size = PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(ADDR_SWITCH_HINT - PAGE_SIZE, PAGE_SIZE)",
+ .low_addr_required = 1,
+ },
+ {
+ /*
+ * We should never allocate at the requested address or above it.
+ * The length crosses the 128TB boundary. Without MAP_FIXED
+ * we will always search in the lower address space.
+ */
+ .addr = ((void *)(ADDR_SWITCH_HINT - PAGE_SIZE)),
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(ADDR_SWITCH_HINT - PAGE_SIZE, (2 * PAGE_SIZE))",
+ .low_addr_required = 1,
+ },
+ {
+ /*
+ * Exact mapping at 128TB; since the area is free we should get it
+ * even without MAP_FIXED.
+ */
+ .addr = ((void *)(ADDR_SWITCH_HINT)),
+ .size = PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(ADDR_SWITCH_HINT, PAGE_SIZE)",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = (void *)(ADDR_SWITCH_HINT),
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ .msg = "mmap(ADDR_SWITCH_HINT, 2 * PAGE_SIZE, MAP_FIXED)",
+ },
+ {
+ .addr = NULL,
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(NULL)",
+ .low_addr_required = 1,
+ },
+ {
+ .addr = LOW_ADDR,
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(LOW_ADDR)",
+ .low_addr_required = 1,
+ },
+ {
+ .addr = HIGH_ADDR,
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(HIGH_ADDR)",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = HIGH_ADDR,
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(HIGH_ADDR) again",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = HIGH_ADDR,
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ .msg = "mmap(HIGH_ADDR, MAP_FIXED)",
+ },
+ {
+ .addr = (void *) -1,
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(-1)",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = (void *) -1,
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(-1) again",
+ },
+ {
+ .addr = ((void *)(ADDR_SWITCH_HINT - PAGE_SIZE)),
+ .size = PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(ADDR_SWITCH_HINT - PAGE_SIZE, PAGE_SIZE)",
+ .low_addr_required = 1,
+ },
+ {
+ .addr = (void *)(ADDR_SWITCH_HINT - PAGE_SIZE),
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(ADDR_SWITCH_HINT - PAGE_SIZE, 2 * PAGE_SIZE)",
+ .low_addr_required = 1,
+ .keep_mapped = 1,
+ },
+ {
+ .addr = (void *)(ADDR_SWITCH_HINT - PAGE_SIZE / 2),
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(ADDR_SWITCH_HINT - PAGE_SIZE/2 , 2 * PAGE_SIZE)",
+ .low_addr_required = 1,
+ .keep_mapped = 1,
+ },
+ {
+ .addr = ((void *)(ADDR_SWITCH_HINT)),
+ .size = PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(ADDR_SWITCH_HINT, PAGE_SIZE)",
+ },
+ {
+ .addr = (void *)(ADDR_SWITCH_HINT),
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ .msg = "mmap(ADDR_SWITCH_HINT, 2 * PAGE_SIZE, MAP_FIXED)",
+ },
+};
+
+static struct testcase hugetlb_testcases[] = {
+ {
+ .addr = NULL,
+ .size = HUGETLB_SIZE,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(NULL, MAP_HUGETLB)",
+ .low_addr_required = 1,
+ },
+ {
+ .addr = LOW_ADDR,
+ .size = HUGETLB_SIZE,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(LOW_ADDR, MAP_HUGETLB)",
+ .low_addr_required = 1,
+ },
+ {
+ .addr = HIGH_ADDR,
+ .size = HUGETLB_SIZE,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(HIGH_ADDR, MAP_HUGETLB)",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = HIGH_ADDR,
+ .size = HUGETLB_SIZE,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(HIGH_ADDR, MAP_HUGETLB) again",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = HIGH_ADDR,
+ .size = HUGETLB_SIZE,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ .msg = "mmap(HIGH_ADDR, MAP_FIXED | MAP_HUGETLB)",
+ },
+ {
+ .addr = (void *) -1,
+ .size = HUGETLB_SIZE,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(-1, MAP_HUGETLB)",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = (void *) -1,
+ .size = HUGETLB_SIZE,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(-1, MAP_HUGETLB) again",
+ },
+ {
+ .addr = (void *)(ADDR_SWITCH_HINT - PAGE_SIZE),
+ .size = 2 * HUGETLB_SIZE,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(ADDR_SWITCH_HINT - PAGE_SIZE, 2*HUGETLB_SIZE, MAP_HUGETLB)",
+ .low_addr_required = 1,
+ .keep_mapped = 1,
+ },
+ {
+ .addr = (void *)(ADDR_SWITCH_HINT),
+ .size = 2 * HUGETLB_SIZE,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ .msg = "mmap(ADDR_SWITCH_HINT , 2*HUGETLB_SIZE, MAP_FIXED | MAP_HUGETLB)",
+ },
+};
+
+static int run_test(struct testcase *test, int count)
+{
+ void *p;
+ int i, ret = 0;
+
+ for (i = 0; i < count; i++) {
+ struct testcase *t = test + i;
+
+ p = mmap(t->addr, t->size, PROT_READ | PROT_WRITE, t->flags, -1, 0);
+
+ printf("%s: %p - ", t->msg, p);
+
+ if (p == MAP_FAILED) {
+ printf("FAILED\n");
+ ret = 1;
+ continue;
+ }
+
+ if (t->low_addr_required && p >= (void *)(ADDR_SWITCH_HINT)) {
+ printf("FAILED\n");
+ ret = 1;
+ } else {
+ /*
+ * Dereference the returned address so that we catch
+ * bugs in page fault handling.
+ */
+ memset(p, 0, t->size);
+ printf("OK\n");
+ }
+ if (!t->keep_mapped)
+ munmap(p, t->size);
+ }
+
+ return ret;
+}
+
+static int supported_arch(void)
+{
+#if defined(__powerpc64__)
+ return 1;
+#elif defined(__x86_64__)
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+int main(int argc, char **argv)
+{
+ int ret;
+
+ if (!supported_arch())
+ return 0;
+
+ ret = run_test(testcases, ARRAY_SIZE(testcases));
+ if (argc == 2 && !strcmp(argv[1], "--run-hugetlb"))
+ ret = run_test(hugetlb_testcases, ARRAY_SIZE(hugetlb_testcases));
+ return ret;
+}
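On the constants above: ADDR_SWITCH_HINT is 1UL << 47, the 128 TB boundary below which mmap() keeps allocations unless the caller hints at or above it (or forces it with MAP_FIXED), and HIGH_ADDR is 1UL << 48, well inside the large address space on both powerpc64 and x86_64. A tiny arithmetic sketch (illustrative only):

/* Illustrative only: the boundary arithmetic behind ADDR_SWITCH_HINT. */
#include <assert.h>

int main(void)
{
	unsigned long one_tb = 1UL << 40;

	assert((1UL << 47) / one_tb == 128); /* ADDR_SWITCH_HINT: 128 TB */
	assert((1UL << 48) / one_tb == 256); /* HIGH_ADDR:        256 TB */
	return 0;
}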
diff --git a/tools/testing/selftests/x86/5lvl.c b/tools/testing/selftests/x86/5lvl.c
deleted file mode 100644
index 2eafdcd4c2b3..000000000000
--- a/tools/testing/selftests/x86/5lvl.c
+++ /dev/null
@@ -1,177 +0,0 @@
-#include <stdio.h>
-#include <sys/mman.h>
-
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
-
-#define PAGE_SIZE 4096
-#define LOW_ADDR ((void *) (1UL << 30))
-#define HIGH_ADDR ((void *) (1UL << 50))
-
-struct testcase {
- void *addr;
- unsigned long size;
- unsigned long flags;
- const char *msg;
- unsigned int low_addr_required:1;
- unsigned int keep_mapped:1;
-};
-
-static struct testcase testcases[] = {
- {
- .addr = NULL,
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(NULL)",
- .low_addr_required = 1,
- },
- {
- .addr = LOW_ADDR,
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(LOW_ADDR)",
- .low_addr_required = 1,
- },
- {
- .addr = HIGH_ADDR,
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(HIGH_ADDR)",
- .keep_mapped = 1,
- },
- {
- .addr = HIGH_ADDR,
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(HIGH_ADDR) again",
- .keep_mapped = 1,
- },
- {
- .addr = HIGH_ADDR,
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
- .msg = "mmap(HIGH_ADDR, MAP_FIXED)",
- },
- {
- .addr = (void*) -1,
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(-1)",
- .keep_mapped = 1,
- },
- {
- .addr = (void*) -1,
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(-1) again",
- },
- {
- .addr = (void *)((1UL << 47) - PAGE_SIZE),
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap((1UL << 47), 2 * PAGE_SIZE)",
- .low_addr_required = 1,
- .keep_mapped = 1,
- },
- {
- .addr = (void *)((1UL << 47) - PAGE_SIZE / 2),
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap((1UL << 47), 2 * PAGE_SIZE / 2)",
- .low_addr_required = 1,
- .keep_mapped = 1,
- },
- {
- .addr = (void *)((1UL << 47) - PAGE_SIZE),
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
- .msg = "mmap((1UL << 47) - PAGE_SIZE, 2 * PAGE_SIZE, MAP_FIXED)",
- },
- {
- .addr = NULL,
- .size = 2UL << 20,
- .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(NULL, MAP_HUGETLB)",
- .low_addr_required = 1,
- },
- {
- .addr = LOW_ADDR,
- .size = 2UL << 20,
- .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(LOW_ADDR, MAP_HUGETLB)",
- .low_addr_required = 1,
- },
- {
- .addr = HIGH_ADDR,
- .size = 2UL << 20,
- .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(HIGH_ADDR, MAP_HUGETLB)",
- .keep_mapped = 1,
- },
- {
- .addr = HIGH_ADDR,
- .size = 2UL << 20,
- .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(HIGH_ADDR, MAP_HUGETLB) again",
- .keep_mapped = 1,
- },
- {
- .addr = HIGH_ADDR,
- .size = 2UL << 20,
- .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
- .msg = "mmap(HIGH_ADDR, MAP_FIXED | MAP_HUGETLB)",
- },
- {
- .addr = (void*) -1,
- .size = 2UL << 20,
- .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(-1, MAP_HUGETLB)",
- .keep_mapped = 1,
- },
- {
- .addr = (void*) -1,
- .size = 2UL << 20,
- .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(-1, MAP_HUGETLB) again",
- },
- {
- .addr = (void *)((1UL << 47) - PAGE_SIZE),
- .size = 4UL << 20,
- .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap((1UL << 47), 4UL << 20, MAP_HUGETLB)",
- .low_addr_required = 1,
- .keep_mapped = 1,
- },
- {
- .addr = (void *)((1UL << 47) - (2UL << 20)),
- .size = 4UL << 20,
- .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
- .msg = "mmap((1UL << 47) - (2UL << 20), 4UL << 20, MAP_FIXED | MAP_HUGETLB)",
- },
-};
-
-int main(int argc, char **argv)
-{
- int i;
- void *p;
-
- for (i = 0; i < ARRAY_SIZE(testcases); i++) {
- struct testcase *t = testcases + i;
-
- p = mmap(t->addr, t->size, PROT_NONE, t->flags, -1, 0);
-
- printf("%s: %p - ", t->msg, p);
-
- if (p == MAP_FAILED) {
- printf("FAILED\n");
- continue;
- }
-
- if (t->low_addr_required && p >= (void *)(1UL << 47))
- printf("FAILED\n");
- else
- printf("OK\n");
- if (!t->keep_mapped)
- munmap(p, t->size);
- }
- return 0;
-}
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index 939a337128db..d744991c0f4f 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -5,16 +5,26 @@ include ../lib.mk
.PHONY: all all_32 all_64 warn_32bit_failure clean
-TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall test_mremap_vdso \
- check_initial_reg_state sigreturn ldt_gdt iopl mpx-mini-test ioperm \
- protection_keys test_vdso
+UNAME_M := $(shell uname -m)
+CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32)
+CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c)
+
+TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt test_mremap_vdso \
+ check_initial_reg_state sigreturn iopl mpx-mini-test ioperm \
+ protection_keys test_vdso test_vsyscall
TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \
test_FCMOV test_FCOMI test_FISTTP \
vdso_restorer
-TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip 5lvl
+TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip
+# Some selftests require 32bit support enabled also on 64bit systems
+TARGETS_C_32BIT_NEEDED := ldt_gdt ptrace_syscall
-TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY)
+TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY) $(TARGETS_C_32BIT_NEEDED)
TARGETS_C_64BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_64BIT_ONLY)
+ifeq ($(CAN_BUILD_I386)$(CAN_BUILD_X86_64),11)
+TARGETS_C_64BIT_ALL += $(TARGETS_C_32BIT_NEEDED)
+endif
+
BINARIES_32 := $(TARGETS_C_32BIT_ALL:%=%_32)
BINARIES_64 := $(TARGETS_C_64BIT_ALL:%=%_64)
@@ -23,18 +33,28 @@ BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64))
CFLAGS := -O2 -g -std=gnu99 -pthread -Wall -no-pie
-UNAME_M := $(shell uname -m)
-CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32)
-CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c)
+define gen-target-rule-32
+$(1) $(1)_32: $(OUTPUT)/$(1)_32
+.PHONY: $(1) $(1)_32
+endef
+
+define gen-target-rule-64
+$(1) $(1)_64: $(OUTPUT)/$(1)_64
+.PHONY: $(1) $(1)_64
+endef
ifeq ($(CAN_BUILD_I386),1)
all: all_32
TEST_PROGS += $(BINARIES_32)
+EXTRA_CFLAGS += -DCAN_BUILD_32
+$(foreach t,$(TARGETS_C_32BIT_ALL),$(eval $(call gen-target-rule-32,$(t))))
endif
ifeq ($(CAN_BUILD_X86_64),1)
all: all_64
TEST_PROGS += $(BINARIES_64)
+EXTRA_CFLAGS += -DCAN_BUILD_64
+$(foreach t,$(TARGETS_C_64BIT_ALL),$(eval $(call gen-target-rule-64,$(t))))
endif
all_32: $(BINARIES_32)
diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c
index 66e5ce5b91f0..1aef72df20a1 100644
--- a/tools/testing/selftests/x86/ldt_gdt.c
+++ b/tools/testing/selftests/x86/ldt_gdt.c
@@ -122,8 +122,7 @@ static void check_valid_segment(uint16_t index, int ldt,
* NB: Different Linux versions do different things with the
* accessed bit in set_thread_area().
*/
- if (ar != expected_ar &&
- (ldt || ar != (expected_ar | AR_ACCESSED))) {
+ if (ar != expected_ar && ar != (expected_ar | AR_ACCESSED)) {
printf("[FAIL]\t%s entry %hu has AR 0x%08X but expected 0x%08X\n",
(ldt ? "LDT" : "GDT"), index, ar, expected_ar);
nerrs++;
@@ -627,13 +626,10 @@ static void do_multicpu_tests(void)
static int finish_exec_test(void)
{
/*
- * In a sensible world, this would be check_invalid_segment(0, 1);
- * For better or for worse, though, the LDT is inherited across exec.
- * We can probably change this safely, but for now we test it.
+ * Older kernel versions did inherit the LDT on exec() which is
+ * wrong because exec() starts from a clean state.
*/
- check_valid_segment(0, 1,
- AR_DPL3 | AR_TYPE_XRCODE | AR_S | AR_P | AR_DB,
- 42, true);
+ check_invalid_segment(0, 1);
return nerrs ? 1 : 0;
}
diff --git a/tools/testing/selftests/x86/mpx-mini-test.c b/tools/testing/selftests/x86/mpx-mini-test.c
index ec0f6b45ce8b..9c0325e1ea68 100644
--- a/tools/testing/selftests/x86/mpx-mini-test.c
+++ b/tools/testing/selftests/x86/mpx-mini-test.c
@@ -315,11 +315,39 @@ static inline void *__si_bounds_upper(siginfo_t *si)
return si->si_upper;
}
#else
+
+/*
+ * This deals with the old version of _sigfault shipped by some distros:
+ *
+
+old _sigfault:
+ struct {
+ void *si_addr;
+ } _sigfault;
+
+new _sigfault:
+ struct {
+ void __user *_addr;
+ int _trapno;
+ short _addr_lsb;
+ union {
+ struct {
+ void __user *_lower;
+ void __user *_upper;
+ } _addr_bnd;
+ __u32 _pkey;
+ };
+ } _sigfault;
+ *
+ */
+
static inline void **__si_bounds_hack(siginfo_t *si)
{
void *sigfault = &si->_sifields._sigfault;
void *end_sigfault = sigfault + sizeof(si->_sifields._sigfault);
- void **__si_lower = end_sigfault;
+ int *trapno = (int*)end_sigfault;
+ /* skip _trapno and _addr_lsb */
+ void **__si_lower = (void**)(trapno + 2);
return __si_lower;
}
@@ -331,7 +359,7 @@ static inline void *__si_bounds_lower(siginfo_t *si)
static inline void *__si_bounds_upper(siginfo_t *si)
{
- return (*__si_bounds_hack(si)) + sizeof(void *);
+ return *(__si_bounds_hack(si) + 1);
}
#endif
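The hack above locates _lower/_upper by stepping past the end of the old single-member _sigfault and then skipping _trapno and _addr_lsb, which on LP64 amounts to two ints before the pointer-aligned _addr_bnd. A standalone mock of that layout (field names follow the comment above; the struct is illustrative, not the real siginfo):

/* Illustrative mock of the new _sigfault layout the hack relies on
 * (LP64 assumed: 8-byte pointers, 4-byte int, 2-byte short + padding).
 */
#include <assert.h>
#include <stddef.h>

struct mock_sigfault {
	void *_addr;		/* only member declared by the old headers */
	int _trapno;
	short _addr_lsb;
	struct {
		void *_lower;
		void *_upper;
	} _addr_bnd;
};

int main(void)
{
	/* "end_sigfault" in the old single-member view == end of _addr */
	size_t end_old = offsetof(struct mock_sigfault, _trapno);

	/* "trapno + 2" in the code above == end_old + 2 * sizeof(int) */
	assert(offsetof(struct mock_sigfault, _addr_bnd._lower) ==
	       end_old + 2 * sizeof(int));
	return 0;
}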
diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
index bc1b0735bb50..f15aa5a76fe3 100644
--- a/tools/testing/selftests/x86/protection_keys.c
+++ b/tools/testing/selftests/x86/protection_keys.c
@@ -393,34 +393,6 @@ pid_t fork_lazy_child(void)
return forkret;
}
-void davecmp(void *_a, void *_b, int len)
-{
- int i;
- unsigned long *a = _a;
- unsigned long *b = _b;
-
- for (i = 0; i < len / sizeof(*a); i++) {
- if (a[i] == b[i])
- continue;
-
- dprintf3("[%3d]: a: %016lx b: %016lx\n", i, a[i], b[i]);
- }
-}
-
-void dumpit(char *f)
-{
- int fd = open(f, O_RDONLY);
- char buf[100];
- int nr_read;
-
- dprintf2("maps fd: %d\n", fd);
- do {
- nr_read = read(fd, &buf[0], sizeof(buf));
- write(1, buf, nr_read);
- } while (nr_read > 0);
- close(fd);
-}
-
#define PKEY_DISABLE_ACCESS 0x1
#define PKEY_DISABLE_WRITE 0x2
diff --git a/tools/testing/selftests/x86/single_step_syscall.c b/tools/testing/selftests/x86/single_step_syscall.c
index a48da95c18fd..ddfdd635de16 100644
--- a/tools/testing/selftests/x86/single_step_syscall.c
+++ b/tools/testing/selftests/x86/single_step_syscall.c
@@ -119,7 +119,9 @@ static void check_result(void)
int main()
{
+#ifdef CAN_BUILD_32
int tmp;
+#endif
sethandler(SIGTRAP, sigtrap, 0);
@@ -139,12 +141,13 @@ int main()
: : "c" (post_nop) : "r11");
check_result();
#endif
-
+#ifdef CAN_BUILD_32
printf("[RUN]\tSet TF and check int80\n");
set_eflags(get_eflags() | X86_EFLAGS_TF);
asm volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid)
: INT80_CLOBBERS);
check_result();
+#endif
/*
* This test is particularly interesting if fast syscalls use
diff --git a/tools/testing/selftests/x86/test_mremap_vdso.c b/tools/testing/selftests/x86/test_mremap_vdso.c
index bf0d687c7db7..64f11c8d9b76 100644
--- a/tools/testing/selftests/x86/test_mremap_vdso.c
+++ b/tools/testing/selftests/x86/test_mremap_vdso.c
@@ -90,8 +90,12 @@ int main(int argc, char **argv, char **envp)
vdso_size += PAGE_SIZE;
}
+#ifdef __i386__
/* Glibc is likely to explode now - exit with raw syscall */
asm volatile ("int $0x80" : : "a" (__NR_exit), "b" (!!ret));
+#else /* __x86_64__ */
+ syscall(SYS_exit, ret);
+#endif
} else {
int status;
diff --git a/tools/testing/selftests/x86/test_vdso.c b/tools/testing/selftests/x86/test_vdso.c
index 29973cde06d3..235259011704 100644
--- a/tools/testing/selftests/x86/test_vdso.c
+++ b/tools/testing/selftests/x86/test_vdso.c
@@ -26,20 +26,59 @@
# endif
#endif
+/* max length of lines in /proc/self/maps - anything longer is skipped here */
+#define MAPS_LINE_LEN 128
+
int nerrs = 0;
+typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
+
+getcpu_t vgetcpu;
+getcpu_t vdso_getcpu;
+
+static void *vsyscall_getcpu(void)
+{
#ifdef __x86_64__
-# define VSYS(x) (x)
+ FILE *maps;
+ char line[MAPS_LINE_LEN];
+ bool found = false;
+
+ maps = fopen("/proc/self/maps", "r");
+ if (!maps) /* might still be present, but ignore it here, as we test vDSO not vsyscall */
+ return NULL;
+
+ while (fgets(line, MAPS_LINE_LEN, maps)) {
+ char r, x;
+ void *start, *end;
+ char name[MAPS_LINE_LEN];
+
+ /* sscanf() is safe here as sizeof(name) >= sizeof(line) */
+ if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s",
+ &start, &end, &r, &x, name) != 5)
+ continue;
+
+ if (strcmp(name, "[vsyscall]"))
+ continue;
+
+ /* assume entries are OK, as we test vDSO here not vsyscall */
+ found = true;
+ break;
+ }
+
+ fclose(maps);
+
+ if (!found) {
+ printf("Warning: failed to find vsyscall getcpu\n");
+ return NULL;
+ }
+ return (void *) (0xffffffffff600800);
#else
-# define VSYS(x) 0
+ return NULL;
#endif
+}
-typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
-
-const getcpu_t vgetcpu = (getcpu_t)VSYS(0xffffffffff600800);
-getcpu_t vdso_getcpu;
-void fill_function_pointers()
+static void fill_function_pointers()
{
void *vdso = dlopen("linux-vdso.so.1",
RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
@@ -54,6 +93,8 @@ void fill_function_pointers()
vdso_getcpu = (getcpu_t)dlsym(vdso, "__vdso_getcpu");
if (!vdso_getcpu)
printf("Warning: failed to find getcpu in vDSO\n");
+
+ vgetcpu = (getcpu_t) vsyscall_getcpu();
}
static long sys_getcpu(unsigned * cpu, unsigned * node,
diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c
new file mode 100644
index 000000000000..be81621446f0
--- /dev/null
+++ b/tools/testing/selftests/x86/test_vsyscall.c
@@ -0,0 +1,505 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <sys/time.h>
+#include <time.h>
+#include <stdlib.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+#include <dlfcn.h>
+#include <string.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <sys/ucontext.h>
+#include <errno.h>
+#include <err.h>
+#include <sched.h>
+#include <stdbool.h>
+#include <setjmp.h>
+
+#ifdef __x86_64__
+# define VSYS(x) (x)
+#else
+# define VSYS(x) 0
+#endif
+
+#ifndef SYS_getcpu
+# ifdef __x86_64__
+# define SYS_getcpu 309
+# else
+# define SYS_getcpu 318
+# endif
+#endif
+
+/* max length of lines in /proc/self/maps - anything longer is skipped here */
+#define MAPS_LINE_LEN 128
+
+static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
+ int flags)
+{
+ struct sigaction sa;
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_sigaction = handler;
+ sa.sa_flags = SA_SIGINFO | flags;
+ sigemptyset(&sa.sa_mask);
+ if (sigaction(sig, &sa, 0))
+ err(1, "sigaction");
+}
+
+/* vsyscalls and vDSO */
+bool should_read_vsyscall = false;
+
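+/* Legacy vsyscall entries live at fixed offsets: gettimeofday +0x0, time +0x400, getcpu +0x800. */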
+typedef long (*gtod_t)(struct timeval *tv, struct timezone *tz);
+gtod_t vgtod = (gtod_t)VSYS(0xffffffffff600000);
+gtod_t vdso_gtod;
+
+typedef int (*vgettime_t)(clockid_t, struct timespec *);
+vgettime_t vdso_gettime;
+
+typedef long (*time_func_t)(time_t *t);
+time_func_t vtime = (time_func_t)VSYS(0xffffffffff600400);
+time_func_t vdso_time;
+
+typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
+getcpu_t vgetcpu = (getcpu_t)VSYS(0xffffffffff600800);
+getcpu_t vdso_getcpu;
+
+static void init_vdso(void)
+{
+ void *vdso = dlopen("linux-vdso.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
+ if (!vdso)
+ vdso = dlopen("linux-gate.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
+ if (!vdso) {
+ printf("[WARN]\tfailed to find vDSO\n");
+ return;
+ }
+
+ vdso_gtod = (gtod_t)dlsym(vdso, "__vdso_gettimeofday");
+ if (!vdso_gtod)
+ printf("[WARN]\tfailed to find gettimeofday in vDSO\n");
+
+ vdso_gettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime");
+ if (!vdso_gettime)
+ printf("[WARN]\tfailed to find clock_gettime in vDSO\n");
+
+ vdso_time = (time_func_t)dlsym(vdso, "__vdso_time");
+ if (!vdso_time)
+ printf("[WARN]\tfailed to find time in vDSO\n");
+
+ vdso_getcpu = (getcpu_t)dlsym(vdso, "__vdso_getcpu");
+ if (!vdso_getcpu) {
+ /* getcpu() was never wired up in the 32-bit vDSO. */
+ printf("[%s]\tfailed to find getcpu in vDSO\n",
+ sizeof(long) == 8 ? "WARN" : "NOTE");
+ }
+}
+
+static int init_vsys(void)
+{
+#ifdef __x86_64__
+ int nerrs = 0;
+ FILE *maps;
+ char line[MAPS_LINE_LEN];
+ bool found = false;
+
+ maps = fopen("/proc/self/maps", "r");
+ if (!maps) {
+ printf("[WARN]\tCould not open /proc/self/maps -- assuming vsyscall is r-x\n");
+ should_read_vsyscall = true;
+ return 0;
+ }
+
+ while (fgets(line, MAPS_LINE_LEN, maps)) {
+ char r, x;
+ void *start, *end;
+ char name[MAPS_LINE_LEN];
+
+  /* sscanf() is safe here: name is as large as line, so %s cannot overflow it */
+ if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s",
+ &start, &end, &r, &x, name) != 5)
+ continue;
+
+ if (strcmp(name, "[vsyscall]"))
+ continue;
+
+ printf("\tvsyscall map: %s", line);
+
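+  /* the vsyscall mapping is always a single page at a fixed address */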
+ if (start != (void *)0xffffffffff600000 ||
+ end != (void *)0xffffffffff601000) {
+ printf("[FAIL]\taddress range is nonsense\n");
+ nerrs++;
+ }
+
+ printf("\tvsyscall permissions are %c-%c\n", r, x);
+ should_read_vsyscall = (r == 'r');
+ if (x != 'x') {
+ vgtod = NULL;
+ vtime = NULL;
+ vgetcpu = NULL;
+ }
+
+ found = true;
+ break;
+ }
+
+ fclose(maps);
+
+ if (!found) {
+ printf("\tno vsyscall map in /proc/self/maps\n");
+ should_read_vsyscall = false;
+ vgtod = NULL;
+ vtime = NULL;
+ vgetcpu = NULL;
+ }
+
+ return nerrs;
+#else
+ return 0;
+#endif
+}
+
+/* syscalls */
+static inline long sys_gtod(struct timeval *tv, struct timezone *tz)
+{
+ return syscall(SYS_gettimeofday, tv, tz);
+}
+
+static inline int sys_clock_gettime(clockid_t id, struct timespec *ts)
+{
+ return syscall(SYS_clock_gettime, id, ts);
+}
+
+static inline long sys_time(time_t *t)
+{
+ return syscall(SYS_time, t);
+}
+
+static inline long sys_getcpu(unsigned *cpu, unsigned *node,
+       void *cache)
+{
+ return syscall(SYS_getcpu, cpu, node, cache);
+}
+
+static jmp_buf jmpbuf;
+
+static void sigsegv(int sig, siginfo_t *info, void *ctx_void)
+{
+ siglongjmp(jmpbuf, 1);
+}
+
+static double tv_diff(const struct timeval *a, const struct timeval *b)
+{
+ return (double)(a->tv_sec - b->tv_sec) +
+ (double)((int)a->tv_usec - (int)b->tv_usec) * 1e-6;
+}
+
+static int check_gtod(const struct timeval *tv_sys1,
+ const struct timeval *tv_sys2,
+ const struct timezone *tz_sys,
+ const char *which,
+ const struct timeval *tv_other,
+ const struct timezone *tz_other)
+{
+ int nerrs = 0;
+ double d1, d2;
+
+ if (tz_other && (tz_sys->tz_minuteswest != tz_other->tz_minuteswest || tz_sys->tz_dsttime != tz_other->tz_dsttime)) {
+ printf("[FAIL] %s tz mismatch\n", which);
+ nerrs++;
+ }
+
+ d1 = tv_diff(tv_other, tv_sys1);
+ d2 = tv_diff(tv_sys2, tv_other);
+ printf("\t%s time offsets: %lf %lf\n", which, d1, d2);
+
+ if (d1 < 0 || d2 < 0) {
+ printf("[FAIL]\t%s time was inconsistent with the syscall\n", which);
+ nerrs++;
+ } else {
+ printf("[OK]\t%s gettimeofday()'s timeval was okay\n", which);
+ }
+
+ return nerrs;
+}
+
+static int test_gtod(void)
+{
+ struct timeval tv_sys1, tv_sys2, tv_vdso, tv_vsys;
+ struct timezone tz_sys, tz_vdso, tz_vsys;
+ long ret_vdso = -1;
+ long ret_vsys = -1;
+ int nerrs = 0;
+
+ printf("[RUN]\ttest gettimeofday()\n");
+
+ if (sys_gtod(&tv_sys1, &tz_sys) != 0)
+ err(1, "syscall gettimeofday");
+ if (vdso_gtod)
+ ret_vdso = vdso_gtod(&tv_vdso, &tz_vdso);
+ if (vgtod)
+ ret_vsys = vgtod(&tv_vsys, &tz_vsys);
+ if (sys_gtod(&tv_sys2, &tz_sys) != 0)
+ err(1, "syscall gettimeofday");
+
+ if (vdso_gtod) {
+ if (ret_vdso == 0) {
+ nerrs += check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vDSO", &tv_vdso, &tz_vdso);
+ } else {
+ printf("[FAIL]\tvDSO gettimeofday() failed: %ld\n", ret_vdso);
+ nerrs++;
+ }
+ }
+
+ if (vgtod) {
+ if (ret_vsys == 0) {
+ nerrs += check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vsyscall", &tv_vsys, &tz_vsys);
+ } else {
+ printf("[FAIL]\tvsys gettimeofday() failed: %ld\n", ret_vsys);
+ nerrs++;
+ }
+ }
+
+ return nerrs;
+}
+
+static int test_time(void)
+{
+ int nerrs = 0;
+
+ printf("[RUN]\ttest time()\n");
+ long t_sys1, t_sys2, t_vdso = 0, t_vsys = 0;
+ long t2_sys1 = -1, t2_sys2 = -1, t2_vdso = -1, t2_vsys = -1;
+ t_sys1 = sys_time(&t2_sys1);
+ if (vdso_time)
+ t_vdso = vdso_time(&t2_vdso);
+ if (vtime)
+ t_vsys = vtime(&t2_vsys);
+ t_sys2 = sys_time(&t2_sys2);
+ if (t_sys1 < 0 || t_sys1 != t2_sys1 || t_sys2 < 0 || t_sys2 != t2_sys2) {
+ printf("[FAIL]\tsyscall failed (ret1:%ld output1:%ld ret2:%ld output2:%ld)\n", t_sys1, t2_sys1, t_sys2, t2_sys2);
+ nerrs++;
+ return nerrs;
+ }
+
+ if (vdso_time) {
+ if (t_vdso < 0 || t_vdso != t2_vdso) {
+ printf("[FAIL]\tvDSO failed (ret:%ld output:%ld)\n", t_vdso, t2_vdso);
+ nerrs++;
+ } else if (t_vdso < t_sys1 || t_vdso > t_sys2) {
+ printf("[FAIL]\tvDSO returned the wrong time (%ld %ld %ld)\n", t_sys1, t_vdso, t_sys2);
+ nerrs++;
+ } else {
+ printf("[OK]\tvDSO time() is okay\n");
+ }
+ }
+
+ if (vtime) {
+ if (t_vsys < 0 || t_vsys != t2_vsys) {
+ printf("[FAIL]\tvsyscall failed (ret:%ld output:%ld)\n", t_vsys, t2_vsys);
+ nerrs++;
+ } else if (t_vsys < t_sys1 || t_vsys > t_sys2) {
+ printf("[FAIL]\tvsyscall returned the wrong time (%ld %ld %ld)\n", t_sys1, t_vsys, t_sys2);
+ nerrs++;
+ } else {
+ printf("[OK]\tvsyscall time() is okay\n");
+ }
+ }
+
+ return nerrs;
+}
+
+static int test_getcpu(int cpu)
+{
+ int nerrs = 0;
+ long ret_sys, ret_vdso = -1, ret_vsys = -1;
+
+ printf("[RUN]\tgetcpu() on CPU %d\n", cpu);
+
+ cpu_set_t cpuset;
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu, &cpuset);
+ if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0) {
+ printf("[SKIP]\tfailed to force CPU %d\n", cpu);
+ return nerrs;
+ }
+
+ unsigned cpu_sys, cpu_vdso, cpu_vsys, node_sys, node_vdso, node_vsys;
+ unsigned node = 0;
+ bool have_node = false;
+ ret_sys = sys_getcpu(&cpu_sys, &node_sys, 0);
+ if (vdso_getcpu)
+ ret_vdso = vdso_getcpu(&cpu_vdso, &node_vdso, 0);
+ if (vgetcpu)
+ ret_vsys = vgetcpu(&cpu_vsys, &node_vsys, 0);
+
+ if (ret_sys == 0) {
+ if (cpu_sys != cpu) {
+   printf("[FAIL]\tsyscall reported CPU %u but should be %d\n", cpu_sys, cpu);
+ nerrs++;
+ }
+
+ have_node = true;
+ node = node_sys;
+ }
+
+ if (vdso_getcpu) {
+ if (ret_vdso) {
+ printf("[FAIL]\tvDSO getcpu() failed\n");
+ nerrs++;
+ } else {
+ if (!have_node) {
+ have_node = true;
+ node = node_vdso;
+ }
+
+ if (cpu_vdso != cpu) {
+    printf("[FAIL]\tvDSO reported CPU %u but should be %d\n", cpu_vdso, cpu);
+ nerrs++;
+ } else {
+ printf("[OK]\tvDSO reported correct CPU\n");
+ }
+
+ if (node_vdso != node) {
+    printf("[FAIL]\tvDSO reported node %u but should be %u\n", node_vdso, node);
+ nerrs++;
+ } else {
+ printf("[OK]\tvDSO reported correct node\n");
+ }
+ }
+ }
+
+ if (vgetcpu) {
+ if (ret_vsys) {
+ printf("[FAIL]\tvsyscall getcpu() failed\n");
+ nerrs++;
+ } else {
+ if (!have_node) {
+ have_node = true;
+ node = node_vsys;
+ }
+
+ if (cpu_vsys != cpu) {
+    printf("[FAIL]\tvsyscall reported CPU %u but should be %d\n", cpu_vsys, cpu);
+ nerrs++;
+ } else {
+ printf("[OK]\tvsyscall reported correct CPU\n");
+ }
+
+ if (node_vsys != node) {
+    printf("[FAIL]\tvsyscall reported node %u but should be %u\n", node_vsys, node);
+ nerrs++;
+ } else {
+ printf("[OK]\tvsyscall reported correct node\n");
+ }
+ }
+ }
+
+ return nerrs;
+}
+
+static int test_vsys_r(void)
+{
+#ifdef __x86_64__
+ printf("[RUN]\tChecking read access to the vsyscall page\n");
+ bool can_read;
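+ /* probe with a plain load; the SIGSEGV handler longjmps back here if it faults */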
+ if (sigsetjmp(jmpbuf, 1) == 0) {
+ *(volatile int *)0xffffffffff600000;
+ can_read = true;
+ } else {
+ can_read = false;
+ }
+
+ if (can_read && !should_read_vsyscall) {
+ printf("[FAIL]\tWe have read access, but we shouldn't\n");
+ return 1;
+ } else if (!can_read && should_read_vsyscall) {
+ printf("[FAIL]\tWe don't have read access, but we should\n");
+ return 1;
+ } else {
+ printf("[OK]\tgot expected result\n");
+ }
+#endif
+
+ return 0;
+}
+
+
+#ifdef __x86_64__
+#define X86_EFLAGS_TF (1UL << 8)
+static volatile sig_atomic_t num_vsyscall_traps;
+
+static unsigned long get_eflags(void)
+{
+ unsigned long eflags;
+ asm volatile ("pushfq\n\tpopq %0" : "=rm" (eflags));
+ return eflags;
+}
+
+static void set_eflags(unsigned long eflags)
+{
+ asm volatile ("pushq %0\n\tpopfq" : : "rm" (eflags) : "flags");
+}
+
+static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
+{
+ ucontext_t *ctx = (ucontext_t *)ctx_void;
+ unsigned long ip = ctx->uc_mcontext.gregs[REG_RIP];
+
+ if (((ip ^ 0xffffffffff600000UL) & ~0xfffUL) == 0)
+ num_vsyscall_traps++;
+}
+
+static int test_native_vsyscall(void)
+{
+ time_t tmp;
+ bool is_native;
+
+ if (!vtime)
+ return 0;
+
+ printf("[RUN]\tchecking for native vsyscall\n");
+ sethandler(SIGTRAP, sigtrap, 0);
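+ /* single-step through the vsyscall with TF set, raising SIGTRAP after each instruction */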
+ set_eflags(get_eflags() | X86_EFLAGS_TF);
+ vtime(&tmp);
+ set_eflags(get_eflags() & ~X86_EFLAGS_TF);
+
+ /*
+ * If vsyscalls are emulated, we expect a single trap in the
+ * vsyscall page -- the call instruction will trap with RIP
+ * pointing to the entry point before emulation takes over.
+ * In native mode, we expect two traps, since whatever code
+ * the vsyscall page contains will be more than just a ret
+ * instruction.
+ */
+ is_native = (num_vsyscall_traps > 1);
+
+ printf("\tvsyscalls are %s (%d instructions in vsyscall page)\n",
+ (is_native ? "native" : "emulated"),
+ (int)num_vsyscall_traps);
+
+ return 0;
+}
+#endif
+
+int main(int argc, char **argv)
+{
+ int nerrs = 0;
+
+ init_vdso();
+ nerrs += init_vsys();
+
+ nerrs += test_gtod();
+ nerrs += test_time();
+ nerrs += test_getcpu(0);
+ nerrs += test_getcpu(1);
+
+ sethandler(SIGSEGV, sigsegv, 0);
+ nerrs += test_vsys_r();
+
+#ifdef __x86_64__
+ nerrs += test_native_vsyscall();
+#endif
+
+ return nerrs ? 1 : 0;
+}
diff --git a/tools/usb/usbip/libsrc/usbip_device_driver.c b/tools/usb/usbip/libsrc/usbip_device_driver.c
index e059b7d1ec5b..ec3a0b794f15 100644
--- a/tools/usb/usbip/libsrc/usbip_device_driver.c
+++ b/tools/usb/usbip/libsrc/usbip_device_driver.c
@@ -77,7 +77,7 @@ int read_usb_vudc_device(struct udev_device *sdev, struct usbip_usb_device *dev)
const char *path, *name;
char filepath[SYSFS_PATH_MAX];
struct usb_device_descriptor descr;
- unsigned i;
+ unsigned int i;
FILE *fd = NULL;
struct udev_device *plat;
const char *speed;
@@ -92,7 +92,7 @@ int read_usb_vudc_device(struct udev_device *sdev, struct usbip_usb_device *dev)
return -1;
ret = fread((char *) &descr, sizeof(descr), 1, fd);
if (ret < 0)
- return -1;
+ goto err;
fclose(fd);
copy_descr_attr(dev, &descr, bDeviceClass);
@@ -124,6 +124,9 @@ int read_usb_vudc_device(struct udev_device *sdev, struct usbip_usb_device *dev)
name = udev_device_get_sysname(plat);
strncpy(dev->busid, name, SYSFS_BUS_ID_SIZE);
return 0;
+err:
+ fclose(fd);
+ return -1;
}
static int is_my_device(struct udev_device *dev)
diff --git a/tools/usb/usbip/libsrc/vhci_driver.c b/tools/usb/usbip/libsrc/vhci_driver.c
index 5727dfb15a83..c9c81614a66a 100644
--- a/tools/usb/usbip/libsrc/vhci_driver.c
+++ b/tools/usb/usbip/libsrc/vhci_driver.c
@@ -50,14 +50,14 @@ static int parse_status(const char *value)
while (*c != '\0') {
int port, status, speed, devid;
- unsigned long socket;
+ int sockfd;
char lbusid[SYSFS_BUS_ID_SIZE];
struct usbip_imported_device *idev;
char hub[3];
- ret = sscanf(c, "%2s %d %d %d %x %lx %31s\n",
+ ret = sscanf(c, "%2s %d %d %d %x %u %31s\n",
hub, &port, &status, &speed,
- &devid, &socket, lbusid);
+ &devid, &sockfd, lbusid);
if (ret < 5) {
dbg("sscanf failed: %d", ret);
@@ -66,7 +66,7 @@ static int parse_status(const char *value)
dbg("hub %s port %d status %d speed %d devid %x",
hub, port, status, speed, devid);
- dbg("socket %lx lbusid %s", socket, lbusid);
+ dbg("sockfd %u lbusid %s", sockfd, lbusid);
/* if a device is connected, look at it */
idev = &vhci_driver->idev[port];
@@ -106,7 +106,7 @@ static int parse_status(const char *value)
return 0;
}
-#define MAX_STATUS_NAME 16
+#define MAX_STATUS_NAME 18
static int refresh_imported_device_list(void)
{
@@ -329,9 +329,17 @@ err:
int usbip_vhci_get_free_port(uint32_t speed)
{
for (int i = 0; i < vhci_driver->nports; i++) {
- if (speed == USB_SPEED_SUPER &&
- vhci_driver->idev[i].hub != HUB_SPEED_SUPER)
- continue;
+
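+  /* match port speed: SuperSpeed devices need a super-speed hub port, everything else uses a high-speed port */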
+ switch (speed) {
+ case USB_SPEED_SUPER:
+ if (vhci_driver->idev[i].hub != HUB_SPEED_SUPER)
+ continue;
+ break;
+ default:
+ if (vhci_driver->idev[i].hub != HUB_SPEED_HIGH)
+ continue;
+ break;
+ }
if (vhci_driver->idev[i].status == VDEV_ST_NULL)
return vhci_driver->idev[i].port;
diff --git a/tools/usb/usbip/src/usbip_bind.c b/tools/usb/usbip/src/usbip_bind.c
index fa46141ae68b..e121cfb1746a 100644
--- a/tools/usb/usbip/src/usbip_bind.c
+++ b/tools/usb/usbip/src/usbip_bind.c
@@ -144,6 +144,7 @@ static int bind_device(char *busid)
int rc;
struct udev *udev;
struct udev_device *dev;
+ const char *devpath;
/* Check whether the device with this bus ID exists. */
udev = udev_new();
@@ -152,8 +153,16 @@ static int bind_device(char *busid)
err("device with the specified bus ID does not exist");
return -1;
}
+ devpath = udev_device_get_devpath(dev);
udev_unref(udev);
+ /* If the device is already attached to vhci_hcd - bail out */
+ if (strstr(devpath, USBIP_VHCI_DRV_NAME)) {
+ err("bind loop detected: device: %s is attached to %s\n",
+ devpath, USBIP_VHCI_DRV_NAME);
+ return -1;
+ }
+
rc = unbind_other(busid);
if (rc == UNBIND_ST_FAILED) {
err("could not unbind driver from device on busid %s", busid);
diff --git a/tools/usb/usbip/src/usbip_list.c b/tools/usb/usbip/src/usbip_list.c
index f1b38e866dd7..d65a9f444174 100644
--- a/tools/usb/usbip/src/usbip_list.c
+++ b/tools/usb/usbip/src/usbip_list.c
@@ -187,6 +187,7 @@ static int list_devices(bool parsable)
const char *busid;
char product_name[128];
int ret = -1;
+ const char *devpath;
/* Create libudev context. */
udev = udev_new();
@@ -209,6 +210,14 @@ static int list_devices(bool parsable)
path = udev_list_entry_get_name(dev_list_entry);
dev = udev_device_new_from_syspath(udev, path);
+ /* Ignore devices attached to vhci_hcd */
+ devpath = udev_device_get_devpath(dev);
+ if (strstr(devpath, USBIP_VHCI_DRV_NAME)) {
+ dbg("Skip the device %s already attached to %s\n",
+ devpath, USBIP_VHCI_DRV_NAME);
+ continue;
+ }
+
/* Get device information. */
idVendor = udev_device_get_sysattr_value(dev, "idVendor");
idProduct = udev_device_get_sysattr_value(dev, "idProduct");
diff --git a/tools/usb/usbip/src/usbipd.c b/tools/usb/usbip/src/usbipd.c
index 009afb4a3aae..c6dad2a13c80 100644
--- a/tools/usb/usbip/src/usbipd.c
+++ b/tools/usb/usbip/src/usbipd.c
@@ -456,7 +456,7 @@ static void set_signal(void)
sigaction(SIGTERM, &act, NULL);
sigaction(SIGINT, &act, NULL);
act.sa_handler = SIG_IGN;
- sigaction(SIGCLD, &act, NULL);
+ sigaction(SIGCHLD, &act, NULL);
}
static const char *pid_file;
diff --git a/tools/usb/usbip/src/utils.c b/tools/usb/usbip/src/utils.c
index 2b3d6d235015..3d7b42e77299 100644
--- a/tools/usb/usbip/src/utils.c
+++ b/tools/usb/usbip/src/utils.c
@@ -30,6 +30,7 @@ int modify_match_busid(char *busid, int add)
char command[SYSFS_BUS_ID_SIZE + 4];
char match_busid_attr_path[SYSFS_PATH_MAX];
int rc;
+ int cmd_size;
snprintf(match_busid_attr_path, sizeof(match_busid_attr_path),
"%s/%s/%s/%s/%s/%s", SYSFS_MNT_PATH, SYSFS_BUS_NAME,
@@ -37,12 +38,14 @@ int modify_match_busid(char *busid, int add)
attr_name);
if (add)
- snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s", busid);
+ cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s",
+ busid);
else
- snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s", busid);
+ cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s",
+ busid);
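+ /* write only the formatted command length, not the whole buffer */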
rc = write_sysfs_attribute(match_busid_attr_path, command,
- sizeof(command));
+ cmd_size);
if (rc < 0) {
dbg("failed to write match_busid: %s", strerror(errno));
return -1;
diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
index 395521a7a8d8..fca8381bbe04 100644
--- a/tools/virtio/linux/kernel.h
+++ b/tools/virtio/linux/kernel.h
@@ -118,7 +118,7 @@ static inline void free_page(unsigned long addr)
#define dev_err(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__)
#define dev_warn(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__)
-#define WARN_ON_ONCE(cond) ((cond) && fprintf (stderr, "WARNING\n"))
+#define WARN_ON_ONCE(cond) ((cond) ? fprintf (stderr, "WARNING\n") : 0)
#define min(x, y) ({ \
typeof(x) _min1 = (x); \
diff --git a/tools/virtio/linux/thread_info.h b/tools/virtio/linux/thread_info.h
new file mode 100644
index 000000000000..e0f610d08006
--- /dev/null
+++ b/tools/virtio/linux/thread_info.h
@@ -0,0 +1 @@
+#define check_copy_size(A, B, C) (1)
diff --git a/tools/virtio/ringtest/main.h b/tools/virtio/ringtest/main.h
index 5706e075adf2..301d59bfcd0a 100644
--- a/tools/virtio/ringtest/main.h
+++ b/tools/virtio/ringtest/main.h
@@ -111,7 +111,7 @@ static inline void busy_wait(void)
}
#if defined(__x86_64__) || defined(__i386__)
-#define smp_mb() asm volatile("lock; addl $0,-128(%%rsp)" ::: "memory", "cc")
+#define smp_mb() asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")
#else
/*
* Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized
@@ -134,4 +134,61 @@ static inline void busy_wait(void)
barrier(); \
} while (0)
+#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
+#define smp_wmb() barrier()
+#else
+#define smp_wmb() smp_release()
+#endif
+
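+/* Only Alpha needs a real barrier to order dependent reads. */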
+#ifdef __alpha__
+#define smp_read_barrier_depends() smp_acquire()
+#else
+#define smp_read_barrier_depends() do {} while(0)
+#endif
+
+static __always_inline
+void __read_once_size(const volatile void *p, void *res, int size)
+{
+ switch (size) {
+ case 1: *(unsigned char *)res = *(volatile unsigned char *)p; break;
+ case 2: *(unsigned short *)res = *(volatile unsigned short *)p; break;
+ case 4: *(unsigned int *)res = *(volatile unsigned int *)p; break;
+ case 8: *(unsigned long long *)res = *(volatile unsigned long long *)p; break;
+ default:
+  barrier();
+  __builtin_memcpy((void *)res, (const void *)p, size);
+  barrier();
+ }
+}
+
+static __always_inline void __write_once_size(volatile void *p, void *res, int size)
+{
+ switch (size) {
+ case 1: *(volatile unsigned char *)p = *(unsigned char *)res; break;
+ case 2: *(volatile unsigned short *)p = *(unsigned short *)res; break;
+ case 4: *(volatile unsigned int *)p = *(unsigned int *)res; break;
+ case 8: *(volatile unsigned long long *)p = *(unsigned long long *)res; break;
+ default:
+ barrier();
+ __builtin_memcpy((void *)p, (const void *)res, size);
+ barrier();
+ }
+}
+
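+/* Local equivalents of the kernel's READ_ONCE/WRITE_ONCE, so the ring tests get the same access-once semantics. */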
+#define READ_ONCE(x) \
+({ \
+ union { typeof(x) __val; char __c[1]; } __u; \
+ __read_once_size(&(x), __u.__c, sizeof(x)); \
+ smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
+ __u.__val; \
+})
+
+#define WRITE_ONCE(x, val) \
+({ \
+ union { typeof(x) __val; char __c[1]; } __u = \
+ { .__val = (typeof(x)) (val) }; \
+ __write_once_size(&(x), __u.__c, sizeof(x)); \
+ __u.__val; \
+})
+
#endif
diff --git a/tools/virtio/ringtest/ptr_ring.c b/tools/virtio/ringtest/ptr_ring.c
index 38bb171aceba..477899c12c51 100644
--- a/tools/virtio/ringtest/ptr_ring.c
+++ b/tools/virtio/ringtest/ptr_ring.c
@@ -16,24 +16,41 @@
#define unlikely(x) (__builtin_expect(!!(x), 0))
#define likely(x) (__builtin_expect(!!(x), 1))
#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
+#ifndef SIZE_MAX
+#define SIZE_MAX (~(size_t)0)
+#endif
+
typedef pthread_spinlock_t spinlock_t;
typedef int gfp_t;
-static void *kmalloc(unsigned size, gfp_t gfp)
-{
- return memalign(64, size);
-}
+#define __GFP_ZERO 0x1
-static void *kzalloc(unsigned size, gfp_t gfp)
+static void *kmalloc(unsigned size, gfp_t gfp)
{
void *p = memalign(64, size);
if (!p)
return p;
- memset(p, 0, size);
+ if (gfp & __GFP_ZERO)
+ memset(p, 0, size);
return p;
}
+static inline void *kzalloc(unsigned size, gfp_t flags)
+{
+ return kmalloc(size, flags | __GFP_ZERO);
+}
+
+static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
+{
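+ /* reject n * size overflow before allocating, as the kernel helper does */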
+ if (size != 0 && n > SIZE_MAX / size)
+ return NULL;
+ return kmalloc(n * size, flags);
+}
+
+static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
+{
+ return kmalloc_array(n, size, flags | __GFP_ZERO);
+}
+
static void kfree(void *p)
{
if (p)
@@ -170,7 +187,7 @@ bool enable_kick()
bool avail_empty()
{
- return !__ptr_ring_peek(&array);
+ return __ptr_ring_empty(&array);
}
bool use_buf(unsigned *lenp, void **bufp)
diff --git a/tools/virtio/ringtest/ring.c b/tools/virtio/ringtest/ring.c
index 747c5dd47be8..5a41404aaef5 100644
--- a/tools/virtio/ringtest/ring.c
+++ b/tools/virtio/ringtest/ring.c
@@ -84,12 +84,11 @@ void alloc_ring(void)
perror("Unable to allocate ring buffer.\n");
exit(3);
}
- event = malloc(sizeof *event);
+ event = calloc(1, sizeof(*event));
if (!event) {
perror("Unable to allocate event buffer.\n");
exit(3);
}
- memset(event, 0, sizeof *event);
guest.avail_idx = 0;
guest.kicked_avail_idx = -1;
guest.last_used_idx = 0;
@@ -102,12 +101,11 @@ void alloc_ring(void)
ring[i] = desc;
}
guest.num_free = ring_size;
- data = malloc(ring_size * sizeof *data);
+ data = calloc(ring_size, sizeof(*data));
if (!data) {
perror("Unable to allocate data buffer.\n");
exit(3);
}
- memset(data, 0, ring_size * sizeof *data);
}
/* guest side */
@@ -188,16 +186,18 @@ bool enable_call()
void kick_available(void)
{
+ bool need;
+
/* Flush in previous flags write */
/* Barrier C (for pairing) */
smp_mb();
- if (!need_event(event->kick_index,
- guest.avail_idx,
- guest.kicked_avail_idx))
- return;
+ need = need_event(event->kick_index,
+ guest.avail_idx,
+ guest.kicked_avail_idx);
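+ /* kicked_avail_idx is updated unconditionally; only the kick itself is skipped */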
guest.kicked_avail_idx = guest.avail_idx;
- kick();
+ if (need)
+ kick();
}
/* host side */
@@ -253,14 +253,18 @@ bool use_buf(unsigned *lenp, void **bufp)
void call_used(void)
{
+ bool need;
+
/* Flush in previous flags write */
/* Barrier D (for pairing) */
smp_mb();
- if (!need_event(event->call_index,
+
+ need = need_event(event->call_index,
host.used_idx,
- host.called_used_idx))
- return;
+ host.called_used_idx);
host.called_used_idx = host.used_idx;
- call();
+
+ if (need)
+ call();
}
diff --git a/tools/virtio/ringtest/virtio_ring_0_9.c b/tools/virtio/ringtest/virtio_ring_0_9.c
index bbc3043b2fb1..5fd3fbcb9e57 100644
--- a/tools/virtio/ringtest/virtio_ring_0_9.c
+++ b/tools/virtio/ringtest/virtio_ring_0_9.c
@@ -225,16 +225,18 @@ bool enable_call()
void kick_available(void)
{
+ bool need;
+
/* Flush in previous flags write */
/* Barrier C (for pairing) */
smp_mb();
- if (!vring_need_event(vring_avail_event(&ring),
- guest.avail_idx,
- guest.kicked_avail_idx))
- return;
+ need = vring_need_event(vring_avail_event(&ring),
+ guest.avail_idx,
+ guest.kicked_avail_idx);
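+ /* record the kicked index even when vring_need_event() suppresses the kick */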
guest.kicked_avail_idx = guest.avail_idx;
- kick();
+ if (need)
+ kick();
}
/* host side */
@@ -316,14 +318,16 @@ bool use_buf(unsigned *lenp, void **bufp)
void call_used(void)
{
+ bool need;
+
/* Flush in previous flags write */
/* Barrier D (for pairing) */
smp_mb();
- if (!vring_need_event(vring_used_event(&ring),
- host.used_idx,
- host.called_used_idx))
- return;
+ need = vring_need_event(vring_used_event(&ring),
+ host.used_idx,
+ host.called_used_idx);
host.called_used_idx = host.used_idx;
- call();
+ if (need)
+ call();
}
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
index e92903fc7113..a8783f48f77f 100644
--- a/tools/vm/page-types.c
+++ b/tools/vm/page-types.c
@@ -169,9 +169,10 @@ static int opt_raw; /* for kernel developers */
static int opt_list; /* list pages (in ranges) */
static int opt_no_summary; /* don't show summary */
static pid_t opt_pid; /* process to walk */
-const char * opt_file; /* file or directory path */
+const char *opt_file; /* file or directory path */
static uint64_t opt_cgroup; /* cgroup inode */
static int opt_list_cgroup;/* list page cgroup */
+static const char *opt_kpageflags;/* kpageflags file to parse */
#define MAX_ADDR_RANGES 1024
static int nr_addr_ranges;
@@ -258,7 +259,7 @@ static int checked_open(const char *pathname, int flags)
* pagemap/kpageflags routines
*/
-static unsigned long do_u64_read(int fd, char *name,
+static unsigned long do_u64_read(int fd, const char *name,
uint64_t *buf,
unsigned long index,
unsigned long count)
@@ -283,7 +284,7 @@ static unsigned long kpageflags_read(uint64_t *buf,
unsigned long index,
unsigned long pages)
{
- return do_u64_read(kpageflags_fd, PROC_KPAGEFLAGS, buf, index, pages);
+ return do_u64_read(kpageflags_fd, opt_kpageflags, buf, index, pages);
}
static unsigned long kpagecgroup_read(uint64_t *buf,
@@ -293,7 +294,7 @@ static unsigned long kpagecgroup_read(uint64_t *buf,
if (kpagecgroup_fd < 0)
return pages;
- return do_u64_read(kpagecgroup_fd, PROC_KPAGEFLAGS, buf, index, pages);
+ return do_u64_read(kpagecgroup_fd, opt_kpageflags, buf, index, pages);
}
static unsigned long pagemap_read(uint64_t *buf,
@@ -743,7 +744,7 @@ static void walk_addr_ranges(void)
{
int i;
- kpageflags_fd = checked_open(PROC_KPAGEFLAGS, O_RDONLY);
+ kpageflags_fd = checked_open(opt_kpageflags, O_RDONLY);
if (!nr_addr_ranges)
add_addr_range(0, ULONG_MAX);
@@ -790,6 +791,7 @@ static void usage(void)
" -N|--no-summary Don't show summary info\n"
" -X|--hwpoison hwpoison pages\n"
" -x|--unpoison unpoison pages\n"
+" -F|--kpageflags filename kpageflags file to parse\n"
" -h|--help Show this usage message\n"
"flags:\n"
" 0x10 bitfield format, e.g.\n"
@@ -1013,7 +1015,7 @@ static void walk_page_cache(void)
{
struct stat st;
- kpageflags_fd = checked_open(PROC_KPAGEFLAGS, O_RDONLY);
+ kpageflags_fd = checked_open(opt_kpageflags, O_RDONLY);
pagemap_fd = checked_open("/proc/self/pagemap", O_RDONLY);
sigaction(SIGBUS, &sigbus_action, NULL);
@@ -1164,6 +1166,11 @@ static void parse_bits_mask(const char *optarg)
add_bits_filter(mask, bits);
}
+static void parse_kpageflags(const char *name)
+{
+ opt_kpageflags = name;
+}
+
static void describe_flags(const char *optarg)
{
uint64_t flags = parse_flag_names(optarg, 0);
@@ -1188,6 +1195,7 @@ static const struct option opts[] = {
{ "no-summary", 0, NULL, 'N' },
{ "hwpoison" , 0, NULL, 'X' },
{ "unpoison" , 0, NULL, 'x' },
+ { "kpageflags", 0, NULL, 'F' },
{ "help" , 0, NULL, 'h' },
{ NULL , 0, NULL, 0 }
};
@@ -1199,7 +1207,7 @@ int main(int argc, char *argv[])
page_size = getpagesize();
while ((c = getopt_long(argc, argv,
- "rp:f:a:b:d:c:ClLNXxh", opts, NULL)) != -1) {
+ "rp:f:a:b:d:c:ClLNXxF:h", opts, NULL)) != -1) {
switch (c) {
case 'r':
opt_raw = 1;
@@ -1242,6 +1250,9 @@ int main(int argc, char *argv[])
opt_unpoison = 1;
prepare_hwpoison_fd();
break;
+ case 'F':
+ parse_kpageflags(optarg);
+ break;
case 'h':
usage();
exit(0);
@@ -1251,6 +1262,9 @@ int main(int argc, char *argv[])
}
}
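+ /* fall back to the default /proc/kpageflags when -F was not given */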
+ if (!opt_kpageflags)
+ opt_kpageflags = PROC_KPAGEFLAGS;
+
if (opt_cgroup || opt_list_cgroup)
kpagecgroup_fd = checked_open(PROC_KPAGECGROUP, O_RDONLY);
diff --git a/tools/vm/slabinfo-gnuplot.sh b/tools/vm/slabinfo-gnuplot.sh
index 35b039864b77..0cf28aa6f21c 100644
--- a/tools/vm/slabinfo-gnuplot.sh
+++ b/tools/vm/slabinfo-gnuplot.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# Sergey Senozhatsky, 2015
# sergey.senozhatsky.work@gmail.com