author     Linus Torvalds  2021-11-01 20:05:19 -0700
committer  Linus Torvalds  2021-11-01 20:05:19 -0700
commit     79ef0c00142519bc34e1341447f3797436cc48bf (patch)
tree       200c1ada8b4f31b79ec8a67939ca5fbcd0339958 /arch/s390
parent     d54f486035fd89f14845a7f34a97a3f5da4e70f2 (diff)
parent     feea69ec121f067073868cebe0cb9d003e64ad80 (diff)
Merge tag 'trace-v5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
- kprobes: Restructured stack unwinder to show properly on x86 when a
stack dump happens from a kretprobe callback.
- Fix to bootconfig parsing
- Have tracefs allow owner and group permissions by default (only
denying others). There has been pressure to allow non-root access to
tracefs in a controlled fashion, and using groups is probably the
safest approach.
- Bootconfig memory management updates.
- Bootconfig clean up to have the tools directory be less dependent on
changes in the kernel tree.
- Allow perf to be traced by the function tracer.
- Rewrite of function graph tracer to be a callback from the function
tracer instead of having its own trampoline (this change will happen
on an arch by arch basis, and currently only x86_64 implements it).
- Allow multiple direct trampolines (bpf hooks to functions) to be
batched together in one synchronization.
- Allow histogram triggers to add variables that can perform
calculations against the event's fields (an illustrative trigger
string is sketched after this list).
- Use the linker to determine architecture callbacks from the ftrace
trampoline to allow for proper parameter prototypes and prevent
warnings from the compiler.
- Extend histogram triggers to key off of variables.
- Have trace recursion use bit magic to determine the preempt context
instead of if branches (a branch-free sketch follows after this list).
- Have trace recursion disable preemption as all use cases do anyway.
- Added testing for verification of tracing utilities.
- Various small clean ups and fixes.
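
The histogram expression support is exercised by writing a trigger string
into an event's trigger file in tracefs. Below is a minimal sketch, not
taken from this merge: it assumes tracefs is mounted at
/sys/kernel/tracing, must run as root, and uses the sched_stat_runtime
event's "runtime" field plus a constant variable and a division purely as
an example; the exact grammar is documented in
Documentation/trace/histogram.rst.

/*
 * Minimal sketch (illustrative, not from this merge): install a hist
 * trigger that defines a constant variable and divides an event field
 * by it, using the new expression arithmetic.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path =
		"/sys/kernel/tracing/events/sched/sched_stat_runtime/trigger";
	/* keys=pid, default hitcount, runtime (ns) scaled to microseconds */
	const char *trigger =
		"hist:keys=pid:vals=hitcount:"
		"ns_per_us=1000:runtime_us=runtime/$ns_per_us\n";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open trigger file");
		return 1;
	}
	if (write(fd, trigger, strlen(trigger)) < 0)
		perror("write hist trigger");
	close(fd);
	return 0;
}

The resulting histogram can then be read back from the event's "hist"
file in the same directory.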
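
The "bit magic" item in the list above refers to computing the
interrupt/preempt context level arithmetically from the preempt count
instead of walking an if/else chain. The following user-space sketch only
illustrates the idea; the mask values mirror the layout of the kernel's
preempt_count but are reproduced here as assumptions, and the real helper
lives in the kernel headers rather than in this merge.

/*
 * Branch-free context classification: 0 = task, 1 = softirq,
 * 2 = hardirq, 3 = NMI. Each test contributes 0 or 1, so the sum is the
 * deepest context whose bits are set -- no if/else chain required.
 */
#include <stdio.h>

#define SOFTIRQ_MASK	0x0000ff00UL	/* illustrative, mirrors preempt.h */
#define HARDIRQ_MASK	0x000f0000UL
#define NMI_MASK	0x00f00000UL

static unsigned char context_level(unsigned long preempt_count)
{
	unsigned char level = 0;

	level += !!(preempt_count & NMI_MASK);
	level += !!(preempt_count & (NMI_MASK | HARDIRQ_MASK));
	level += !!(preempt_count & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_MASK));

	return level;
}

int main(void)
{
	/* only a softirq bit set -> level 1 */
	printf("softirq -> %u\n", context_level(0x00000100UL));
	/* hardirq bit set -> level 2 */
	printf("hardirq -> %u\n", context_level(0x00010000UL));
	/* NMI bit dominates regardless of the rest -> level 3 */
	printf("nmi     -> %u\n", context_level(0x00f00100UL));
	return 0;
}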
* tag 'trace-v5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (101 commits)
tracing/histogram: Fix semicolon.cocci warnings
tracing/histogram: Fix documentation inline emphasis warning
tracing: Increase PERF_MAX_TRACE_SIZE to handle Sentinel1 and docker together
tracing: Show size of requested perf buffer
bootconfig: Initialize ret in xbc_parse_tree()
ftrace: do CPU checking after preemption disabled
ftrace: disable preemption when recursion locked
tracing/histogram: Document expression arithmetic and constants
tracing/histogram: Optimize division by a power of 2
tracing/histogram: Covert expr to const if both operands are constants
tracing/histogram: Simplify handling of .sym-offset in expressions
tracing: Fix operator precedence for hist triggers expression
tracing: Add division and multiplication support for hist triggers
tracing: Add support for creating hist trigger variables from literal
selftests/ftrace: Stop tracing while reading the trace file by default
MAINTAINERS: Update KPROBES and TRACING entries
test_kprobes: Move it from kernel/ to lib/
docs, kprobes: Remove invalid URL and add new reference
samples/kretprobes: Fix return value if register_kretprobe() failed
lib/bootconfig: Fix the xbc_get_info kerneldoc
...
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/include/asm/kprobes.h |  2
-rw-r--r--  arch/s390/kernel/ftrace.c       |  5
-rw-r--r--  arch/s390/kernel/kprobes.c      | 16
-rw-r--r--  arch/s390/kernel/stacktrace.c   |  2
4 files changed, 11 insertions, 14 deletions
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index 09cdb632a490..5eb722c984e4 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -70,7 +70,7 @@ struct kprobe_ctlblk {
 };
 
 void arch_remove_kprobe(struct kprobe *p);
-void kretprobe_trampoline(void);
+void __kretprobe_trampoline(void);
 
 int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 int kprobe_exceptions_notify(struct notifier_block *self,
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 1d94ffdf347b..5165bf344f95 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -262,11 +262,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 	return 0;
 }
 
-int __init ftrace_dyn_arch_init(void)
-{
-	return 0;
-}
-
 void arch_ftrace_update_code(int command)
 {
 	if (ftrace_shared_hotpatch_trampoline(NULL))
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 52d056a5f89f..c505c0ee5f47 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -7,6 +7,8 @@
  * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
  */
 
+#define pr_fmt(fmt) "kprobes: " fmt
+
 #include <linux/moduleloader.h>
 #include <linux/kprobes.h>
 #include <linux/ptrace.h>
@@ -240,7 +242,7 @@ void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
 	ri->fp = NULL;
 
 	/* Replace the return addr with trampoline addr */
-	regs->gprs[14] = (unsigned long) &kretprobe_trampoline;
+	regs->gprs[14] = (unsigned long) &__kretprobe_trampoline;
 }
 NOKPROBE_SYMBOL(arch_prepare_kretprobe);
 
@@ -259,7 +261,7 @@ static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p)
 	 * is a BUG. The code path resides in the .kprobes.text
 	 * section and is executed with interrupts disabled.
 	 */
-	pr_err("Invalid kprobe detected.\n");
+	pr_err("Failed to recover from reentered kprobes.\n");
 	dump_kprobe(p);
 	BUG();
 }
@@ -332,8 +334,8 @@ NOKPROBE_SYMBOL(kprobe_handler);
  */
 static void __used kretprobe_trampoline_holder(void)
 {
-	asm volatile(".global kretprobe_trampoline\n"
-		     "kretprobe_trampoline: bcr 0,0\n");
+	asm volatile(".global __kretprobe_trampoline\n"
+		     "__kretprobe_trampoline: bcr 0,0\n");
 }
 
 /*
@@ -341,7 +343,7 @@ static void __used kretprobe_trampoline_holder(void)
  */
 static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 {
-	regs->psw.addr = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL);
+	regs->psw.addr = __kretprobe_trampoline_handler(regs, NULL);
 	/*
 	 * By returning a non-zero value, we are telling
 	 * kprobe_handler() that we don't want the post_handler
@@ -507,7 +509,7 @@ int kprobe_exceptions_notify(struct notifier_block *self,
 NOKPROBE_SYMBOL(kprobe_exceptions_notify);
 
 static struct kprobe trampoline = {
-	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
+	.addr = (kprobe_opcode_t *) &__kretprobe_trampoline,
 	.pre_handler = trampoline_probe_handler
 };
 
@@ -518,6 +520,6 @@ int __init arch_init_kprobes(void)
 
 int arch_trampoline_kprobe(struct kprobe *p)
 {
-	return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline;
+	return p->addr == (kprobe_opcode_t *) &__kretprobe_trampoline;
 }
 NOKPROBE_SYMBOL(arch_trampoline_kprobe);
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 101477b3e263..b7bb1981e9ee 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -46,7 +46,7 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
 		 * Mark stacktraces with kretprobed functions on them
 		 * as unreliable.
 		 */
-		if (state.ip == (unsigned long)kretprobe_trampoline)
+		if (state.ip == (unsigned long)__kretprobe_trampoline)
 			return -EINVAL;
 #endif
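
Most of the s390 changes above are a mechanical rename of
kretprobe_trampoline to __kretprobe_trampoline; the trampoline itself is an
internal detail that kretprobe users never reference directly. As a
reminder of the user-facing API this backs, here is a minimal module
sketch modeled loosely on samples/kprobes/kretprobe_example.c; the probed
symbol kernel_clone is only an example.

/*
 * Minimal kretprobe sketch: log the return value of kernel_clone().
 * The arch trampoline renamed in the diff above is installed behind the
 * scenes by register_kretprobe().
 */
#include <linux/kprobes.h>
#include <linux/module.h>

static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	/* regs_return_value() extracts the probed function's return value */
	unsigned long retval = regs_return_value(regs);

	pr_info("kernel_clone returned %lu\n", retval);
	return 0;
}

static struct kretprobe my_kretprobe = {
	.kp.symbol_name	= "kernel_clone",
	.handler	= ret_handler,
	.maxactive	= 16,	/* instances that may be active at once */
};

static int __init kretprobe_demo_init(void)
{
	return register_kretprobe(&my_kretprobe);
}

static void __exit kretprobe_demo_exit(void)
{
	unregister_kretprobe(&my_kretprobe);
}

module_init(kretprobe_demo_init);
module_exit(kretprobe_demo_exit);
MODULE_LICENSE("GPL");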