author     Linus Torvalds  2020-12-17 13:22:17 -0800
committer  Linus Torvalds  2020-12-17 13:22:17 -0800
commit     09c0796adf0c793462fda1d7c8c43324551405c7 (patch)
tree       90893d337b215482f059dd7e522279b23ffa0961 /arch
parent     312dcaf967219effe0483785f24e4072a5bed9a5 (diff)
parent     f6a694665f132cbf6e2222dd2f173dc35330a8aa (diff)
Merge tag 'trace-v5.11' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
 "The major update to this release is that there's a new arch config
  option called CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS. Currently, only
  x86_64 enables it. All the ftrace callbacks now take a struct
  ftrace_regs instead of a struct pt_regs. If the architecture has
  HAVE_DYNAMIC_FTRACE_WITH_ARGS enabled, then the ftrace_regs will have
  enough information to read the arguments of the function being
  traced, as well as access to the stack pointer. This way, if a user
  (like live kernel patching) only cares about the arguments, then it
  can avoid using the heavier weight "regs" callback, that puts in
  enough information in the struct ftrace_regs to simulate a breakpoint
  exception (needed for kprobes).

   - A new config option that audits the timestamps of the ftrace ring
     buffer at most every event recorded.

   - Ftrace recursion protection has been cleaned up to move the
     protection to the callback itself (this saves on an extra function
     call for those callbacks).

   - Perf now handles its own RCU protection and does not depend on
     ftrace to do it for it (saving on that extra function call).

   - New debug option to add "recursed_functions" file to tracefs that
     lists all the places that triggered the recursion protection of
     the function tracer. This will show where things need to be fixed
     as recursion slows down the function tracer.

   - The eval enum mapping updates done at boot up are now offloaded to
     a work queue, as it caused a noticeable pause on slow embedded
     boards.

   - Various clean ups and last minute fixes"

* tag 'trace-v5.11' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (33 commits)
  tracing: Offload eval map updates to a work queue
  Revert: "ring-buffer: Remove HAVE_64BIT_ALIGNED_ACCESS"
  ring-buffer: Add rb_check_bpage in __rb_allocate_pages
  ring-buffer: Fix two typos in comments
  tracing: Drop unneeded assignment in ring_buffer_resize()
  tracing: Disable ftrace selftests when any tracer is running
  seq_buf: Avoid type mismatch for seq_buf_init
  ring-buffer: Fix a typo in function description
  ring-buffer: Remove obsolete rb_event_is_commit()
  ring-buffer: Add test to validate the time stamp deltas
  ftrace/documentation: Fix RST C code blocks
  tracing: Clean up after filter logic rewriting
  tracing: Remove the useless value assignment in test_create_synth_event()
  livepatch: Use the default ftrace_ops instead of REGS when ARGS is available
  ftrace/x86: Allow for arguments to be passed in to ftrace_regs by default
  ftrace: Have the callbacks receive a struct ftrace_regs instead of pt_regs
  MAINTAINERS: assign ./fs/tracefs to TRACING
  tracing: Fix some typos in comments
  ftrace: Remove unused varible 'ret'
  ring-buffer: Add recording of ring buffer recursion into recursed_functions
  ...
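To make the new calling convention concrete, here is a minimal sketch of a
function-trace callback written against the prototype this merge introduces.
It is not taken from the merge itself; my_callback and my_ops are hypothetical
names, but ftrace_test_recursion_trylock()/ftrace_test_recursion_unlock() and
ftrace_get_regs() are the helpers the converted handlers below use:

    #include <linux/ftrace.h>
    #include <linux/printk.h>

    /* Hypothetical callback using the new struct ftrace_regs prototype. */
    static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
    				struct ftrace_ops *ops,
    				struct ftrace_regs *fregs)
    {
    	struct pt_regs *regs;
    	int bit;

    	/* Recursion protection is now done in the callback itself. */
    	bit = ftrace_test_recursion_trylock(ip, parent_ip);
    	if (bit < 0)
    		return;

    	/*
    	 * ftrace_get_regs() returns a full pt_regs only when the ops
    	 * requested it (FL_SAVE_REGS); otherwise it returns NULL and
    	 * only the lighter-weight ftrace_regs contents are valid.
    	 */
    	regs = ftrace_get_regs(fregs);
    	if (regs)
    		pr_debug("traced %pS, pc=%lx\n", (void *)ip,
    			 instruction_pointer(regs));

    	ftrace_test_recursion_unlock(bit);
    }

    static struct ftrace_ops my_ops = {
    	.func	= my_callback,
    	.flags	= FTRACE_OPS_FL_SAVE_REGS,
    };

Such an ops would be attached with register_ftrace_function(&my_ops); a user
that only needs the function arguments could drop FTRACE_OPS_FL_SAVE_REGS and
rely on the ftrace_regs contents alone, which is the lighter path the
livepatch change in this series takes.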
Diffstat (limited to 'arch')
-rw-r--r--  arch/Kconfig                           16
-rw-r--r--  arch/csky/kernel/probes/ftrace.c       16
-rw-r--r--  arch/nds32/kernel/ftrace.c              4
-rw-r--r--  arch/parisc/kernel/ftrace.c            24
-rw-r--r--  arch/powerpc/include/asm/livepatch.h    4
-rw-r--r--  arch/powerpc/kernel/kprobes-ftrace.c   15
-rw-r--r--  arch/s390/include/asm/livepatch.h       5
-rw-r--r--  arch/s390/kernel/ftrace.c              20
-rw-r--r--  arch/x86/Kconfig                        1
-rw-r--r--  arch/x86/include/asm/ftrace.h          18
-rw-r--r--  arch/x86/include/asm/livepatch.h        4
-rw-r--r--  arch/x86/kernel/ftrace_64.S            15
-rw-r--r--  arch/x86/kernel/kprobes/ftrace.c       15
13 files changed, 131 insertions, 26 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index d4bdc19ed3ad..a08999dfcf16 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -143,6 +143,22 @@ config UPROBES
managed by the kernel and kept transparent to the probed
application. )
+config HAVE_64BIT_ALIGNED_ACCESS
+ def_bool 64BIT && !HAVE_EFFICIENT_UNALIGNED_ACCESS
+ help
+ Some architectures require 64 bit accesses to be 64 bit
+ aligned, which also requires structs containing 64 bit values
+ to be 64 bit aligned too. This includes some 32 bit
+ architectures which can do 64 bit accesses, as well as 64 bit
+ architectures without unaligned access.
+
+ This symbol should be selected by an architecture if 64 bit
+ accesses are required to be 64 bit aligned in this way even
+ though it is not a 64 bit architecture.
+
+ See Documentation/unaligned-memory-access.txt for more
+ information on the topic of unaligned memory accesses.
+
config HAVE_EFFICIENT_UNALIGNED_ACCESS
bool
help
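The help text above says when an architecture should select the option. As an
illustration only (not code from this series; the macro names are made up), a
consumer such as the ring buffer can key its record alignment off it roughly
like this:

    /*
     * Illustrative only: force 8-byte alignment of records that embed
     * 64-bit values when the architecture cannot access unaligned
     * 64-bit fields.
     */
    #ifdef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
    # define EVENT_FORCE_8BYTE_ALIGNMENT	1
    # define EVENT_ARCH_ALIGNMENT		8U
    #else
    # define EVENT_FORCE_8BYTE_ALIGNMENT	0
    # define EVENT_ARCH_ALIGNMENT		4U
    #endif

The 'Revert: "ring-buffer: Remove HAVE_64BIT_ALIGNED_ACCESS"' commit in the
list above brings this option back for exactly that kind of user.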
diff --git a/arch/csky/kernel/probes/ftrace.c b/arch/csky/kernel/probes/ftrace.c
index 5264763d05be..ae2b1c7b3b5c 100644
--- a/arch/csky/kernel/probes/ftrace.c
+++ b/arch/csky/kernel/probes/ftrace.c
@@ -11,18 +11,25 @@ int arch_check_ftrace_location(struct kprobe *p)
/* Ftrace callback handler for kprobes -- called under preempt disabled */
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *ops, struct pt_regs *regs)
+ struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
+ int bit;
bool lr_saver = false;
struct kprobe *p;
struct kprobe_ctlblk *kcb;
+ struct pt_regs *regs;
- /* Preempt is disabled by ftrace */
+ bit = ftrace_test_recursion_trylock(ip, parent_ip);
+ if (bit < 0)
+ return;
+
+ regs = ftrace_get_regs(fregs);
+ preempt_disable_notrace();
p = get_kprobe((kprobe_opcode_t *)ip);
if (!p) {
p = get_kprobe((kprobe_opcode_t *)(ip - MCOUNT_INSN_SIZE));
if (unlikely(!p) || kprobe_disabled(p))
- return;
+ goto out;
lr_saver = true;
}
@@ -56,6 +63,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
*/
__this_cpu_write(current_kprobe, NULL);
}
+out:
+ preempt_enable_notrace();
+ ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c
index 3763b3f8c3db..414f8a780cc3 100644
--- a/arch/nds32/kernel/ftrace.c
+++ b/arch/nds32/kernel/ftrace.c
@@ -10,7 +10,7 @@ extern void (*ftrace_trace_function)(unsigned long, unsigned long,
extern void ftrace_graph_caller(void);
noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *regs)
+ struct ftrace_ops *op, struct ftrace_regs *fregs)
{
__asm__ (""); /* avoid to optimize as pure function */
}
@@ -38,7 +38,7 @@ EXPORT_SYMBOL(_mcount);
#else /* CONFIG_DYNAMIC_FTRACE */
noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *regs)
+ struct ftrace_ops *op, struct ftrace_regs *fregs)
{
__asm__ (""); /* avoid to optimize as pure function */
}
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
index 63e3ecb9da81..0a1e75af5382 100644
--- a/arch/parisc/kernel/ftrace.c
+++ b/arch/parisc/kernel/ftrace.c
@@ -51,7 +51,7 @@ static void __hot prepare_ftrace_return(unsigned long *parent,
void notrace __hot ftrace_function_trampoline(unsigned long parent,
unsigned long self_addr,
unsigned long org_sp_gr3,
- struct pt_regs *regs)
+ struct ftrace_regs *fregs)
{
#ifndef CONFIG_DYNAMIC_FTRACE
extern ftrace_func_t ftrace_trace_function;
@@ -61,7 +61,7 @@ void notrace __hot ftrace_function_trampoline(unsigned long parent,
if (function_trace_op->flags & FTRACE_OPS_FL_ENABLED &&
ftrace_trace_function != ftrace_stub)
ftrace_trace_function(self_addr, parent,
- function_trace_op, regs);
+ function_trace_op, fregs);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (dereference_function_descriptor(ftrace_graph_return) !=
@@ -204,17 +204,26 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
#ifdef CONFIG_KPROBES_ON_FTRACE
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *ops, struct pt_regs *regs)
+ struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
struct kprobe_ctlblk *kcb;
- struct kprobe *p = get_kprobe((kprobe_opcode_t *)ip);
+ struct pt_regs *regs;
+ struct kprobe *p;
+ int bit;
- if (unlikely(!p) || kprobe_disabled(p))
+ bit = ftrace_test_recursion_trylock(ip, parent_ip);
+ if (bit < 0)
return;
+ regs = ftrace_get_regs(fregs);
+ preempt_disable_notrace();
+ p = get_kprobe((kprobe_opcode_t *)ip);
+ if (unlikely(!p) || kprobe_disabled(p))
+ goto out;
+
if (kprobe_running()) {
kprobes_inc_nmissed_count(p);
- return;
+ goto out;
}
__this_cpu_write(current_kprobe, p);
@@ -235,6 +244,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
}
}
__this_cpu_write(current_kprobe, NULL);
+out:
+ preempt_enable_notrace();
+ ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
index 4a3d5d25fed5..ae25e6e72997 100644
--- a/arch/powerpc/include/asm/livepatch.h
+++ b/arch/powerpc/include/asm/livepatch.h
@@ -12,8 +12,10 @@
#include <linux/sched/task_stack.h>
#ifdef CONFIG_LIVEPATCH
-static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+static inline void klp_arch_set_pc(struct ftrace_regs *fregs, unsigned long ip)
{
+ struct pt_regs *regs = ftrace_get_regs(fregs);
+
regs->nip = ip;
}
diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c
index 972cb28174b2..660138f6c4b2 100644
--- a/arch/powerpc/kernel/kprobes-ftrace.c
+++ b/arch/powerpc/kernel/kprobes-ftrace.c
@@ -14,14 +14,22 @@
/* Ftrace callback handler for kprobes */
void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
- struct ftrace_ops *ops, struct pt_regs *regs)
+ struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
struct kprobe *p;
struct kprobe_ctlblk *kcb;
+ struct pt_regs *regs;
+ int bit;
+ bit = ftrace_test_recursion_trylock(nip, parent_nip);
+ if (bit < 0)
+ return;
+
+ regs = ftrace_get_regs(fregs);
+ preempt_disable_notrace();
p = get_kprobe((kprobe_opcode_t *)nip);
if (unlikely(!p) || kprobe_disabled(p))
- return;
+ goto out;
kcb = get_kprobe_ctlblk();
if (kprobe_running()) {
@@ -52,6 +60,9 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
*/
__this_cpu_write(current_kprobe, NULL);
}
+out:
+ preempt_enable_notrace();
+ ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
diff --git a/arch/s390/include/asm/livepatch.h b/arch/s390/include/asm/livepatch.h
index 818612b784cd..d578a8c76676 100644
--- a/arch/s390/include/asm/livepatch.h
+++ b/arch/s390/include/asm/livepatch.h
@@ -11,10 +11,13 @@
#ifndef ASM_LIVEPATCH_H
#define ASM_LIVEPATCH_H
+#include <linux/ftrace.h>
#include <asm/ptrace.h>
-static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+static inline void klp_arch_set_pc(struct ftrace_regs *fregs, unsigned long ip)
{
+ struct pt_regs *regs = ftrace_get_regs(fregs);
+
regs->psw.addr = ip;
}
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index ebc1284a618b..c6ddeb5029b4 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -163,17 +163,26 @@ int ftrace_disable_ftrace_graph_caller(void)
#ifdef CONFIG_KPROBES_ON_FTRACE
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *ops, struct pt_regs *regs)
+ struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
struct kprobe_ctlblk *kcb;
- struct kprobe *p = get_kprobe((kprobe_opcode_t *)ip);
+ struct pt_regs *regs;
+ struct kprobe *p;
+ int bit;
- if (unlikely(!p) || kprobe_disabled(p))
+ bit = ftrace_test_recursion_trylock(ip, parent_ip);
+ if (bit < 0)
return;
+ regs = ftrace_get_regs(fregs);
+ preempt_disable_notrace();
+ p = get_kprobe((kprobe_opcode_t *)ip);
+ if (unlikely(!p) || kprobe_disabled(p))
+ goto out;
+
if (kprobe_running()) {
kprobes_inc_nmissed_count(p);
- return;
+ goto out;
}
__this_cpu_write(current_kprobe, p);
@@ -193,6 +202,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
}
}
__this_cpu_write(current_kprobe, NULL);
+out:
+ preempt_enable_notrace();
+ ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index a8bd298e45b1..7b6dd10b162a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -171,6 +171,7 @@ config X86
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
+ select HAVE_DYNAMIC_FTRACE_WITH_ARGS if X86_64
select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
select HAVE_EBPF_JIT
select HAVE_EFFICIENT_UNALIGNED_ACCESS
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 84b9449be080..9f3130f40807 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -41,6 +41,24 @@ static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned
regs->orig_ax = addr;
}
+#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
+struct ftrace_regs {
+ struct pt_regs regs;
+};
+
+static __always_inline struct pt_regs *
+arch_ftrace_get_regs(struct ftrace_regs *fregs)
+{
+ /* Only when FL_SAVE_REGS is set, cs will be non zero */
+ if (!fregs->regs.cs)
+ return NULL;
+ return &fregs->regs;
+}
+
+#define ftrace_instruction_pointer_set(fregs, _ip) \
+ do { (fregs)->regs.ip = (_ip); } while (0)
+#endif
+
#ifdef CONFIG_DYNAMIC_FTRACE
struct dyn_arch_ftrace {
diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h
index 1fde1ab6559e..7c5cc6660e4b 100644
--- a/arch/x86/include/asm/livepatch.h
+++ b/arch/x86/include/asm/livepatch.h
@@ -12,9 +12,9 @@
#include <asm/setup.h>
#include <linux/ftrace.h>
-static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+static inline void klp_arch_set_pc(struct ftrace_regs *fregs, unsigned long ip)
{
- regs->ip = ip;
+ ftrace_instruction_pointer_set(fregs, ip);
}
#endif /* _ASM_X86_LIVEPATCH_H */
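The klp_arch_set_pc() change above is what lets live patching run on the
default (non-REGS) ftrace_ops: the handler only has to rewrite the instruction
pointer. A rough sketch of that redirect, with hypothetical names (the real
handler is klp_ftrace_handler() in kernel/livepatch/patch.c):

    /* Placeholder replacement function; hypothetical. */
    extern void my_replacement_func(void);

    /* Hypothetical: redirect the traced function to a replacement. */
    static void notrace redirect_handler(unsigned long ip, unsigned long parent_ip,
    				     struct ftrace_ops *ops,
    				     struct ftrace_regs *fregs)
    {
    	/*
    	 * On x86_64 this updates regs.ip inside ftrace_regs; the
    	 * ftrace_64.S change below then makes the trampoline return to
    	 * the new address instead of the patched function.
    	 */
    	ftrace_instruction_pointer_set(fregs,
    				       (unsigned long)my_replacement_func);
    }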
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index ac3d5f22fe64..0d54099c2a3a 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -140,16 +140,27 @@ SYM_FUNC_START(ftrace_caller)
/* save_mcount_regs fills in first two parameters */
save_mcount_regs
+ /* Stack - skipping return address of ftrace_caller */
+ leaq MCOUNT_REG_SIZE+8(%rsp), %rcx
+ movq %rcx, RSP(%rsp)
+
SYM_INNER_LABEL(ftrace_caller_op_ptr, SYM_L_GLOBAL)
/* Load the ftrace_ops into the 3rd parameter */
movq function_trace_op(%rip), %rdx
- /* regs go into 4th parameter (but make it NULL) */
- movq $0, %rcx
+ /* regs go into 4th parameter */
+ leaq (%rsp), %rcx
+
+ /* Only ops with REGS flag set should have CS register set */
+ movq $0, CS(%rsp)
SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
call ftrace_stub
+ /* Handlers can change the RIP */
+ movq RIP(%rsp), %rax
+ movq %rax, MCOUNT_REG_SIZE(%rsp)
+
restore_mcount_regs
/*
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
index 681a4b36e9bb..373e5fa3ce1f 100644
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -14,15 +14,21 @@
/* Ftrace callback handler for kprobes -- called under preempt disabled */
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *ops, struct pt_regs *regs)
+ struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
+ struct pt_regs *regs = ftrace_get_regs(fregs);
struct kprobe *p;
struct kprobe_ctlblk *kcb;
+ int bit;
- /* Preempt is disabled by ftrace */
+ bit = ftrace_test_recursion_trylock(ip, parent_ip);
+ if (bit < 0)
+ return;
+
+ preempt_disable_notrace();
p = get_kprobe((kprobe_opcode_t *)ip);
if (unlikely(!p) || kprobe_disabled(p))
- return;
+ goto out;
kcb = get_kprobe_ctlblk();
if (kprobe_running()) {
@@ -52,6 +58,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
*/
__this_cpu_write(current_kprobe, NULL);
}
+out:
+ preempt_enable_notrace();
+ ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);