Diffstat (limited to 'arch/sh')
-rw-r--r--   arch/sh/Kconfig                   |   1
-rw-r--r--   arch/sh/Kconfig.debug             |  11
-rw-r--r--   arch/sh/include/asm/ftrace.h      |   3
-rw-r--r--   arch/sh/include/asm/thread_info.h |   2
-rw-r--r--   arch/sh/kernel/Makefile_32        |   1
-rw-r--r--   arch/sh/kernel/asm-offsets.c      |   1
-rw-r--r--   arch/sh/kernel/ftrace.c           | 122
-rw-r--r--   arch/sh/kernel/process_32.c       |   5
-rw-r--r--   arch/sh/kernel/vmlinux_64.lds.S   |   0
-rw-r--r--   arch/sh/lib/Makefile              |   1
-rw-r--r--   arch/sh/lib/mcount.S              | 202
11 files changed, 345 insertions, 4 deletions
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 46d1a5e71f77..120bd31a46ea 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -34,6 +34,7 @@ config SUPERH32
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_FTRACE_SYSCALLS
+	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_ARCH_KGDB
 	select ARCH_HIBERNATION_POSSIBLE if MMU
diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
index 39224b57c6ef..52a132c24aab 100644
--- a/arch/sh/Kconfig.debug
+++ b/arch/sh/Kconfig.debug
@@ -123,4 +123,15 @@ config SH64_SR_WATCH
 	bool "Debug: set SR.WATCH to enable hardware watchpoints and trace"
 	depends on SUPERH64
 
+config STACK_DEBUG
+	bool "Enable diagnostic checks of the kernel stack"
+	depends on FUNCTION_TRACER
+	select DEBUG_STACKOVERFLOW
+	default n
+	help
+	  This option allows checks to be performed on the kernel stack
+	  at runtime. Saying Y here will add overhead to every function
+	  call and will therefore incur a major performance hit. Most
+	  users should say N.
+
 endmenu
diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h
index b09311ad1db3..7e0bcc4d4a96 100644
--- a/arch/sh/include/asm/ftrace.h
+++ b/arch/sh/include/asm/ftrace.h
@@ -13,8 +13,11 @@ extern void mcount(void);
 #ifdef CONFIG_DYNAMIC_FTRACE
 #define CALL_ADDR		((long)(ftrace_call))
 #define STUB_ADDR		((long)(ftrace_stub))
+#define GRAPH_ADDR		((long)(ftrace_graph_call))
+#define CALLER_ADDR		((long)(ftrace_caller))
 
 #define MCOUNT_INSN_OFFSET	((STUB_ADDR - CALL_ADDR) - 4)
+#define GRAPH_INSN_OFFSET	((CALLER_ADDR - GRAPH_ADDR) - 4)
 
 struct dyn_arch_ftrace {
 	/* No extra data needed on sh */
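Both *_INSN_OFFSET constants above fold the distance between two global labels in mcount.S into a link-time constant, so C code can locate the word to patch without hard-coding addresses. The following standalone C sketch models the arithmetic; the addresses are fabricated for illustration and nothing here is part of the patch:

    #include <stdio.h>

    /* Same formula as GRAPH_INSN_OFFSET in ftrace.h. */
    #define GRAPH_INSN_OFFSET(caller, graph)	(((caller) - (graph)) - 4)

    int main(void)
    {
    	/* Pretend post-link layout of mcount.S (fabricated addresses). */
    	unsigned long ftrace_graph_call = 0x8c0010a0;
    	unsigned long ftrace_caller     = 0x8c0010b0;

    	/*
    	 * The constant-pool word holding ftrace_graph_call's jump
    	 * target sits 4 bytes before the ftrace_caller label, hence
    	 * the "- 4" in the header.
    	 */
    	unsigned long ip = ftrace_graph_call +
    			   GRAPH_INSN_OFFSET(ftrace_caller, ftrace_graph_call);

    	printf("word to patch lives at %#lx\n", ip);
    	return 0;
    }

In the mcount.S hunks further down, that word is the .Lskip_trace pool entry which ftrace_mod() verifies and rewrites.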
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index 499e315f4be2..5123bcaa8509 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -51,7 +51,7 @@ struct thread_info {
 	.exec_domain	= &default_exec_domain,	\
 	.flags		= 0,			\
 	.cpu		= 0,			\
-	.preempt_count	= 1,			\
+	.preempt_count	= INIT_PREEMPT_COUNT,	\
 	.addr_limit	= KERNEL_DS,		\
 	.restart_block	= {			\
 		.fn = do_no_restart_syscall,	\
diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32
index fee924a9c46c..94ed99b68002 100644
--- a/arch/sh/kernel/Makefile_32
+++ b/arch/sh/kernel/Makefile_32
@@ -30,6 +30,7 @@ obj-$(CONFIG_KPROBES)		+= kprobes.o
 obj-$(CONFIG_GENERIC_GPIO)	+= gpio.o
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 obj-$(CONFIG_FTRACE_SYSCALLS)	+= ftrace.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 obj-$(CONFIG_DUMP_CODE)		+= disassemble.o
 obj-$(CONFIG_HIBERNATION)	+= swsusp.o
diff --git a/arch/sh/kernel/asm-offsets.c b/arch/sh/kernel/asm-offsets.c
index 99aceb28ee24..d218e808294e 100644
--- a/arch/sh/kernel/asm-offsets.c
+++ b/arch/sh/kernel/asm-offsets.c
@@ -26,6 +26,7 @@ int main(void)
 	DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
 	DEFINE(TI_PRE_COUNT,	offsetof(struct thread_info, preempt_count));
 	DEFINE(TI_RESTART_BLOCK, offsetof(struct thread_info, restart_block));
+	DEFINE(TI_SIZE,		sizeof(struct thread_info));
 
 #ifdef CONFIG_HIBERNATION
 	DEFINE(PBE_ADDRESS, offsetof(struct pbe, address));
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
index 4f62eced0aec..6647dfcb781d 100644
--- a/arch/sh/kernel/ftrace.c
+++ b/arch/sh/kernel/ftrace.c
@@ -16,11 +16,13 @@
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/io.h>
+#include <linux/kernel.h>
 #include <asm/ftrace.h>
 #include <asm/cacheflush.h>
 #include <asm/unistd.h>
 #include <trace/syscall.h>
 
+#ifdef CONFIG_DYNAMIC_FTRACE
 static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE];
 
 static unsigned char ftrace_nop[4];
@@ -133,6 +135,126 @@ int __init ftrace_dyn_arch_init(void *data)
 
 	return 0;
 }
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
+
+static int ftrace_mod(unsigned long ip, unsigned long old_addr,
+		      unsigned long new_addr)
+{
+	unsigned char code[MCOUNT_INSN_SIZE];
+
+	if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
+		return -EFAULT;
+
+	if (old_addr != __raw_readl((unsigned long *)code))
+		return -EINVAL;
+
+	__raw_writel(new_addr, ip);
+	return 0;
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	unsigned long ip, old_addr, new_addr;
+
+	ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
+	old_addr = (unsigned long)(&skip_trace);
+	new_addr = (unsigned long)(&ftrace_graph_caller);
+
+	return ftrace_mod(ip, old_addr, new_addr);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	unsigned long ip, old_addr, new_addr;
+
+	ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
+	old_addr = (unsigned long)(&ftrace_graph_caller);
+	new_addr = (unsigned long)(&skip_trace);
+
+	return ftrace_mod(ip, old_addr, new_addr);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in the current thread info.
+ *
+ * This is the main routine for the function graph tracer. The function
+ * graph tracer essentially works like this:
+ *
+ * parent is the stack address containing self_addr's return address.
+ * We pull the real return address out of parent and store it in
+ * current's ret_stack. Then, we replace the return address on the stack
+ * with the address of return_to_handler. self_addr is the function that
+ * called mcount.
+ *
+ * When self_addr returns, it will jump to return_to_handler which calls
+ * ftrace_return_to_handler. ftrace_return_to_handler will pull the real
+ * return address off of current's ret_stack and jump to it.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+	unsigned long old;
+	int faulted, err;
+	struct ftrace_graph_ent trace;
+	unsigned long return_hooker = (unsigned long)&return_to_handler;
+
+	if (unlikely(atomic_read(&current->tracing_graph_pause)))
+		return;
+
+	/*
+	 * Protect against a fault, even though one should never
+	 * happen: this hook is far too intrusive to leave such a
+	 * fault unhandled.
+	 */
+	__asm__ __volatile__(
+		"1:					\n\t"
+		"mov.l		@%2, %0			\n\t"
+		"2:					\n\t"
+		"mov.l		%3, @%2			\n\t"
+		"mov		#0, %1			\n\t"
+		"3:					\n\t"
+		".section .fixup, \"ax\"		\n\t"
+		"4:					\n\t"
+		"mov.l		5f, %0			\n\t"
+		"jmp		@%0			\n\t"
+		" mov		#1, %1			\n\t"
+		".balign 4				\n\t"
+		"5:	.long 3b			\n\t"
+		".previous				\n\t"
+		".section __ex_table,\"a\"		\n\t"
+		".long 1b, 4b				\n\t"
+		".long 2b, 4b				\n\t"
+		".previous				\n\t"
+		: "=&r" (old), "=r" (faulted)
+		: "r" (parent), "r" (return_hooker)
+	);
+
+	if (unlikely(faulted)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		return;
+	}
+
+	err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0);
+	if (err == -EBUSY) {
+		__raw_writel(old, parent);
+		return;
+	}
+
+	trace.func = self_addr;
+
+	/* Only trace if the calling function expects to */
+	if (!ftrace_graph_entry(&trace)) {
+		current->curr_ret_stack--;
+		__raw_writel(old, parent);
+	}
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 #ifdef CONFIG_FTRACE_SYSCALLS
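The comment on prepare_ftrace_return() describes a push/replace/pop cycle on the saved return address. A minimal userspace model of that cycle, with hypothetical stand-ins for current->ret_stack and return_to_handler, and the fault protection elided:

    #include <stdio.h>

    #define RET_STACK_DEPTH	32

    static unsigned long ret_stack[RET_STACK_DEPTH]; /* models current->ret_stack */
    static int curr_ret_stack = -1;                  /* models current->curr_ret_stack */

    static void return_to_handler_stub(void) { }    /* stands in for return_to_handler */

    /* Entry side: remember the real return address, substitute the hook. */
    static void hook_return(unsigned long *parent)
    {
    	if (curr_ret_stack + 1 >= RET_STACK_DEPTH)
    		return;		/* the -EBUSY case: leave the frame untouched */

    	ret_stack[++curr_ret_stack] = *parent;
    	*parent = (unsigned long)return_to_handler_stub;
    }

    /* Exit side: pop the real return address back off the shadow stack. */
    static unsigned long unhook_return(void)
    {
    	return ret_stack[curr_ret_stack--];
    }

    int main(void)
    {
    	unsigned long frame_slot = 0x8c001234;	/* fake saved return address */

    	hook_return(&frame_slot);
    	printf("patched slot now: %#lx\n", frame_slot);
    	printf("real return addr: %#lx\n", unhook_return());
    	return 0;
    }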
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 92d7740faab1..9fee977f176b 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -23,6 +23,7 @@
 #include <linux/tick.h>
 #include <linux/reboot.h>
 #include <linux/fs.h>
+#include <linux/ftrace.h>
 #include <linux/preempt.h>
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
@@ -264,8 +265,8 @@ static void ubc_set_tracing(int asid, unsigned long pc)
  *	switch_to(x,y) should switch tasks from x to y.
  *
  */
-struct task_struct *__switch_to(struct task_struct *prev,
-				struct task_struct *next)
+__notrace_funcgraph struct task_struct *
+__switch_to(struct task_struct *prev, struct task_struct *next)
 {
 #if defined(CONFIG_SH_FPU)
 	unlazy_fpu(prev, task_pt_regs(prev));
diff --git a/arch/sh/kernel/vmlinux_64.lds.S b/arch/sh/kernel/vmlinux_64.lds.S
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/arch/sh/kernel/vmlinux_64.lds.S
diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile
index aaea580b65bb..19328d90a2d1 100644
--- a/arch/sh/lib/Makefile
+++ b/arch/sh/lib/Makefile
@@ -25,6 +25,7 @@ memcpy-$(CONFIG_CPU_SH4)	:= memcpy-sh4.o
 
 lib-$(CONFIG_MMU)		+= copy_page.o clear_page.o
 lib-$(CONFIG_FUNCTION_TRACER)	+= mcount.o
+lib-$(CONFIG_FUNCTION_GRAPH_TRACER) += mcount.o
 lib-y				+= $(memcpy-y) $(udivsi3-y)
 
 EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/lib/mcount.S b/arch/sh/lib/mcount.S
index 71e87f9b4fda..bd3ec648becc 100644
--- a/arch/sh/lib/mcount.S
+++ b/arch/sh/lib/mcount.S
@@ -9,6 +9,8 @@
  * for more details.
  */
 #include <asm/ftrace.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
 
 #define MCOUNT_ENTER()				\
 	mov.l	r4, @-r15;			\
@@ -28,6 +30,55 @@
 	rts;					\
 	mov.l	@r15+, r4
 
+#ifdef CONFIG_STACK_DEBUG
+/*
+ * Perform diagnostic checks on the state of the kernel stack.
+ *
+ * Check for stack overflow: if there is less than STACK_WARN free
+ * (1kB with the default configuration) then the stack is about to
+ * overflow.
+ *
+ * Make sure the stack pointer contains a valid address. Valid
+ * addresses for kernel stacks are anywhere after the bss
+ * (after _ebss) and anywhere in init_thread_union (init_stack).
+ */
+#define STACK_CHECK()					\
+	mov	#(THREAD_SIZE >> 10), r0;		\
+	shll8	r0;					\
+	shll2	r0;					\
+							\
+	/* r1 = sp & (THREAD_SIZE - 1) */		\
+	mov	#-1, r1;				\
+	add	r0, r1;					\
+	and	r15, r1;				\
+							\
+	mov	#TI_SIZE, r3;				\
+	mov	#(STACK_WARN >> 8), r2;			\
+	shll8	r2;					\
+	add	r3, r2;					\
+							\
+	/* Is the stack overflowing? */			\
+	cmp/hi	r2, r1;					\
+	bf	stack_panic;				\
+							\
+	/* If sp > _ebss then we're OK. */		\
+	mov.l	.L_ebss, r1;				\
+	cmp/hi	r1, r15;				\
+	bt	1f;					\
+							\
+	/* If sp < init_stack, we're not OK. */		\
+	mov.l	.L_init_thread_union, r1;		\
+	cmp/hs	r1, r15;				\
+	bf	stack_panic;				\
+							\
+	/* If sp > init_stack && sp < _ebss, not OK. */	\
+	add	r0, r1;					\
+	cmp/hs	r1, r15;				\
+	bt	stack_panic;				\
+1:
+#else
+#define STACK_CHECK()
+#endif /* CONFIG_STACK_DEBUG */
+
 	.align 2
 	.globl	_mcount
 	.type	_mcount,@function
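STACK_CHECK() packs three tests into a few instructions: a free-space check against STACK_WARN plus the thread_info footprint, and two range checks requiring a kernel stack to live either above _ebss or inside init_thread_union. The same predicate in userspace C, with made-up constants standing in for THREAD_SIZE, TI_SIZE, STACK_WARN, _ebss and init_thread_union:

    #include <stdio.h>

    #define THREAD_SIZE	8192UL			/* assumed 8kB kernel stacks */
    #define TI_SIZE	64UL			/* assumed sizeof(struct thread_info) */
    #define STACK_WARN	(THREAD_SIZE >> 3)

    static unsigned long ebss       = 0x8c200000;	/* fake _ebss */
    static unsigned long init_stack = 0x8c100000;	/* fake init_thread_union */

    static int stack_ok(unsigned long sp)
    {
    	/* Fewer free bytes than STACK_WARN (above thread_info)? */
    	if ((sp & (THREAD_SIZE - 1)) <= STACK_WARN + TI_SIZE)
    		return 0;

    	/* Valid kernel stacks live above _ebss... */
    	if (sp > ebss)
    		return 1;

    	/* ...or inside init_thread_union. */
    	return sp >= init_stack && sp < init_stack + THREAD_SIZE;
    }

    int main(void)
    {
    	printf("%d\n", stack_ok(0x8c300f00));	/* above _ebss, plenty free */
    	printf("%d\n", stack_ok(0x8c100010));	/* init stack, nearly full */
    	return 0;
    }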
@@ -41,6 +92,8 @@ mcount:
 	tst	r0, r0
 	bf	ftrace_stub
 #endif
+	STACK_CHECK()
+
 	MCOUNT_ENTER()
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -58,14 +111,62 @@ mcount_call:
 	jsr	@r6
 	 nop
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	mov.l	.Lftrace_graph_return, r6
+	mov.l	.Lftrace_stub, r7
+	cmp/eq	r6, r7
+	bt	1f
+
+	mov.l	.Lftrace_graph_caller, r0
+	jmp	@r0
+	 nop
+
+1:
+	mov.l	.Lftrace_graph_entry, r6
+	mov.l	.Lftrace_graph_entry_stub, r7
+	cmp/eq	r6, r7
+	bt	skip_trace
+
+	mov.l	.Lftrace_graph_caller, r0
+	jmp	@r0
+	 nop
+
+	.align 2
+.Lftrace_graph_return:
+	.long	ftrace_graph_return
+.Lftrace_graph_entry:
+	.long	ftrace_graph_entry
+.Lftrace_graph_entry_stub:
+	.long	ftrace_graph_entry_stub
+.Lftrace_graph_caller:
+	.long	ftrace_graph_caller
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+	.globl skip_trace
 skip_trace:
 	MCOUNT_LEAVE()
 
 	.align 2
 .Lftrace_trace_function:
-	.long ftrace_trace_function
+	.long	ftrace_trace_function
 
 #ifdef CONFIG_DYNAMIC_FTRACE
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/*
+ * NOTE: Do not move either ftrace_graph_call or ftrace_caller
+ * as this will affect the calculation of GRAPH_INSN_OFFSET.
+ */
+	.globl ftrace_graph_call
+ftrace_graph_call:
+	mov.l	.Lskip_trace, r0
+	jmp	@r0
+	 nop
+
+	.align 2
+.Lskip_trace:
+	.long	skip_trace
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
 	.globl ftrace_caller
 ftrace_caller:
 	mov.l	.Lfunction_trace_stop, r0
@@ -73,6 +174,8 @@ ftrace_caller:
 	tst	r0, r0
 	bf	ftrace_stub
 
+	STACK_CHECK()
+
 	MCOUNT_ENTER()
 
 	.globl ftrace_call
@@ -81,7 +184,12 @@ ftrace_call:
 	jsr	@r6
 	 nop
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	bra	ftrace_graph_call
+	 nop
+#else
 	MCOUNT_LEAVE()
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 /*
@@ -100,6 +208,98 @@ ftrace_stub:
 	rts
 	 nop
 
+#ifdef CONFIG_STACK_DEBUG
+	.globl stack_panic
+stack_panic:
+	mov.l	.Ldump_stack, r0
+	jsr	@r0
+	 nop
+
+	mov.l	.Lpanic, r0
+	jsr	@r0
+	 mov.l	.Lpanic_s, r4
+
+	rts
+	 nop
+
 	.align 2
 .Lfunction_trace_stop:
 	.long	function_trace_stop
+.L_ebss:
+	.long	_ebss
+.L_init_thread_union:
+	.long	init_thread_union
+.Lpanic:
+	.long	panic
+.Lpanic_s:
+	.long	.Lpanic_str
+.Ldump_stack:
+	.long	dump_stack
+
+	.section .rodata
+	.align 2
+.Lpanic_str:
+	.string "Stack error"
+#endif /* CONFIG_STACK_DEBUG */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.globl	ftrace_graph_caller
+ftrace_graph_caller:
+	mov.l	2f, r0
+	mov.l	@r0, r0
+	tst	r0, r0
+	bt	1f
+
+	mov.l	3f, r1
+	jmp	@r1
+	 nop
+1:
+	/*
+	 * MCOUNT_ENTER() pushed 5 registers onto the stack, so
+	 * the stack address containing our return address is
+	 * r15 + 20.
+	 */
+	mov	#20, r0
+	add	r15, r0
+	mov	r0, r4
+
+	mov.l	.Lprepare_ftrace_return, r0
+	jsr	@r0
+	 nop
+
+	MCOUNT_LEAVE()
+
+	.align 2
+2:	.long	function_trace_stop
+3:	.long	skip_trace
+.Lprepare_ftrace_return:
+	.long	prepare_ftrace_return
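With CONFIG_DYNAMIC_FTRACE disabled, _mcount decides at run time whether to enter ftrace_graph_caller by comparing ftrace_graph_return and ftrace_graph_entry against their stubs, which is what the two cmp/eq sequences above do. The same dispatch modelled in C (all bodies are hypothetical stand-ins):

    #include <stdio.h>

    typedef void (*graph_hook_t)(void);

    static void ftrace_stub_fn(void) { }	/* stands in for ftrace_stub */
    static void graph_entry_stub_fn(void) { }	/* stands in for ftrace_graph_entry_stub */
    static void my_entry_hook(void) { }	/* a hypothetical registered tracer hook */

    static graph_hook_t ftrace_graph_return = ftrace_stub_fn;
    static graph_hook_t ftrace_graph_entry  = graph_entry_stub_fn;

    static void mcount_sim(void)
    {
    	/* Same two comparisons as the cmp/eq + bt pairs in _mcount. */
    	if (ftrace_graph_return != ftrace_stub_fn ||
    	    ftrace_graph_entry != graph_entry_stub_fn)
    		puts("-> ftrace_graph_caller");
    	else
    		puts("-> skip_trace");
    }

    int main(void)
    {
    	mcount_sim();				/* both hooks still stubbed */
    	ftrace_graph_entry = my_entry_hook;	/* a tracer registers */
    	mcount_sim();
    	return 0;
    }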
+
+	.globl	return_to_handler
+return_to_handler:
+	/*
+	 * Save the return values.
+	 */
+	mov.l	r0, @-r15
+	mov.l	r1, @-r15
+
+	mov	#0, r4
+
+	mov.l	.Lftrace_return_to_handler, r0
+	jsr	@r0
+	 nop
+
+	/*
+	 * The return value from ftrace_return_to_handler is the real
+	 * address that we should return to.
+	 */
+	lds	r0, pr
+	mov.l	@r15+, r1
+	rts
+	 mov.l	@r15+, r0
+
+	.align 2
+.Lftrace_return_to_handler:
+	.long	ftrace_return_to_handler
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
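return_to_handler's contract, restated as a userspace sketch: the traced function's return value must survive the call that fetches the original return address, which is why the asm saves r0/r1 around it. saved_ret_addr is a fabricated stand-in for the entry pushed by prepare_ftrace_return:

    #include <stdio.h>

    /* Fabricated: the address prepare_ftrace_return() stashed earlier. */
    static unsigned long saved_ret_addr = 0x8c001234;

    /* Stands in for ftrace_return_to_handler(): hands back the real address. */
    static unsigned long fetch_real_return(void)
    {
    	return saved_ret_addr;
    }

    int main(void)
    {
    	long r0 = 42;	/* pretend return value of the traced function */

    	/* The asm preserves r0/r1 across this call with mov.l r0,@-r15
    	 * and mov.l r1,@-r15; here the compiler does it for us. */
    	unsigned long real = fetch_real_return();

    	printf("return value %ld preserved, resuming at %#lx\n", r0, real);
    	return 0;
    }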