Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig                            | 22
-rw-r--r--  arch/powerpc/Kconfig.debug                      |  5
-rw-r--r--  arch/powerpc/include/asm/atomic.h               | 69
-rw-r--r--  arch/powerpc/include/asm/hw_breakpoint.h        |  7
-rw-r--r--  arch/powerpc/include/asm/kprobes.h              | 12
-rw-r--r--  arch/powerpc/kernel/hw_breakpoint.c             | 47
-rw-r--r--  arch/powerpc/kernel/kprobes-ftrace.c            | 72
-rw-r--r--  arch/powerpc/kernel/kprobes.c                   | 92
-rw-r--r--  arch/powerpc/kernel/trace/ftrace_64_mprofile.S  | 39
-rw-r--r--  arch/powerpc/kernel/vdso32/note.S               |  3
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c                    |  6
-rw-r--r--  arch/powerpc/perf/core-book3s.c                 |  6
-rw-r--r--  arch/powerpc/purgatory/Makefile                 |  3
13 files changed, 93 insertions, 290 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 9f2b75fe2c2d..1c10ff0406f2 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -383,10 +383,6 @@ config PGTABLE_LEVELS
	default 3 if PPC_64K_PAGES && !PPC_BOOK3S_64
	default 4
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
source "arch/powerpc/sysdev/Kconfig"
source "arch/powerpc/platforms/Kconfig"
@@ -397,8 +393,6 @@ config HIGHMEM
	depends on PPC32
source kernel/Kconfig.hz
-source kernel/Kconfig.preempt
-source "fs/Kconfig.binfmt"
config HUGETLB_PAGE_SIZE_VARIABLE
	bool
@@ -641,8 +635,6 @@ config ILLEGAL_POINTER_VALUE
	default 0x5deadbeef0000000 if PPC64
	default 0
-source "mm/Kconfig"
-
config ARCH_MEMORY_PROBE
	def_bool y
	depends on MEMORY_HOTPLUG
@@ -1201,20 +1193,6 @@ endif
config ARCH_RANDOM
	def_bool n
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
-source "fs/Kconfig"
-
-source "lib/Kconfig"
-
-source "arch/powerpc/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
config PPC_LIB_RHEAP
	bool
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index c45424c64e19..fd63cd914a74 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
config PPC_DISABLE_WERROR
	bool "Don't build arch/powerpc code with -Werror"
@@ -379,5 +376,3 @@ config PPC_FAST_ENDIAN_SWITCH
	depends on DEBUG_KERNEL && PPC_BOOK3S_64
	help
	  If you're unsure what this is, say N.
-
-endmenu
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 682b3e6a1e21..963abf8bf1c0 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -18,18 +18,11 @@
 * a "bne-" instruction at the end, so an isync is enough as a acquire barrier
 * on the platform without lwsync.
 */
-#define __atomic_op_acquire(op, args...) \
-({ \
-	typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
-	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory"); \
-	__ret; \
-})
-
-#define __atomic_op_release(op, args...) \
-({ \
-	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory"); \
-	op##_relaxed(args); \
-})
+#define __atomic_acquire_fence() \
+	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")
+
+#define __atomic_release_fence() \
+	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
static __inline__ int atomic_read(const atomic_t *v)
{
@@ -129,8 +122,6 @@ ATOMIC_OPS(xor, xor)
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-
static __inline__ void atomic_inc(atomic_t *v)
{
	int t;
@@ -145,6 +136,7 @@ static __inline__ void atomic_inc(atomic_t *v)
	: "r" (&v->counter)
	: "cc", "xer");
}
+#define atomic_inc atomic_inc
static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
@@ -163,16 +155,6 @@ static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
	return t;
}
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
static __inline__ void atomic_dec(atomic_t *v)
{
	int t;
@@ -187,6 +169,7 @@ static __inline__ void atomic_dec(atomic_t *v)
	: "r" (&v->counter)
	: "cc", "xer");
}
+#define atomic_dec atomic_dec
static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
@@ -218,7 +201,7 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
/**
- * __atomic_add_unless - add unless the number is a given value
+ * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
@@ -226,13 +209,13 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int t;
	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
-"1:	lwarx	%0,0,%1		# __atomic_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
+"1:	lwarx	%0,0,%1		# atomic_fetch_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
@@ -248,6 +231,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
	return t;
}
+#define atomic_fetch_add_unless atomic_fetch_add_unless
/**
 * atomic_inc_not_zero - increment unless the number is zero
@@ -280,9 +264,6 @@ static __inline__ int atomic_inc_not_zero(atomic_t *v)
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
-#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)
-
/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
@@ -412,8 +393,6 @@ ATOMIC64_OPS(xor, xor)
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP
-#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
-
static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;
@@ -427,6 +406,7 @@ static __inline__ void atomic64_inc(atomic64_t *v)
	: "r" (&v->counter)
	: "cc", "xer");
}
+#define atomic64_inc atomic64_inc
static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
{
@@ -444,16 +424,6 @@ static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
	return t;
}
-/*
- * atomic64_inc_and_test - increment and test
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-
static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;
@@ -467,6 +437,7 @@ static __inline__ void atomic64_dec(atomic64_t *v)
	: "r" (&v->counter)
	: "cc", "xer");
}
+#define atomic64_dec atomic64_dec
static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
{
@@ -487,9 +458,6 @@ static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
-#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
-
/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
@@ -513,6 +481,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
	return t;
}
+#define atomic64_dec_if_positive atomic64_dec_if_positive
#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
@@ -524,7 +493,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
/**
- * atomic64_add_unless - add unless the number is a given value
+ * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
@@ -532,13 +501,13 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
	long t;
	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
-"1:	ldarx	%0,0,%1		# __atomic_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
+"1:	ldarx	%0,0,%1		# atomic64_fetch_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
@@ -551,8 +520,9 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");
-	return t != u;
+	return t;
}
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
/**
 * atomic_inc64_not_zero - increment unless the number is zero
@@ -582,6 +552,7 @@ static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
	return t1 != 0;
}
+#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))
#endif /* __powerpc64__ */
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
index 8e7b09703ca4..27d6e3c8fde9 100644
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -52,6 +52,7 @@ struct arch_hw_breakpoint {
#include <asm/reg.h>
#include <asm/debug.h>
+struct perf_event_attr;
struct perf_event;
struct pmu;
struct perf_sample_data;
@@ -60,8 +61,10 @@ struct perf_sample_data;
extern int hw_breakpoint_slots(int type);
extern int arch_bp_generic_fields(int type, int *gen_bp_type);
-extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+				    const struct perf_event_attr *attr,
+				    struct arch_hw_breakpoint *hw);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
						unsigned long val, void *data);
int arch_install_hw_breakpoint(struct perf_event *bp);
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index 9f3be5c8a4a3..785c464b6588 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -88,7 +88,6 @@ struct prev_kprobe {
struct kprobe_ctlblk {
	unsigned long kprobe_status;
	unsigned long kprobe_saved_msr;
-	struct pt_regs jprobe_saved_regs;
	struct prev_kprobe prev_kprobe;
};
@@ -103,17 +102,6 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
extern int kprobe_handler(struct pt_regs *regs);
extern int kprobe_post_handler(struct pt_regs *regs);
-#ifdef CONFIG_KPROBES_ON_FTRACE
-extern int __is_active_jprobe(unsigned long addr);
-extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
-			   struct kprobe_ctlblk *kcb);
-#else
-static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
-				  struct kprobe_ctlblk *kcb)
-{
-	return 0;
-}
-#endif
#else
static inline int kprobe_handler(struct pt_regs *regs) { return 0; }
static inline int kprobe_post_handler(struct pt_regs *regs) { return 0; }
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 80547dad37da..fec8a6773119 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -119,11 +119,9 @@ void arch_unregister_hw_breakpoint(struct perf_event *bp)
/*
 * Check for virtual address in kernel space.
 */
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
-	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
-	return is_kernel_addr(info->address);
+	return is_kernel_addr(hw->address);
}
int arch_bp_generic_fields(int type, int *gen_bp_type)
@@ -141,30 +139,31 @@ int arch_bp_generic_fields(int type, int *gen_bp_type)
/*
 * Validate the arch-specific HW Breakpoint register settings
 */
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+			     const struct perf_event_attr *attr,
+			     struct arch_hw_breakpoint *hw)
{
	int ret = -EINVAL, length_max;
-	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	if (!bp)
		return ret;
-	info->type = HW_BRK_TYPE_TRANSLATE;
-	if (bp->attr.bp_type & HW_BREAKPOINT_R)
-		info->type |= HW_BRK_TYPE_READ;
-	if (bp->attr.bp_type & HW_BREAKPOINT_W)
-		info->type |= HW_BRK_TYPE_WRITE;
-	if (info->type == HW_BRK_TYPE_TRANSLATE)
+	hw->type = HW_BRK_TYPE_TRANSLATE;
+	if (attr->bp_type & HW_BREAKPOINT_R)
+		hw->type |= HW_BRK_TYPE_READ;
+	if (attr->bp_type & HW_BREAKPOINT_W)
+		hw->type |= HW_BRK_TYPE_WRITE;
+	if (hw->type == HW_BRK_TYPE_TRANSLATE)
		/* must set alteast read or write */
		return ret;
-	if (!(bp->attr.exclude_user))
-		info->type |= HW_BRK_TYPE_USER;
-	if (!(bp->attr.exclude_kernel))
-		info->type |= HW_BRK_TYPE_KERNEL;
-	if (!(bp->attr.exclude_hv))
-		info->type |= HW_BRK_TYPE_HYP;
-	info->address = bp->attr.bp_addr;
-	info->len = bp->attr.bp_len;
+	if (!attr->exclude_user)
+		hw->type |= HW_BRK_TYPE_USER;
+	if (!attr->exclude_kernel)
+		hw->type |= HW_BRK_TYPE_KERNEL;
+	if (!attr->exclude_hv)
+		hw->type |= HW_BRK_TYPE_HYP;
+	hw->address = attr->bp_addr;
+	hw->len = attr->bp_len;
	/*
	 * Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8)
@@ -178,12 +177,12 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
	if (cpu_has_feature(CPU_FTR_DAWR)) {
		length_max = 512 ; /* 64 doublewords */
		/* DAWR region can't cross 512 boundary */
-		if ((bp->attr.bp_addr >> 9) !=
-		    ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 9))
+		if ((attr->bp_addr >> 9) !=
+		    ((attr->bp_addr + attr->bp_len - 1) >> 9))
			return -EINVAL;
	}
-	if (info->len >
-	    (length_max - (info->address & HW_BREAKPOINT_ALIGN)))
+	if (hw->len >
+	    (length_max - (hw->address & HW_BREAKPOINT_ALIGN)))
		return -EINVAL;
	return 0;
}
diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c
index 7a1f99f1b47f..e4a49c051325 100644
--- a/arch/powerpc/kernel/kprobes-ftrace.c
+++ b/arch/powerpc/kernel/kprobes-ftrace.c
@@ -25,50 +25,6 @@
#include <linux/preempt.h>
#include <linux/ftrace.h>
-/*
- * This is called from ftrace code after invoking registered handlers to
- * disambiguate regs->nip changes done by jprobes and livepatch. We check if
- * there is an active jprobe at the provided address (mcount location).
- */
-int __is_active_jprobe(unsigned long addr)
-{
-	if (!preemptible()) {
-		struct kprobe *p = raw_cpu_read(current_kprobe);
-		return (p && (unsigned long)p->addr == addr) ? 1 : 0;
-	}
-
-	return 0;
-}
-
-static nokprobe_inline
-int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
-		      struct kprobe_ctlblk *kcb, unsigned long orig_nip)
-{
-	/*
-	 * Emulate singlestep (and also recover regs->nip)
-	 * as if there is a nop
-	 */
-	regs->nip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
-	if (unlikely(p->post_handler)) {
-		kcb->kprobe_status = KPROBE_HIT_SSDONE;
-		p->post_handler(p, regs, 0);
-	}
-	__this_cpu_write(current_kprobe, NULL);
-	if (orig_nip)
-		regs->nip = orig_nip;
-	return 1;
-}
-
-int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
-		    struct kprobe_ctlblk *kcb)
-{
-	if (kprobe_ftrace(p))
-		return __skip_singlestep(p, regs, kcb, 0);
-	else
-		return 0;
-}
-NOKPROBE_SYMBOL(skip_singlestep);
-
/* Ftrace callback handler for kprobes */
void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
			   struct ftrace_ops *ops, struct pt_regs *regs)
@@ -76,18 +32,14 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;
-	preempt_disable();
-
	p = get_kprobe((kprobe_opcode_t *)nip);
	if (unlikely(!p) || kprobe_disabled(p))
-		goto end;
+		return;
	kcb = get_kprobe_ctlblk();
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
	} else {
-		unsigned long orig_nip = regs->nip;
-
		/*
		 * On powerpc, NIP is *before* this instruction for the
		 * pre handler
@@ -96,19 +48,23 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
		__this_cpu_write(current_kprobe, p);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-		if (!p->pre_handler || !p->pre_handler(p, regs))
-			__skip_singlestep(p, regs, kcb, orig_nip);
-		else {
+		if (!p->pre_handler || !p->pre_handler(p, regs)) {
			/*
-			 * If pre_handler returns !0, it sets regs->nip and
-			 * resets current kprobe. In this case, we should not
-			 * re-enable preemption.
+			 * Emulate singlestep (and also recover regs->nip)
+			 * as if there is a nop
			 */
-			return;
+			regs->nip += MCOUNT_INSN_SIZE;
+			if (unlikely(p->post_handler)) {
+				kcb->kprobe_status = KPROBE_HIT_SSDONE;
+				p->post_handler(p, regs, 0);
+			}
		}
+		/*
+		 * If pre_handler returns !0, it changes regs->nip. We have to
+		 * skip emulating post_handler.
+		 */
+		__this_cpu_write(current_kprobe, NULL);
	}
-end:
-	preempt_enable_no_resched();
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index e4c5bf33970b..5c60bb0f927f 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -317,25 +317,17 @@ int kprobe_handler(struct pt_regs *regs)
			}
			prepare_singlestep(p, regs);
			return 1;
-		} else {
-			if (*addr != BREAKPOINT_INSTRUCTION) {
-				/* If trap variant, then it belongs not to us */
-				kprobe_opcode_t cur_insn = *addr;
-				if (is_trap(cur_insn))
-					goto no_kprobe;
-				/* The breakpoint instruction was removed by
-				 * another cpu right after we hit, no further
-				 * handling of this interrupt is appropriate
-				 */
-				ret = 1;
+		} else if (*addr != BREAKPOINT_INSTRUCTION) {
+			/* If trap variant, then it belongs not to us */
+			kprobe_opcode_t cur_insn = *addr;
+
+			if (is_trap(cur_insn))
				goto no_kprobe;
-			}
-			p = __this_cpu_read(current_kprobe);
-			if (p->break_handler && p->break_handler(p, regs)) {
-				if (!skip_singlestep(p, regs, kcb))
-					goto ss_probe;
-				ret = 1;
-			}
+			/* The breakpoint instruction was removed by
+			 * another cpu right after we hit, no further
+			 * handling of this interrupt is appropriate
+			 */
+			ret = 1;
		}
		goto no_kprobe;
	}
@@ -350,7 +342,7 @@ int kprobe_handler(struct pt_regs *regs)
		 */
		kprobe_opcode_t cur_insn = *addr;
		if (is_trap(cur_insn))
-			goto no_kprobe;
+			goto no_kprobe;
		/*
		 * The breakpoint instruction was removed right
		 * after we hit it. Another cpu has removed
@@ -366,11 +358,13 @@ int kprobe_handler(struct pt_regs *regs)
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p, regs, kcb);
-	if (p->pre_handler && p->pre_handler(p, regs))
-		/* handler has already set things up, so skip ss setup */
+	if (p->pre_handler && p->pre_handler(p, regs)) {
+		/* handler changed execution path, so skip ss setup */
+		reset_current_kprobe();
+		preempt_enable_no_resched();
		return 1;
+	}
-ss_probe:
	if (p->ainsn.boostable >= 0) {
		ret = try_to_emulate(p, regs);
@@ -611,60 +605,6 @@ unsigned long arch_deref_entry_point(void *entry)
}
NOKPROBE_SYMBOL(arch_deref_entry_point);
-int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
-	struct jprobe *jp = container_of(p, struct jprobe, kp);
-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
-
-	/* setup return addr to the jprobe handler routine */
-	regs->nip = arch_deref_entry_point(jp->entry);
-#ifdef PPC64_ELF_ABI_v2
-	regs->gpr[12] = (unsigned long)jp->entry;
-#elif defined(PPC64_ELF_ABI_v1)
-	regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
-#endif
-
-	/*
-	 * jprobes use jprobe_return() which skips the normal return
-	 * path of the function, and this messes up the accounting of the
-	 * function graph tracer.
-	 *
-	 * Pause function graph tracing while performing the jprobe function.
- */
-	pause_graph_tracing();
-
-	return 1;
-}
-NOKPROBE_SYMBOL(setjmp_pre_handler);
-
-void __used jprobe_return(void)
-{
-	asm volatile("jprobe_return_trap:\n"
-		     "trap\n"
-		     ::: "memory");
-}
-NOKPROBE_SYMBOL(jprobe_return);
-
-int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-	if (regs->nip != ppc_kallsyms_lookup_name("jprobe_return_trap")) {
-		pr_debug("longjmp_break_handler NIP (0x%lx) does not match jprobe_return_trap (0x%lx)\n",
-			 regs->nip, ppc_kallsyms_lookup_name("jprobe_return_trap"));
-		return 0;
-	}
-
-	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
-	/* It's OK to start function graph tracing again */
-	unpause_graph_tracing();
-	preempt_enable_no_resched();
-	return 1;
-}
-NOKPROBE_SYMBOL(longjmp_break_handler);
-
static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
index 9a5b5a513604..32476a6e4e9c 100644
--- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
+++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
@@ -104,39 +104,13 @@ ftrace_regs_call:
	bl	ftrace_stub
	nop
-	/* Load the possibly modified NIP */
-	ld	r15, _NIP(r1)
-
+	/* Load ctr with the possibly modified NIP */
+	ld	r3, _NIP(r1)
+	mtctr	r3
#ifdef CONFIG_LIVEPATCH
-	cmpd	r14, r15	/* has NIP been altered? */
+	cmpd	r14, r3		/* has NIP been altered? */
#endif
-#if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE)
-	/* NIP has not been altered, skip over further checks */
-	beq	1f
-
-	/* Check if there is an active jprobe on us */
-	subi	r3, r14, 4
-	bl	__is_active_jprobe
-	nop
-
-	/*
-	 * If r3 == 1, then this is a kprobe/jprobe.
-	 * else, this is livepatched function.
-	 *
-	 * The conditional branch for livepatch_handler below will use the
-	 * result of this comparison. For kprobe/jprobe, we just need to branch to
-	 * the new NIP, not call livepatch_handler. The branch below is bne, so we
-	 * want CR0[EQ] to be true if this is a kprobe/jprobe. Which means we want
-	 * CR0[EQ] = (r3 == 1).
-	 */
-	cmpdi	r3, 1
-1:
-#endif
-
-	/* Load CTR with the possibly modified NIP */
-	mtctr	r15
-
	/* Restore gprs */
	REST_GPR(0,r1)
	REST_10GPRS(2,r1)
@@ -154,10 +128,7 @@ ftrace_regs_call:
	addi r1, r1, SWITCH_FRAME_SIZE
#ifdef CONFIG_LIVEPATCH
-	/*
-	 * Based on the cmpd or cmpdi above, if the NIP was altered and we're
-	 * not on a kprobe/jprobe, then handle livepatch.
-	 */
+	/* Based on the cmpd above, if the NIP was altered handle livepatch */
	bne-	livepatch_handler
#endif
diff --git a/arch/powerpc/kernel/vdso32/note.S b/arch/powerpc/kernel/vdso32/note.S
index d4b5be4f3d5f..227a7327399e 100644
--- a/arch/powerpc/kernel/vdso32/note.S
+++ b/arch/powerpc/kernel/vdso32/note.S
@@ -5,6 +5,7 @@
#include <linux/uts.h>
#include <linux/version.h>
+#include <linux/build-salt.h>
#define ASM_ELF_NOTE_BEGIN(name, flags, vendor, type) \
	.section name, flags; \
@@ -23,3 +24,5 @@ ASM_ELF_NOTE_BEGIN(".note.kernel-version", "a", UTS_SYSNAME, 0)
	.long LINUX_VERSION_CODE
	ASM_ELF_NOTE_END
+
+BUILD_SALT
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index de686b340f4a..ee4a8854985e 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -216,7 +216,7 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
	wqp = kvm_arch_vcpu_wq(vcpu);
	if (swq_has_sleeper(wqp)) {
-		swake_up(wqp);
+		swake_up_one(wqp);
		++vcpu->stat.halt_wakeup;
	}
@@ -3188,7 +3188,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
		}
	}
-	prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
+	prepare_to_swait_exclusive(&vc->wq, &wait, TASK_INTERRUPTIBLE);
	if (kvmppc_vcore_check_block(vc)) {
		finish_swait(&vc->wq, &wait);
@@ -3311,7 +3311,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
			kvmppc_start_thread(vcpu, vc);
			trace_kvm_guest_enter(vcpu);
		} else if (vc->vcore_state == VCORE_SLEEPING) {
-			swake_up(&vc->wq);
+			swake_up_one(&vc->wq);
		}
	}
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 3f66fcf8ad99..19d8ab49d1bd 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1469,7 +1469,7 @@ static int collect_events(struct perf_event *group, int max_count,
}
/*
- * Add a event to the PMU.
+ * Add an event to the PMU.
 * If all events are not already frozen, then we disable and
 * re-enable the PMU in order to get hw_perf_enable to do the
 * actual work of reconfiguring the PMU.
@@ -1548,7 +1548,7 @@ nocheck:
}
/*
- * Remove a event from the PMU.
+ * Remove an event from the PMU.
 */
static void power_pmu_del(struct perf_event *event, int ef_flags)
{
@@ -1742,7 +1742,7 @@ static int power_pmu_commit_txn(struct pmu *pmu)
/*
 * Return 1 if we might be able to put event on a limited PMC,
 * or 0 if not.
- * A event can only go on a limited PMC if it counts something
+ * An event can only go on a limited PMC if it counts something
 * that a limited PMC can count, doesn't require interrupts, and
 * doesn't exclude any processor mode.
 */
diff --git a/arch/powerpc/purgatory/Makefile b/arch/powerpc/purgatory/Makefile
index 30e05decbb4c..4314ba5baf43 100644
--- a/arch/powerpc/purgatory/Makefile
+++ b/arch/powerpc/purgatory/Makefile
@@ -6,9 +6,8 @@ LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined
$(obj)/purgatory.ro: $(obj)/trampoline.o FORCE
		$(call if_changed,ld)
-CMD_BIN2C = $(objtree)/scripts/basic/bin2c
quiet_cmd_bin2c = BIN2C   $@
-      cmd_bin2c = $(CMD_BIN2C) kexec_purgatory < $< > $@
+      cmd_bin2c = $(objtree)/scripts/bin2c kexec_purgatory < $< > $@
$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE
	$(call if_changed,bin2c)
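Note on the atomic.h barrier hunks: the patch drops powerpc's private __atomic_op_acquire()/__atomic_op_release() wrappers and exports only the two fence macros. The ordered variants are then composed in the generic atomic headers rather than per architecture; roughly (a paraphrase of the generic pattern, not text from this patch):

#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	__atomic_acquire_fence();	/* arch hook added above */	\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	__atomic_release_fence();	/* arch hook added above */	\
	op##_relaxed(args);						\
})

That is why the powerpc header only needs to supply the two fences: every atomic_*_acquire()/atomic_*_release() built by the common code picks up the isync/lwsync barriers through them.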
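The *_add_unless conversions also change the return convention: the old atomic64_add_unless() returned a boolean ("return t != u"), while the new atomic64_fetch_add_unless() returns the value observed before the add ("return t"). A caller that still wants the boolean compares against @u itself; a minimal sketch (hypothetical caller, not part of this patch):

/* true if @a was added, false if *v already held @u */
static inline bool example_add_unless(atomic64_t *v, long a, long u)
{
	return atomic64_fetch_add_unless(v, a, u) != u;
}

The generic headers are expected to provide exactly this kind of wrapper, which is why the architecture only has to expose the fetch_ form.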
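Similarly, the deleted helpers (atomic_inc_and_test(), atomic_dec_and_test(), atomic_sub_and_test(), atomic_add_negative() and their atomic64_ counterparts) are not lost; common code supplies a fallback whenever the architecture does not define the name itself, and the "#define atomic_inc atomic_inc" style markers added in this patch are how the arch signals which operations it implements. A rough sketch of the fallback pattern (paraphrased, not this patch's text):

#ifndef atomic_inc_and_test
static inline bool atomic_inc_and_test(atomic_t *v)
{
	return atomic_inc_return(v) == 0;
}
#endif

so the powerpc header keeps only the operations it implements with its own ll/sc sequences and leaves the derived helpers to the generic layer.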