Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig                        |   5
-rw-r--r--  arch/powerpc/include/asm/livepatch.h        |  62
-rw-r--r--  arch/powerpc/include/asm/processor.h        |   2
-rw-r--r--  arch/powerpc/include/asm/systbl.h           |   2
-rw-r--r--  arch/powerpc/include/asm/thread_info.h      |   4
-rw-r--r--  arch/powerpc/include/asm/unistd.h           |   2
-rw-r--r--  arch/powerpc/include/asm/word-at-a-time.h   |   2
-rw-r--r--  arch/powerpc/include/uapi/asm/cputable.h    |   1
-rw-r--r--  arch/powerpc/include/uapi/asm/unistd.h      |   2
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c           |   4
-rw-r--r--  arch/powerpc/kernel/entry_64.S              |  97
-rw-r--r--  arch/powerpc/kernel/irq.c                   |   3
-rw-r--r--  arch/powerpc/kernel/mce.c                   |   2
-rw-r--r--  arch/powerpc/kernel/process.c               |   8
-rw-r--r--  arch/powerpc/kernel/prom.c                  |  26
-rw-r--r--  arch/powerpc/kernel/setup_64.c              |  17
-rw-r--r--  arch/powerpc/kernel/smp.c                   |   2
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c               |   4
-rw-r--r--  arch/powerpc/perf/callchain.c               |   4
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c   |   6
20 files changed, 221 insertions, 34 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 7cd32c038286..a18a0dcd57b7 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -126,7 +126,7 @@ config PPC
 	select IRQ_FORCED_THREADING
 	select HAVE_RCU_TABLE_FREE if SMP
 	select HAVE_SYSCALL_TRACEPOINTS
-	select HAVE_BPF_JIT
+	select HAVE_CBPF_JIT
 	select HAVE_ARCH_JUMP_LABEL
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_HAS_GCOV_PROFILE_ALL
@@ -160,6 +160,7 @@ config PPC
 	select HAVE_ARCH_SECCOMP_FILTER
 	select ARCH_HAS_UBSAN_SANITIZE_ALL
 	select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
+	select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
 
 config GENERIC_CSUM
 	def_bool CPU_LITTLE_ENDIAN
@@ -1107,3 +1108,5 @@ config PPC_LIB_RHEAP
 	bool
 
 source "arch/powerpc/kvm/Kconfig"
+
+source "kernel/livepatch/Kconfig"
diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
new file mode 100644
index 000000000000..a402f7f94896
--- /dev/null
+++ b/arch/powerpc/include/asm/livepatch.h
@@ -0,0 +1,62 @@
+/*
+ * livepatch.h - powerpc-specific Kernel Live Patching Core
+ *
+ * Copyright (C) 2015-2016, SUSE, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _ASM_POWERPC_LIVEPATCH_H
+#define _ASM_POWERPC_LIVEPATCH_H
+
+#include <linux/module.h>
+#include <linux/ftrace.h>
+
+#ifdef CONFIG_LIVEPATCH
+static inline int klp_check_compiler_support(void)
+{
+	return 0;
+}
+
+static inline int klp_write_module_reloc(struct module *mod, unsigned long
+					 type, unsigned long loc, unsigned long value)
+{
+	/* This requires infrastructure changes; we need the loadinfos. */
+	return -ENOSYS;
+}
+
+static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+	regs->nip = ip;
+}
+
+#define klp_get_ftrace_location klp_get_ftrace_location
+static inline unsigned long klp_get_ftrace_location(unsigned long faddr)
+{
+	/*
+	 * Live patch works only with -mprofile-kernel on PPC. In this case,
+	 * the ftrace location is always within the first 16 bytes.
+	 */
+	return ftrace_location_range(faddr, faddr + 16);
+}
+
+static inline void klp_init_thread_info(struct thread_info *ti)
+{
+	/* + 1 to account for STACK_END_MAGIC */
+	ti->livepatch_sp = (unsigned long *)(ti + 1) + 1;
+}
+#else
+static void klp_init_thread_info(struct thread_info *ti) { }
+#endif /* CONFIG_LIVEPATCH */
+
+#endif /* _ASM_POWERPC_LIVEPATCH_H */
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 8ab8a1a9610a..009fab130cd8 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -246,7 +246,7 @@ struct thread_struct {
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_VSX
 	/* VSR status */
-	int		used_vsr;	/* set if process has used altivec */
+	int		used_vsr;	/* set if process has used VSX */
 #endif /* CONFIG_VSX */
 #ifdef CONFIG_SPE
 	unsigned long	evr[32];	/* upper 32-bits of SPE regs */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 3fa9df70aa20..2fc5d4db503c 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -384,3 +384,5 @@ SYSCALL(ni_syscall)
 SYSCALL(ni_syscall)
 SYSCALL(mlock2)
 SYSCALL(copy_file_range)
+COMPAT_SYS_SPU(preadv2)
+COMPAT_SYS_SPU(pwritev2)
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 7efee4a3240b..8febc3f66d53 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -43,7 +43,9 @@ struct thread_info {
 	int		preempt_count;		/* 0 => preemptable,
 						   <0 => BUG */
 	unsigned long	local_flags;		/* private flags for thread */
-
+#ifdef CONFIG_LIVEPATCH
+	unsigned long *livepatch_sp;
+#endif
 	/* low level flags - has atomic operations done on it */
 	unsigned long	flags ____cacheline_aligned_in_smp;
 };
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 1f2594d45605..cf12c580f6b2 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls		380
+#define NR_syscalls		382
 
 #define __NR__exit __NR_exit
 
diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
index e4396a7d0f7c..4afe66aa1400 100644
--- a/arch/powerpc/include/asm/word-at-a-time.h
+++ b/arch/powerpc/include/asm/word-at-a-time.h
@@ -82,7 +82,7 @@ static inline unsigned long create_zero_mask(unsigned long bits)
 	    "andc	%1,%1,%2\n\t"
 	    "popcntd	%0,%1"
 		: "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask)
-		: "r" (bits));
+		: "b" (bits));
 
 	return leading_zero_bits;
 }
diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
index 8dde19962a5b..f63c96cd3608 100644
--- a/arch/powerpc/include/uapi/asm/cputable.h
+++ b/arch/powerpc/include/uapi/asm/cputable.h
@@ -31,6 +31,7 @@
 #define PPC_FEATURE_PSERIES_PERFMON_COMPAT \
 					0x00000040
 
+/* Reserved - do not use		0x00000004 */
 #define PPC_FEATURE_TRUE_LE		0x00000002
 #define PPC_FEATURE_PPC_LE		0x00000001
 
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 940290d45b08..e9f5f41aa55a 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -390,5 +390,7 @@
 #define __NR_membarrier		365
 #define __NR_mlock2		378
 #define __NR_copy_file_range	379
+#define __NR_preadv2		380
+#define __NR_pwritev2		381
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 0d0183d3180a..c9370d4e36bd 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -86,6 +86,10 @@ int main(void)
 	DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
 #endif /* CONFIG_PPC64 */
 
+#ifdef CONFIG_LIVEPATCH
+	DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp));
+#endif
+
 	DEFINE(KSP, offsetof(struct thread_struct, ksp));
 	DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
 #ifdef CONFIG_BOOKE
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 9916d150b28c..39a79c89a4b6 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -20,6 +20,7 @@
 
 #include <linux/errno.h>
 #include <linux/err.h>
+#include <linux/magic.h>
 #include <asm/unistd.h>
 #include <asm/processor.h>
 #include <asm/page.h>
@@ -1248,6 +1249,9 @@ _GLOBAL(ftrace_caller)
 	addi	r3,r3,function_trace_op@toc@l
 	ld	r5,0(r3)
 
+#ifdef CONFIG_LIVEPATCH
+	mr	r14,r7		/* remember old NIP */
+#endif
 	/* Calculate ip from nip-4 into r3 for call below */
 	subi    r3, r7, MCOUNT_INSN_SIZE
 
@@ -1272,6 +1276,9 @@ ftrace_call:
 	/* Load ctr with the possibly modified NIP */
 	ld	r3, _NIP(r1)
 	mtctr	r3
+#ifdef CONFIG_LIVEPATCH
+	cmpd	r14,r3		/* has NIP been altered? */
+#endif
 
 	/* Restore gprs */
 	REST_8GPRS(0,r1)
@@ -1289,6 +1296,11 @@ ftrace_call:
 	ld	r0, LRSAVE(r1)
 	mtlr	r0
 
+#ifdef CONFIG_LIVEPATCH
+	/* Based on the cmpd above, if the NIP was altered handle livepatch */
+	bne-	livepatch_handler
+#endif
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	stdu	r1, -112(r1)
 .globl ftrace_graph_call
@@ -1305,6 +1317,91 @@ _GLOBAL(ftrace_graph_stub)
 
 _GLOBAL(ftrace_stub)
 	blr
+
+#ifdef CONFIG_LIVEPATCH
+	/*
+	 * This function runs in the mcount context, between two functions. As
+	 * such it can only clobber registers which are volatile and used in
+	 * function linkage.
+	 *
+	 * We get here when a function A, calls another function B, but B has
+	 * been live patched with a new function C.
+	 *
+	 * On entry:
+	 *  - we have no stack frame and can not allocate one
+	 *  - LR points back to the original caller (in A)
+	 *  - CTR holds the new NIP in C
+	 *  - r0 & r12 are free
+	 *
+	 * r0 can't be used as the base register for a DS-form load or store, so
+	 * we temporarily shuffle r1 (stack pointer) into r0 and then put it back.
+	 */
+livepatch_handler:
+	CURRENT_THREAD_INFO(r12, r1)
+
+	/* Save stack pointer into r0 */
+	mr	r0, r1
+
+	/* Allocate 3 x 8 bytes */
+	ld	r1, TI_livepatch_sp(r12)
+	addi	r1, r1, 24
+	std	r1, TI_livepatch_sp(r12)
+
+	/* Save toc & real LR on livepatch stack */
+	std	r2,  -24(r1)
+	mflr	r12
+	std	r12, -16(r1)
+
+	/* Store stack end marker */
+	lis     r12, STACK_END_MAGIC@h
+	ori     r12, r12, STACK_END_MAGIC@l
+	std	r12, -8(r1)
+
+	/* Restore real stack pointer */
+	mr	r1, r0
+
+	/* Put ctr in r12 for global entry and branch there */
+	mfctr	r12
+	bctrl
+
+	/*
+	 * Now we are returning from the patched function to the original
+	 * caller A. We are free to use r0 and r12, and we can use r2 until we
+	 * restore it.
+	 */
+
+	CURRENT_THREAD_INFO(r12, r1)
+
+	/* Save stack pointer into r0 */
+	mr	r0, r1
+
+	ld	r1, TI_livepatch_sp(r12)
+
+	/* Check stack marker hasn't been trashed */
+	lis     r2,  STACK_END_MAGIC@h
+	ori     r2,  r2, STACK_END_MAGIC@l
+	ld	r12, -8(r1)
+1:	tdne	r12, r2
+	EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0
+
+	/* Restore LR & toc from livepatch stack */
+	ld	r12, -16(r1)
+	mtlr	r12
+	ld	r2, -24(r1)
+
+	/* Pop livepatch stack frame */
+	CURRENT_THREAD_INFO(r12, r0)
+	subi	r1, r1, 24
+	std	r1, TI_livepatch_sp(r12)
+
+	/* Restore real stack pointer */
+	mr	r1, r0
+
+	/* Return to original caller of live patched function */
+	blr
+#endif
+
+
 #else
 _GLOBAL_TOC(_mcount)
 	/* Taken from output of objdump from lib64/glibc */
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 290559df1e8b..3cb46a3b1de7 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -66,6 +66,7 @@
 #include <asm/udbg.h>
 #include <asm/smp.h>
 #include <asm/debug.h>
+#include <asm/livepatch.h>
 
 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
@@ -607,10 +608,12 @@ void irq_ctx_init(void)
 		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
 		tp = softirq_ctx[i];
 		tp->cpu = i;
+		klp_init_thread_info(tp);
 
 		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
 		tp = hardirq_ctx[i];
 		tp->cpu = i;
+		klp_init_thread_info(tp);
 	}
 }
 
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index b2eb4686bd8f..671fd5122406 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -284,7 +284,7 @@ void machine_check_print_event_info(struct machine_check_event *evt)
 			printk("%s    Effective address: %016llx\n",
 			       level, evt->u.ue_error.effective_address);
 		if (evt->u.ue_error.physical_address_provided)
-			printk("%s      Physial address: %016llx\n",
+			printk("%s      Physical address: %016llx\n",
 			       level, evt->u.ue_error.physical_address);
 		break;
 	case MCE_ERROR_TYPE_SLB:
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 612df305886b..2a9280b945e0 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -55,6 +55,8 @@
 #include <asm/firmware.h>
 #endif
 #include <asm/code-patching.h>
+#include <asm/livepatch.h>
+
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
 
@@ -983,7 +985,7 @@ void restore_tm_state(struct pt_regs *regs)
 static inline void save_sprs(struct thread_struct *t)
 {
 #ifdef CONFIG_ALTIVEC
-	if (cpu_has_feature(cpu_has_feature(CPU_FTR_ALTIVEC)))
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
 		t->vrsave = mfspr(SPRN_VRSAVE);
 #endif
 #ifdef CONFIG_PPC_BOOK3S_64
@@ -1400,13 +1402,15 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	extern void ret_from_kernel_thread(void);
 	void (*f)(void);
 	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
+	struct thread_info *ti = task_thread_info(p);
+
+	klp_init_thread_info(ti);
 
 	/* Copy registers */
 	sp -= sizeof(struct pt_regs);
 	childregs = (struct pt_regs *) sp;
 	if (unlikely(p->flags & PF_KTHREAD)) {
 		/* kernel thread */
-		struct thread_info *ti = (void *)task_stack_page(p);
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childregs->gpr[1] = sp + sizeof(struct pt_regs);
 		/* function */
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 7030b035905d..a15fe1d4e84a 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -148,23 +148,25 @@ static struct ibm_pa_feature {
 	unsigned long	cpu_features;	/* CPU_FTR_xxx bit */
 	unsigned long	mmu_features;	/* MMU_FTR_xxx bit */
 	unsigned int	cpu_user_ftrs;	/* PPC_FEATURE_xxx bit */
+	unsigned int	cpu_user_ftrs2;	/* PPC_FEATURE2_xxx bit */
 	unsigned char	pabyte;		/* byte number in ibm,pa-features */
 	unsigned char	pabit;		/* bit number (big-endian) */
 	unsigned char	invert;		/* if 1, pa bit set => clear feature */
 } ibm_pa_features[] __initdata = {
-	{0, 0, PPC_FEATURE_HAS_MMU,	0, 0, 0},
-	{0, 0, PPC_FEATURE_HAS_FPU,	0, 1, 0},
-	{CPU_FTR_CTRL, 0, 0,		0, 3, 0},
-	{CPU_FTR_NOEXECUTE, 0, 0,	0, 6, 0},
-	{CPU_FTR_NODSISRALIGN, 0, 0,	1, 1, 1},
-	{0, MMU_FTR_CI_LARGE_PAGE, 0,	1, 2, 0},
-	{CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
+	{0, 0, PPC_FEATURE_HAS_MMU, 0,		0, 0, 0},
+	{0, 0, PPC_FEATURE_HAS_FPU, 0,		0, 1, 0},
+	{CPU_FTR_CTRL, 0, 0, 0,			0, 3, 0},
+	{CPU_FTR_NOEXECUTE, 0, 0, 0,		0, 6, 0},
+	{CPU_FTR_NODSISRALIGN, 0, 0, 0,		1, 1, 1},
+	{0, MMU_FTR_CI_LARGE_PAGE, 0, 0,	1, 2, 0},
+	{CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0},
 	/*
-	 * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n),
-	 * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP
-	 * which is 0 if the kernel doesn't support TM.
+	 * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
+	 * we don't want to turn on TM here, so we use the *_COMP versions
+	 * which are 0 if the kernel doesn't support TM.
 	 */
-	{CPU_FTR_TM_COMP, 0, 0,		22, 0, 0},
+	{CPU_FTR_TM_COMP, 0, 0,
+	 PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
 };
 
 static void __init scan_features(unsigned long node, const unsigned char *ftrs,
@@ -195,10 +197,12 @@ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
 		if (bit ^ fp->invert) {
 			cur_cpu_spec->cpu_features |= fp->cpu_features;
 			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
+			cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
 			cur_cpu_spec->mmu_features |= fp->mmu_features;
 		} else {
 			cur_cpu_spec->cpu_features &= ~fp->cpu_features;
 			cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
+			cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
 			cur_cpu_spec->mmu_features &= ~fp->mmu_features;
 		}
 	}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index f98be8383a39..96d4a2b23d0f 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -69,6 +69,7 @@
 #include <asm/kvm_ppc.h>
 #include <asm/hugetlb.h>
 #include <asm/epapr_hcalls.h>
+#include <asm/livepatch.h>
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -667,16 +668,16 @@ static void __init emergency_stack_init(void)
 	limit = min(safe_stack_limit(), ppc64_rma_size);
 
 	for_each_possible_cpu(i) {
-		unsigned long sp;
-		sp  = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
-		sp += THREAD_SIZE;
-		paca[i].emergency_sp = __va(sp);
+		struct thread_info *ti;
+		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
+		klp_init_thread_info(ti);
+		paca[i].emergency_sp = (void *)ti + THREAD_SIZE;
 
 #ifdef CONFIG_PPC_BOOK3S_64
 		/* emergency stack for machine check exception handling. */
-		sp  = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
-		sp += THREAD_SIZE;
-		paca[i].mc_emergency_sp = __va(sp);
+		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
+		klp_init_thread_info(ti);
+		paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
 #endif
 	}
 }
@@ -700,6 +701,8 @@ void __init setup_arch(char **cmdline_p)
 	if (ppc_md.panic)
 		setup_panic();
 
+	klp_init_thread_info(&init_thread_info);
+
 	init_mm.start_code = (unsigned long)_stext;
 	init_mm.end_code = (unsigned long) _etext;
 	init_mm.end_data = (unsigned long) _edata;
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8cac1eb41466..55c924b65f71 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -565,7 +565,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 		smp_ops->give_timebase();
 
 	/* Wait until cpu puts itself in the online & active maps */
-	while (!cpu_online(cpu) || !cpu_active(cpu))
+	while (!cpu_online(cpu))
 		cpu_relax();
 
 	return 0;
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 6dd272b6196f..d991b9e80dbb 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -413,13 +413,13 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
 {
 	struct hugepd_freelist **batchp;
 
-	batchp = this_cpu_ptr(&hugepd_freelist_cur);
+	batchp = &get_cpu_var(hugepd_freelist_cur);
 
 	if (atomic_read(&tlb->mm->mm_users) < 2 ||
 	    cpumask_equal(mm_cpumask(tlb->mm),
 			  cpumask_of(smp_processor_id()))) {
 		kmem_cache_free(hugepte_cache, hugepte);
-                put_cpu_var(hugepd_freelist_cur);
+		put_cpu_var(hugepd_freelist_cur);
 		return;
 	}
 
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index e04a6752b399..22d9015c1acc 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -247,7 +247,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
 	sp = regs->gpr[1];
 	perf_callchain_store(entry, next_ip);
 
-	while (entry->nr < PERF_MAX_STACK_DEPTH) {
+	while (entry->nr < sysctl_perf_event_max_stack) {
 		fp = (unsigned long __user *) sp;
 		if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
 			return;
@@ -453,7 +453,7 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
 	sp = regs->gpr[1];
 	perf_callchain_store(entry, next_ip);
 
-	while (entry->nr < PERF_MAX_STACK_DEPTH) {
+	while (entry->nr < sysctl_perf_event_max_stack) {
 		fp = (unsigned int __user *) (unsigned long) sp;
 		if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp))
 			return;
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index dfa863876778..5be15cff758d 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -238,7 +238,7 @@ const struct file_operations spufs_context_fops = {
 	.release	= spufs_dir_close,
 	.llseek		= dcache_dir_lseek,
 	.read		= generic_read_dir,
-	.iterate	= dcache_readdir,
+	.iterate_shared	= dcache_readdir,
 	.fsync		= noop_fsync,
 };
 EXPORT_SYMBOL_GPL(spufs_context_fops);
@@ -732,8 +732,8 @@ spufs_fill_super(struct super_block *sb, void *data, int silent)
 		return -ENOMEM;
 
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = SPUFS_MAGIC;
 	sb->s_op = &s_ops;
 	sb->s_fs_info = info;
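Note on the livepatch changes above: livepatch_handler in entry_64.S, together with klp_init_thread_info() in livepatch.h, keeps a small per-thread "mini stack" just above the thread_info and the STACK_END_MAGIC word at the base of the kernel stack. The C model below is only an illustrative sketch of that bookkeeping, not kernel code: the struct and function names (thread_info_model, model_init, model_push, model_pop) are invented for the example, and only livepatch_sp, the 3 x 8-byte frame layout, and STACK_END_MAGIC come from the patch. Each trip through a patched function pushes the caller's TOC, the real LR, and a stack-end marker; the return path verifies the marker and pops the frame.

#include <assert.h>

#define STACK_END_MAGIC 0x57AC6E9DUL   /* value from include/linux/magic.h */

/* Cut-down stand-in for struct thread_info; only livepatch_sp is real. */
struct thread_info_model {
	unsigned long *livepatch_sp;   /* top of the live patch "mini stack" */
};

/* Mirrors klp_init_thread_info(): the mini stack starts just past the
 * thread_info and the STACK_END_MAGIC word at the base of the stack. */
static void model_init(struct thread_info_model *ti, unsigned long *stack_base)
{
	ti->livepatch_sp = stack_base;  /* (ti + 1) + 1 in the real layout */
}

/* Mirrors the entry half of livepatch_handler: bump livepatch_sp by
 * 3 x 8 bytes and stash TOC, LR and a stack-end marker below it. */
static void model_push(struct thread_info_model *ti, unsigned long toc, unsigned long lr)
{
	unsigned long *sp = ti->livepatch_sp + 3;

	ti->livepatch_sp = sp;
	sp[-3] = toc;              /* std r2,  -24(r1) */
	sp[-2] = lr;               /* std r12, -16(r1) */
	sp[-1] = STACK_END_MAGIC;  /* std r12,  -8(r1) */
}

/* Mirrors the return half: verify the marker, restore TOC/LR, pop the frame. */
static void model_pop(struct thread_info_model *ti, unsigned long *toc, unsigned long *lr)
{
	unsigned long *sp = ti->livepatch_sp;

	assert(sp[-1] == STACK_END_MAGIC);  /* the asm uses tdne + EMIT_BUG_ENTRY */
	*lr = sp[-2];
	*toc = sp[-3];
	ti->livepatch_sp = sp - 3;
}

int main(void)
{
	unsigned long stack_base[64];   /* stands in for the area above thread_info */
	struct thread_info_model ti;
	unsigned long toc, lr;

	model_init(&ti, stack_base);
	model_push(&ti, 0x1000, 0x2000);  /* enter a patched function */
	model_pop(&ti, &toc, &lr);        /* return to the original caller */
	assert(toc == 0x1000 && lr == 0x2000);
	return 0;
}

Because this frame lives off to the side of the real kernel stack, livepatch_handler can run without allocating a stack frame of its own, which is exactly the constraint spelled out in the comment above it.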