31 files changed, 219 insertions, 118 deletions
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index e3ea151c9597..b7212b619c52 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -164,7 +164,7 @@ drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
 all: zImage

 # With make 3.82 we cannot mix normal and wildcard targets
-BOOT_TARGETS1 := zImage zImage.initrd uImaged
+BOOT_TARGETS1 := zImage zImage.initrd uImage
 BOOT_TARGETS2 := zImage% dtbImage% treeImage.% cuImage.% simpleImage.%

 PHONY += $(BOOT_TARGETS1) $(BOOT_TARGETS2)
diff --git a/arch/powerpc/boot/dts/canyonlands.dts b/arch/powerpc/boot/dts/canyonlands.dts
index 5806ef0b860b..a30370396250 100644
--- a/arch/powerpc/boot/dts/canyonlands.dts
+++ b/arch/powerpc/boot/dts/canyonlands.dts
@@ -163,6 +163,14 @@
 			interrupts = <0x1e 4>;
 		};

+		SATA0: sata@bffd1000 {
+			compatible = "amcc,sata-460ex";
+			reg = <4 0xbffd1000 0x800 4 0xbffd0800 0x400>;
+			interrupt-parent = <&UIC3>;
+			interrupts = <0x0 0x4	/* SATA */
+				      0x5 0x4>;	/* AHBDMA */
+		};
+
 		POB0: opb {
 			compatible = "ibm,opb-460ex", "ibm,opb";
 			#address-cells = <1>;
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 0e398cfee2c8..acac35d5b382 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -433,7 +433,7 @@ typedef struct {
  * with. However gcc is not clever enough to compute the
  * modulus (2^n-1) without a second multiply.
  */
-#define vsid_scrample(protovsid, size) \
+#define vsid_scramble(protovsid, size) \
 	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

 #else /* 1 */
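The comment in the hunk above notes that the scramble's modulus is 2^n - 1, which is what lets the hand-written assembly version fold the high bits back onto the low bits instead of doing a real division. A standalone sketch of that identity (not kernel code; the width and multiplier below are illustrative placeholders, not the kernel's constants):

/*
 * fold_mod(): computes x % (2^VSID_BITS - 1) by repeated folding, since
 * 2^VSID_BITS is congruent to 1 modulo the Mersenne-style modulus.
 */
#include <assert.h>
#include <stdint.h>

#define VSID_BITS	36
#define VSID_MODULUS	((1ULL << VSID_BITS) - 1)
#define VSID_MULTIPLIER	268435399ULL	/* illustrative odd constant, not the kernel's */

static uint64_t fold_mod(uint64_t x)
{
	/* add the high bits back onto the low bits until the value fits */
	while (x > VSID_MODULUS)
		x = (x >> VSID_BITS) + (x & VSID_MODULUS);
	return x == VSID_MODULUS ? 0 : x;
}

int main(void)
{
	for (uint64_t proto = 1; proto < 100000; proto += 7) {
		uint64_t product = proto * VSID_MULTIPLIER;
		/* folding gives the same result as the % the macro spells out */
		assert(fold_mod(product) == product % VSID_MODULUS);
	}
	return 0;
}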
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index d8be016d2ede..ff0005eec7dd 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -951,7 +951,14 @@
 #ifdef CONFIG_PPC64

 extern void ppc64_runlatch_on(void);
-extern void ppc64_runlatch_off(void);
+extern void __ppc64_runlatch_off(void);
+
+#define ppc64_runlatch_off()					\
+	do {							\
+		if (cpu_has_feature(CPU_FTR_CTRL) &&		\
+				test_thread_flag(TIF_RUNLATCH))	\
+			__ppc64_runlatch_off();			\
+	} while (0)

 extern unsigned long scom970_read(unsigned int address);
 extern void scom970_write(unsigned int address, unsigned long value);
diff --git a/arch/powerpc/include/asm/rwsem.h b/arch/powerpc/include/asm/rwsem.h
index 24cd9281ec37..8447d89fbe72 100644
--- a/arch/powerpc/include/asm/rwsem.h
+++ b/arch/powerpc/include/asm/rwsem.h
@@ -21,15 +21,20 @@
 /*
  * the semaphore definition
  */
-struct rw_semaphore {
-	/* XXX this should be able to be an atomic_t -- paulus */
-	signed int		count;
-#define RWSEM_UNLOCKED_VALUE		0x00000000
-#define RWSEM_ACTIVE_BIAS		0x00000001
-#define RWSEM_ACTIVE_MASK		0x0000ffff
-#define RWSEM_WAITING_BIAS		(-0x00010000)
+#ifdef CONFIG_PPC64
+# define RWSEM_ACTIVE_MASK		0xffffffffL
+#else
+# define RWSEM_ACTIVE_MASK		0x0000ffffL
+#endif
+
+#define RWSEM_UNLOCKED_VALUE		0x00000000L
+#define RWSEM_ACTIVE_BIAS		0x00000001L
+#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
 #define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+
+struct rw_semaphore {
+	long			count;
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -43,9 +48,13 @@ struct rw_semaphore {
 # define __RWSEM_DEP_MAP_INIT(lockname)
 #endif

-#define __RWSEM_INITIALIZER(name) \
-	{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
-	  LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
+#define __RWSEM_INITIALIZER(name)			\
+{							\
+	RWSEM_UNLOCKED_VALUE,				\
+	__SPIN_LOCK_UNLOCKED((name).wait_lock),		\
+	LIST_HEAD_INIT((name).wait_list)		\
+	__RWSEM_DEP_MAP_INIT(name)			\
+}

 #define DECLARE_RWSEM(name)	\
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
@@ -70,13 +79,13 @@ extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
  */
 static inline void __down_read(struct rw_semaphore *sem)
 {
-	if (unlikely(atomic_inc_return((atomic_t *)(&sem->count)) <= 0))
+	if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0))
 		rwsem_down_read_failed(sem);
 }

 static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
-	int tmp;
+	long tmp;

 	while ((tmp = sem->count) >= 0) {
 		if (tmp == cmpxchg(&sem->count, tmp,
@@ -92,10 +101,10 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
  */
 static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
-	int tmp;
+	long tmp;

-	tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
-				(atomic_t *)(&sem->count));
+	tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
+				     (atomic_long_t *)&sem->count);
 	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
 		rwsem_down_write_failed(sem);
 }
@@ -107,7 +116,7 @@ static inline void __down_write(struct rw_semaphore *sem)

 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
-	int tmp;
+	long tmp;

 	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
 		      RWSEM_ACTIVE_WRITE_BIAS);
@@ -119,9 +128,9 @@
  */
 static inline void __up_read(struct rw_semaphore *sem)
 {
-	int tmp;
+	long tmp;

-	tmp = atomic_dec_return((atomic_t *)(&sem->count));
+	tmp = atomic_long_dec_return((atomic_long_t *)&sem->count);
 	if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
 		rwsem_wake(sem);
 }
@@ -131,17 +140,17 @@
  */
 static inline void __up_write(struct rw_semaphore *sem)
 {
-	if (unlikely(atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
-			      (atomic_t *)(&sem->count)) < 0))
+	if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
+				 (atomic_long_t *)&sem->count) < 0))
 		rwsem_wake(sem);
 }

 /*
  * implement atomic add functionality
  */
-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
+static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
 {
-	atomic_add(delta, (atomic_t *)(&sem->count));
+	atomic_long_add(delta, (atomic_long_t *)&sem->count);
 }

 /*
@@ -149,9 +158,10 @@ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
  */
 static inline void __downgrade_write(struct rw_semaphore *sem)
 {
-	int tmp;
+	long tmp;

-	tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
+	tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS,
+				     (atomic_long_t *)&sem->count);
 	if (tmp < 0)
 		rwsem_downgrade_wake(sem);
 }
@@ -159,14 +169,14 @@
 /*
  * implement exchange and add functionality
  */
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
+static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
 {
-	return atomic_add_return(delta, (atomic_t *)(&sem->count));
+	return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
 }

 static inline int rwsem_is_locked(struct rw_semaphore *sem)
 {
-	return (sem->count != 0);
+	return sem->count != 0;
 }

 #endif	/* __KERNEL__ */
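The rwsem counter is widened to long above so that the active count no longer lives in 16 bits on ppc64. A standalone user-space check of the bias arithmetic for the 64-bit layout (not part of the patch; it assumes an LP64 host so that long is 64 bits, matching ppc64):

/*
 * Checks the constant relations the patched asm/rwsem.h relies on for the
 * CONFIG_PPC64 case: a lone writer drives the count negative with an
 * active part of exactly 1, and readers keep the count positive.
 */
#include <assert.h>
#include <stdio.h>

#define RWSEM_ACTIVE_MASK	0xffffffffL	/* CONFIG_PPC64 value */
#define RWSEM_UNLOCKED_VALUE	0x00000000L
#define RWSEM_ACTIVE_BIAS	0x00000001L
#define RWSEM_WAITING_BIAS	(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
	long count = RWSEM_UNLOCKED_VALUE;

	/* one writer: count goes negative, active part is exactly 1 */
	count += RWSEM_ACTIVE_WRITE_BIAS;
	assert(count < 0 && (count & RWSEM_ACTIVE_MASK) == 1);

	/* writer releases: back to the unlocked value */
	count -= RWSEM_ACTIVE_WRITE_BIAS;
	assert(count == RWSEM_UNLOCKED_VALUE);

	/* readers only: one unit per reader, count stays positive */
	count += 3 * RWSEM_ACTIVE_READ_BIAS;
	assert(count == 3);

	printf("bias arithmetic holds for the 64-bit layout\n");
	return 0;
}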
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index a5ee345b6a5c..3d212669a130 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -326,3 +326,6 @@ SYSCALL_SPU(perf_event_open)
 COMPAT_SYS_SPU(preadv)
 COMPAT_SYS_SPU(pwritev)
 COMPAT_SYS(rt_tgsigqueueinfo)
+SYSCALL(fanotify_init)
+COMPAT_SYS(fanotify_mark)
+SYSCALL_SPU(prlimit64)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index f0a10266e7f7..597e6f9d094a 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -345,10 +345,13 @@
 #define __NR_preadv		320
 #define __NR_pwritev		321
 #define __NR_rt_tgsigqueueinfo	322
+#define __NR_fanotify_init	323
+#define __NR_fanotify_mark	324
+#define __NR_prlimit64		325

 #ifdef __KERNEL__

-#define __NR_syscalls		323
+#define __NR_syscalls		326

 #define __NR__exit __NR_exit
 #define NR_syscalls	__NR_syscalls
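With the three syscall numbers wired up, user space can reach prlimit64 by number even before a libc wrapper exists. A minimal user-space sketch (not part of the patch; struct krlimit64 is a local mirror of the kernel's rlimit64 layout, and the fallback define uses the number 325 added to unistd.h above):

#include <stdint.h>
#include <stdio.h>
#include <sys/resource.h>	/* RLIMIT_NOFILE */
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_prlimit64
#define __NR_prlimit64 325	/* powerpc, per the unistd.h hunk above */
#endif

struct krlimit64 {		/* mirrors the kernel's struct rlimit64 */
	uint64_t rlim_cur;
	uint64_t rlim_max;
};

int main(void)
{
	struct krlimit64 old = { 0, 0 };

	/* pid 0 means the calling process; NULL new limit just reads the old one */
	long rc = syscall(__NR_prlimit64, 0, RLIMIT_NOFILE, NULL, &old);
	if (rc < 0) {
		perror("prlimit64");
		return 1;
	}
	printf("RLIMIT_NOFILE: cur=%llu max=%llu\n",
	       (unsigned long long)old.rlim_cur,
	       (unsigned long long)old.rlim_max);
	return 0;
}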
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 65e2b4e10f97..1f9123f412ec 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1826,7 +1826,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.cpu_features		= CPU_FTRS_47X,
 		.cpu_user_features	= COMMON_USER_BOOKE |
			PPC_FEATURE_HAS_FPU,
-		.cpu_user_features	= COMMON_USER_BOOKE,
 		.mmu_features		= MMU_FTR_TYPE_47x | MMU_FTR_USE_TLBIVAX_BCAST |
			MMU_FTR_LOCK_BCAST_INVAL,
 		.icache_bsize		= 32,
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 417f7b05a9ce..4457382f8667 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -402,6 +402,18 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
 	 */
 	hard_irq_disable();

+	/*
+	 * Make a note of crashing cpu. Will be used in machine_kexec
+	 * such that another IPI will not be sent.
+	 */
+	crashing_cpu = smp_processor_id();
+	crash_save_cpu(regs, crashing_cpu);
+	crash_kexec_prepare_cpus(crashing_cpu);
+	cpu_set(crashing_cpu, cpus_in_crash);
+#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
+	crash_kexec_wait_realmode(crashing_cpu);
+#endif
+
 	for_each_irq(i) {
 		struct irq_desc *desc = irq_to_desc(i);

@@ -438,18 +450,8 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
 	crash_shutdown_cpu = -1;
 	__debugger_fault_handler = old_handler;

-	/*
-	 * Make a note of crashing cpu. Will be used in machine_kexec
-	 * such that another IPI will not be sent.
-	 */
-	crashing_cpu = smp_processor_id();
-	crash_save_cpu(regs, crashing_cpu);
-	crash_kexec_prepare_cpus(crashing_cpu);
-	cpu_set(crashing_cpu, cpus_in_crash);
 	crash_kexec_stop_spus();
-#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
-	crash_kexec_wait_realmode(crashing_cpu);
-#endif
+
 	if (ppc_md.kexec_cpu_down)
 		ppc_md.kexec_cpu_down(1, 0);
 }
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 5ab484ef06a7..562305b40a8e 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -113,6 +113,10 @@ _ENTRY(_start);
 	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
 	stw	r6, 0(r5)

+	/* Clear the Machine Check Syndrome Register */
+	li	r0,0
+	mtspr	SPRN_MCSR,r0
+
 	/* Let's move on */
 	lis	r4,start_kernel@h
 	ori	r4,r4,start_kernel@l
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 844a44b64472..4d6681dce816 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -572,9 +572,6 @@ __secondary_start:
 	/* Set thread priority to MEDIUM */
 	HMT_MEDIUM
-	/* Do early setup for that CPU (stab, slb, hash table pointer) */
-	bl	.early_setup_secondary
-
 	/* Initialize the kernel stack.  Just a repeat for iSeries.
 	 */
 	LOAD_REG_ADDR(r3, current_set)
 	sldi	r28,r24,3		/* get current_set[cpu#] */
@@ -582,6 +579,9 @@ __secondary_start:
 	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
 	std	r1,PACAKSAVE(r13)

+	/* Do early setup for that CPU (stab, slb, hash table pointer) */
+	bl	.early_setup_secondary
+
 	/* Clear backchain so we get nice backtraces */
 	li	r7,0
 	mtlr	r7
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 049dda60e475..39a2baa6ad58 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -94,9 +94,9 @@ void cpu_idle(void)
 		HMT_medium();
 		ppc64_runlatch_on();
 		tick_nohz_restart_sched_tick();
+		preempt_enable_no_resched();
 		if (cpu_should_die())
 			cpu_die();
-		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
 	}
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index d3ce67cf03be..4a65386995d7 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -67,6 +67,7 @@
 #include <asm/machdep.h>
 #include <asm/udbg.h>
 #include <asm/dbell.h>
+#include <asm/smp.h>

 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
@@ -446,22 +447,23 @@ struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;
 void exc_lvl_ctx_init(void)
 {
 	struct thread_info *tp;
-	int i;
+	int i, hw_cpu;

 	for_each_possible_cpu(i) {
-		memset((void *)critirq_ctx[i], 0, THREAD_SIZE);
-		tp = critirq_ctx[i];
+		hw_cpu = get_hard_smp_processor_id(i);
+		memset((void *)critirq_ctx[hw_cpu], 0, THREAD_SIZE);
+		tp = critirq_ctx[hw_cpu];
 		tp->cpu = i;
 		tp->preempt_count = 0;

 #ifdef CONFIG_BOOKE
-		memset((void *)dbgirq_ctx[i], 0, THREAD_SIZE);
-		tp = dbgirq_ctx[i];
+		memset((void *)dbgirq_ctx[hw_cpu], 0, THREAD_SIZE);
+		tp = dbgirq_ctx[hw_cpu];
 		tp->cpu = i;
 		tp->preempt_count = 0;

-		memset((void *)mcheckirq_ctx[i], 0, THREAD_SIZE);
-		tp = mcheckirq_ctx[i];
+		memset((void *)mcheckirq_ctx[hw_cpu], 0, THREAD_SIZE);
+		tp = mcheckirq_ctx[hw_cpu];
 		tp->cpu = i;
 		tp->preempt_count = HARDIRQ_OFFSET;
 #endif
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index 6ddb795f83e8..e751506323b4 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -336,7 +336,7 @@ static void __devinit __of_scan_bus(struct device_node *node,
 		if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
 		    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
 			struct device_node *child = pci_device_to_OF_node(dev);
-			if (dev)
+			if (child)
 				of_scan_pci_bridge(child, dev);
 		}
 	}
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 91356ffda2ca..b1c648a36b03 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -728,7 +728,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	p->thread.regs = childregs;
 	if (clone_flags & CLONE_SETTLS) {
 #ifdef CONFIG_PPC64
-		if (!test_thread_flag(TIF_32BIT))
+		if (!is_32bit_task())
 			childregs->gpr[13] = childregs->gpr[6];
 		else
 #endif
@@ -823,7 +823,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 	regs->nip = start;
 	regs->msr = MSR_USER;
 #else
-	if (!test_thread_flag(TIF_32BIT)) {
+	if (!is_32bit_task()) {
 		unsigned long entry, toc;

 		/* start is a relocated pointer to the function descriptor for
@@ -995,7 +995,7 @@ int sys_clone(unsigned long clone_flags, unsigned long usp,
 	if (usp == 0)
 		usp = regs->gpr[1];	/* stack pointer for child */
 #ifdef CONFIG_PPC64
-	if (test_thread_flag(TIF_32BIT)) {
+	if (is_32bit_task()) {
 		parent_tidp = TRUNC_PTR(parent_tidp);
 		child_tidp = TRUNC_PTR(child_tidp);
 	}
@@ -1199,19 +1199,17 @@ void ppc64_runlatch_on(void)
 	}
 }

-void ppc64_runlatch_off(void)
+void __ppc64_runlatch_off(void)
 {
 	unsigned long ctrl;

-	if (cpu_has_feature(CPU_FTR_CTRL) && test_thread_flag(TIF_RUNLATCH)) {
-		HMT_medium();
+	HMT_medium();

-		clear_thread_flag(TIF_RUNLATCH);
+	clear_thread_flag(TIF_RUNLATCH);

-		ctrl = mfspr(SPRN_CTRLF);
-		ctrl &= ~CTRL_RUNLATCH;
-		mtspr(SPRN_CTRLT, ctrl);
-	}
+	ctrl = mfspr(SPRN_CTRLF);
+	ctrl &= ~CTRL_RUNLATCH;
+	mtspr(SPRN_CTRLT, ctrl);
 }

 #endif
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index a10ffc85ada7..93666f9cabf1 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -258,17 +258,18 @@ static void __init irqstack_early_init(void)
 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
 static void __init exc_lvl_early_init(void)
 {
-	unsigned int i;
+	unsigned int i, hw_cpu;

 	/* interrupt stacks must be in lowmem, we get that for free on ppc32
 	 * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
 	for_each_possible_cpu(i) {
-		critirq_ctx[i] = (struct thread_info *)
+		hw_cpu = get_hard_smp_processor_id(i);
+		critirq_ctx[hw_cpu] = (struct thread_info *)
 			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 #ifdef CONFIG_BOOKE
-		dbgirq_ctx[i] = (struct thread_info *)
+		dbgirq_ctx[hw_cpu] = (struct thread_info *)
 			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
-		mcheckirq_ctx[i] = (struct thread_info *)
+		mcheckirq_ctx[hw_cpu] = (struct thread_info *)
 			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 #endif
 	}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 1bee4b68fa45..e72690ec9b87 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -95,7 +95,7 @@ int ucache_bsize;

 #ifdef CONFIG_SMP

-static int smt_enabled_cmdline;
+static char *smt_enabled_cmdline;

 /* Look for ibm,smt-enabled OF option */
 static void check_smt_enabled(void)
@@ -103,37 +103,46 @@ static void check_smt_enabled(void)
 	struct device_node *dn;
 	const char *smt_option;

-	/* Allow the command line to overrule the OF option */
-	if (smt_enabled_cmdline)
-		return;
-
-	dn = of_find_node_by_path("/options");
-
-	if (dn) {
-		smt_option = of_get_property(dn, "ibm,smt-enabled", NULL);
+	/* Default to enabling all threads */
+	smt_enabled_at_boot = threads_per_core;

-		if (smt_option) {
-			if (!strcmp(smt_option, "on"))
-				smt_enabled_at_boot = 1;
-			else if (!strcmp(smt_option, "off"))
-				smt_enabled_at_boot = 0;
-		}
-	}
+	/* Allow the command line to overrule the OF option */
+	if (smt_enabled_cmdline) {
+		if (!strcmp(smt_enabled_cmdline, "on"))
+			smt_enabled_at_boot = threads_per_core;
+		else if (!strcmp(smt_enabled_cmdline, "off"))
+			smt_enabled_at_boot = 0;
+		else {
+			long smt;
+			int rc;
+
+			rc = strict_strtol(smt_enabled_cmdline, 10, &smt);
+			if (!rc)
+				smt_enabled_at_boot =
+					min(threads_per_core, (int)smt);
+		}
+	} else {
+		dn = of_find_node_by_path("/options");
+		if (dn) {
+			smt_option = of_get_property(dn, "ibm,smt-enabled",
+						     NULL);
+
+			if (smt_option) {
+				if (!strcmp(smt_option, "on"))
+					smt_enabled_at_boot = threads_per_core;
+				else if (!strcmp(smt_option, "off"))
+					smt_enabled_at_boot = 0;
+			}
+
+			of_node_put(dn);
+		}
+	}
 }

 /* Look for smt-enabled= cmdline option */
 static int __init early_smt_enabled(char *p)
 {
-	smt_enabled_cmdline = 1;
-
-	if (!p)
-		return 0;
-
-	if (!strcmp(p, "on") || !strcmp(p, "1"))
-		smt_enabled_at_boot = 1;
-	else if (!strcmp(p, "off") || !strcmp(p, "0"))
-		smt_enabled_at_boot = 0;
-
+	smt_enabled_cmdline = p;
 	return 0;
 }
 early_param("smt-enabled", early_smt_enabled);
@@ -380,8 +389,8 @@ void __init setup_system(void)
 	 */
 	xmon_setup();

-	check_smt_enabled();
 	smp_setup_cpu_maps();
+	check_smt_enabled();

 #ifdef CONFIG_SMP
 	/* Release secondary cpus out of their spinloops at 0x60 now that
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index a61b3ddd7bb3..0008bc58e826 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -427,11 +427,11 @@ int __cpuinit __cpu_up(unsigned int cpu)
 #endif

 	if (!cpu_callin_map[cpu]) {
-		printk("Processor %u is stuck.\n", cpu);
+		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
 		return -ENOENT;
 	}

-	printk("Processor %u found.\n", cpu);
+	DBG("Processor %u found.\n", cpu);

 	if (smp_ops->give_timebase)
 		smp_ops->give_timebase();
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index 20fd701a686a..b1b6043a56c4 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -616,3 +616,11 @@ asmlinkage long compat_sys_sync_file_range2(int fd, unsigned int flags,

 	return sys_sync_file_range(fd, offset, nbytes, flags);
 }
+
+asmlinkage long compat_sys_fanotify_mark(int fanotify_fd, unsigned int flags,
+					 unsigned mask_hi, unsigned mask_lo,
+					 int dfd, const char __user *pathname)
+{
+	u64 mask = ((u64)mask_hi << 32) | mask_lo;
+	return sys_fanotify_mark(fanotify_fd, flags, mask, dfd, pathname);
+}
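The compat wrapper above exists because a 32-bit caller passes fanotify_mark's 64-bit mask as two register-sized halves, which the kernel then reassembles. A standalone illustration of that reassembly (not kernel code):

#include <assert.h>
#include <stdint.h>

/* same expression as the compat_sys_fanotify_mark() wrapper above */
static uint64_t reassemble(unsigned int mask_hi, unsigned int mask_lo)
{
	return ((uint64_t)mask_hi << 32) | mask_lo;
}

int main(void)
{
	uint64_t mask = 0x0000000100000002ULL;	/* arbitrary 64-bit mask */
	unsigned int hi = mask >> 32;		/* what lands in the first register */
	unsigned int lo = (uint32_t)mask;	/* what lands in the second register */

	assert(reassemble(hi, lo) == mask);
	return 0;
}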
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 00b9436f7652..fa3469ddaef8 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -1059,7 +1059,7 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
 	if (!dma_window)
 		return NULL;

-	tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
+	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
 	if (tbl == NULL)
 		return NULL;

@@ -1072,6 +1072,7 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
 	tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
 	tbl->it_busno = 0;
 	tbl->it_type = TCE_VB;
+	tbl->it_blocksize = 16;

 	return iommu_init_table(tbl, -1);
 }
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 71f1415e2472..ace85fa74b29 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -79,7 +79,9 @@
 #endif /* CONFIG_PPC_STD_MMU_64 */

 phys_addr_t memstart_addr = ~0;
+EXPORT_SYMBOL_GPL(memstart_addr);
 phys_addr_t kernstart_addr;
+EXPORT_SYMBOL_GPL(kernstart_addr);

 void free_initmem(void)
 {
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
index cfa768203d08..b9d9fed8f36e 100644
--- a/arch/powerpc/mm/tlb_nohash_low.S
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -200,6 +200,7 @@ _GLOBAL(_tlbivax_bcast)
 	rlwimi	r5,r4,0,16,31
 	wrteei	0
 	mtspr	SPRN_MMUCR,r5
+	isync
 	/* tlbivax	0,r3 - use .long to avoid binutils deps */
 	.long 0x7c000624 | (r3 << 11)
 	isync
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index d1663db7810f..81c9208025fa 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -106,8 +106,7 @@ config MMIO_NVRAM

 config MPIC_U3_HT_IRQS
 	bool
-	depends on PPC_MAPLE
-	default y
+	default n

 config MPIC_BROKEN_REGREAD
 	bool
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 58b13ce3847e..26a067122a54 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -477,7 +477,7 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,

 	ioid = cell_iommu_get_ioid(np);

-	window = kmalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
+	window = kzalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
 	BUG_ON(window == NULL);

 	window->offset = offset;
diff --git a/arch/powerpc/platforms/iseries/iommu.c b/arch/powerpc/platforms/iseries/iommu.c
index ce61cea0afb5..d8b76335bd13 100644
--- a/arch/powerpc/platforms/iseries/iommu.c
+++ b/arch/powerpc/platforms/iseries/iommu.c
@@ -184,7 +184,7 @@ static void pci_dma_dev_setup_iseries(struct pci_dev *pdev)

 	BUG_ON(lsn == NULL);

-	tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
+	tbl = kzalloc(sizeof(struct iommu_table), GFP_KERNEL);

 	iommu_table_getparms_iSeries(pdn->busno, *lsn, 0, tbl);

diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
index 39df6ab1735a..df423993f175 100644
--- a/arch/powerpc/platforms/powermac/feature.c
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -2873,12 +2873,11 @@ set_initial_features(void)

 		/* Switch airport off */
 		for_each_node_by_name(np, "radio") {
-			if (np && np->parent == macio_chips[0].of_node) {
+			if (np->parent == macio_chips[0].of_node) {
 				macio_chips[0].flags |= MACIO_FLAG_AIRPORT_ON;
 				core99_airport_enable(np, 0, 0);
 			}
 		}
-		of_node_put(np);
 	}

 	/* On all machines that support sound PM, switch sound off */
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index ab2027cdf893..3bc075c788ef 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -1155,13 +1155,11 @@ void __init pmac_pcibios_after_init(void)
 			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, nd, 0, 0);
 		}
 	}
-	of_node_put(nd);
 	for_each_node_by_name(nd, "ethernet") {
 		if (nd->parent && of_device_is_compatible(nd, "gmac") &&
 		    of_device_is_compatible(nd->parent, "uni-north"))
 			pmac_call_feature(PMAC_FTR_GMAC_ENABLE, nd, 0, 0);
 	}
-	of_node_put(nd);
 }

 void pmac_pci_fixup_cardbus(struct pci_dev* dev)
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 395848e30c52..a77bcaed80af 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -403,7 +403,7 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
 		pci->phb->dma_window_size = 0x8000000ul;
 		pci->phb->dma_window_base_cur = 0x8000000ul;

-		tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
 				   pci->phb->node);

 		iommu_table_setparms(pci->phb, dn, tbl);
@@ -448,7 +448,7 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
 		 pdn->full_name, ppci->iommu_table);

 	if (!ppci->iommu_table) {
-		tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
 				   ppci->phb->node);
 		iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window,
 			bus->number);
@@ -478,7 +478,7 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
 		struct pci_controller *phb = PCI_DN(dn)->phb;

 		pr_debug(" --> first child, no bridge. Allocating iommu table.\n");
-		tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
 				   phb->node);
 		iommu_table_setparms(phb, dn, tbl);
 		PCI_DN(dn)->iommu_table = iommu_init_table(tbl, phb->node);
@@ -544,7 +544,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
 	pci = PCI_DN(pdn);

 	if (!pci->iommu_table) {
-		tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
 				   pci->phb->node);
 		iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window,
 			pci->phb->bus->number);
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 3b1bf61c45be..0317cce877c6 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -182,10 +182,13 @@ static int smp_pSeries_cpu_bootable(unsigned int nr)
 	/* Special case - we inhibit secondary thread startup
 	 * during boot if the user requests it.
 	 */
-	if (system_state < SYSTEM_RUNNING &&
-	    cpu_has_feature(CPU_FTR_SMT) &&
-	    !smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
-		return 0;
+	if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
+		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
+			return 0;
+		if (smt_enabled_at_boot
+		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
+			return 0;
+	}

 	return 1;
 }
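The new bootable test above holds back threads whose index within the core is at or beyond the requested smt-enabled value. A standalone sketch of the gating rule (not kernel code; modelling cpu_thread_in_core() as nr % threads_per_core is an assumption made only for the illustration):

#include <stdio.h>

int main(void)
{
	int threads_per_core = 4;	/* example core geometry */
	int smt_enabled_at_boot = 2;	/* e.g. booted with smt-enabled=2 */

	for (int nr = 0; nr < 8; nr++) {
		int thread = nr % threads_per_core;	/* stand-in for cpu_thread_in_core(nr) */
		int bootable = smt_enabled_at_boot ? (thread < smt_enabled_at_boot)
						   : (thread == 0);
		printf("cpu %d (thread %d): %s\n",
		       nr, thread, bootable ? "boots" : "held back");
	}
	return 0;
}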
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 5b22b07c8f67..93834b0d8272 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -928,8 +928,10 @@ void xics_migrate_irqs_away(void)
 		if (xics_status[0] != hw_cpu)
 			goto unlock;

-		printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n",
-		       virq, cpu);
+		/* This is expected during cpu offline. */
+		if (cpu_online(cpu))
+			printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n",
+			       virq, cpu);

 		/* Reset affinity to all cpus */
 		cpumask_setall(irq_to_desc(virq)->affinity);
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 35bc2737412f..2d17e76066bd 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -45,6 +45,7 @@
 #include <linux/syscalls.h>
 #include <linux/suspend.h>
 #include <linux/cpu.h>
+#include <linux/compat.h>
 #include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/io.h>
@@ -2349,11 +2350,52 @@ static long pmu_unlocked_ioctl(struct file *filp,
 	return ret;
 }

+#ifdef CONFIG_COMPAT
+#define PMU_IOC_GET_BACKLIGHT32	_IOR('B', 1, compat_size_t)
+#define PMU_IOC_SET_BACKLIGHT32	_IOW('B', 2, compat_size_t)
+#define PMU_IOC_GET_MODEL32	_IOR('B', 3, compat_size_t)
+#define PMU_IOC_HAS_ADB32	_IOR('B', 4, compat_size_t)
+#define PMU_IOC_CAN_SLEEP32	_IOR('B', 5, compat_size_t)
+#define PMU_IOC_GRAB_BACKLIGHT32	_IOR('B', 6, compat_size_t)
+
+static long compat_pmu_ioctl (struct file *filp, u_int cmd, u_long arg)
+{
+	switch (cmd) {
+	case PMU_IOC_SLEEP:
+		break;
+	case PMU_IOC_GET_BACKLIGHT32:
+		cmd = PMU_IOC_GET_BACKLIGHT;
+		break;
+	case PMU_IOC_SET_BACKLIGHT32:
+		cmd = PMU_IOC_SET_BACKLIGHT;
+		break;
+	case PMU_IOC_GET_MODEL32:
+		cmd = PMU_IOC_GET_MODEL;
+		break;
+	case PMU_IOC_HAS_ADB32:
+		cmd = PMU_IOC_HAS_ADB;
+		break;
+	case PMU_IOC_CAN_SLEEP32:
+		cmd = PMU_IOC_CAN_SLEEP;
+		break;
+	case PMU_IOC_GRAB_BACKLIGHT32:
+		cmd = PMU_IOC_GRAB_BACKLIGHT;
+		break;
+	default:
+		return -ENOIOCTLCMD;
+	}
+	return pmu_unlocked_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
 static const struct file_operations pmu_device_fops = {
 	.read		= pmu_read,
 	.write		= pmu_write,
 	.poll		= pmu_fpoll,
 	.unlocked_ioctl	= pmu_unlocked_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= compat_pmu_ioctl,
+#endif
 	.open		= pmu_open,
 	.release	= pmu_release,
 };
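The compat_ioctl hook above lets 32-bit processes drive /dev/pmu on a 64-bit kernel; those ioctls previously failed because the command numbers encode sizeof(size_t) and there was no translation path. A user-space sketch of a 32-bit caller that benefits from it (not part of the patch):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/pmu.h>		/* PMU_IOC_GET_MODEL */

int main(void)
{
	int fd = open("/dev/pmu", O_RDWR);
	if (fd < 0) {
		perror("open /dev/pmu");
		return 1;
	}

	/* PMU_IOC_GET_MODEL is _IOR('B', 3, size_t); in a 32-bit process this
	 * is the *32 command number that compat_pmu_ioctl() translates. */
	size_t model = 0;
	if (ioctl(fd, PMU_IOC_GET_MODEL, &model) < 0) {
		perror("PMU_IOC_GET_MODEL");
		close(fd);
		return 1;
	}
	printf("PMU model: %zu\n", model);
	close(fd);
	return 0;
}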