Diffstat (limited to 'arch/mips')
55 files changed, 695 insertions, 286 deletions
diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms index aef6c917b45a..5ce8029f558b 100644 --- a/arch/mips/Kbuild.platforms +++ b/arch/mips/Kbuild.platforms @@ -16,6 +16,7 @@ platforms += lasat platforms += loongson platforms += mipssim platforms += mti-malta +platforms += netlogic platforms += pmc-sierra platforms += pnx833x platforms += pnx8550 diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 884819cd0607..53e3514ba10e 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -191,18 +191,6 @@ endif # include $(srctree)/arch/mips/Kbuild.platforms -# -# NETLOGIC SOC Common (common) -# -cflags-$(CONFIG_NLM_COMMON) += -I$(srctree)/arch/mips/include/asm/mach-netlogic -cflags-$(CONFIG_NLM_COMMON) += -I$(srctree)/arch/mips/include/asm/netlogic - -# -# NETLOGIC XLR/XLS SoC, Simulator and boards -# -core-$(CONFIG_NLM_XLR) += arch/mips/netlogic/xlr/ -load-$(CONFIG_NLM_XLR_BOARD) += 0xffffffff84000000 - cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic drivers-$(CONFIG_PCI) += arch/mips/pci/ diff --git a/arch/mips/ar7/clock.c b/arch/mips/ar7/clock.c index 2ca4ada1c291..2460f9d23f1b 100644 --- a/arch/mips/ar7/clock.c +++ b/arch/mips/ar7/clock.c @@ -443,7 +443,7 @@ struct clk *clk_get(struct device *dev, const char *id) return &vbus_clk; if (!strcmp(id, "cpu")) return &cpu_clk; - if (!strcmp(id, "dsp")); + if (!strcmp(id, "dsp")) return &dsp_clk; if (!strcmp(id, "vbus")) return &vbus_clk; diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c index 7d2fab392327..33ffecf6a6d6 100644 --- a/arch/mips/ar7/platform.c +++ b/arch/mips/ar7/platform.c @@ -229,7 +229,7 @@ static struct resource cpmac_low_res[] = { .name = "irq", .flags = IORESOURCE_IRQ, .start = 27, - .end = 27, + .end = 27, }, }; diff --git a/arch/mips/ar7/prom.c b/arch/mips/ar7/prom.c index 23818d299127..8088c6fdb83e 100644 --- a/arch/mips/ar7/prom.c +++ b/arch/mips/ar7/prom.c @@ -77,7 +77,7 @@ struct psp_env_chunk { u16 csum; u8 len; char data[11]; -} __attribute__ ((packed)); +} __packed; struct psp_var_map_entry { u8 num; diff --git a/arch/mips/include/asm/fixmap.h b/arch/mips/include/asm/fixmap.h index 0b89b83e2055..98bcc98cf29b 100644 --- a/arch/mips/include/asm/fixmap.h +++ b/arch/mips/include/asm/fixmap.h @@ -14,6 +14,7 @@ #define _ASM_FIXMAP_H #include <asm/page.h> +#include <spaces.h> #ifdef CONFIG_HIGHMEM #include <linux/threads.h> #include <asm/kmap_types.h> @@ -67,15 +68,6 @@ enum fixed_addresses { * the start of the fixmap, and leave one page empty * at the top of mem.. 
*/ -#ifdef CONFIG_BCM63XX -#define FIXADDR_TOP ((unsigned long)(long)(int)0xff000000) -#else -#if defined(CONFIG_CPU_TX39XX) || defined(CONFIG_CPU_TX49XX) -#define FIXADDR_TOP ((unsigned long)(long)(int)(0xff000000 - 0x20000)) -#else -#define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000) -#endif -#endif #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) diff --git a/arch/mips/include/asm/gt64120.h b/arch/mips/include/asm/gt64120.h index e64b41093c49..0aa44abc77fe 100644 --- a/arch/mips/include/asm/gt64120.h +++ b/arch/mips/include/asm/gt64120.h @@ -21,8 +21,6 @@ #ifndef _ASM_GT64120_H #define _ASM_GT64120_H -#include <linux/clocksource.h> - #include <asm/addrspace.h> #include <asm/byteorder.h> diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h index 0ec01294b063..2354c870a63a 100644 --- a/arch/mips/include/asm/irq.h +++ b/arch/mips/include/asm/irq.h @@ -18,7 +18,6 @@ static inline void irq_dispose_mapping(unsigned int virq) { - return; } #ifdef CONFIG_I8259 diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h index 85fd27509aac..0ed5230243c9 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h @@ -89,7 +89,6 @@ /* Interrupt Mask register */ #define PERF_IRQMASK_REG 0xc -#define PERF_IRQSTAT_REG 0x10 /* Interrupt Status register */ #define PERF_IRQSTAT_REG 0x10 diff --git a/arch/mips/include/asm/mach-bcm63xx/spaces.h b/arch/mips/include/asm/mach-bcm63xx/spaces.h new file mode 100644 index 000000000000..61e750fb4653 --- /dev/null +++ b/arch/mips/include/asm/mach-bcm63xx/spaces.h @@ -0,0 +1,17 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle + * Copyright (C) 2000, 2002 Maciej W. Rozycki + * Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc. 
+ */ +#ifndef _ASM_BCM63XX_SPACES_H +#define _ASM_BCM63XX_SPACES_H + +#define FIXADDR_TOP ((unsigned long)(long)(int)0xff000000) + +#include <asm/mach-generic/spaces.h> + +#endif /* __ASM_BCM63XX_SPACES_H */ diff --git a/arch/mips/include/asm/mach-generic/dma-coherence.h b/arch/mips/include/asm/mach-generic/dma-coherence.h index 8da98073e952..9c95177f7a7e 100644 --- a/arch/mips/include/asm/mach-generic/dma-coherence.h +++ b/arch/mips/include/asm/mach-generic/dma-coherence.h @@ -49,7 +49,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask) static inline void plat_extra_sync_for_device(struct device *dev) { - return; } static inline int plat_dma_mapping_error(struct device *dev, diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h index c9fa4b14968d..d7a9efd3a5ce 100644 --- a/arch/mips/include/asm/mach-generic/spaces.h +++ b/arch/mips/include/asm/mach-generic/spaces.h @@ -82,4 +82,8 @@ #define PAGE_OFFSET (CAC_BASE + PHYS_OFFSET) #endif +#ifndef FIXADDR_TOP +#define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000) +#endif + #endif /* __ASM_MACH_GENERIC_SPACES_H */ diff --git a/arch/mips/include/asm/mach-ip27/dma-coherence.h b/arch/mips/include/asm/mach-ip27/dma-coherence.h index 016d0989b141..06c441968e6e 100644 --- a/arch/mips/include/asm/mach-ip27/dma-coherence.h +++ b/arch/mips/include/asm/mach-ip27/dma-coherence.h @@ -60,7 +60,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask) static inline void plat_extra_sync_for_device(struct device *dev) { - return; } static inline int plat_dma_mapping_error(struct device *dev, diff --git a/arch/mips/include/asm/mach-jazz/dma-coherence.h b/arch/mips/include/asm/mach-jazz/dma-coherence.h index 302101b54acb..9fc1e9ad7038 100644 --- a/arch/mips/include/asm/mach-jazz/dma-coherence.h +++ b/arch/mips/include/asm/mach-jazz/dma-coherence.h @@ -50,7 +50,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask) static inline void plat_extra_sync_for_device(struct device *dev) { - return; } static inline int plat_dma_mapping_error(struct device *dev, diff --git a/arch/mips/include/asm/mach-loongson/dma-coherence.h b/arch/mips/include/asm/mach-loongson/dma-coherence.h index 981c75f91a7d..e1433055fe98 100644 --- a/arch/mips/include/asm/mach-loongson/dma-coherence.h +++ b/arch/mips/include/asm/mach-loongson/dma-coherence.h @@ -55,7 +55,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask) static inline void plat_extra_sync_for_device(struct device *dev) { - return; } static inline int plat_dma_mapping_error(struct device *dev, diff --git a/arch/mips/include/asm/mach-malta/cpu-feature-overrides.h b/arch/mips/include/asm/mach-malta/cpu-feature-overrides.h index 2848cea42bce..37e3583a9fdd 100644 --- a/arch/mips/include/asm/mach-malta/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-malta/cpu-feature-overrides.h @@ -32,6 +32,7 @@ /* #define cpu_has_vtag_icache ? */ /* #define cpu_has_dc_aliases ? */ /* #define cpu_has_ic_fills_f_dc ? */ +#define cpu_has_clo_clz 1 #define cpu_has_nofpuex 0 /* #define cpu_has_64bits ? */ /* #define cpu_has_64bit_zero_reg ? */ @@ -58,6 +59,7 @@ /* #define cpu_has_vtag_icache ? */ /* #define cpu_has_dc_aliases ? */ /* #define cpu_has_ic_fills_f_dc ? */ +#define cpu_has_clo_clz 1 #define cpu_has_nofpuex 0 /* #define cpu_has_64bits ? */ /* #define cpu_has_64bit_zero_reg ? 
*/ diff --git a/arch/mips/include/asm/mach-mipssim/cpu-feature-overrides.h b/arch/mips/include/asm/mach-mipssim/cpu-feature-overrides.h index 779b02205737..27aaaa5d925e 100644 --- a/arch/mips/include/asm/mach-mipssim/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-mipssim/cpu-feature-overrides.h @@ -31,6 +31,7 @@ /* #define cpu_has_vtag_icache ? */ /* #define cpu_has_dc_aliases ? */ /* #define cpu_has_ic_fills_f_dc ? */ +#define cpu_has_clo_clz 1 #define cpu_has_nofpuex 0 /* #define cpu_has_64bits ? */ /* #define cpu_has_64bit_zero_reg ? */ @@ -56,6 +57,7 @@ /* #define cpu_has_vtag_icache ? */ /* #define cpu_has_dc_aliases ? */ /* #define cpu_has_ic_fills_f_dc ? */ +#define cpu_has_clo_clz 1 #define cpu_has_nofpuex 0 /* #define cpu_has_64bits ? */ /* #define cpu_has_64bit_zero_reg ? */ diff --git a/arch/mips/include/asm/mach-powertv/cpu-feature-overrides.h b/arch/mips/include/asm/mach-powertv/cpu-feature-overrides.h new file mode 100644 index 000000000000..f751e3ec56fb --- /dev/null +++ b/arch/mips/include/asm/mach-powertv/cpu-feature-overrides.h @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2010 Cisco Systems, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef _ASM_MACH_POWERTV_CPU_FEATURE_OVERRIDES_H_ +#define _ASM_MACH_POWERTV_CPU_FEATURE_OVERRIDES_H_ +#define cpu_has_tlb 1 +#define cpu_has_4kex 1 +#define cpu_has_3k_cache 0 +#define cpu_has_4k_cache 1 +#define cpu_has_tx39_cache 0 +#define cpu_has_fpu 0 +#define cpu_has_counter 1 +#define cpu_has_watch 1 +#define cpu_has_divec 1 +#define cpu_has_vce 0 +#define cpu_has_cache_cdex_p 0 +#define cpu_has_cache_cdex_s 0 +#define cpu_has_mcheck 1 +#define cpu_has_ejtag 1 +#define cpu_has_llsc 1 +#define cpu_has_mips16 0 +#define cpu_has_mdmx 0 +#define cpu_has_mips3d 0 +#define cpu_has_smartmips 0 +#define cpu_has_vtag_icache 0 +#define cpu_has_dc_aliases 0 +#define cpu_has_ic_fills_f_dc 0 +#define cpu_has_mips32r1 0 +#define cpu_has_mips32r2 1 +#define cpu_has_mips64r1 0 +#define cpu_has_mips64r2 0 +#define cpu_has_dsp 0 +#define cpu_has_mipsmt 0 +#define cpu_has_userlocal 0 +#define cpu_has_nofpuex 0 +#define cpu_has_64bits 0 +#define cpu_has_64bit_zero_reg 0 +#define cpu_has_vint 1 +#define cpu_has_veic 1 +#define cpu_has_inclusive_pcaches 0 + +#define cpu_dcache_line_size() 32 +#define cpu_icache_line_size() 32 +#endif diff --git a/arch/mips/include/asm/mach-powertv/dma-coherence.h b/arch/mips/include/asm/mach-powertv/dma-coherence.h index a8e72cf12142..62c094085947 100644 --- a/arch/mips/include/asm/mach-powertv/dma-coherence.h +++ b/arch/mips/include/asm/mach-powertv/dma-coherence.h @@ -102,7 +102,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask) static inline void plat_extra_sync_for_device(struct device *dev) { - return; } static inline int plat_dma_mapping_error(struct device *dev, diff --git 
a/arch/mips/include/asm/mach-tx39xx/spaces.h b/arch/mips/include/asm/mach-tx39xx/spaces.h new file mode 100644 index 000000000000..151fe7a1cf1d --- /dev/null +++ b/arch/mips/include/asm/mach-tx39xx/spaces.h @@ -0,0 +1,17 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle + * Copyright (C) 2000, 2002 Maciej W. Rozycki + * Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc. + */ +#ifndef _ASM_TX39XX_SPACES_H +#define _ASM_TX39XX_SPACES_H + +#define FIXADDR_TOP ((unsigned long)(long)(int)0xfefe0000) + +#include <asm/mach-generic/spaces.h> + +#endif /* __ASM_TX39XX_SPACES_H */ diff --git a/arch/mips/include/asm/mach-tx49xx/spaces.h b/arch/mips/include/asm/mach-tx49xx/spaces.h new file mode 100644 index 000000000000..0cb10a6f489e --- /dev/null +++ b/arch/mips/include/asm/mach-tx49xx/spaces.h @@ -0,0 +1,17 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle + * Copyright (C) 2000, 2002 Maciej W. Rozycki + * Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc. + */ +#ifndef _ASM_TX49XX_SPACES_H +#define _ASM_TX49XX_SPACES_H + +#define FIXADDR_TOP ((unsigned long)(long)(int)0xfefe0000) + +#include <asm/mach-generic/spaces.h> + +#endif /* __ASM_TX49XX_SPACES_H */ diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 7e40f3778179..b2202a68cf0f 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -414,6 +414,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, * constraints placed on us by the cache architecture. 
*/ #define HAVE_ARCH_UNMAPPED_AREA +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN /* * No page table caches to initialise diff --git a/arch/mips/include/asm/smp-ops.h b/arch/mips/include/asm/smp-ops.h index 9e09af34c8a8..ef2a8041e78b 100644 --- a/arch/mips/include/asm/smp-ops.h +++ b/arch/mips/include/asm/smp-ops.h @@ -11,6 +11,8 @@ #ifndef __ASM_SMP_OPS_H #define __ASM_SMP_OPS_H +#include <linux/errno.h> + #ifdef CONFIG_SMP #include <linux/cpumask.h> @@ -56,8 +58,43 @@ static inline void register_smp_ops(struct plat_smp_ops *ops) #endif /* !CONFIG_SMP */ -extern struct plat_smp_ops up_smp_ops; -extern struct plat_smp_ops cmp_smp_ops; -extern struct plat_smp_ops vsmp_smp_ops; +static inline int register_up_smp_ops(void) +{ +#ifdef CONFIG_SMP_UP + extern struct plat_smp_ops up_smp_ops; + + register_smp_ops(&up_smp_ops); + + return 0; +#else + return -ENODEV; +#endif +} + +static inline int register_cmp_smp_ops(void) +{ +#ifdef CONFIG_MIPS_CMP + extern struct plat_smp_ops cmp_smp_ops; + + register_smp_ops(&cmp_smp_ops); + + return 0; +#else + return -ENODEV; +#endif +} + +static inline int register_vsmp_smp_ops(void) +{ +#ifdef CONFIG_MIPS_MT_SMP + extern struct plat_smp_ops vsmp_smp_ops; + + register_smp_ops(&vsmp_smp_ops); + + return 0; +#else + return -ENODEV; +#endif +} #endif /* __ASM_SMP_OPS_H */ diff --git a/arch/mips/include/asm/smtc.h b/arch/mips/include/asm/smtc.h index ea60bf08dcb0..c9736fc06325 100644 --- a/arch/mips/include/asm/smtc.h +++ b/arch/mips/include/asm/smtc.h @@ -46,6 +46,7 @@ extern void smtc_prepare_cpus(int cpus); extern void smtc_smp_finish(void); extern void smtc_boot_secondary(int cpu, struct task_struct *t); extern void smtc_cpus_done(void); +extern void smtc_init_secondary(void); /* diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h index dcbd4bb417ec..504d40aedfae 100644 --- a/arch/mips/include/asm/uasm.h +++ b/arch/mips/include/asm/uasm.h @@ -150,6 +150,7 @@ static inline void __uasminit uasm_l##lb(struct uasm_label **lab, u32 *addr) \ # define UASM_i_SLL(buf, rs, rt, sh) uasm_i_dsll(buf, rs, rt, sh) # define UASM_i_SRA(buf, rs, rt, sh) uasm_i_dsra(buf, rs, rt, sh) # define UASM_i_SRL(buf, rs, rt, sh) uasm_i_dsrl(buf, rs, rt, sh) +# define UASM_i_SRL_SAFE(buf, rs, rt, sh) uasm_i_dsrl_safe(buf, rs, rt, sh) # define UASM_i_ROTR(buf, rs, rt, sh) uasm_i_drotr(buf, rs, rt, sh) # define UASM_i_MFC0(buf, rt, rd...) uasm_i_dmfc0(buf, rt, rd) # define UASM_i_MTC0(buf, rt, rd...) uasm_i_dmtc0(buf, rt, rd) @@ -165,6 +166,7 @@ static inline void __uasminit uasm_l##lb(struct uasm_label **lab, u32 *addr) \ # define UASM_i_SLL(buf, rs, rt, sh) uasm_i_sll(buf, rs, rt, sh) # define UASM_i_SRA(buf, rs, rt, sh) uasm_i_sra(buf, rs, rt, sh) # define UASM_i_SRL(buf, rs, rt, sh) uasm_i_srl(buf, rs, rt, sh) +# define UASM_i_SRL_SAFE(buf, rs, rt, sh) uasm_i_srl(buf, rs, rt, sh) # define UASM_i_ROTR(buf, rs, rt, sh) uasm_i_rotr(buf, rs, rt, sh) # define UASM_i_MFC0(buf, rt, rd...) uasm_i_mfc0(buf, rt, rd) # define UASM_i_MTC0(buf, rt, rd...) 
uasm_i_mtc0(buf, rt, rd) diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h index 6fcfc480e9d0..ecea7871dec2 100644 --- a/arch/mips/include/asm/unistd.h +++ b/arch/mips/include/asm/unistd.h @@ -363,17 +363,18 @@ #define __NR_open_by_handle_at (__NR_Linux + 340) #define __NR_clock_adjtime (__NR_Linux + 341) #define __NR_syncfs (__NR_Linux + 342) -#define __NR_setns (__NR_Linux + 343) +#define __NR_sendmmsg (__NR_Linux + 343) +#define __NR_setns (__NR_Linux + 344) /* * Offset of the last Linux o32 flavoured syscall */ -#define __NR_Linux_syscalls 343 +#define __NR_Linux_syscalls 344 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ #define __NR_O32_Linux 4000 -#define __NR_O32_Linux_syscalls 343 +#define __NR_O32_Linux_syscalls 344 #if _MIPS_SIM == _MIPS_SIM_ABI64 @@ -683,17 +684,18 @@ #define __NR_open_by_handle_at (__NR_Linux + 299) #define __NR_clock_adjtime (__NR_Linux + 300) #define __NR_syncfs (__NR_Linux + 301) -#define __NR_setns (__NR_Linux + 302) +#define __NR_sendmmsg (__NR_Linux + 302) +#define __NR_setns (__NR_Linux + 303) /* * Offset of the last Linux 64-bit flavoured syscall */ -#define __NR_Linux_syscalls 302 +#define __NR_Linux_syscalls 303 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ #define __NR_64_Linux 5000 -#define __NR_64_Linux_syscalls 302 +#define __NR_64_Linux_syscalls 303 #if _MIPS_SIM == _MIPS_SIM_NABI32 @@ -1008,17 +1010,18 @@ #define __NR_open_by_handle_at (__NR_Linux + 304) #define __NR_clock_adjtime (__NR_Linux + 305) #define __NR_syncfs (__NR_Linux + 306) -#define __NR_setns (__NR_Linux + 307) +#define __NR_sendmmsg (__NR_Linux + 307) +#define __NR_setns (__NR_Linux + 308) /* * Offset of the last N32 flavoured syscall */ -#define __NR_Linux_syscalls 307 +#define __NR_Linux_syscalls 308 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ #define __NR_N32_Linux 6000 -#define __NR_N32_Linux_syscalls 307 +#define __NR_N32_Linux_syscalls 308 #ifdef __KERNEL__ diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index bb133d10b145..ebc0cd20b35d 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -71,7 +71,6 @@ void r4k_wait_irqoff(void) local_irq_enable(); __asm__(" .globl __pastwait \n" "__pastwait: \n"); - return; } /* diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c index 6e71b284f6c9..191eb52228c4 100644 --- a/arch/mips/kernel/irq_cpu.c +++ b/arch/mips/kernel/irq_cpu.c @@ -103,14 +103,12 @@ void __init mips_cpu_irq_init(void) clear_c0_status(ST0_IM); clear_c0_cause(CAUSEF_IP); - /* - * Only MT is using the software interrupts currently, so we just - * leave them uninitialized for other processors. - */ - if (cpu_has_mipsmt) - for (i = irq_base; i < irq_base + 2; i++) - irq_set_chip_and_handler(i, &mips_mt_cpu_irq_controller, - handle_percpu_irq); + /* Software interrupts are used for MT/CMT IPI */ + for (i = irq_base; i < irq_base + 2; i++) + irq_set_chip_and_handler(i, cpu_has_mipsmt ? 
+ &mips_mt_cpu_irq_controller : + &mips_cpu_irq_controller, + handle_percpu_irq); for (i = irq_base + 2; i < irq_base + 8; i++) irq_set_chip_and_handler(i, &mips_cpu_irq_controller, diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c index d0deaab9ace2..0aee944ac380 100644 --- a/arch/mips/kernel/perf_event.c +++ b/arch/mips/kernel/perf_event.c @@ -192,8 +192,6 @@ again: local64_add(delta, &event->count); local64_sub(delta, &hwc->period_left); - - return; } static void mipspmu_start(struct perf_event *event, int flags) diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index c28fbe6107bc..b30cb2573aaf 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -103,7 +103,6 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp) __init_dsp(); regs->cp0_epc = pc; regs->regs[29] = sp; - current_thread_info()->addr_limit = USER_DS; } void exit_thread(void) diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 99e656e425f3..e521420a45a5 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -589,6 +589,7 @@ einval: li v0, -ENOSYS sys sys_open_by_handle_at 3 /* 4340 */ sys sys_clock_adjtime 2 sys sys_syncfs 1 + sys sys_sendmmsg 4 sys sys_setns 2 .endm diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index fb0575f47f3d..85874d6a8a70 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -428,5 +428,6 @@ sys_call_table: PTR sys_open_by_handle_at PTR sys_clock_adjtime /* 5300 */ PTR sys_syncfs + PTR sys_sendmmsg PTR sys_setns .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 4de0c5534e73..b85842fc87ae 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -428,5 +428,6 @@ EXPORT(sysn32_call_table) PTR sys_open_by_handle_at PTR compat_sys_clock_adjtime /* 6305 */ PTR sys_syncfs + PTR compat_sys_sendmmsg PTR sys_setns .size sysn32_call_table,.-sysn32_call_table diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 4a387de08bfa..46c4763edf21 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -546,5 +546,6 @@ sys_call_table: PTR compat_sys_open_by_handle_at /* 4340 */ PTR compat_sys_clock_adjtime PTR sys_syncfs + PTR compat_sys_sendmmsg PTR sys_setns .size sys_call_table,.-sys_call_table diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c index 94560899d13e..7e9c0ffc11a5 100644 --- a/arch/mips/lantiq/clk.c +++ b/arch/mips/lantiq/clk.c @@ -100,6 +100,19 @@ void clk_put(struct clk *clk) } EXPORT_SYMBOL(clk_put); +int clk_enable(struct clk *clk) +{ + /* not used */ + return 0; +} +EXPORT_SYMBOL(clk_enable); + +void clk_disable(struct clk *clk) +{ + /* not used */ +} +EXPORT_SYMBOL(clk_disable); + static inline u32 ltq_get_counter_resolution(void) { u32 res; diff --git a/arch/mips/loongson/lemote-2f/ec_kb3310b.c b/arch/mips/loongson/lemote-2f/ec_kb3310b.c index 64057244eec5..2b666d3a3947 100644 --- a/arch/mips/loongson/lemote-2f/ec_kb3310b.c +++ b/arch/mips/loongson/lemote-2f/ec_kb3310b.c @@ -45,8 +45,6 @@ void ec_write(unsigned short addr, unsigned char val) /* flush the write action */ inb(EC_IO_PORT_DATA); spin_unlock_irqrestore(&index_access_lock, flags); - - return; } EXPORT_SYMBOL_GPL(ec_write); diff --git a/arch/mips/mipssim/sim_setup.c b/arch/mips/mipssim/sim_setup.c index 55f22a3afe61..256e0cdaa499 100644 --- a/arch/mips/mipssim/sim_setup.c 
+++ b/arch/mips/mipssim/sim_setup.c @@ -34,6 +34,7 @@ #include <asm/time.h> #include <asm/mips-boards/sim.h> #include <asm/mips-boards/simint.h> +#include <asm/smp-ops.h> static void __init serial_init(void); @@ -59,18 +60,17 @@ void __init prom_init(void) prom_meminit(); -#ifdef CONFIG_MIPS_MT_SMP - if (cpu_has_mipsmt) - register_smp_ops(&vsmp_smp_ops); - else - register_smp_ops(&up_smp_ops); -#endif + if (cpu_has_mipsmt) { + if (!register_vsmp_smp_ops()) + return; + #ifdef CONFIG_MIPS_MT_SMTC - if (cpu_has_mipsmt) register_smp_ops(&ssmtc_smp_ops); - else - register_smp_ops(&up_smp_ops); + return; #endif + } + + register_up_smp_ops(); } static void __init serial_init(void) diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index eeb642e4066e..b9aabb998a32 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -604,6 +604,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) r4k_blast_scache(); else blast_scache_range(addr, addr + size); + __sync(); return; } @@ -620,6 +621,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) } bc_wback_inv(addr, size); + __sync(); } static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) @@ -647,6 +649,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) (addr + size - 1) & almask); blast_inv_scache_range(addr, addr + size); } + __sync(); return; } @@ -663,6 +666,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) } bc_inv(addr, size); + __sync(); } #endif /* CONFIG_DMA_NONCOHERENT */ diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index 21ea14efb837..46084912e588 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -15,18 +15,18 @@ #include <linux/scatterlist.h> #include <linux/string.h> #include <linux/gfp.h> +#include <linux/highmem.h> #include <asm/cache.h> #include <asm/io.h> #include <dma-coherence.h> -static inline unsigned long dma_addr_to_virt(struct device *dev, +static inline struct page *dma_addr_to_page(struct device *dev, dma_addr_t dma_addr) { - unsigned long addr = plat_dma_addr_to_phys(dev, dma_addr); - - return (unsigned long)phys_to_virt(addr); + return pfn_to_page( + plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT); } /* @@ -148,20 +148,20 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr, free_pages(addr, get_order(size)); } -static inline void __dma_sync(unsigned long addr, size_t size, +static inline void __dma_sync_virtual(void *addr, size_t size, enum dma_data_direction direction) { switch (direction) { case DMA_TO_DEVICE: - dma_cache_wback(addr, size); + dma_cache_wback((unsigned long)addr, size); break; case DMA_FROM_DEVICE: - dma_cache_inv(addr, size); + dma_cache_inv((unsigned long)addr, size); break; case DMA_BIDIRECTIONAL: - dma_cache_wback_inv(addr, size); + dma_cache_wback_inv((unsigned long)addr, size); break; default: @@ -169,12 +169,49 @@ static inline void __dma_sync(unsigned long addr, size_t size, } } +/* + * A single sg entry may refer to multiple physically contiguous + * pages. But we still need to process highmem pages individually. + * If highmem is not configured then the bulk of this loop gets + * optimized out. 
+ */ +static inline void __dma_sync(struct page *page, + unsigned long offset, size_t size, enum dma_data_direction direction) +{ + size_t left = size; + + do { + size_t len = left; + + if (PageHighMem(page)) { + void *addr; + + if (offset + len > PAGE_SIZE) { + if (offset >= PAGE_SIZE) { + page += offset >> PAGE_SHIFT; + offset &= ~PAGE_MASK; + } + len = PAGE_SIZE - offset; + } + + addr = kmap_atomic(page); + __dma_sync_virtual(addr + offset, len, direction); + kunmap_atomic(addr); + } else + __dma_sync_virtual(page_address(page) + offset, + size, direction); + offset = 0; + page++; + left -= len; + } while (left); +} + static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction direction, struct dma_attrs *attrs) { if (cpu_is_noncoherent_r10000(dev)) - __dma_sync(dma_addr_to_virt(dev, dma_addr), size, - direction); + __dma_sync(dma_addr_to_page(dev, dma_addr), + dma_addr & ~PAGE_MASK, size, direction); plat_unmap_dma_mem(dev, dma_addr, size, direction); } @@ -185,13 +222,11 @@ static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg, int i; for (i = 0; i < nents; i++, sg++) { - unsigned long addr; - - addr = (unsigned long) sg_virt(sg); - if (!plat_device_is_coherent(dev) && addr) - __dma_sync(addr, sg->length, direction); - sg->dma_address = plat_map_dma_mem(dev, - (void *)addr, sg->length); + if (!plat_device_is_coherent(dev)) + __dma_sync(sg_page(sg), sg->offset, sg->length, + direction); + sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) + + sg->offset; } return nents; @@ -201,30 +236,23 @@ static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, struct dma_attrs *attrs) { - unsigned long addr; - - addr = (unsigned long) page_address(page) + offset; - if (!plat_device_is_coherent(dev)) - __dma_sync(addr, size, direction); + __dma_sync(page, offset, size, direction); - return plat_map_dma_mem(dev, (void *)addr, size); + return plat_map_dma_mem_page(dev, page) + offset; } static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, enum dma_data_direction direction, struct dma_attrs *attrs) { - unsigned long addr; int i; for (i = 0; i < nhwentries; i++, sg++) { if (!plat_device_is_coherent(dev) && - direction != DMA_TO_DEVICE) { - addr = (unsigned long) sg_virt(sg); - if (addr) - __dma_sync(addr, sg->length, direction); - } + direction != DMA_TO_DEVICE) + __dma_sync(sg_page(sg), sg->offset, sg->length, + direction); plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction); } } @@ -232,24 +260,18 @@ static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg, static void mips_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) { - if (cpu_is_noncoherent_r10000(dev)) { - unsigned long addr; - - addr = dma_addr_to_virt(dev, dma_handle); - __dma_sync(addr, size, direction); - } + if (cpu_is_noncoherent_r10000(dev)) + __dma_sync(dma_addr_to_page(dev, dma_handle), + dma_handle & ~PAGE_MASK, size, direction); } static void mips_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) { plat_extra_sync_for_device(dev); - if (!plat_device_is_coherent(dev)) { - unsigned long addr; - - addr = dma_addr_to_virt(dev, dma_handle); - __dma_sync(addr, size, direction); - } + if (!plat_device_is_coherent(dev)) + __dma_sync(dma_addr_to_page(dev, dma_handle), + dma_handle & 
~PAGE_MASK, size, direction); } static void mips_dma_sync_sg_for_cpu(struct device *dev, @@ -260,8 +282,8 @@ static void mips_dma_sync_sg_for_cpu(struct device *dev, /* Make sure that gcc doesn't leave the empty loop body. */ for (i = 0; i < nelems; i++, sg++) { if (cpu_is_noncoherent_r10000(dev)) - __dma_sync((unsigned long)page_address(sg_page(sg)), - sg->length, direction); + __dma_sync(sg_page(sg), sg->offset, sg->length, + direction); } } @@ -273,8 +295,8 @@ static void mips_dma_sync_sg_for_device(struct device *dev, /* Make sure that gcc doesn't leave the empty loop body. */ for (i = 0; i < nelems; i++, sg++) { if (!plat_device_is_coherent(dev)) - __dma_sync((unsigned long)page_address(sg_page(sg)), - sg->length, direction); + __dma_sync(sg_page(sg), sg->offset, sg->length, + direction); } } @@ -295,7 +317,7 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size, plat_extra_sync_for_device(dev); if (!plat_device_is_coherent(dev)) - __dma_sync((unsigned long)vaddr, size, direction); + __dma_sync_virtual(vaddr, size, direction); } EXPORT_SYMBOL(dma_cache_sync); diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 1aadeb42c5a5..b7ebc4fa89bc 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -277,11 +277,11 @@ void __init fixrange_init(unsigned long start, unsigned long end, k = __pmd_offset(vaddr); pgd = pgd_base + i; - for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) { + for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) { pud = (pud_t *)pgd; - for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) { + for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) { pmd = (pmd_t *)pud; - for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) { + for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) { if (pmd_none(*pmd)) { pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); set_pmd(pmd, __pmd((unsigned long)pte)); @@ -368,7 +368,7 @@ void __init mem_init(void) #ifdef CONFIG_DISCONTIGMEM #error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet" #endif - max_mapnr = highend_pfn; + max_mapnr = highend_pfn ? 
highend_pfn : max_low_pfn; #else max_mapnr = max_low_pfn; #endif diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c index ae3c20a9556e..9ff5d0fac556 100644 --- a/arch/mips/mm/mmap.c +++ b/arch/mips/mm/mmap.c @@ -10,6 +10,7 @@ #include <linux/mm.h> #include <linux/mman.h> #include <linux/module.h> +#include <linux/personality.h> #include <linux/random.h> #include <linux/sched.h> @@ -17,21 +18,65 @@ unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ EXPORT_SYMBOL(shm_align_mask); +/* gap between mmap and stack */ +#define MIN_GAP (128*1024*1024UL) +#define MAX_GAP ((TASK_SIZE)/6*5) + +static int mmap_is_legacy(void) +{ + if (current->personality & ADDR_COMPAT_LAYOUT) + return 1; + + if (rlimit(RLIMIT_STACK) == RLIM_INFINITY) + return 1; + + return sysctl_legacy_va_layout; +} + +static unsigned long mmap_base(unsigned long rnd) +{ + unsigned long gap = rlimit(RLIMIT_STACK); + + if (gap < MIN_GAP) + gap = MIN_GAP; + else if (gap > MAX_GAP) + gap = MAX_GAP; + + return PAGE_ALIGN(TASK_SIZE - gap - rnd); +} + +static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr, + unsigned long pgoff) +{ + unsigned long base = addr & ~shm_align_mask; + unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask; + + if (base + off <= addr) + return base + off; + + return base - off; +} + #define COLOUR_ALIGN(addr,pgoff) \ ((((addr) + shm_align_mask) & ~shm_align_mask) + \ (((pgoff) << PAGE_SHIFT) & shm_align_mask)) -unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, - unsigned long len, unsigned long pgoff, unsigned long flags) +enum mmap_allocation_direction {UP, DOWN}; + +static unsigned long arch_get_unmapped_area_foo(struct file *filp, + unsigned long addr0, unsigned long len, unsigned long pgoff, + unsigned long flags, enum mmap_allocation_direction dir) { - struct vm_area_struct * vmm; + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + unsigned long addr = addr0; int do_color_align; - if (len > TASK_SIZE) + if (unlikely(len > TASK_SIZE)) return -ENOMEM; if (flags & MAP_FIXED) { - /* Even MAP_FIXED mappings must reside within TASK_SIZE. */ + /* Even MAP_FIXED mappings must reside within TASK_SIZE */ if (TASK_SIZE - len < addr) return -EINVAL; @@ -48,34 +93,130 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, do_color_align = 0; if (filp || (flags & MAP_SHARED)) do_color_align = 1; + + /* requesting a specific address */ if (addr) { if (do_color_align) addr = COLOUR_ALIGN(addr, pgoff); else addr = PAGE_ALIGN(addr); - vmm = find_vma(current->mm, addr); + + vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vmm || addr + len <= vmm->vm_start)) + (!vma || addr + len <= vma->vm_start)) return addr; } - addr = current->mm->mmap_base; - if (do_color_align) - addr = COLOUR_ALIGN(addr, pgoff); - else - addr = PAGE_ALIGN(addr); - for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) { - /* At this point: (!vmm || addr < vmm->vm_end). */ - if (TASK_SIZE - len < addr) - return -ENOMEM; - if (!vmm || addr + len <= vmm->vm_start) - return addr; - addr = vmm->vm_end; + if (dir == UP) { + addr = mm->mmap_base; + if (do_color_align) + addr = COLOUR_ALIGN(addr, pgoff); + else + addr = PAGE_ALIGN(addr); + + for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) { + /* At this point: (!vma || addr < vma->vm_end). 
*/ + if (TASK_SIZE - len < addr) + return -ENOMEM; + if (!vma || addr + len <= vma->vm_start) + return addr; + addr = vma->vm_end; + if (do_color_align) + addr = COLOUR_ALIGN(addr, pgoff); + } + } else { + /* check if free_area_cache is useful for us */ + if (len <= mm->cached_hole_size) { + mm->cached_hole_size = 0; + mm->free_area_cache = mm->mmap_base; + } + + /* either no address requested or can't fit in requested address hole */ + addr = mm->free_area_cache; + if (do_color_align) { + unsigned long base = + COLOUR_ALIGN_DOWN(addr - len, pgoff); + + addr = base + len; + } + + /* make sure it can fit in the remaining address space */ + if (likely(addr > len)) { + vma = find_vma(mm, addr - len); + if (!vma || addr <= vma->vm_start) { + /* remember the address as a hint for next time */ + return mm->free_area_cache = addr-len; + } + } + + if (unlikely(mm->mmap_base < len)) + goto bottomup; + + addr = mm->mmap_base-len; if (do_color_align) - addr = COLOUR_ALIGN(addr, pgoff); + addr = COLOUR_ALIGN_DOWN(addr, pgoff); + + do { + /* + * Lookup failure means no vma is above this address, + * else if new region fits below vma->vm_start, + * return with success: + */ + vma = find_vma(mm, addr); + if (likely(!vma || addr+len <= vma->vm_start)) { + /* remember the address as a hint for next time */ + return mm->free_area_cache = addr; + } + + /* remember the largest hole we saw so far */ + if (addr + mm->cached_hole_size < vma->vm_start) + mm->cached_hole_size = vma->vm_start - addr; + + /* try just below the current vma->vm_start */ + addr = vma->vm_start-len; + if (do_color_align) + addr = COLOUR_ALIGN_DOWN(addr, pgoff); + } while (likely(len < vma->vm_start)); + +bottomup: + /* + * A failed mmap() very likely causes application failure, + * so fall back to the bottom-up function here. This scenario + * can happen with large stack limits and large mmap() + * allocations. + */ + mm->cached_hole_size = ~0UL; + mm->free_area_cache = TASK_UNMAPPED_BASE; + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); + /* + * Restore the topdown base: + */ + mm->free_area_cache = mm->mmap_base; + mm->cached_hole_size = ~0UL; + + return addr; } } +unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + return arch_get_unmapped_area_foo(filp, + addr0, len, pgoff, flags, UP); +} + +/* + * There is no need to export this but sched.h declares the function as + * extern so making it static here results in an error. 
+ */ +unsigned long arch_get_unmapped_area_topdown(struct file *filp, + unsigned long addr0, unsigned long len, unsigned long pgoff, + unsigned long flags) +{ + return arch_get_unmapped_area_foo(filp, + addr0, len, pgoff, flags, DOWN); +} + void arch_pick_mmap_layout(struct mm_struct *mm) { unsigned long random_factor = 0UL; @@ -89,9 +230,15 @@ void arch_pick_mmap_layout(struct mm_struct *mm) random_factor &= 0xffffffful; } - mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; - mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; + if (mmap_is_legacy()) { + mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; + mm->get_unmapped_area = arch_get_unmapped_area; + mm->unmap_area = arch_unmap_area; + } else { + mm->mmap_base = mmap_base(random_factor); + mm->get_unmapped_area = arch_get_unmapped_area_topdown; + mm->unmap_area = arch_unmap_area_topdown; + } } static inline unsigned long brk_rnd(void) diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c index 575e4019227b..adc6911ba748 100644 --- a/arch/mips/mm/pgtable-32.c +++ b/arch/mips/mm/pgtable-32.c @@ -52,7 +52,7 @@ void __init pagetable_init(void) * Fixed mappings: */ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; - fixrange_init(vaddr, 0, pgd_base); + fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base); #ifdef CONFIG_HIGHMEM /* diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c index 78eaa4f0b0ec..cda4e300eb0a 100644 --- a/arch/mips/mm/pgtable-64.c +++ b/arch/mips/mm/pgtable-64.c @@ -76,5 +76,5 @@ void __init pagetable_init(void) * Fixed mappings: */ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; - fixrange_init(vaddr, 0, pgd_base); + fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base); } diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 424ed4b92e6d..b6e1cff50667 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -42,6 +42,18 @@ extern void tlb_do_page_fault_0(void); extern void tlb_do_page_fault_1(void); +struct work_registers { + int r1; + int r2; + int r3; +}; + +struct tlb_reg_save { + unsigned long a; + unsigned long b; +} ____cacheline_aligned_in_smp; + +static struct tlb_reg_save handler_reg_save[NR_CPUS]; static inline int r45k_bvahwbug(void) { @@ -248,6 +260,73 @@ static int scratch_reg __cpuinitdata; static int pgd_reg __cpuinitdata; enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch}; +static struct work_registers __cpuinit build_get_work_registers(u32 **p) +{ + struct work_registers r; + + int smp_processor_id_reg; + int smp_processor_id_sel; + int smp_processor_id_shift; + + if (scratch_reg > 0) { + /* Save in CPU local C0_KScratch? 
*/ + UASM_i_MTC0(p, 1, 31, scratch_reg); + r.r1 = K0; + r.r2 = K1; + r.r3 = 1; + return r; + } + + if (num_possible_cpus() > 1) { +#ifdef CONFIG_MIPS_PGD_C0_CONTEXT + smp_processor_id_shift = 51; + smp_processor_id_reg = 20; /* XContext */ + smp_processor_id_sel = 0; +#else +# ifdef CONFIG_32BIT + smp_processor_id_shift = 25; + smp_processor_id_reg = 4; /* Context */ + smp_processor_id_sel = 0; +# endif +# ifdef CONFIG_64BIT + smp_processor_id_shift = 26; + smp_processor_id_reg = 4; /* Context */ + smp_processor_id_sel = 0; +# endif +#endif + /* Get smp_processor_id */ + UASM_i_MFC0(p, K0, smp_processor_id_reg, smp_processor_id_sel); + UASM_i_SRL_SAFE(p, K0, K0, smp_processor_id_shift); + + /* handler_reg_save index in K0 */ + UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save))); + + UASM_i_LA(p, K1, (long)&handler_reg_save); + UASM_i_ADDU(p, K0, K0, K1); + } else { + UASM_i_LA(p, K0, (long)&handler_reg_save); + } + /* K0 now points to save area, save $1 and $2 */ + UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0); + UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0); + + r.r1 = K1; + r.r2 = 1; + r.r3 = 2; + return r; +} + +static void __cpuinit build_restore_work_registers(u32 **p) +{ + if (scratch_reg > 0) { + UASM_i_MFC0(p, 1, 31, scratch_reg); + return; + } + /* K0 already points to save area, restore $1 and $2 */ + UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0); + UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0); +} + #ifndef CONFIG_MIPS_PGD_C0_CONTEXT /* @@ -1160,9 +1239,6 @@ static void __cpuinit build_r4000_tlb_refill_handler(void) memset(relocs, 0, sizeof(relocs)); memset(final_handler, 0, sizeof(final_handler)); - if (scratch_reg == 0) - scratch_reg = allocate_kscratch(); - if ((scratch_reg > 0 || scratchpad_available()) && use_bbit_insns()) { htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1, scratch_reg); @@ -1462,22 +1538,28 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, */ static void __cpuinit build_pte_present(u32 **p, struct uasm_reloc **r, - unsigned int pte, unsigned int ptr, enum label_id lid) + int pte, int ptr, int scratch, enum label_id lid) { + int t = scratch >= 0 ? 
scratch : pte; + if (kernel_uses_smartmips_rixi) { if (use_bbit_insns()) { uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid); uasm_i_nop(p); } else { - uasm_i_andi(p, pte, pte, _PAGE_PRESENT); - uasm_il_beqz(p, r, pte, lid); - iPTE_LW(p, pte, ptr); + uasm_i_andi(p, t, pte, _PAGE_PRESENT); + uasm_il_beqz(p, r, t, lid); + if (pte == t) + /* You lose the SMP race :-(*/ + iPTE_LW(p, pte, ptr); } } else { - uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ); - uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ); - uasm_il_bnez(p, r, pte, lid); - iPTE_LW(p, pte, ptr); + uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_READ); + uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_READ); + uasm_il_bnez(p, r, t, lid); + if (pte == t) + /* You lose the SMP race :-(*/ + iPTE_LW(p, pte, ptr); } } @@ -1497,19 +1579,19 @@ build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte, */ static void __cpuinit build_pte_writable(u32 **p, struct uasm_reloc **r, - unsigned int pte, unsigned int ptr, enum label_id lid) + unsigned int pte, unsigned int ptr, int scratch, + enum label_id lid) { - if (use_bbit_insns()) { - uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid); - uasm_i_nop(p); - uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid); - uasm_i_nop(p); - } else { - uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE); - uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE); - uasm_il_bnez(p, r, pte, lid); + int t = scratch >= 0 ? scratch : pte; + + uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_WRITE); + uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_WRITE); + uasm_il_bnez(p, r, t, lid); + if (pte == t) + /* You lose the SMP race :-(*/ iPTE_LW(p, pte, ptr); - } + else + uasm_i_nop(p); } /* Make PTE writable, update software status bits as well, then store @@ -1531,15 +1613,19 @@ build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte, */ static void __cpuinit build_pte_modifiable(u32 **p, struct uasm_reloc **r, - unsigned int pte, unsigned int ptr, enum label_id lid) + unsigned int pte, unsigned int ptr, int scratch, + enum label_id lid) { if (use_bbit_insns()) { uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid); uasm_i_nop(p); } else { - uasm_i_andi(p, pte, pte, _PAGE_WRITE); - uasm_il_beqz(p, r, pte, lid); - iPTE_LW(p, pte, ptr); + int t = scratch >= 0 ? 
scratch : pte; + uasm_i_andi(p, t, pte, _PAGE_WRITE); + uasm_il_beqz(p, r, t, lid); + if (pte == t) + /* You lose the SMP race :-(*/ + iPTE_LW(p, pte, ptr); } } @@ -1619,7 +1705,7 @@ static void __cpuinit build_r3000_tlb_load_handler(void) memset(relocs, 0, sizeof(relocs)); build_r3000_tlbchange_handler_head(&p, K0, K1); - build_pte_present(&p, &r, K0, K1, label_nopage_tlbl); + build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl); uasm_i_nop(&p); /* load delay */ build_make_valid(&p, &r, K0, K1); build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); @@ -1649,7 +1735,7 @@ static void __cpuinit build_r3000_tlb_store_handler(void) memset(relocs, 0, sizeof(relocs)); build_r3000_tlbchange_handler_head(&p, K0, K1); - build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs); + build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs); uasm_i_nop(&p); /* load delay */ build_make_write(&p, &r, K0, K1); build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); @@ -1673,13 +1759,14 @@ static void __cpuinit build_r3000_tlb_modify_handler(void) u32 *p = handle_tlbm; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; + struct work_registers wr; memset(handle_tlbm, 0, sizeof(handle_tlbm)); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); build_r3000_tlbchange_handler_head(&p, K0, K1); - build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm); + build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm); uasm_i_nop(&p); /* load delay */ build_make_write(&p, &r, K0, K1); build_r3000_pte_reload_tlbwi(&p, K0, K1); @@ -1702,15 +1789,16 @@ static void __cpuinit build_r3000_tlb_modify_handler(void) /* * R4000 style TLB load/store/modify handlers. */ -static void __cpuinit +static struct work_registers __cpuinit build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, - struct uasm_reloc **r, unsigned int pte, - unsigned int ptr) + struct uasm_reloc **r) { + struct work_registers wr = build_get_work_registers(p); + #ifdef CONFIG_64BIT - build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */ + build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */ #else - build_get_pgde32(p, pte, ptr); /* get pgd in ptr */ + build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */ #endif #ifdef CONFIG_HUGETLB_PAGE @@ -1719,21 +1807,22 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, * instead contains the tlb pte. Check the PAGE_HUGE bit and * see if we need to jump to huge tlb processing. 
*/ - build_is_huge_pte(p, r, pte, ptr, label_tlb_huge_update); + build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update); #endif - UASM_i_MFC0(p, pte, C0_BADVADDR); - UASM_i_LW(p, ptr, 0, ptr); - UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2); - uasm_i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2); - UASM_i_ADDU(p, ptr, ptr, pte); + UASM_i_MFC0(p, wr.r1, C0_BADVADDR); + UASM_i_LW(p, wr.r2, 0, wr.r2); + UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2); + uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2); + UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1); #ifdef CONFIG_SMP uasm_l_smp_pgtable_change(l, *p); #endif - iPTE_LW(p, pte, ptr); /* get even pte */ + iPTE_LW(p, wr.r1, wr.r2); /* get even pte */ if (!m4kc_tlbp_war()) build_tlb_probe_entry(p); + return wr; } static void __cpuinit @@ -1746,6 +1835,7 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l, build_update_entries(p, tmp, ptr); build_tlb_write_entry(p, l, r, tlb_indexed); uasm_l_leave(l, *p); + build_restore_work_registers(p); uasm_i_eret(p); /* return from trap */ #ifdef CONFIG_64BIT @@ -1758,6 +1848,7 @@ static void __cpuinit build_r4000_tlb_load_handler(void) u32 *p = handle_tlbl; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; + struct work_registers wr; memset(handle_tlbl, 0, sizeof(handle_tlbl)); memset(labels, 0, sizeof(labels)); @@ -1777,8 +1868,8 @@ static void __cpuinit build_r4000_tlb_load_handler(void) /* No need for uasm_i_nop */ } - build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1); - build_pte_present(&p, &r, K0, K1, label_nopage_tlbl); + wr = build_r4000_tlbchange_handler_head(&p, &l, &r); + build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl); if (m4kc_tlbp_war()) build_tlb_probe_entry(&p); @@ -1788,44 +1879,43 @@ static void __cpuinit build_r4000_tlb_load_handler(void) * have triggered it. Skip the expensive test.. */ if (use_bbit_insns()) { - uasm_il_bbit0(&p, &r, K0, ilog2(_PAGE_VALID), + uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID), label_tlbl_goaround1); } else { - uasm_i_andi(&p, K0, K0, _PAGE_VALID); - uasm_il_beqz(&p, &r, K0, label_tlbl_goaround1); + uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID); + uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1); } uasm_i_nop(&p); uasm_i_tlbr(&p); /* Examine entrylo 0 or 1 based on ptr. */ if (use_bbit_insns()) { - uasm_i_bbit0(&p, K1, ilog2(sizeof(pte_t)), 8); + uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8); } else { - uasm_i_andi(&p, K0, K1, sizeof(pte_t)); - uasm_i_beqz(&p, K0, 8); + uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t)); + uasm_i_beqz(&p, wr.r3, 8); } - - UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/ - UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */ + /* load it in the delay slot*/ + UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0); + /* load it if ptr is odd */ + UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1); /* - * If the entryLo (now in K0) is valid (bit 1), RI or + * If the entryLo (now in wr.r3) is valid (bit 1), RI or * XI must have triggered it. 
*/ if (use_bbit_insns()) { - uasm_il_bbit1(&p, &r, K0, 1, label_nopage_tlbl); - /* Reload the PTE value */ - iPTE_LW(&p, K0, K1); + uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl); + uasm_i_nop(&p); uasm_l_tlbl_goaround1(&l, p); } else { - uasm_i_andi(&p, K0, K0, 2); - uasm_il_bnez(&p, &r, K0, label_nopage_tlbl); - uasm_l_tlbl_goaround1(&l, p); - /* Reload the PTE value */ - iPTE_LW(&p, K0, K1); + uasm_i_andi(&p, wr.r3, wr.r3, 2); + uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl); + uasm_i_nop(&p); } + uasm_l_tlbl_goaround1(&l, p); } - build_make_valid(&p, &r, K0, K1); - build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1); + build_make_valid(&p, &r, wr.r1, wr.r2); + build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2); #ifdef CONFIG_HUGETLB_PAGE /* @@ -1833,8 +1923,8 @@ static void __cpuinit build_r4000_tlb_load_handler(void) * spots a huge page. */ uasm_l_tlb_huge_update(&l, p); - iPTE_LW(&p, K0, K1); - build_pte_present(&p, &r, K0, K1, label_nopage_tlbl); + iPTE_LW(&p, wr.r1, wr.r2); + build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl); build_tlb_probe_entry(&p); if (kernel_uses_smartmips_rixi) { @@ -1843,50 +1933,51 @@ static void __cpuinit build_r4000_tlb_load_handler(void) * have triggered it. Skip the expensive test.. */ if (use_bbit_insns()) { - uasm_il_bbit0(&p, &r, K0, ilog2(_PAGE_VALID), + uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID), label_tlbl_goaround2); } else { - uasm_i_andi(&p, K0, K0, _PAGE_VALID); - uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2); + uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID); + uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2); } uasm_i_nop(&p); uasm_i_tlbr(&p); /* Examine entrylo 0 or 1 based on ptr. */ if (use_bbit_insns()) { - uasm_i_bbit0(&p, K1, ilog2(sizeof(pte_t)), 8); + uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8); } else { - uasm_i_andi(&p, K0, K1, sizeof(pte_t)); - uasm_i_beqz(&p, K0, 8); + uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t)); + uasm_i_beqz(&p, wr.r3, 8); } - UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/ - UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */ + /* load it in the delay slot*/ + UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0); + /* load it if ptr is odd */ + UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1); /* - * If the entryLo (now in K0) is valid (bit 1), RI or + * If the entryLo (now in wr.r3) is valid (bit 1), RI or * XI must have triggered it. */ if (use_bbit_insns()) { - uasm_il_bbit0(&p, &r, K0, 1, label_tlbl_goaround2); + uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2); } else { - uasm_i_andi(&p, K0, K0, 2); - uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2); + uasm_i_andi(&p, wr.r3, wr.r3, 2); + uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2); } - /* Reload the PTE value */ - iPTE_LW(&p, K0, K1); /* * We clobbered C0_PAGEMASK, restore it. On the other branch * it is restored in build_huge_tlb_write_entry. 
*/ - build_restore_pagemask(&p, &r, K0, label_nopage_tlbl, 0); + build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0); uasm_l_tlbl_goaround2(&l, p); } - uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID)); - build_huge_handler_tail(&p, &r, &l, K0, K1); + uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID)); + build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); #endif uasm_l_nopage_tlbl(&l, p); + build_restore_work_registers(&p); uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); uasm_i_nop(&p); @@ -1905,17 +1996,18 @@ static void __cpuinit build_r4000_tlb_store_handler(void) u32 *p = handle_tlbs; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; + struct work_registers wr; memset(handle_tlbs, 0, sizeof(handle_tlbs)); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); - build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1); - build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs); + wr = build_r4000_tlbchange_handler_head(&p, &l, &r); + build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs); if (m4kc_tlbp_war()) build_tlb_probe_entry(&p); - build_make_write(&p, &r, K0, K1); - build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1); + build_make_write(&p, &r, wr.r1, wr.r2); + build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2); #ifdef CONFIG_HUGETLB_PAGE /* @@ -1923,15 +2015,16 @@ static void __cpuinit build_r4000_tlb_store_handler(void) * build_r4000_tlbchange_handler_head spots a huge page. */ uasm_l_tlb_huge_update(&l, p); - iPTE_LW(&p, K0, K1); - build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs); + iPTE_LW(&p, wr.r1, wr.r2); + build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs); build_tlb_probe_entry(&p); - uasm_i_ori(&p, K0, K0, + uasm_i_ori(&p, wr.r1, wr.r1, _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); - build_huge_handler_tail(&p, &r, &l, K0, K1); + build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); #endif uasm_l_nopage_tlbs(&l, p); + build_restore_work_registers(&p); uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); uasm_i_nop(&p); @@ -1950,18 +2043,19 @@ static void __cpuinit build_r4000_tlb_modify_handler(void) u32 *p = handle_tlbm; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; + struct work_registers wr; memset(handle_tlbm, 0, sizeof(handle_tlbm)); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); - build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1); - build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm); + wr = build_r4000_tlbchange_handler_head(&p, &l, &r); + build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm); if (m4kc_tlbp_war()) build_tlb_probe_entry(&p); /* Present and writable bits set, set accessed and dirty bits. */ - build_make_write(&p, &r, K0, K1); - build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1); + build_make_write(&p, &r, wr.r1, wr.r2); + build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2); #ifdef CONFIG_HUGETLB_PAGE /* @@ -1969,15 +2063,16 @@ static void __cpuinit build_r4000_tlb_modify_handler(void) * build_r4000_tlbchange_handler_head spots a huge page. 
*/ uasm_l_tlb_huge_update(&l, p); - iPTE_LW(&p, K0, K1); - build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm); + iPTE_LW(&p, wr.r1, wr.r2); + build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm); build_tlb_probe_entry(&p); - uasm_i_ori(&p, K0, K0, + uasm_i_ori(&p, wr.r1, wr.r1, _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); - build_huge_handler_tail(&p, &r, &l, K0, K1); + build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); #endif uasm_l_nopage_tlbm(&l, p); + build_restore_work_registers(&p); uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); uasm_i_nop(&p); @@ -2036,6 +2131,7 @@ void __cpuinit build_tlb_refill_handler(void) default: if (!run_once) { + scratch_reg = allocate_kscratch(); #ifdef CONFIG_MIPS_PGD_C0_CONTEXT build_r4000_setup_pgd(); #endif diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c index 31180c321a1a..4b988b9a30d5 100644 --- a/arch/mips/mti-malta/malta-init.c +++ b/arch/mips/mti-malta/malta-init.c @@ -28,6 +28,7 @@ #include <asm/io.h> #include <asm/system.h> #include <asm/cacheflush.h> +#include <asm/smp-ops.h> #include <asm/traps.h> #include <asm/gcmpregs.h> @@ -358,15 +359,14 @@ void __init prom_init(void) #ifdef CONFIG_SERIAL_8250_CONSOLE console_config(); #endif -#ifdef CONFIG_MIPS_CMP /* Early detection of CMP support */ if (gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ)) - register_smp_ops(&cmp_smp_ops); - else -#endif -#ifdef CONFIG_MIPS_MT_SMP - register_smp_ops(&vsmp_smp_ops); -#endif + if (!register_cmp_smp_ops()) + return; + + if (!register_vsmp_smp_ops()) + return; + #ifdef CONFIG_MIPS_MT_SMTC register_smp_ops(&msmtc_smp_ops); #endif diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c index 49a38b09a488..1efc8c394486 100644 --- a/arch/mips/mti-malta/malta-smtc.c +++ b/arch/mips/mti-malta/malta-smtc.c @@ -152,7 +152,7 @@ int plat_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, * runtime code can anyway deal with the null set */ printk(KERN_WARNING - "IRQ affinity leaves no legal CPU for IRQ %d\n", irq); + "IRQ affinity leaves no legal CPU for IRQ %d\n", d->irq); /* Do any generic SMTC IRQ affinity setup */ smtc_set_irq_affinity(d->irq, tmask); diff --git a/arch/mips/netlogic/Platform b/arch/mips/netlogic/Platform new file mode 100644 index 000000000000..f87c1640abb5 --- /dev/null +++ b/arch/mips/netlogic/Platform @@ -0,0 +1,11 @@ +# +# NETLOGIC includes +# +cflags-$(CONFIG_NLM_COMMON) += -I$(srctree)/arch/mips/include/asm/mach-netlogic +cflags-$(CONFIG_NLM_COMMON) += -I$(srctree)/arch/mips/include/asm/netlogic + +# +# NETLOGIC XLR/XLS SoC, Simulator and boards +# +core-$(CONFIG_NLM_XLR) += arch/mips/netlogic/xlr/ +load-$(CONFIG_NLM_XLR_BOARD) += 0xffffffff84000000 diff --git a/arch/mips/netlogic/xlr/irq.c b/arch/mips/netlogic/xlr/irq.c index 1446d58e364c..521bb7377eb0 100644 --- a/arch/mips/netlogic/xlr/irq.c +++ b/arch/mips/netlogic/xlr/irq.c @@ -209,7 +209,7 @@ void __init init_xlr_irqs(void) irq_set_chip_and_handler(i, &xlr_pic, handle_level_irq); else irq_set_chip_and_handler(i, &nlm_cpu_intr, - handle_level_irq); + handle_percpu_irq); } #ifdef CONFIG_SMP irq_set_chip_and_handler(IRQ_IPI_SMP_FUNCTION, &nlm_cpu_intr, diff --git a/arch/mips/netlogic/xlr/smp.c b/arch/mips/netlogic/xlr/smp.c index b495a7f1433b..d842bce5c940 100644 --- a/arch/mips/netlogic/xlr/smp.c +++ b/arch/mips/netlogic/xlr/smp.c @@ -87,17 +87,7 @@ void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc) /* IRQ_IPI_SMP_RESCHEDULE handler */ void 
nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc) { - set_need_resched(); -} - -void nlm_common_ipi_handler(int irq, struct pt_regs *regs) -{ - if (irq == IRQ_IPI_SMP_FUNCTION) { - smp_call_function_interrupt(); - } else { - /* Announce that we are for reschduling */ - set_need_resched(); - } + scheduler_ipi(); } /* @@ -122,6 +112,7 @@ void nlm_smp_finish(void) #ifdef notyet nlm_common_msgring_cpu_init(); #endif + local_irq_enable(); } void nlm_cpus_done(void) diff --git a/arch/mips/nxp/pnx8550/common/setup.c b/arch/mips/nxp/pnx8550/common/setup.c index 64246c9c875c..71adac323323 100644 --- a/arch/mips/nxp/pnx8550/common/setup.c +++ b/arch/mips/nxp/pnx8550/common/setup.c @@ -140,6 +140,4 @@ void __init plat_mem_setup(void) PNX8XXX_UART_LCR_8BIT; ip3106_baud(UART_BASE, pnx8550_console_port) = 5; } - - return; } diff --git a/arch/mips/pci/ops-nile4.c b/arch/mips/pci/ops-nile4.c index b7f0fb0210f4..99929cf88419 100644 --- a/arch/mips/pci/ops-nile4.c +++ b/arch/mips/pci/ops-nile4.c @@ -4,7 +4,6 @@ #include <asm/bootinfo.h> #include <asm/lasat/lasat.h> -#include <asm/gt64120.h> #include <asm/nile4.h> #define PCI_ACCESS_READ 0 diff --git a/arch/mips/pmc-sierra/msp71xx/msp_setup.c b/arch/mips/pmc-sierra/msp71xx/msp_setup.c index 2413ea67877e..0abfbe04ffc9 100644 --- a/arch/mips/pmc-sierra/msp71xx/msp_setup.c +++ b/arch/mips/pmc-sierra/msp71xx/msp_setup.c @@ -228,13 +228,11 @@ void __init prom_init(void) */ msp_serial_setup(); -#ifdef CONFIG_MIPS_MT_SMP - register_smp_ops(&vsmp_smp_ops); -#endif - + if (register_vsmp_smp_ops()) { #ifdef CONFIG_MIPS_MT_SMTC - register_smp_ops(&msp_smtc_smp_ops); + register_smp_ops(&msp_smtc_smp_ops); #endif + } #ifdef CONFIG_PMCTWILED /* diff --git a/arch/mips/pnx8550/common/setup.c b/arch/mips/pnx8550/common/setup.c index 43cb3945fdbf..fccd6b0c6d3f 100644 --- a/arch/mips/pnx8550/common/setup.c +++ b/arch/mips/pnx8550/common/setup.c @@ -139,6 +139,4 @@ void __init plat_mem_setup(void) PNX8XXX_UART_LCR_8BIT; ip3106_baud(UART_BASE, pnx8550_console_port) = 5; } - - return; } diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c index 041fc1afc3f4..a969eb826634 100644 --- a/arch/mips/rb532/devices.c +++ b/arch/mips/rb532/devices.c @@ -251,28 +251,22 @@ static struct platform_device *rb532_devs[] = { static void __init parse_mac_addr(char *macstr) { - int i, j; - unsigned char result, value; + int i, h, l; for (i = 0; i < 6; i++) { - result = 0; - if (i != 5 && *(macstr + 2) != ':') return; - for (j = 0; j < 2; j++) { - if (isxdigit(*macstr) - && (value = - isdigit(*macstr) ? 
*macstr - - '0' : toupper(*macstr) - 'A' + 10) < 16) { - result = result * 16 + value; - macstr++; - } else - return; - } + h = hex_to_bin(*macstr++); + if (h == -1) + return; + + l = hex_to_bin(*macstr++); + if (l == -1) + return; macstr++; - korina_dev0_data.mac[i] = result; + korina_dev0_data.mac[i] = (h << 4) + l; } } diff --git a/arch/mips/sibyte/sb1250/irq.c b/arch/mips/sibyte/sb1250/irq.c index be4460a5f6a8..76ee045e2ce4 100644 --- a/arch/mips/sibyte/sb1250/irq.c +++ b/arch/mips/sibyte/sb1250/irq.c @@ -123,6 +123,13 @@ static int sb1250_set_affinity(struct irq_data *d, const struct cpumask *mask, } #endif +static void disable_sb1250_irq(struct irq_data *d) +{ + unsigned int irq = d->irq; + + sb1250_mask_irq(sb1250_irq_owner[irq], irq); +} + static void enable_sb1250_irq(struct irq_data *d) { unsigned int irq = d->irq; @@ -180,6 +187,7 @@ static struct irq_chip sb1250_irq_type = { .name = "SB1250-IMR", .irq_mask_ack = ack_sb1250_irq, .irq_unmask = enable_sb1250_irq, + .irq_mask = disable_sb1250_irq, #ifdef CONFIG_SMP .irq_set_affinity = sb1250_set_affinity #endif |