Diffstat (limited to 'arch/mips'): 257 files changed, 6473 insertions, 4242 deletions
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 4e238e6e661c..ad6badb6be71 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -15,7 +15,6 @@ config MIPS select HAVE_BPF_JIT if !CPU_MICROMIPS select ARCH_HAVE_CUSTOM_GPIO_H select HAVE_FUNCTION_TRACER - select HAVE_FUNCTION_TRACE_MCOUNT_TEST select HAVE_DYNAMIC_FTRACE select HAVE_FTRACE_MCOUNT_RECORD select HAVE_C_RECORDMCOUNT @@ -30,6 +29,7 @@ config MIPS select GENERIC_ATOMIC64 if !64BIT select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE select HAVE_DMA_ATTRS + select HAVE_DMA_CONTIGUOUS select HAVE_DMA_API_DEBUG select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW @@ -72,6 +72,7 @@ config MIPS_ALCHEMY select SYS_SUPPORTS_APM_EMULATION select ARCH_REQUIRE_GPIOLIB select SYS_SUPPORTS_ZBOOT + select COMMON_CLK config AR7 bool "Texas Instruments AR7" @@ -130,6 +131,8 @@ config BCM47XX select SYS_SUPPORTS_MIPS16 select SYS_HAS_EARLY_PRINTK select USE_GENERIC_EARLY_PRINTK_8250 + select GPIOLIB + select LEDS_GPIO_REGISTER help Support for BCM47XX based boards @@ -138,6 +141,7 @@ config BCM63XX select BOOT_RAW select CEVT_R4K select CSRC_R4K + select SYNC_R4K select DMA_NONCOHERENT select IRQ_CPU select SYS_SUPPORTS_32BIT_KERNEL @@ -350,6 +354,7 @@ config MIPS_SEAD3 bool "MIPS SEAD3 board" select BOOT_ELF32 select BOOT_RAW + select BUILTIN_DTB select CEVT_R4K select CSRC_R4K select CSRC_GIC @@ -543,6 +548,7 @@ config SGI_IP28 # select SYS_HAS_EARLY_PRINTK select SYS_SUPPORTS_64BIT_KERNEL select SYS_SUPPORTS_BIG_ENDIAN + select MIPS_L1_CACHE_SHIFT_7 help This is the SGI Indigo2 with R10000 processor. To compile a Linux kernel that runs on these, say Y here. @@ -738,6 +744,7 @@ config CAVIUM_OCTEON_SOC select ARCH_SPARSEMEM_ENABLE select SYS_SUPPORTS_SMP select NR_CPUS_DEFAULT_16 + select BUILTIN_DTB help This option supports all of the Octeon reference boards from Cavium Networks. It builds a kernel that dynamically determines the Octeon @@ -2026,7 +2033,9 @@ config MIPS_CMP bool "MIPS CMP framework support (DEPRECATED)" depends on SYS_SUPPORTS_MIPS_CMP select MIPS_GIC_IPI + select SMP select SYNC_R4K + select SYS_SUPPORTS_SMP select WEAK_ORDERING default n help @@ -2057,6 +2066,7 @@ config MIPS_CPS support is unavailable. config MIPS_CPS_PM + select MIPS_CPC bool config MIPS_GIC_IPI @@ -2110,9 +2120,9 @@ config CPU_MICROMIPS microMIPS ISA config CPU_HAS_MSA - bool "Support for the MIPS SIMD Architecture" + bool "Support for the MIPS SIMD Architecture (EXPERIMENTAL)" depends on CPU_SUPPORTS_MSA - default y + depends on 64BIT || MIPS_O32_FP64_SUPPORT help MIPS SIMD Architecture (MSA) introduces 128 bit wide vector registers and a set of SIMD instructions to operate on them. When this option @@ -2475,6 +2485,9 @@ config USE_OF select OF_EARLY_FLATTREE select IRQ_DOMAIN +config BUILTIN_DTB + bool + endmenu config LOCKDEP_SUPPORT diff --git a/arch/mips/Makefile b/arch/mips/Makefile index a8521de14791..23cb94806fbc 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -113,7 +113,16 @@ predef-le += -DMIPSEL -D_MIPSEL -D__MIPSEL -D__MIPSEL__ cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' && echo -EB $(undef-all) $(predef-be)) cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le)) -cflags-$(CONFIG_CPU_HAS_SMARTMIPS) += $(call cc-option,-msmartmips) +# For smartmips configurations, there are hundreds of warnings due to ISA overrides +# in assembly and header files. 
smartmips is only supported for MIPS32r1 onwards +# and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or +# similar directives in the kernel will spam the build logs with the following warnings: +# Warning: the `smartmips' extension requires MIPS32 revision 1 or greater +# or +# Warning: the 64-bit MIPS architecture does not support the `smartmips' extension +# Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has +# been fixed properly. +cflags-$(CONFIG_CPU_HAS_SMARTMIPS) += $(call cc-option,-msmartmips) -Wa,--no-warn cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,-mmicromips) cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \ @@ -151,8 +160,10 @@ cflags-$(CONFIG_CPU_NEVADA) += $(call cc-option,-march=rm5200,-march=r5000) \ -Wa,--trap cflags-$(CONFIG_CPU_RM7000) += $(call cc-option,-march=rm7000,-march=r5000) \ -Wa,--trap -cflags-$(CONFIG_CPU_SB1) += $(call cc-option,-march=sb1 -mno-mdmx -mno-mips3d,-march=r5000) \ +cflags-$(CONFIG_CPU_SB1) += $(call cc-option,-march=sb1,-march=r5000) \ -Wa,--trap +cflags-$(CONFIG_CPU_SB1) += $(call cc-option,-mno-mdmx) +cflags-$(CONFIG_CPU_SB1) += $(call cc-option,-mno-mips3d) cflags-$(CONFIG_CPU_R8000) += -march=r8000 -Wa,--trap cflags-$(CONFIG_CPU_R10000) += $(call cc-option,-march=r10000,-march=r8000) \ -Wa,--trap @@ -322,6 +333,16 @@ endif CLEAN_FILES += vmlinux.32 vmlinux.64 +# device-trees +core-$(CONFIG_BUILTIN_DTB) += arch/mips/boot/dts/ + +%.dtb %.dtb.S %.dtb.o: | scripts + $(Q)$(MAKE) $(build)=arch/mips/boot/dts arch/mips/boot/dts/$@ + +PHONY += dtbs +dtbs: scripts + $(Q)$(MAKE) $(build)=arch/mips/boot/dts dtbs + archprepare: ifdef CONFIG_MIPS32_N32 @echo ' Checking missing-syscalls for N32' @@ -356,6 +377,7 @@ define archhelp echo ' vmlinuz.srec - SREC zboot image' echo ' uImage - U-Boot image' echo ' uImage.gz - U-Boot image (gzip)' + echo ' dtbs - Device-tree blobs for enabled boards' echo echo ' These will be default as appropriate for a configured platform.' 
endef diff --git a/arch/mips/alchemy/board-mtx1.c b/arch/mips/alchemy/board-mtx1.c index 25a59a23547e..1e3b102389ef 100644 --- a/arch/mips/alchemy/board-mtx1.c +++ b/arch/mips/alchemy/board-mtx1.c @@ -85,10 +85,10 @@ void __init board_setup(void) #endif /* IS_ENABLED(CONFIG_USB_OHCI_HCD) */ /* Initialize sys_pinfunc */ - au_writel(SYS_PF_NI2, SYS_PINFUNC); + alchemy_wrsys(SYS_PF_NI2, AU1000_SYS_PINFUNC); /* Initialize GPIO */ - au_writel(~0, KSEG1ADDR(AU1000_SYS_PHYS_ADDR) + SYS_TRIOUTCLR); + alchemy_wrsys(~0, AU1000_SYS_TRIOUTCLR); alchemy_gpio_direction_output(0, 0); /* Disable M66EN (PCI 66MHz) */ alchemy_gpio_direction_output(3, 1); /* Disable PCI CLKRUN# */ alchemy_gpio_direction_output(1, 1); /* Enable EXT_IO3 */ diff --git a/arch/mips/alchemy/board-xxs1500.c b/arch/mips/alchemy/board-xxs1500.c index 3fb814be0e91..0fc53e08a894 100644 --- a/arch/mips/alchemy/board-xxs1500.c +++ b/arch/mips/alchemy/board-xxs1500.c @@ -87,9 +87,9 @@ void __init board_setup(void) alchemy_gpio2_enable(); /* Set multiple use pins (UART3/GPIO) to UART (it's used as UART too) */ - pin_func = au_readl(SYS_PINFUNC) & ~SYS_PF_UR3; + pin_func = alchemy_rdsys(AU1000_SYS_PINFUNC) & ~SYS_PF_UR3; pin_func |= SYS_PF_UR3; - au_writel(pin_func, SYS_PINFUNC); + alchemy_wrsys(pin_func, AU1000_SYS_PINFUNC); /* Enable UART */ alchemy_uart_enable(AU1000_UART3_PHYS_ADDR); diff --git a/arch/mips/alchemy/common/Makefile b/arch/mips/alchemy/common/Makefile index cb83d8d21aef..f64744f3b59f 100644 --- a/arch/mips/alchemy/common/Makefile +++ b/arch/mips/alchemy/common/Makefile @@ -5,8 +5,8 @@ # Makefile for the Alchemy Au1xx0 CPUs, generic files. # -obj-y += prom.o time.o clocks.o platform.o power.o setup.o \ - sleeper.o dma.o dbdma.o vss.o irq.o usb.o +obj-y += prom.o time.o clock.o platform.o power.o \ + setup.o sleeper.o dma.o dbdma.o vss.o irq.o usb.o # optional gpiolib support ifeq ($(CONFIG_ALCHEMY_GPIO_INDIRECT),) diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c new file mode 100644 index 000000000000..d7557cde271a --- /dev/null +++ b/arch/mips/alchemy/common/clock.c @@ -0,0 +1,1094 @@ +/* + * Alchemy clocks. + * + * Exposes all configurable internal clock sources to the clk framework. + * + * We have: + * - Root source, usually 12MHz supplied by an external crystal + * - 3 PLLs which generate multiples of root rate [AUX, CPU, AUX2] + * + * Dividers: + * - 6 clock dividers with: + * * selectable source [one of the PLLs], + * * output divided between [2 .. 512 in steps of 2] (!Au1300) + * or [1 .. 256 in steps of 1] (Au1300), + * * can be enabled individually. + * + * - up to 6 "internal" (fixed) consumers which: + * * take either AUXPLL or one of the above 6 dividers as input, + * * divide this input by 1, 2, or 4 (and 3 on Au1300). + * * can be disabled separately. + * + * Misc clocks: + * - sysbus clock: CPU core clock (CPUPLL) divided by 2, 3 or 4. + * depends on board design and should be set by bootloader, read-only. + * - peripheral clock: half the rate of sysbus clock, source for a lot + * of peripheral blocks, read-only. + * - memory clock: clk rate to main memory chips, depends on board + * design and is read-only, + * - lrclk: the static bus clock signal for synchronous operation. + * depends on board design, must be set by bootloader, + * but may be required to correctly configure devices attached to + * the static bus. The Au1000/1500/1100 manuals call it LCLK, on + * later models it's called RCLK. 
+ */ + +#include <linux/init.h> +#include <linux/io.h> +#include <linux/clk-provider.h> +#include <linux/clkdev.h> +#include <linux/clk-private.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/types.h> +#include <asm/mach-au1x00/au1000.h> + +/* Base clock: 12MHz is the default in all databooks, and I haven't + * found any board yet which uses a different rate. + */ +#define ALCHEMY_ROOTCLK_RATE 12000000 + +/* + * the internal sources which can be driven by the PLLs and dividers. + * Names taken from the databooks, refer to them for more information, + * especially which ones are share a clock line. + */ +static const char * const alchemy_au1300_intclknames[] = { + "lcd_intclk", "gpemgp_clk", "maempe_clk", "maebsa_clk", + "EXTCLK0", "EXTCLK1" +}; + +static const char * const alchemy_au1200_intclknames[] = { + "lcd_intclk", NULL, NULL, NULL, "EXTCLK0", "EXTCLK1" +}; + +static const char * const alchemy_au1550_intclknames[] = { + "usb_clk", "psc0_intclk", "psc1_intclk", "pci_clko", + "EXTCLK0", "EXTCLK1" +}; + +static const char * const alchemy_au1100_intclknames[] = { + "usb_clk", "lcd_intclk", NULL, "i2s_clk", "EXTCLK0", "EXTCLK1" +}; + +static const char * const alchemy_au1500_intclknames[] = { + NULL, "usbd_clk", "usbh_clk", "pci_clko", "EXTCLK0", "EXTCLK1" +}; + +static const char * const alchemy_au1000_intclknames[] = { + "irda_clk", "usbd_clk", "usbh_clk", "i2s_clk", "EXTCLK0", + "EXTCLK1" +}; + +/* aliases for a few on-chip sources which are either shared + * or have gone through name changes. + */ +static struct clk_aliastable { + char *alias; + char *base; + int cputype; +} alchemy_clk_aliases[] __initdata = { + { "usbh_clk", "usb_clk", ALCHEMY_CPU_AU1100 }, + { "usbd_clk", "usb_clk", ALCHEMY_CPU_AU1100 }, + { "irda_clk", "usb_clk", ALCHEMY_CPU_AU1100 }, + { "usbh_clk", "usb_clk", ALCHEMY_CPU_AU1550 }, + { "usbd_clk", "usb_clk", ALCHEMY_CPU_AU1550 }, + { "psc2_intclk", "usb_clk", ALCHEMY_CPU_AU1550 }, + { "psc3_intclk", "EXTCLK0", ALCHEMY_CPU_AU1550 }, + { "psc0_intclk", "EXTCLK0", ALCHEMY_CPU_AU1200 }, + { "psc1_intclk", "EXTCLK1", ALCHEMY_CPU_AU1200 }, + { "psc0_intclk", "EXTCLK0", ALCHEMY_CPU_AU1300 }, + { "psc2_intclk", "EXTCLK0", ALCHEMY_CPU_AU1300 }, + { "psc1_intclk", "EXTCLK1", ALCHEMY_CPU_AU1300 }, + { "psc3_intclk", "EXTCLK1", ALCHEMY_CPU_AU1300 }, + + { NULL, NULL, 0 }, +}; + +#define IOMEM(x) ((void __iomem *)(KSEG1ADDR(CPHYSADDR(x)))) + +/* access locks to SYS_FREQCTRL0/1 and SYS_CLKSRC registers */ +static spinlock_t alchemy_clk_fg0_lock; +static spinlock_t alchemy_clk_fg1_lock; +static spinlock_t alchemy_clk_csrc_lock; + +/* CPU Core clock *****************************************************/ + +static unsigned long alchemy_clk_cpu_recalc(struct clk_hw *hw, + unsigned long parent_rate) +{ + unsigned long t; + + /* + * On early Au1000, sys_cpupll was write-only. Since these + * silicon versions of Au1000 are not sold, we don't bend + * over backwards trying to determine the frequency. 
+ */ + if (unlikely(au1xxx_cpu_has_pll_wo())) + t = 396000000; + else { + t = alchemy_rdsys(AU1000_SYS_CPUPLL) & 0x7f; + t *= parent_rate; + } + + return t; +} + +static struct clk_ops alchemy_clkops_cpu = { + .recalc_rate = alchemy_clk_cpu_recalc, +}; + +static struct clk __init *alchemy_clk_setup_cpu(const char *parent_name, + int ctype) +{ + struct clk_init_data id; + struct clk_hw *h; + + h = kzalloc(sizeof(*h), GFP_KERNEL); + if (!h) + return ERR_PTR(-ENOMEM); + + id.name = ALCHEMY_CPU_CLK; + id.parent_names = &parent_name; + id.num_parents = 1; + id.flags = CLK_IS_BASIC; + id.ops = &alchemy_clkops_cpu; + h->init = &id; + + return clk_register(NULL, h); +} + +/* AUXPLLs ************************************************************/ + +struct alchemy_auxpll_clk { + struct clk_hw hw; + unsigned long reg; /* au1300 has also AUXPLL2 */ + int maxmult; /* max multiplier */ +}; +#define to_auxpll_clk(x) container_of(x, struct alchemy_auxpll_clk, hw) + +static unsigned long alchemy_clk_aux_recalc(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct alchemy_auxpll_clk *a = to_auxpll_clk(hw); + + return (alchemy_rdsys(a->reg) & 0xff) * parent_rate; +} + +static int alchemy_clk_aux_setr(struct clk_hw *hw, + unsigned long rate, + unsigned long parent_rate) +{ + struct alchemy_auxpll_clk *a = to_auxpll_clk(hw); + unsigned long d = rate; + + if (rate) + d /= parent_rate; + else + d = 0; + + /* minimum is 84MHz, max is 756-1032 depending on variant */ + if (((d < 7) && (d != 0)) || (d > a->maxmult)) + return -EINVAL; + + alchemy_wrsys(d, a->reg); + return 0; +} + +static long alchemy_clk_aux_roundr(struct clk_hw *hw, + unsigned long rate, + unsigned long *parent_rate) +{ + struct alchemy_auxpll_clk *a = to_auxpll_clk(hw); + unsigned long mult; + + if (!rate || !*parent_rate) + return 0; + + mult = rate / (*parent_rate); + + if (mult && (mult < 7)) + mult = 7; + if (mult > a->maxmult) + mult = a->maxmult; + + return (*parent_rate) * mult; +} + +static struct clk_ops alchemy_clkops_aux = { + .recalc_rate = alchemy_clk_aux_recalc, + .set_rate = alchemy_clk_aux_setr, + .round_rate = alchemy_clk_aux_roundr, +}; + +static struct clk __init *alchemy_clk_setup_aux(const char *parent_name, + char *name, int maxmult, + unsigned long reg) +{ + struct clk_init_data id; + struct clk *c; + struct alchemy_auxpll_clk *a; + + a = kzalloc(sizeof(*a), GFP_KERNEL); + if (!a) + return ERR_PTR(-ENOMEM); + + id.name = name; + id.parent_names = &parent_name; + id.num_parents = 1; + id.flags = CLK_GET_RATE_NOCACHE; + id.ops = &alchemy_clkops_aux; + + a->reg = reg; + a->maxmult = maxmult; + a->hw.init = &id; + + c = clk_register(NULL, &a->hw); + if (!IS_ERR(c)) + clk_register_clkdev(c, name, NULL); + else + kfree(a); + + return c; +} + +/* sysbus_clk *********************************************************/ + +static struct clk __init *alchemy_clk_setup_sysbus(const char *pn) +{ + unsigned long v = (alchemy_rdsys(AU1000_SYS_POWERCTRL) & 3) + 2; + struct clk *c; + + c = clk_register_fixed_factor(NULL, ALCHEMY_SYSBUS_CLK, + pn, 0, 1, v); + if (!IS_ERR(c)) + clk_register_clkdev(c, ALCHEMY_SYSBUS_CLK, NULL); + return c; +} + +/* Peripheral Clock ***************************************************/ + +static struct clk __init *alchemy_clk_setup_periph(const char *pn) +{ + /* Peripheral clock runs at half the rate of sysbus clk */ + struct clk *c; + + c = clk_register_fixed_factor(NULL, ALCHEMY_PERIPH_CLK, + pn, 0, 1, 2); + if (!IS_ERR(c)) + clk_register_clkdev(c, ALCHEMY_PERIPH_CLK, NULL); + return c; +} + +/* mem clock 
**********************************************************/ + +static struct clk __init *alchemy_clk_setup_mem(const char *pn, int ct) +{ + void __iomem *addr = IOMEM(AU1000_MEM_PHYS_ADDR); + unsigned long v; + struct clk *c; + int div; + + switch (ct) { + case ALCHEMY_CPU_AU1550: + case ALCHEMY_CPU_AU1200: + v = __raw_readl(addr + AU1550_MEM_SDCONFIGB); + div = (v & (1 << 15)) ? 1 : 2; + break; + case ALCHEMY_CPU_AU1300: + v = __raw_readl(addr + AU1550_MEM_SDCONFIGB); + div = (v & (1 << 31)) ? 1 : 2; + break; + case ALCHEMY_CPU_AU1000: + case ALCHEMY_CPU_AU1500: + case ALCHEMY_CPU_AU1100: + default: + div = 2; + break; + } + + c = clk_register_fixed_factor(NULL, ALCHEMY_MEM_CLK, pn, + 0, 1, div); + if (!IS_ERR(c)) + clk_register_clkdev(c, ALCHEMY_MEM_CLK, NULL); + return c; +} + +/* lrclk: external synchronous static bus clock ***********************/ + +static struct clk __init *alchemy_clk_setup_lrclk(const char *pn) +{ + /* MEM_STCFG0[15:13] = divisor. + * L/RCLK = periph_clk / (divisor + 1) + * On Au1000, Au1500, Au1100 it's called LCLK, + * on later models it's called RCLK, but it's the same thing. + */ + struct clk *c; + unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0) >> 13; + + v = (v & 7) + 1; + c = clk_register_fixed_factor(NULL, ALCHEMY_LR_CLK, + pn, 0, 1, v); + if (!IS_ERR(c)) + clk_register_clkdev(c, ALCHEMY_LR_CLK, NULL); + return c; +} + +/* Clock dividers and muxes *******************************************/ + +/* data for fgen and csrc mux-dividers */ +struct alchemy_fgcs_clk { + struct clk_hw hw; + spinlock_t *reglock; /* register lock */ + unsigned long reg; /* SYS_FREQCTRL0/1 */ + int shift; /* offset in register */ + int parent; /* parent before disable [Au1300] */ + int isen; /* is it enabled? */ + int *dt; /* dividertable for csrc */ +}; +#define to_fgcs_clk(x) container_of(x, struct alchemy_fgcs_clk, hw) + +static long alchemy_calc_div(unsigned long rate, unsigned long prate, + int scale, int maxdiv, unsigned long *rv) +{ + long div1, div2; + + div1 = prate / rate; + if ((prate / div1) > rate) + div1++; + + if (scale == 2) { /* only div-by-multiple-of-2 possible */ + if (div1 & 1) + div1++; /* stay <=prate */ + } + + div2 = (div1 / scale) - 1; /* value to write to register */ + + if (div2 > maxdiv) + div2 = maxdiv; + if (rv) + *rv = div2; + + div1 = ((div2 + 1) * scale); + return div1; +} + +static long alchemy_clk_fgcs_detr(struct clk_hw *hw, unsigned long rate, + unsigned long *best_parent_rate, + struct clk **best_parent_clk, + int scale, int maxdiv) +{ + struct clk *pc, *bpc, *free; + long tdv, tpr, pr, nr, br, bpr, diff, lastdiff; + int j; + + lastdiff = INT_MAX; + bpr = 0; + bpc = NULL; + br = -EINVAL; + free = NULL; + + /* look at the rates each enabled parent supplies and select + * the one that gets closest to but not over the requested rate. + */ + for (j = 0; j < 7; j++) { + pc = clk_get_parent_by_index(hw->clk, j); + if (!pc) + break; + + /* if this parent is currently unused, remember it. + * XXX: I know it's a layering violation, but it works + * so well.. 
(if (!clk_has_active_children(pc)) ) + */ + if (pc->prepare_count == 0) { + if (!free) + free = pc; + } + + pr = clk_get_rate(pc); + if (pr < rate) + continue; + + /* what can hardware actually provide */ + tdv = alchemy_calc_div(rate, pr, scale, maxdiv, NULL); + nr = pr / tdv; + diff = rate - nr; + if (nr > rate) + continue; + + if (diff < lastdiff) { + lastdiff = diff; + bpr = pr; + bpc = pc; + br = nr; + } + if (diff == 0) + break; + } + + /* if we couldn't get the exact rate we wanted from the enabled + * parents, maybe we can tell an available disabled/inactive one + * to give us a rate we can divide down to the requested rate. + */ + if (lastdiff && free) { + for (j = (maxdiv == 4) ? 1 : scale; j <= maxdiv; j += scale) { + tpr = rate * j; + if (tpr < 0) + break; + pr = clk_round_rate(free, tpr); + + tdv = alchemy_calc_div(rate, pr, scale, maxdiv, NULL); + nr = pr / tdv; + diff = rate - nr; + if (nr > rate) + continue; + if (diff < lastdiff) { + lastdiff = diff; + bpr = pr; + bpc = free; + br = nr; + } + if (diff == 0) + break; + } + } + + *best_parent_rate = bpr; + *best_parent_clk = bpc; + return br; +} + +static int alchemy_clk_fgv1_en(struct clk_hw *hw) +{ + struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); + unsigned long v, flags; + + spin_lock_irqsave(c->reglock, flags); + v = alchemy_rdsys(c->reg); + v |= (1 << 1) << c->shift; + alchemy_wrsys(v, c->reg); + spin_unlock_irqrestore(c->reglock, flags); + + return 0; +} + +static int alchemy_clk_fgv1_isen(struct clk_hw *hw) +{ + struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); + unsigned long v = alchemy_rdsys(c->reg) >> (c->shift + 1); + + return v & 1; +} + +static void alchemy_clk_fgv1_dis(struct clk_hw *hw) +{ + struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); + unsigned long v, flags; + + spin_lock_irqsave(c->reglock, flags); + v = alchemy_rdsys(c->reg); + v &= ~((1 << 1) << c->shift); + alchemy_wrsys(v, c->reg); + spin_unlock_irqrestore(c->reglock, flags); +} + +static int alchemy_clk_fgv1_setp(struct clk_hw *hw, u8 index) +{ + struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); + unsigned long v, flags; + + spin_lock_irqsave(c->reglock, flags); + v = alchemy_rdsys(c->reg); + if (index) + v |= (1 << c->shift); + else + v &= ~(1 << c->shift); + alchemy_wrsys(v, c->reg); + spin_unlock_irqrestore(c->reglock, flags); + + return 0; +} + +static u8 alchemy_clk_fgv1_getp(struct clk_hw *hw) +{ + struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); + + return (alchemy_rdsys(c->reg) >> c->shift) & 1; +} + +static int alchemy_clk_fgv1_setr(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); + unsigned long div, v, flags, ret; + int sh = c->shift + 2; + + if (!rate || !parent_rate || rate > (parent_rate / 2)) + return -EINVAL; + ret = alchemy_calc_div(rate, parent_rate, 2, 512, &div); + spin_lock_irqsave(c->reglock, flags); + v = alchemy_rdsys(c->reg); + v &= ~(0xff << sh); + v |= div << sh; + alchemy_wrsys(v, c->reg); + spin_unlock_irqrestore(c->reglock, flags); + + return 0; +} + +static unsigned long alchemy_clk_fgv1_recalc(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); + unsigned long v = alchemy_rdsys(c->reg) >> (c->shift + 2); + + v = ((v & 0xff) + 1) * 2; + return parent_rate / v; +} + +static long alchemy_clk_fgv1_detr(struct clk_hw *hw, unsigned long rate, + unsigned long *best_parent_rate, + struct clk **best_parent_clk) +{ + return alchemy_clk_fgcs_detr(hw, rate, best_parent_rate, + best_parent_clk, 2, 512); +} + +/* Au1000, Au1100, 
Au15x0, Au12x0 */ +static struct clk_ops alchemy_clkops_fgenv1 = { + .recalc_rate = alchemy_clk_fgv1_recalc, + .determine_rate = alchemy_clk_fgv1_detr, + .set_rate = alchemy_clk_fgv1_setr, + .set_parent = alchemy_clk_fgv1_setp, + .get_parent = alchemy_clk_fgv1_getp, + .enable = alchemy_clk_fgv1_en, + .disable = alchemy_clk_fgv1_dis, + .is_enabled = alchemy_clk_fgv1_isen, +}; + +static void __alchemy_clk_fgv2_en(struct alchemy_fgcs_clk *c) +{ + unsigned long v = alchemy_rdsys(c->reg); + + v &= ~(3 << c->shift); + v |= (c->parent & 3) << c->shift; + alchemy_wrsys(v, c->reg); + c->isen = 1; +} + +static int alchemy_clk_fgv2_en(struct clk_hw *hw) +{ + struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); + unsigned long flags; + + /* enable by setting the previous parent clock */ + spin_lock_irqsave(c->reglock, flags); + __alchemy_clk_fgv2_en(c); + spin_unlock_irqrestore(c->reglock, flags); + + return 0; +} + +static int alchemy_clk_fgv2_isen(struct clk_hw *hw) +{ + struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); + + return ((alchemy_rdsys(c->reg) >> c->shift) & 3) != 0; +} + +static void alchemy_clk_fgv2_dis(struct clk_hw *hw) +{ + struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); + unsigned long v, flags; + + spin_lock_irqsave(c->reglock, flags); + v = alchemy_rdsys(c->reg); + v &= ~(3 << c->shift); /* set input mux to "disabled" state */ + alchemy_wrsys(v, c->reg); + c->isen = 0; + spin_unlock_irqrestore(c->reglock, flags); +} + +static int alchemy_clk_fgv2_setp(struct clk_hw *hw, u8 index) +{ + struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); + unsigned long flags; + + spin_lock_irqsave(c->reglock, flags); + c->parent = index + 1; /* value to write to register */ + if (c->isen) + __alchemy_clk_fgv2_en(c); + spin_unlock_irqrestore(c->reglock, flags); + + return 0; +} + +static u8 alchemy_clk_fgv2_getp(struct clk_hw *hw) +{ + struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); + unsigned long flags, v; + + spin_lock_irqsave(c->reglock, flags); + v = c->parent - 1; + spin_unlock_irqrestore(c->reglock, flags); + return v; +} + +/* fg0-2 and fg4-6 share a "scale"-bit. With this bit cleared, the + * dividers behave exactly as on previous models (dividers are multiples + * of 2); with the bit set, dividers are multiples of 1, halving their + * range, but making them also much more flexible. + */ +static int alchemy_clk_fgv2_setr(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); + int sh = c->shift + 2; + unsigned long div, v, flags, ret; + + if (!rate || !parent_rate || rate > parent_rate) + return -EINVAL; + + v = alchemy_rdsys(c->reg) & (1 << 30); /* test "scale" bit */ + ret = alchemy_calc_div(rate, parent_rate, v ? 1 : 2, + v ? 
256 : 512, &div); + + spin_lock_irqsave(c->reglock, flags); + v = alchemy_rdsys(c->reg); + v &= ~(0xff << sh); + v |= (div & 0xff) << sh; + alchemy_wrsys(v, c->reg); + spin_unlock_irqrestore(c->reglock, flags); + + return 0; +} + +static unsigned long alchemy_clk_fgv2_recalc(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); + int sh = c->shift + 2; + unsigned long v, t; + + v = alchemy_rdsys(c->reg); + t = parent_rate / (((v >> sh) & 0xff) + 1); + if ((v & (1 << 30)) == 0) /* test scale bit */ + t /= 2; + + return t; +} + +static long alchemy_clk_fgv2_detr(struct clk_hw *hw, unsigned long rate, + unsigned long *best_parent_rate, + struct clk **best_parent_clk) +{ + struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); + int scale, maxdiv; + + if (alchemy_rdsys(c->reg) & (1 << 30)) { + scale = 1; + maxdiv = 256; + } else { + scale = 2; + maxdiv = 512; + } + + return alchemy_clk_fgcs_detr(hw, rate, best_parent_rate, + best_parent_clk, scale, maxdiv); +} + +/* Au1300 larger input mux, no separate disable bit, flexible divider */ +static struct clk_ops alchemy_clkops_fgenv2 = { + .recalc_rate = alchemy_clk_fgv2_recalc, + .determine_rate = alchemy_clk_fgv2_detr, + .set_rate = alchemy_clk_fgv2_setr, + .set_parent = alchemy_clk_fgv2_setp, + .get_parent = alchemy_clk_fgv2_getp, + .enable = alchemy_clk_fgv2_en, + .disable = alchemy_clk_fgv2_dis, + .is_enabled = alchemy_clk_fgv2_isen, +}; + +static const char * const alchemy_clk_fgv1_parents[] = { + ALCHEMY_CPU_CLK, ALCHEMY_AUXPLL_CLK +}; + +static const char * const alchemy_clk_fgv2_parents[] = { + ALCHEMY_AUXPLL2_CLK, ALCHEMY_CPU_CLK, ALCHEMY_AUXPLL_CLK +}; + +static const char * const alchemy_clk_fgen_names[] = { + ALCHEMY_FG0_CLK, ALCHEMY_FG1_CLK, ALCHEMY_FG2_CLK, + ALCHEMY_FG3_CLK, ALCHEMY_FG4_CLK, ALCHEMY_FG5_CLK }; + +static int __init alchemy_clk_init_fgens(int ctype) +{ + struct clk *c; + struct clk_init_data id; + struct alchemy_fgcs_clk *a; + unsigned long v; + int i, ret; + + switch (ctype) { + case ALCHEMY_CPU_AU1000...ALCHEMY_CPU_AU1200: + id.ops = &alchemy_clkops_fgenv1; + id.parent_names = (const char **)alchemy_clk_fgv1_parents; + id.num_parents = 2; + break; + case ALCHEMY_CPU_AU1300: + id.ops = &alchemy_clkops_fgenv2; + id.parent_names = (const char **)alchemy_clk_fgv2_parents; + id.num_parents = 3; + break; + default: + return -ENODEV; + } + id.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE; + + a = kzalloc((sizeof(*a)) * 6, GFP_KERNEL); + if (!a) + return -ENOMEM; + + spin_lock_init(&alchemy_clk_fg0_lock); + spin_lock_init(&alchemy_clk_fg1_lock); + ret = 0; + for (i = 0; i < 6; i++) { + id.name = alchemy_clk_fgen_names[i]; + a->shift = 10 * (i < 3 ? i : i - 3); + if (i > 2) { + a->reg = AU1000_SYS_FREQCTRL1; + a->reglock = &alchemy_clk_fg1_lock; + } else { + a->reg = AU1000_SYS_FREQCTRL0; + a->reglock = &alchemy_clk_fg0_lock; + } + + /* default to first parent if bootloader has set + * the mux to disabled state. 
+ */ + if (ctype == ALCHEMY_CPU_AU1300) { + v = alchemy_rdsys(a->reg); + a->parent = (v >> a->shift) & 3; + if (!a->parent) { + a->parent = 1; + a->isen = 0; + } else + a->isen = 1; + } + + a->hw.init = &id; + c = clk_register(NULL, &a->hw); + if (IS_ERR(c)) + ret++; + else + clk_register_clkdev(c, id.name, NULL); + a++; + } + + return ret; +} + +/* internal sources muxes *********************************************/ + +static int alchemy_clk_csrc_isen(struct clk_hw *hw) +{ + struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); + unsigned long v = alchemy_rdsys(c->reg); + + return (((v >> c->shift) >> 2) & 7) != 0; +} + +static void __alchemy_clk_csrc_en(struct alchemy_fgcs_clk *c) +{ + unsigned long v = alchemy_rdsys(c->reg); + + v &= ~((7 << 2) << c->shift); + v |= ((c->parent & 7) << 2) << c->shift; + alchemy_wrsys(v, c->reg); + c->isen = 1; +} + +static int alchemy_clk_csrc_en(struct clk_hw *hw) +{ + struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); + unsigned long flags; + + /* enable by setting the previous parent clock */ + spin_lock_irqsave(c->reglock, flags); + __alchemy_clk_csrc_en(c); + spin_unlock_irqrestore(c->reglock, flags); + + return 0; +} + +static void alchemy_clk_csrc_dis(struct clk_hw *hw) +{ + struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); + unsigned long v, flags; + + spin_lock_irqsave(c->reglock, flags); + v = alchemy_rdsys(c->reg); + v &= ~((3 << 2) << c->shift); /* mux to "disabled" state */ + alchemy_wrsys(v, c->reg); + c->isen = 0; + spin_unlock_irqrestore(c->reglock, flags); +} + +static int alchemy_clk_csrc_setp(struct clk_hw *hw, u8 index) +{ + struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); + unsigned long flags; + + spin_lock_irqsave(c->reglock, flags); + c->parent = index + 1; /* value to write to register */ + if (c->isen) + __alchemy_clk_csrc_en(c); + spin_unlock_irqrestore(c->reglock, flags); + + return 0; +} + +static u8 alchemy_clk_csrc_getp(struct clk_hw *hw) +{ + struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); + + return c->parent - 1; +} + +static unsigned long alchemy_clk_csrc_recalc(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); + unsigned long v = (alchemy_rdsys(c->reg) >> c->shift) & 3; + + return parent_rate / c->dt[v]; +} + +static int alchemy_clk_csrc_setr(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); + unsigned long d, v, flags; + int i; + + if (!rate || !parent_rate || rate > parent_rate) + return -EINVAL; + + d = (parent_rate + (rate / 2)) / rate; + if (d > 4) + return -EINVAL; + if ((d == 3) && (c->dt[2] != 3)) + d = 4; + + for (i = 0; i < 4; i++) + if (c->dt[i] == d) + break; + + if (i >= 4) + return -EINVAL; /* oops */ + + spin_lock_irqsave(c->reglock, flags); + v = alchemy_rdsys(c->reg); + v &= ~(3 << c->shift); + v |= (i & 3) << c->shift; + alchemy_wrsys(v, c->reg); + spin_unlock_irqrestore(c->reglock, flags); + + return 0; +} + +static long alchemy_clk_csrc_detr(struct clk_hw *hw, unsigned long rate, + unsigned long *best_parent_rate, + struct clk **best_parent_clk) +{ + struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); + int scale = c->dt[2] == 3 ? 
1 : 2; /* au1300 check */ + + return alchemy_clk_fgcs_detr(hw, rate, best_parent_rate, + best_parent_clk, scale, 4); +} + +static struct clk_ops alchemy_clkops_csrc = { + .recalc_rate = alchemy_clk_csrc_recalc, + .determine_rate = alchemy_clk_csrc_detr, + .set_rate = alchemy_clk_csrc_setr, + .set_parent = alchemy_clk_csrc_setp, + .get_parent = alchemy_clk_csrc_getp, + .enable = alchemy_clk_csrc_en, + .disable = alchemy_clk_csrc_dis, + .is_enabled = alchemy_clk_csrc_isen, +}; + +static const char * const alchemy_clk_csrc_parents[] = { + /* disabled at index 0 */ ALCHEMY_AUXPLL_CLK, + ALCHEMY_FG0_CLK, ALCHEMY_FG1_CLK, ALCHEMY_FG2_CLK, + ALCHEMY_FG3_CLK, ALCHEMY_FG4_CLK, ALCHEMY_FG5_CLK +}; + +/* divider tables */ +static int alchemy_csrc_dt1[] = { 1, 4, 1, 2 }; /* rest */ +static int alchemy_csrc_dt2[] = { 1, 4, 3, 2 }; /* Au1300 */ + +static int __init alchemy_clk_setup_imux(int ctype) +{ + struct alchemy_fgcs_clk *a; + const char * const *names; + struct clk_init_data id; + unsigned long v; + int i, ret, *dt; + struct clk *c; + + id.ops = &alchemy_clkops_csrc; + id.parent_names = (const char **)alchemy_clk_csrc_parents; + id.num_parents = 7; + id.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE; + + dt = alchemy_csrc_dt1; + switch (ctype) { + case ALCHEMY_CPU_AU1000: + names = alchemy_au1000_intclknames; + break; + case ALCHEMY_CPU_AU1500: + names = alchemy_au1500_intclknames; + break; + case ALCHEMY_CPU_AU1100: + names = alchemy_au1100_intclknames; + break; + case ALCHEMY_CPU_AU1550: + names = alchemy_au1550_intclknames; + break; + case ALCHEMY_CPU_AU1200: + names = alchemy_au1200_intclknames; + break; + case ALCHEMY_CPU_AU1300: + dt = alchemy_csrc_dt2; + names = alchemy_au1300_intclknames; + break; + default: + return -ENODEV; + } + + a = kzalloc((sizeof(*a)) * 6, GFP_KERNEL); + if (!a) + return -ENOMEM; + + spin_lock_init(&alchemy_clk_csrc_lock); + ret = 0; + + for (i = 0; i < 6; i++) { + id.name = names[i]; + if (!id.name) + goto next; + + a->shift = i * 5; + a->reg = AU1000_SYS_CLKSRC; + a->reglock = &alchemy_clk_csrc_lock; + a->dt = dt; + + /* default to first parent clock if mux is initially + * set to disabled state. + */ + v = alchemy_rdsys(a->reg); + a->parent = ((v >> a->shift) >> 2) & 7; + if (!a->parent) { + a->parent = 1; + a->isen = 0; + } else + a->isen = 1; + + a->hw.init = &id; + c = clk_register(NULL, &a->hw); + if (IS_ERR(c)) + ret++; + else + clk_register_clkdev(c, id.name, NULL); +next: + a++; + } + + return ret; +} + + +/**********************************************************************/ + + +#define ERRCK(x) \ + if (IS_ERR(x)) { \ + ret = PTR_ERR(x); \ + goto out; \ + } + +static int __init alchemy_clk_init(void) +{ + int ctype = alchemy_get_cputype(), ret, i; + struct clk_aliastable *t = alchemy_clk_aliases; + struct clk *c; + + /* Root of the Alchemy clock tree: external 12MHz crystal osc */ + c = clk_register_fixed_rate(NULL, ALCHEMY_ROOT_CLK, NULL, + CLK_IS_ROOT, + ALCHEMY_ROOTCLK_RATE); + ERRCK(c) + + /* CPU core clock */ + c = alchemy_clk_setup_cpu(ALCHEMY_ROOT_CLK, ctype); + ERRCK(c) + + /* AUXPLLs: max 1GHz on Au1300, 748MHz on older models */ + i = (ctype == ALCHEMY_CPU_AU1300) ? 
84 : 63; + c = alchemy_clk_setup_aux(ALCHEMY_ROOT_CLK, ALCHEMY_AUXPLL_CLK, + i, AU1000_SYS_AUXPLL); + ERRCK(c) + + if (ctype == ALCHEMY_CPU_AU1300) { + c = alchemy_clk_setup_aux(ALCHEMY_ROOT_CLK, + ALCHEMY_AUXPLL2_CLK, i, + AU1300_SYS_AUXPLL2); + ERRCK(c) + } + + /* sysbus clock: cpu core clock divided by 2, 3 or 4 */ + c = alchemy_clk_setup_sysbus(ALCHEMY_CPU_CLK); + ERRCK(c) + + /* peripheral clock: runs at half rate of sysbus clk */ + c = alchemy_clk_setup_periph(ALCHEMY_SYSBUS_CLK); + ERRCK(c) + + /* SDR/DDR memory clock */ + c = alchemy_clk_setup_mem(ALCHEMY_SYSBUS_CLK, ctype); + ERRCK(c) + + /* L/RCLK: external static bus clock for synchronous mode */ + c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK); + ERRCK(c) + + /* Frequency dividers 0-5 */ + ret = alchemy_clk_init_fgens(ctype); + if (ret) { + ret = -ENODEV; + goto out; + } + + /* diving muxes for internal sources */ + ret = alchemy_clk_setup_imux(ctype); + if (ret) { + ret = -ENODEV; + goto out; + } + + /* set up aliases drivers might look for */ + while (t->base) { + if (t->cputype == ctype) + clk_add_alias(t->alias, NULL, t->base, NULL); + t++; + } + + pr_info("Alchemy clocktree installed\n"); + return 0; + +out: + return ret; +} +postcore_initcall(alchemy_clk_init); diff --git a/arch/mips/alchemy/common/clocks.c b/arch/mips/alchemy/common/clocks.c deleted file mode 100644 index f38298a8b98c..000000000000 --- a/arch/mips/alchemy/common/clocks.c +++ /dev/null @@ -1,105 +0,0 @@ -/* - * BRIEF MODULE DESCRIPTION - * Simple Au1xx0 clocks routines. - * - * Copyright 2001, 2008 MontaVista Software Inc. - * Author: MontaVista Software, Inc. <source@mvista.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN - * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 675 Mass Ave, Cambridge, MA 02139, USA. - */ - -#include <linux/module.h> -#include <linux/spinlock.h> -#include <asm/time.h> -#include <asm/mach-au1x00/au1000.h> - -/* - * I haven't found anyone that doesn't use a 12 MHz source clock, - * but just in case..... - */ -#define AU1000_SRC_CLK 12000000 - -static unsigned int au1x00_clock; /* Hz */ -static unsigned long uart_baud_base; - -/* - * Set the au1000_clock - */ -void set_au1x00_speed(unsigned int new_freq) -{ - au1x00_clock = new_freq; -} - -unsigned int get_au1x00_speed(void) -{ - return au1x00_clock; -} -EXPORT_SYMBOL(get_au1x00_speed); - -/* - * The UART baud base is not known at compile time ... if - * we want to be able to use the same code on different - * speed CPUs. 
- */ -unsigned long get_au1x00_uart_baud_base(void) -{ - return uart_baud_base; -} - -void set_au1x00_uart_baud_base(unsigned long new_baud_base) -{ - uart_baud_base = new_baud_base; -} - -/* - * We read the real processor speed from the PLL. This is important - * because it is more accurate than computing it from the 32 KHz - * counter, if it exists. If we don't have an accurate processor - * speed, all of the peripherals that derive their clocks based on - * this advertised speed will introduce error and sometimes not work - * properly. This function is further convoluted to still allow configurations - * to do that in case they have really, really old silicon with a - * write-only PLL register. -- Dan - */ -unsigned long au1xxx_calc_clock(void) -{ - unsigned long cpu_speed; - - /* - * On early Au1000, sys_cpupll was write-only. Since these - * silicon versions of Au1000 are not sold by AMD, we don't bend - * over backwards trying to determine the frequency. - */ - if (au1xxx_cpu_has_pll_wo()) - cpu_speed = 396000000; - else - cpu_speed = (au_readl(SYS_CPUPLL) & 0x0000003f) * AU1000_SRC_CLK; - - /* On Alchemy CPU:counter ratio is 1:1 */ - mips_hpt_frequency = cpu_speed; - /* Equation: Baudrate = CPU / (SD * 2 * CLKDIV * 16) */ - set_au1x00_uart_baud_base(cpu_speed / (2 * ((int)(au_readl(SYS_POWERCTRL) - & 0x03) + 2) * 16)); - - set_au1x00_speed(cpu_speed); - - return cpu_speed; -} diff --git a/arch/mips/alchemy/common/dbdma.c b/arch/mips/alchemy/common/dbdma.c index 19d5642c16d9..745695db5ba0 100644 --- a/arch/mips/alchemy/common/dbdma.c +++ b/arch/mips/alchemy/common/dbdma.c @@ -341,7 +341,7 @@ u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid, (dtp->dev_flags & DEV_FLAGS_SYNC)) i |= DDMA_CFG_SYNC; cp->ddma_cfg = i; - au_sync(); + wmb(); /* drain writebuffer */ /* * Return a non-zero value that can be used to find the channel @@ -631,7 +631,7 @@ u32 au1xxx_dbdma_put_source(u32 chanid, dma_addr_t buf, int nbytes, u32 flags) */ dma_cache_wback_inv((unsigned long)buf, nbytes); dp->dscr_cmd0 |= DSCR_CMD0_V; /* Let it rip */ - au_sync(); + wmb(); /* drain writebuffer */ dma_cache_wback_inv((unsigned long)dp, sizeof(*dp)); ctp->chan_ptr->ddma_dbell = 0; @@ -693,7 +693,7 @@ u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags) */ dma_cache_inv((unsigned long)buf, nbytes); dp->dscr_cmd0 |= DSCR_CMD0_V; /* Let it rip */ - au_sync(); + wmb(); /* drain writebuffer */ dma_cache_wback_inv((unsigned long)dp, sizeof(*dp)); ctp->chan_ptr->ddma_dbell = 0; @@ -760,7 +760,7 @@ void au1xxx_dbdma_stop(u32 chanid) cp = ctp->chan_ptr; cp->ddma_cfg &= ~DDMA_CFG_EN; /* Disable channel */ - au_sync(); + wmb(); /* drain writebuffer */ while (!(cp->ddma_stat & DDMA_STAT_H)) { udelay(1); halt_timeout++; @@ -771,7 +771,7 @@ void au1xxx_dbdma_stop(u32 chanid) } /* clear current desc valid and doorbell */ cp->ddma_stat |= (DDMA_STAT_DB | DDMA_STAT_V); - au_sync(); + wmb(); /* drain writebuffer */ } EXPORT_SYMBOL(au1xxx_dbdma_stop); @@ -789,9 +789,9 @@ void au1xxx_dbdma_start(u32 chanid) cp = ctp->chan_ptr; cp->ddma_desptr = virt_to_phys(ctp->cur_ptr); cp->ddma_cfg |= DDMA_CFG_EN; /* Enable channel */ - au_sync(); + wmb(); /* drain writebuffer */ cp->ddma_dbell = 0; - au_sync(); + wmb(); /* drain writebuffer */ } EXPORT_SYMBOL(au1xxx_dbdma_start); @@ -832,7 +832,7 @@ u32 au1xxx_get_dma_residue(u32 chanid) /* This is only valid if the channel is stopped. 
*/ rv = cp->ddma_bytecnt; - au_sync(); + wmb(); /* drain writebuffer */ return rv; } @@ -868,7 +868,7 @@ static irqreturn_t dbdma_interrupt(int irq, void *dev_id) au1x_dma_chan_t *cp; intstat = dbdma_gptr->ddma_intstat; - au_sync(); + wmb(); /* drain writebuffer */ chan_index = __ffs(intstat); ctp = chan_tab_ptr[chan_index]; @@ -877,7 +877,7 @@ static irqreturn_t dbdma_interrupt(int irq, void *dev_id) /* Reset interrupt. */ cp->ddma_irq = 0; - au_sync(); + wmb(); /* drain writebuffer */ if (ctp->chan_callback) ctp->chan_callback(irq, ctp->chan_callparam); @@ -1061,7 +1061,7 @@ static int __init dbdma_setup(unsigned int irq, dbdev_tab_t *idtable) dbdma_gptr->ddma_config = 0; dbdma_gptr->ddma_throttle = 0; dbdma_gptr->ddma_inten = 0xffff; - au_sync(); + wmb(); /* drain writebuffer */ ret = request_irq(irq, dbdma_interrupt, 0, "dbdma", (void *)dbdma_gptr); if (ret) diff --git a/arch/mips/alchemy/common/dma.c b/arch/mips/alchemy/common/dma.c index 9b624e2c0fcf..4fb6207b883b 100644 --- a/arch/mips/alchemy/common/dma.c +++ b/arch/mips/alchemy/common/dma.c @@ -141,17 +141,17 @@ void dump_au1000_dma_channel(unsigned int dmanr) printk(KERN_INFO "Au1000 DMA%d Register Dump:\n", dmanr); printk(KERN_INFO " mode = 0x%08x\n", - au_readl(chan->io + DMA_MODE_SET)); + __raw_readl(chan->io + DMA_MODE_SET)); printk(KERN_INFO " addr = 0x%08x\n", - au_readl(chan->io + DMA_PERIPHERAL_ADDR)); + __raw_readl(chan->io + DMA_PERIPHERAL_ADDR)); printk(KERN_INFO " start0 = 0x%08x\n", - au_readl(chan->io + DMA_BUFFER0_START)); + __raw_readl(chan->io + DMA_BUFFER0_START)); printk(KERN_INFO " start1 = 0x%08x\n", - au_readl(chan->io + DMA_BUFFER1_START)); + __raw_readl(chan->io + DMA_BUFFER1_START)); printk(KERN_INFO " count0 = 0x%08x\n", - au_readl(chan->io + DMA_BUFFER0_COUNT)); + __raw_readl(chan->io + DMA_BUFFER0_COUNT)); printk(KERN_INFO " count1 = 0x%08x\n", - au_readl(chan->io + DMA_BUFFER1_COUNT)); + __raw_readl(chan->io + DMA_BUFFER1_COUNT)); } /* @@ -204,7 +204,8 @@ int request_au1000_dma(int dev_id, const char *dev_str, } /* fill it in */ - chan->io = KSEG1ADDR(AU1000_DMA_PHYS_ADDR) + i * DMA_CHANNEL_LEN; + chan->io = (void __iomem *)(KSEG1ADDR(AU1000_DMA_PHYS_ADDR) + + i * DMA_CHANNEL_LEN); chan->dev_id = dev_id; chan->dev_str = dev_str; chan->fifo_addr = dev->fifo_addr; diff --git a/arch/mips/alchemy/common/irq.c b/arch/mips/alchemy/common/irq.c index 63a71817a00c..6cb60abfdcc9 100644 --- a/arch/mips/alchemy/common/irq.c +++ b/arch/mips/alchemy/common/irq.c @@ -389,13 +389,12 @@ static int au1x_ic1_setwake(struct irq_data *d, unsigned int on) return -EINVAL; local_irq_save(flags); - wakemsk = __raw_readl((void __iomem *)SYS_WAKEMSK); + wakemsk = alchemy_rdsys(AU1000_SYS_WAKEMSK); if (on) wakemsk |= 1 << bit; else wakemsk &= ~(1 << bit); - __raw_writel(wakemsk, (void __iomem *)SYS_WAKEMSK); - wmb(); + alchemy_wrsys(wakemsk, AU1000_SYS_WAKEMSK); local_irq_restore(flags); return 0; diff --git a/arch/mips/alchemy/common/platform.c b/arch/mips/alchemy/common/platform.c index 9837a134a6d6..d77a64f4c78b 100644 --- a/arch/mips/alchemy/common/platform.c +++ b/arch/mips/alchemy/common/platform.c @@ -11,6 +11,7 @@ * warranty of any kind, whether express or implied. 
*/ +#include <linux/clk.h> #include <linux/dma-mapping.h> #include <linux/etherdevice.h> #include <linux/init.h> @@ -99,10 +100,20 @@ static struct platform_device au1xx0_uart_device = { static void __init alchemy_setup_uarts(int ctype) { - unsigned int uartclk = get_au1x00_uart_baud_base() * 16; + long uartclk; int s = sizeof(struct plat_serial8250_port); int c = alchemy_get_uarts(ctype); struct plat_serial8250_port *ports; + struct clk *clk = clk_get(NULL, ALCHEMY_PERIPH_CLK); + + if (IS_ERR(clk)) + return; + if (clk_prepare_enable(clk)) { + clk_put(clk); + return; + } + uartclk = clk_get_rate(clk); + clk_put(clk); ports = kzalloc(s * (c + 1), GFP_KERNEL); if (!ports) { @@ -420,7 +431,7 @@ static void __init alchemy_setup_macs(int ctype) memcpy(au1xxx_eth1_platform_data.mac, ethaddr, 6); /* Register second MAC if enabled in pinfunc */ - if (!(au_readl(SYS_PINFUNC) & (u32)SYS_PF_NI2)) { + if (!(alchemy_rdsys(AU1000_SYS_PINFUNC) & SYS_PF_NI2)) { ret = platform_device_register(&au1xxx_eth1_device); if (ret) printk(KERN_INFO "Alchemy: failed to register MAC1\n"); diff --git a/arch/mips/alchemy/common/power.c b/arch/mips/alchemy/common/power.c index bdb28dee8fdd..921ed30b440c 100644 --- a/arch/mips/alchemy/common/power.c +++ b/arch/mips/alchemy/common/power.c @@ -54,28 +54,28 @@ static unsigned int sleep_static_memctlr[4][3]; static void save_core_regs(void) { /* Clocks and PLLs. */ - sleep_sys_clocks[0] = au_readl(SYS_FREQCTRL0); - sleep_sys_clocks[1] = au_readl(SYS_FREQCTRL1); - sleep_sys_clocks[2] = au_readl(SYS_CLKSRC); - sleep_sys_clocks[3] = au_readl(SYS_CPUPLL); - sleep_sys_clocks[4] = au_readl(SYS_AUXPLL); + sleep_sys_clocks[0] = alchemy_rdsys(AU1000_SYS_FREQCTRL0); + sleep_sys_clocks[1] = alchemy_rdsys(AU1000_SYS_FREQCTRL1); + sleep_sys_clocks[2] = alchemy_rdsys(AU1000_SYS_CLKSRC); + sleep_sys_clocks[3] = alchemy_rdsys(AU1000_SYS_CPUPLL); + sleep_sys_clocks[4] = alchemy_rdsys(AU1000_SYS_AUXPLL); /* pin mux config */ - sleep_sys_pinfunc = au_readl(SYS_PINFUNC); + sleep_sys_pinfunc = alchemy_rdsys(AU1000_SYS_PINFUNC); /* Save the static memory controller configuration. 
*/ - sleep_static_memctlr[0][0] = au_readl(MEM_STCFG0); - sleep_static_memctlr[0][1] = au_readl(MEM_STTIME0); - sleep_static_memctlr[0][2] = au_readl(MEM_STADDR0); - sleep_static_memctlr[1][0] = au_readl(MEM_STCFG1); - sleep_static_memctlr[1][1] = au_readl(MEM_STTIME1); - sleep_static_memctlr[1][2] = au_readl(MEM_STADDR1); - sleep_static_memctlr[2][0] = au_readl(MEM_STCFG2); - sleep_static_memctlr[2][1] = au_readl(MEM_STTIME2); - sleep_static_memctlr[2][2] = au_readl(MEM_STADDR2); - sleep_static_memctlr[3][0] = au_readl(MEM_STCFG3); - sleep_static_memctlr[3][1] = au_readl(MEM_STTIME3); - sleep_static_memctlr[3][2] = au_readl(MEM_STADDR3); + sleep_static_memctlr[0][0] = alchemy_rdsmem(AU1000_MEM_STCFG0); + sleep_static_memctlr[0][1] = alchemy_rdsmem(AU1000_MEM_STTIME0); + sleep_static_memctlr[0][2] = alchemy_rdsmem(AU1000_MEM_STADDR0); + sleep_static_memctlr[1][0] = alchemy_rdsmem(AU1000_MEM_STCFG1); + sleep_static_memctlr[1][1] = alchemy_rdsmem(AU1000_MEM_STTIME1); + sleep_static_memctlr[1][2] = alchemy_rdsmem(AU1000_MEM_STADDR1); + sleep_static_memctlr[2][0] = alchemy_rdsmem(AU1000_MEM_STCFG2); + sleep_static_memctlr[2][1] = alchemy_rdsmem(AU1000_MEM_STTIME2); + sleep_static_memctlr[2][2] = alchemy_rdsmem(AU1000_MEM_STADDR2); + sleep_static_memctlr[3][0] = alchemy_rdsmem(AU1000_MEM_STCFG3); + sleep_static_memctlr[3][1] = alchemy_rdsmem(AU1000_MEM_STTIME3); + sleep_static_memctlr[3][2] = alchemy_rdsmem(AU1000_MEM_STADDR3); } static void restore_core_regs(void) @@ -85,30 +85,28 @@ static void restore_core_regs(void) * one of those Au1000 with a write-only PLL, where we dont * have a valid value) */ - au_writel(sleep_sys_clocks[0], SYS_FREQCTRL0); - au_writel(sleep_sys_clocks[1], SYS_FREQCTRL1); - au_writel(sleep_sys_clocks[2], SYS_CLKSRC); - au_writel(sleep_sys_clocks[4], SYS_AUXPLL); + alchemy_wrsys(sleep_sys_clocks[0], AU1000_SYS_FREQCTRL0); + alchemy_wrsys(sleep_sys_clocks[1], AU1000_SYS_FREQCTRL1); + alchemy_wrsys(sleep_sys_clocks[2], AU1000_SYS_CLKSRC); + alchemy_wrsys(sleep_sys_clocks[4], AU1000_SYS_AUXPLL); if (!au1xxx_cpu_has_pll_wo()) - au_writel(sleep_sys_clocks[3], SYS_CPUPLL); - au_sync(); + alchemy_wrsys(sleep_sys_clocks[3], AU1000_SYS_CPUPLL); - au_writel(sleep_sys_pinfunc, SYS_PINFUNC); - au_sync(); + alchemy_wrsys(sleep_sys_pinfunc, AU1000_SYS_PINFUNC); /* Restore the static memory controller configuration. 
*/ - au_writel(sleep_static_memctlr[0][0], MEM_STCFG0); - au_writel(sleep_static_memctlr[0][1], MEM_STTIME0); - au_writel(sleep_static_memctlr[0][2], MEM_STADDR0); - au_writel(sleep_static_memctlr[1][0], MEM_STCFG1); - au_writel(sleep_static_memctlr[1][1], MEM_STTIME1); - au_writel(sleep_static_memctlr[1][2], MEM_STADDR1); - au_writel(sleep_static_memctlr[2][0], MEM_STCFG2); - au_writel(sleep_static_memctlr[2][1], MEM_STTIME2); - au_writel(sleep_static_memctlr[2][2], MEM_STADDR2); - au_writel(sleep_static_memctlr[3][0], MEM_STCFG3); - au_writel(sleep_static_memctlr[3][1], MEM_STTIME3); - au_writel(sleep_static_memctlr[3][2], MEM_STADDR3); + alchemy_wrsmem(sleep_static_memctlr[0][0], AU1000_MEM_STCFG0); + alchemy_wrsmem(sleep_static_memctlr[0][1], AU1000_MEM_STTIME0); + alchemy_wrsmem(sleep_static_memctlr[0][2], AU1000_MEM_STADDR0); + alchemy_wrsmem(sleep_static_memctlr[1][0], AU1000_MEM_STCFG1); + alchemy_wrsmem(sleep_static_memctlr[1][1], AU1000_MEM_STTIME1); + alchemy_wrsmem(sleep_static_memctlr[1][2], AU1000_MEM_STADDR1); + alchemy_wrsmem(sleep_static_memctlr[2][0], AU1000_MEM_STCFG2); + alchemy_wrsmem(sleep_static_memctlr[2][1], AU1000_MEM_STTIME2); + alchemy_wrsmem(sleep_static_memctlr[2][2], AU1000_MEM_STADDR2); + alchemy_wrsmem(sleep_static_memctlr[3][0], AU1000_MEM_STCFG3); + alchemy_wrsmem(sleep_static_memctlr[3][1], AU1000_MEM_STTIME3); + alchemy_wrsmem(sleep_static_memctlr[3][2], AU1000_MEM_STADDR3); } void au_sleep(void) diff --git a/arch/mips/alchemy/common/setup.c b/arch/mips/alchemy/common/setup.c index 8267e3c97721..ea8f41869e56 100644 --- a/arch/mips/alchemy/common/setup.c +++ b/arch/mips/alchemy/common/setup.c @@ -27,12 +27,9 @@ #include <linux/init.h> #include <linux/ioport.h> -#include <linux/jiffies.h> -#include <linux/module.h> #include <asm/dma-coherence.h> #include <asm/mipsregs.h> -#include <asm/time.h> #include <au1000.h> @@ -41,18 +38,6 @@ extern void set_cpuspec(void); void __init plat_mem_setup(void) { - unsigned long est_freq; - - /* determine core clock */ - est_freq = au1xxx_calc_clock(); - est_freq += 5000; /* round */ - est_freq -= est_freq % 10000; - printk(KERN_INFO "(PRId %08x) @ %lu.%02lu MHz\n", read_c0_prid(), - est_freq / 1000000, ((est_freq % 1000000) * 100) / 1000000); - - /* this is faster than wasting cycles trying to approximate it */ - preset_lpj = (est_freq >> 1) / HZ; - if (au1xxx_cpu_needs_config_od()) /* Various early Au1xx0 errata corrected by this */ set_c0_config(1 << 19); /* Set Config[OD] */ diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c index 93fa586d52e2..50e17e13c18b 100644 --- a/arch/mips/alchemy/common/time.c +++ b/arch/mips/alchemy/common/time.c @@ -46,7 +46,7 @@ static cycle_t au1x_counter1_read(struct clocksource *cs) { - return au_readl(SYS_RTCREAD); + return alchemy_rdsys(AU1000_SYS_RTCREAD); } static struct clocksource au1x_counter1_clocksource = { @@ -60,12 +60,11 @@ static struct clocksource au1x_counter1_clocksource = { static int au1x_rtcmatch2_set_next_event(unsigned long delta, struct clock_event_device *cd) { - delta += au_readl(SYS_RTCREAD); + delta += alchemy_rdsys(AU1000_SYS_RTCREAD); /* wait for register access */ - while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_M21) + while (alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_M21) ; - au_writel(delta, SYS_RTCMATCH2); - au_sync(); + alchemy_wrsys(delta, AU1000_SYS_RTCMATCH2); return 0; } @@ -112,31 +111,29 @@ static int __init alchemy_time_init(unsigned int m2int) * (the 32S bit seems to be stuck set to 1 once a single clock- * edge is 
detected, hence the timeouts). */ - if (CNTR_OK != (au_readl(SYS_COUNTER_CNTRL) & CNTR_OK)) + if (CNTR_OK != (alchemy_rdsys(AU1000_SYS_CNTRCTRL) & CNTR_OK)) goto cntr_err; /* * setup counter 1 (RTC) to tick at full speed */ t = 0xffffff; - while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S) && --t) + while ((alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_T1S) && --t) asm volatile ("nop"); if (!t) goto cntr_err; - au_writel(0, SYS_RTCTRIM); /* 32.768 kHz */ - au_sync(); + alchemy_wrsys(0, AU1000_SYS_RTCTRIM); /* 32.768 kHz */ t = 0xffffff; - while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && --t) + while ((alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_C1S) && --t) asm volatile ("nop"); if (!t) goto cntr_err; - au_writel(0, SYS_RTCWRITE); - au_sync(); + alchemy_wrsys(0, AU1000_SYS_RTCWRITE); t = 0xffffff; - while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && --t) + while ((alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_C1S) && --t) asm volatile ("nop"); if (!t) goto cntr_err; diff --git a/arch/mips/alchemy/common/usb.c b/arch/mips/alchemy/common/usb.c index d193dbea84a1..297805ade849 100644 --- a/arch/mips/alchemy/common/usb.c +++ b/arch/mips/alchemy/common/usb.c @@ -9,6 +9,7 @@ * */ +#include <linux/clk.h> #include <linux/init.h> #include <linux/io.h> #include <linux/module.h> @@ -387,10 +388,25 @@ static inline void au1200_usb_init(void) udelay(1000); } -static inline void au1000_usb_init(unsigned long rb, int reg) +static inline int au1000_usb_init(unsigned long rb, int reg) { void __iomem *base = (void __iomem *)KSEG1ADDR(rb + reg); unsigned long r = __raw_readl(base); + struct clk *c; + + /* 48MHz check. Don't init if no one can provide it */ + c = clk_get(NULL, "usbh_clk"); + if (IS_ERR(c)) + return -ENODEV; + if (clk_round_rate(c, 48000000) != 48000000) { + clk_put(c); + return -ENODEV; + } + if (clk_set_rate(c, 48000000)) { + clk_put(c); + return -ENODEV; + } + clk_put(c); #if defined(__BIG_ENDIAN) r |= USBHEN_BE; @@ -400,6 +416,8 @@ static inline void au1000_usb_init(unsigned long rb, int reg) __raw_writel(r, base); wmb(); udelay(1000); + + return 0; } @@ -407,8 +425,15 @@ static inline void __au1xx0_ohci_control(int enable, unsigned long rb, int creg) { void __iomem *base = (void __iomem *)KSEG1ADDR(rb); unsigned long r = __raw_readl(base + creg); + struct clk *c = clk_get(NULL, "usbh_clk"); + + if (IS_ERR(c)) + return; if (enable) { + if (clk_prepare_enable(c)) + goto out; + __raw_writel(r | USBHEN_CE, base + creg); wmb(); udelay(1000); @@ -423,7 +448,10 @@ static inline void __au1xx0_ohci_control(int enable, unsigned long rb, int creg) } else { __raw_writel(r & ~(USBHEN_CE | USBHEN_E), base + creg); wmb(); + clk_disable_unprepare(c); } +out: + clk_put(c); } static inline int au1000_usb_control(int block, int enable, unsigned long rb, @@ -457,11 +485,11 @@ int alchemy_usb_control(int block, int enable) case ALCHEMY_CPU_AU1500: case ALCHEMY_CPU_AU1100: ret = au1000_usb_control(block, enable, - AU1000_USB_OHCI_PHYS_ADDR, AU1000_OHCICFG); + AU1000_USB_OHCI_PHYS_ADDR, AU1000_OHCICFG); break; case ALCHEMY_CPU_AU1550: ret = au1000_usb_control(block, enable, - AU1550_USB_OHCI_PHYS_ADDR, AU1550_OHCICFG); + AU1550_USB_OHCI_PHYS_ADDR, AU1550_OHCICFG); break; case ALCHEMY_CPU_AU1200: ret = au1200_usb_control(block, enable); @@ -569,14 +597,18 @@ static struct syscore_ops alchemy_usb_pm_ops = { static int __init alchemy_usb_init(void) { + int ret = 0; + switch (alchemy_get_cputype()) { case ALCHEMY_CPU_AU1000: case ALCHEMY_CPU_AU1500: case ALCHEMY_CPU_AU1100: - 
au1000_usb_init(AU1000_USB_OHCI_PHYS_ADDR, AU1000_OHCICFG); + ret = au1000_usb_init(AU1000_USB_OHCI_PHYS_ADDR, + AU1000_OHCICFG); break; case ALCHEMY_CPU_AU1550: - au1000_usb_init(AU1550_USB_OHCI_PHYS_ADDR, AU1550_OHCICFG); + ret = au1000_usb_init(AU1550_USB_OHCI_PHYS_ADDR, + AU1550_OHCICFG); break; case ALCHEMY_CPU_AU1200: au1200_usb_init(); @@ -586,8 +618,9 @@ static int __init alchemy_usb_init(void) break; } - register_syscore_ops(&alchemy_usb_pm_ops); + if (!ret) + register_syscore_ops(&alchemy_usb_pm_ops); - return 0; + return ret; } arch_initcall(alchemy_usb_init); diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c index 92dd929d4057..001102e197f1 100644 --- a/arch/mips/alchemy/devboards/db1000.c +++ b/arch/mips/alchemy/devboards/db1000.c @@ -19,6 +19,7 @@ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ +#include <linux/clk.h> #include <linux/dma-mapping.h> #include <linux/gpio.h> #include <linux/init.h> @@ -496,6 +497,7 @@ int __init db1000_dev_setup(void) int board = BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI)); int c0, c1, d0, d1, s0, s1, flashsize = 32, twosocks = 1; unsigned long pfc; + struct clk *c, *p; if (board == BCSR_WHOAMI_DB1500) { c0 = AU1500_GPIO2_INT; @@ -518,14 +520,25 @@ int __init db1000_dev_setup(void) gpio_direction_input(20); /* sd1 cd# */ /* spi_gpio on SSI0 pins */ - pfc = __raw_readl((void __iomem *)SYS_PINFUNC); + pfc = alchemy_rdsys(AU1000_SYS_PINFUNC); pfc |= (1 << 0); /* SSI0 pins as GPIOs */ - __raw_writel(pfc, (void __iomem *)SYS_PINFUNC); - wmb(); + alchemy_wrsys(pfc, AU1000_SYS_PINFUNC); spi_register_board_info(db1100_spi_info, ARRAY_SIZE(db1100_spi_info)); + /* link LCD clock to AUXPLL */ + p = clk_get(NULL, "auxpll_clk"); + c = clk_get(NULL, "lcd_intclk"); + if (!IS_ERR(c) && !IS_ERR(p)) { + clk_set_parent(c, p); + clk_set_rate(c, clk_get_rate(p)); + } + if (!IS_ERR(c)) + clk_put(c); + if (!IS_ERR(p)) + clk_put(p); + platform_add_devices(db1100_devs, ARRAY_SIZE(db1100_devs)); platform_device_register(&db1100_spi_dev); } else if (board == BCSR_WHOAMI_DB1000) { diff --git a/arch/mips/alchemy/devboards/db1200.c b/arch/mips/alchemy/devboards/db1200.c index 9e46667f2597..8c13675a12e7 100644 --- a/arch/mips/alchemy/devboards/db1200.c +++ b/arch/mips/alchemy/devboards/db1200.c @@ -18,6 +18,7 @@ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ +#include <linux/clk.h> #include <linux/dma-mapping.h> #include <linux/gpio.h> #include <linux/i2c.h> @@ -129,7 +130,6 @@ static int __init db1200_detect_board(void) int __init db1200_board_setup(void) { - unsigned long freq0, clksrc, div, pfc; unsigned short whoami; if (db1200_detect_board()) @@ -149,34 +149,6 @@ int __init db1200_board_setup(void) " Board-ID %d Daughtercard ID %d\n", get_system_type(), (whoami >> 4) & 0xf, (whoami >> 8) & 0xf, whoami & 0xf); - /* SMBus/SPI on PSC0, Audio on PSC1 */ - pfc = __raw_readl((void __iomem *)SYS_PINFUNC); - pfc &= ~(SYS_PINFUNC_P0A | SYS_PINFUNC_P0B); - pfc &= ~(SYS_PINFUNC_P1A | SYS_PINFUNC_P1B | SYS_PINFUNC_FS3); - pfc |= SYS_PINFUNC_P1C; /* SPI is configured later */ - __raw_writel(pfc, (void __iomem *)SYS_PINFUNC); - wmb(); - - /* Clock configurations: PSC0: ~50MHz via Clkgen0, derived from - * CPU clock; all other clock generators off/unused. 
- */ - div = (get_au1x00_speed() + 25000000) / 50000000; - if (div & 1) - div++; - div = ((div >> 1) - 1) & 0xff; - - freq0 = div << SYS_FC_FRDIV0_BIT; - __raw_writel(freq0, (void __iomem *)SYS_FREQCTRL0); - wmb(); - freq0 |= SYS_FC_FE0; /* enable F0 */ - __raw_writel(freq0, (void __iomem *)SYS_FREQCTRL0); - wmb(); - - /* psc0_intclk comes 1:1 from F0 */ - clksrc = SYS_CS_MUX_FQ0 << SYS_CS_ME0_BIT; - __raw_writel(clksrc, (void __iomem *)SYS_CLKSRC); - wmb(); - return 0; } @@ -250,7 +222,7 @@ static void au1200_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, static int au1200_nand_device_ready(struct mtd_info *mtd) { - return __raw_readl((void __iomem *)MEM_STSTAT) & 1; + return alchemy_rdsmem(AU1000_MEM_STSTAT) & 1; } static struct mtd_partition db1200_nand_parts[] = { @@ -847,6 +819,7 @@ int __init db1200_dev_setup(void) unsigned long pfc; unsigned short sw; int swapped, bid; + struct clk *c; bid = BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI)); if ((bid == BCSR_WHOAMI_PB1200_DDR1) || @@ -859,6 +832,25 @@ int __init db1200_dev_setup(void) irq_set_irq_type(AU1200_GPIO7_INT, IRQ_TYPE_LEVEL_LOW); bcsr_init_irq(DB1200_INT_BEGIN, DB1200_INT_END, AU1200_GPIO7_INT); + /* SMBus/SPI on PSC0, Audio on PSC1 */ + pfc = alchemy_rdsys(AU1000_SYS_PINFUNC); + pfc &= ~(SYS_PINFUNC_P0A | SYS_PINFUNC_P0B); + pfc &= ~(SYS_PINFUNC_P1A | SYS_PINFUNC_P1B | SYS_PINFUNC_FS3); + pfc |= SYS_PINFUNC_P1C; /* SPI is configured later */ + alchemy_wrsys(pfc, AU1000_SYS_PINFUNC); + + /* get 50MHz for I2C driver on PSC0 */ + c = clk_get(NULL, "psc0_intclk"); + if (!IS_ERR(c)) { + pfc = clk_round_rate(c, 50000000); + if ((pfc < 1) || (abs(50000000 - pfc) > 2500000)) + pr_warn("DB1200: cant get I2C close to 50MHz\n"); + else + clk_set_rate(c, pfc); + clk_prepare_enable(c); + clk_put(c); + } + /* insert/eject pairs: one of both is always screaming. To avoid * issues they must not be automatically enabled when initially * requested. @@ -886,7 +878,7 @@ int __init db1200_dev_setup(void) * As a result, in SPI mode, OTG simply won't work (PSC0 uses * it as an input pin which is pulled high on the boards). */ - pfc = __raw_readl((void __iomem *)SYS_PINFUNC) & ~SYS_PINFUNC_P0A; + pfc = alchemy_rdsys(AU1000_SYS_PINFUNC) & ~SYS_PINFUNC_P0A; /* switch off OTG VBUS supply */ gpio_request(215, "otg-vbus"); @@ -912,8 +904,7 @@ int __init db1200_dev_setup(void) printk(KERN_INFO " S6.8 ON : PSC0 mode SPI\n"); printk(KERN_INFO " OTG port VBUS supply disabled\n"); } - __raw_writel(pfc, (void __iomem *)SYS_PINFUNC); - wmb(); + alchemy_wrsys(pfc, AU1000_SYS_PINFUNC); /* Audio: DIP7 selects I2S(0)/AC97(1), but need I2C for I2S! 
* so: DIP7=1 || DIP8=0 => AC97, DIP7=0 && DIP8=1 => I2S diff --git a/arch/mips/alchemy/devboards/db1300.c b/arch/mips/alchemy/devboards/db1300.c index 1aed6be4de10..1c64fdbe4c81 100644 --- a/arch/mips/alchemy/devboards/db1300.c +++ b/arch/mips/alchemy/devboards/db1300.c @@ -4,6 +4,7 @@ * (c) 2009 Manuel Lauss <manuel.lauss@googlemail.com> */ +#include <linux/clk.h> #include <linux/dma-mapping.h> #include <linux/gpio.h> #include <linux/gpio_keys.h> @@ -20,6 +21,7 @@ #include <linux/mtd/partitions.h> #include <linux/platform_device.h> #include <linux/smsc911x.h> +#include <linux/wm97xx.h> #include <asm/mach-au1x00/au1000.h> #include <asm/mach-au1x00/au1100_mmc.h> @@ -169,7 +171,7 @@ static void au1300_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, static int au1300_nand_device_ready(struct mtd_info *mtd) { - return __raw_readl((void __iomem *)MEM_STSTAT) & 1; + return alchemy_rdsmem(AU1000_MEM_STSTAT) & 1; } static struct mtd_partition db1300_nand_parts[] = { @@ -710,6 +712,46 @@ static struct platform_device db1300_lcd_dev = { /**********************************************************************/ +static void db1300_wm97xx_irqen(struct wm97xx *wm, int enable) +{ + if (enable) + enable_irq(DB1300_AC97_PEN_INT); + else + disable_irq_nosync(DB1300_AC97_PEN_INT); +} + +static struct wm97xx_mach_ops db1300_wm97xx_ops = { + .irq_enable = db1300_wm97xx_irqen, + .irq_gpio = WM97XX_GPIO_3, +}; + +static int db1300_wm97xx_probe(struct platform_device *pdev) +{ + struct wm97xx *wm = platform_get_drvdata(pdev); + + /* external pendown indicator */ + wm97xx_config_gpio(wm, WM97XX_GPIO_13, WM97XX_GPIO_IN, + WM97XX_GPIO_POL_LOW, WM97XX_GPIO_STICKY, + WM97XX_GPIO_WAKE); + + /* internal "virtual" pendown gpio */ + wm97xx_config_gpio(wm, WM97XX_GPIO_3, WM97XX_GPIO_OUT, + WM97XX_GPIO_POL_LOW, WM97XX_GPIO_NOTSTICKY, + WM97XX_GPIO_NOWAKE); + + wm->pen_irq = DB1300_AC97_PEN_INT; + + return wm97xx_register_mach_ops(wm, &db1300_wm97xx_ops); +} + +static struct platform_driver db1300_wm97xx_driver = { + .driver.name = "wm97xx-touch", + .driver.owner = THIS_MODULE, + .probe = db1300_wm97xx_probe, +}; + +/**********************************************************************/ + static struct platform_device *db1300_dev[] __initdata = { &db1300_eth_dev, &db1300_i2c_dev, @@ -731,6 +773,7 @@ static struct platform_device *db1300_dev[] __initdata = { int __init db1300_dev_setup(void) { int swapped, cpldirq; + struct clk *c; /* setup CPLD IRQ muxer */ cpldirq = au1300_gpio_to_irq(AU1300_PIN_EXTCLK1); @@ -753,6 +796,9 @@ int __init db1300_dev_setup(void) i2c_register_board_info(0, db1300_i2c_devs, ARRAY_SIZE(db1300_i2c_devs)); + if (platform_driver_register(&db1300_wm97xx_driver)) + pr_warn("DB1300: failed to init touch pen irq support!\n"); + /* Audio PSC clock is supplied by codecs (PSC1, 2) */ __raw_writel(PSC_SEL_CLK_SERCLK, (void __iomem *)KSEG1ADDR(AU1300_PSC1_PHYS_ADDR) + PSC_SEL_OFFSET); @@ -760,7 +806,13 @@ int __init db1300_dev_setup(void) __raw_writel(PSC_SEL_CLK_SERCLK, (void __iomem *)KSEG1ADDR(AU1300_PSC2_PHYS_ADDR) + PSC_SEL_OFFSET); wmb(); - /* I2C uses internal 48MHz EXTCLK1 */ + /* I2C driver wants 50MHz, get as close as possible */ + c = clk_get(NULL, "psc3_intclk"); + if (!IS_ERR(c)) { + clk_set_rate(c, 50000000); + clk_prepare_enable(c); + clk_put(c); + } __raw_writel(PSC_SEL_CLK_INTCLK, (void __iomem *)KSEG1ADDR(AU1300_PSC3_PHYS_ADDR) + PSC_SEL_OFFSET); wmb(); diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c index bbd8d9884702..0fd5177e35ab 100644 --- 
a/arch/mips/alchemy/devboards/db1550.c +++ b/arch/mips/alchemy/devboards/db1550.c @@ -4,6 +4,7 @@ * (c) 2011 Manuel Lauss <manuel.lauss@googlemail.com> */ +#include <linux/clk.h> #include <linux/dma-mapping.h> #include <linux/gpio.h> #include <linux/i2c.h> @@ -31,16 +32,13 @@ static void __init db1550_hw_setup(void) { void __iomem *base; + unsigned long v; - /* complete SPI setup: link psc0_intclk to a 48MHz source, - * and assign GPIO16 to PSC0_SYNC1 (SPI cs# line) as well as PSC1_SYNC - * for AC97 on PB1550. + /* complete pin setup: assign GPIO16 to PSC0_SYNC1 (SPI cs# line) + * as well as PSC1_SYNC for AC97 on PB1550. */ - base = (void __iomem *)SYS_CLKSRC; - __raw_writel(__raw_readl(base) | 0x000001e0, base); - base = (void __iomem *)SYS_PINFUNC; - __raw_writel(__raw_readl(base) | 1 | SYS_PF_PSC1_S1, base); - wmb(); + v = alchemy_rdsys(AU1000_SYS_PINFUNC); + alchemy_wrsys(v | 1 | SYS_PF_PSC1_S1, AU1000_SYS_PINFUNC); /* reset the AC97 codec now, the reset time in the psc-ac97 driver * is apparently too short although it's ridiculous as it is. @@ -151,7 +149,7 @@ static void au1550_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, static int au1550_nand_device_ready(struct mtd_info *mtd) { - return __raw_readl((void __iomem *)MEM_STSTAT) & 1; + return alchemy_rdsmem(AU1000_MEM_STSTAT) & 1; } static struct mtd_partition db1550_nand_parts[] = { @@ -217,7 +215,7 @@ static struct platform_device pb1550_nand_dev = { static void __init pb1550_nand_setup(void) { - int boot_swapboot = (au_readl(MEM_STSTAT) & (0x7 << 1)) | + int boot_swapboot = (alchemy_rdsmem(AU1000_MEM_STSTAT) & (0x7 << 1)) | ((bcsr_read(BCSR_STATUS) >> 6) & 0x1); gpio_direction_input(206); /* de-assert NAND CS# */ @@ -574,6 +572,7 @@ static void __init pb1550_devices(void) int __init db1550_dev_setup(void) { int swapped, id; + struct clk *c; id = (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI)) != BCSR_WHOAMI_DB1550); @@ -582,6 +581,19 @@ int __init db1550_dev_setup(void) spi_register_board_info(db1550_spi_devs, ARRAY_SIZE(db1550_i2c_devs)); + c = clk_get(NULL, "psc0_intclk"); + if (!IS_ERR(c)) { + clk_set_rate(c, 50000000); + clk_prepare_enable(c); + clk_put(c); + } + c = clk_get(NULL, "psc2_intclk"); + if (!IS_ERR(c)) { + clk_set_rate(c, db1550_spi_platdata.mainclk_hz); + clk_prepare_enable(c); + clk_put(c); + } + /* Audio PSC clock is supplied by codecs (PSC1, 3) FIXME: platdata!! 
*/ __raw_writel(PSC_SEL_CLK_SERCLK, (void __iomem *)KSEG1ADDR(AU1550_PSC1_PHYS_ADDR) + PSC_SEL_OFFSET); diff --git a/arch/mips/alchemy/devboards/platform.c b/arch/mips/alchemy/devboards/platform.c index 8df86eb94972..be139a0198b0 100644 --- a/arch/mips/alchemy/devboards/platform.c +++ b/arch/mips/alchemy/devboards/platform.c @@ -11,6 +11,7 @@ #include <linux/pm.h> #include <asm/bootinfo.h> +#include <asm/idle.h> #include <asm/reboot.h> #include <asm/mach-au1x00/au1000.h> #include <asm/mach-db1x00/bcsr.h> @@ -53,6 +54,8 @@ static void db1x_power_off(void) { bcsr_write(BCSR_RESETS, 0); bcsr_write(BCSR_SYSTEM, BCSR_SYSTEM_PWROFF | BCSR_SYSTEM_RESET); + while (1) /* sit and spin */ + cpu_wait(); } static void db1x_reset(char *c) diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c index 61e90fe9eab1..bfeb8f3c0be6 100644 --- a/arch/mips/alchemy/devboards/pm.c +++ b/arch/mips/alchemy/devboards/pm.c @@ -45,23 +45,20 @@ static int db1x_pm_enter(suspend_state_t state) alchemy_gpio1_input_enable(); /* clear and setup wake cause and source */ - au_writel(0, SYS_WAKEMSK); - au_sync(); - au_writel(0, SYS_WAKESRC); - au_sync(); + alchemy_wrsys(0, AU1000_SYS_WAKEMSK); + alchemy_wrsys(0, AU1000_SYS_WAKESRC); - au_writel(db1x_pm_wakemsk, SYS_WAKEMSK); - au_sync(); + alchemy_wrsys(db1x_pm_wakemsk, AU1000_SYS_WAKEMSK); /* setup 1Hz-timer-based wakeup: wait for reg access */ - while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_M20) + while (alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_M20) asm volatile ("nop"); - au_writel(au_readl(SYS_TOYREAD) + db1x_pm_sleep_secs, SYS_TOYMATCH2); - au_sync(); + alchemy_wrsys(alchemy_rdsys(AU1000_SYS_TOYREAD) + db1x_pm_sleep_secs, + AU1000_SYS_TOYMATCH2); /* wait for value to really hit the register */ - while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_M20) + while (alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_M20) asm volatile ("nop"); /* ...and now the sandman can come! */ @@ -102,12 +99,10 @@ static void db1x_pm_end(void) /* read and store wakeup source, the clear the register. To * be able to clear it, WAKEMSK must be cleared first. */ - db1x_pm_last_wakesrc = au_readl(SYS_WAKESRC); - - au_writel(0, SYS_WAKEMSK); - au_writel(0, SYS_WAKESRC); - au_sync(); + db1x_pm_last_wakesrc = alchemy_rdsys(AU1000_SYS_WAKESRC); + alchemy_wrsys(0, AU1000_SYS_WAKEMSK); + alchemy_wrsys(0, AU1000_SYS_WAKESRC); } static const struct platform_suspend_ops db1x_pm_ops = { @@ -242,17 +237,13 @@ static int __init pm_init(void) * for confirmation since there's plenty of time from here to * the next suspend cycle. */ - if (au_readl(SYS_TOYTRIM) != 32767) { - au_writel(32767, SYS_TOYTRIM); - au_sync(); - } + if (alchemy_rdsys(AU1000_SYS_TOYTRIM) != 32767) + alchemy_wrsys(32767, AU1000_SYS_TOYTRIM); - db1x_pm_last_wakesrc = au_readl(SYS_WAKESRC); + db1x_pm_last_wakesrc = alchemy_rdsys(AU1000_SYS_WAKESRC); - au_writel(0, SYS_WAKESRC); - au_sync(); - au_writel(0, SYS_WAKEMSK); - au_sync(); + alchemy_wrsys(0, AU1000_SYS_WAKESRC); + alchemy_wrsys(0, AU1000_SYS_WAKEMSK); suspend_set_ops(&db1x_pm_ops); diff --git a/arch/mips/bcm47xx/Kconfig b/arch/mips/bcm47xx/Kconfig index 09cb6f7aa3db..fc21d3659fa0 100644 --- a/arch/mips/bcm47xx/Kconfig +++ b/arch/mips/bcm47xx/Kconfig @@ -11,8 +11,6 @@ config BCM47XX_SSB select SSB_DRIVER_PCICORE if PCI select SSB_PCICORE_HOSTMODE if PCI select SSB_DRIVER_GPIO - select GPIOLIB - select LEDS_GPIO_REGISTER default y help Add support for old Broadcom BCM47xx boards with Sonics Silicon Backplane support. 
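[ Editor's note — illustrative sketch only, not part of the patch text. The Alchemy devboard hunks earlier in this diff (db1000/db1200/db1300/db1550) drop direct SYS_FREQCTRL/SYS_CLKSRC register programming in favour of the common clock framework consumer API. A minimal sketch of that consumer pattern follows; the clock name "psc0_intclk" and the 50 MHz target are taken from those hunks, while the function name and error handling are assumptions added for illustration. ]

    #include <linux/clk.h>
    #include <linux/err.h>

    /* Look up a clock by name, ask the provider how close it can get to the
     * requested rate, program and enable it, then drop the reference.  The
     * clock stays enabled until a matching clk_disable_unprepare().
     */
    static int example_psc_clock_setup(void)
    {
            struct clk *c;
            long rate;
            int ret;

            c = clk_get(NULL, "psc0_intclk");       /* consumer lookup by name */
            if (IS_ERR(c))
                    return PTR_ERR(c);

            rate = clk_round_rate(c, 50000000);     /* nearest achievable rate */
            if (rate <= 0) {
                    clk_put(c);
                    return -EINVAL;
            }

            ret = clk_set_rate(c, rate);
            if (!ret)
                    ret = clk_prepare_enable(c);    /* prepare + enable in one call */

            clk_put(c);                             /* reference no longer needed */
            return ret;
    }
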
@@ -22,6 +20,7 @@ config BCM47XX_SSB config BCM47XX_BCMA bool "BCMA Support for Broadcom BCM47XX" select SYS_HAS_CPU_MIPS32_R2 + select SYS_SUPPORTS_HIGHMEM select CPU_MIPSR2_IRQ_VI select BCMA select BCMA_HOST_SOC @@ -29,8 +28,6 @@ config BCM47XX_BCMA select BCMA_HOST_PCI if PCI select BCMA_DRIVER_PCI_HOSTMODE if PCI select BCMA_DRIVER_GPIO - select GPIOLIB - select LEDS_GPIO_REGISTER default y help Add support for new Broadcom BCM47xx boards with Broadcom specific Advanced Microcontroller Bus. diff --git a/arch/mips/bcm47xx/bcm47xx_private.h b/arch/mips/bcm47xx/bcm47xx_private.h index 0194c3b9a729..f1cc9d0495d8 100644 --- a/arch/mips/bcm47xx/bcm47xx_private.h +++ b/arch/mips/bcm47xx/bcm47xx_private.h @@ -3,6 +3,9 @@ #include <linux/kernel.h> +/* prom.c */ +void __init bcm47xx_prom_highmem_init(void); + /* buttons.c */ int __init bcm47xx_buttons_register(void); diff --git a/arch/mips/bcm47xx/board.c b/arch/mips/bcm47xx/board.c index 44ab1be68c3c..b3ae068ca4fa 100644 --- a/arch/mips/bcm47xx/board.c +++ b/arch/mips/bcm47xx/board.c @@ -58,6 +58,7 @@ struct bcm47xx_board_type_list1 bcm47xx_board_list_machine_name[] __initconst = static const struct bcm47xx_board_type_list1 bcm47xx_board_list_hardware_version[] __initconst = { {{BCM47XX_BOARD_ASUS_RTN10U, "Asus RT-N10U"}, "RTN10U"}, + {{BCM47XX_BOARD_ASUS_RTN10D, "Asus RT-N10D"}, "RTN10D"}, {{BCM47XX_BOARD_ASUS_RTN12, "Asus RT-N12"}, "RT-N12"}, {{BCM47XX_BOARD_ASUS_RTN12B1, "Asus RT-N12B1"}, "RTN12B1"}, {{BCM47XX_BOARD_ASUS_RTN12C1, "Asus RT-N12C1"}, "RTN12C1"}, @@ -80,6 +81,14 @@ struct bcm47xx_board_type_list1 bcm47xx_board_list_hardware_version[] __initcons { {0}, NULL}, }; +/* hardware_version, boardnum */ +static const +struct bcm47xx_board_type_list2 bcm47xx_board_list_hw_version_num[] __initconst = { + {{BCM47XX_BOARD_MICROSOFT_MN700, "Microsoft MN-700"}, "WL500-", "mn700"}, + {{BCM47XX_BOARD_ASUS_WL500G, "Asus WL500G"}, "WL500-", "asusX"}, + { {0}, NULL}, +}; + /* productid */ static const struct bcm47xx_board_type_list1 bcm47xx_board_list_productid[] __initconst = { @@ -98,7 +107,7 @@ struct bcm47xx_board_type_list1 bcm47xx_board_list_productid[] __initconst = { /* ModelId */ static const struct bcm47xx_board_type_list1 bcm47xx_board_list_ModelId[] __initconst = { - {{BCM47XX_BOARD_DELL_TM2300, "Dell WX-5565"}, "WX-5565"}, + {{BCM47XX_BOARD_DELL_TM2300, "Dell TrueMobile 2300"}, "WX-5565"}, {{BCM47XX_BOARD_MOTOROLA_WE800G, "Motorola WE800G"}, "WE800G"}, {{BCM47XX_BOARD_MOTOROLA_WR850GP, "Motorola WR850GP"}, "WR850GP"}, {{BCM47XX_BOARD_MOTOROLA_WR850GV2V3, "Motorola WR850G"}, "WR850G"}, @@ -180,9 +189,9 @@ struct bcm47xx_board_type_list3 bcm47xx_board_list_board[] __initconst = { {{BCM47XX_BOARD_PHICOMM_M1, "Phicomm M1"}, "0x0590", "80", "0x1104"}, {{BCM47XX_BOARD_ZTE_H218N, "ZTE H218N"}, "0x053d", "1234", "0x1305"}, {{BCM47XX_BOARD_NETGEAR_WNR3500L, "Netgear WNR3500L"}, "0x04CF", "3500", "02"}, - {{BCM47XX_BOARD_LINKSYS_WRT54G, "Linksys WRT54G/GS/GL"}, "0x0101", "42", "0x10"}, - {{BCM47XX_BOARD_LINKSYS_WRT54G, "Linksys WRT54G/GS/GL"}, "0x0467", "42", "0x10"}, - {{BCM47XX_BOARD_LINKSYS_WRT54G, "Linksys WRT54G/GS/GL"}, "0x0708", "42", "0x10"}, + {{BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0101, "Linksys WRT54G/GS/GL"}, "0x0101", "42", "0x10"}, + {{BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0467, "Linksys WRT54G/GS/GL"}, "0x0467", "42", "0x10"}, + {{BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0708, "Linksys WRT54G/GS/GL"}, "0x0708", "42", "0x10"}, { {0}, NULL}, }; @@ -237,6 +246,15 @@ static __init const struct bcm47xx_board_type *bcm47xx_board_get_nvram(void) } } 
+ if (bcm47xx_nvram_getenv("hardware_version", buf1, sizeof(buf1)) >= 0 && + bcm47xx_nvram_getenv("boardtype", buf2, sizeof(buf2)) >= 0) { + for (e2 = bcm47xx_board_list_boot_hw; e2->value1; e2++) { + if (!strstarts(buf1, e2->value1) && + !strcmp(buf2, e2->value2)) + return &e2->board; + } + } + if (bcm47xx_nvram_getenv("productid", buf1, sizeof(buf1)) >= 0) { for (e1 = bcm47xx_board_list_productid; e1->value1; e1++) { if (!strcmp(buf1, e1->value1)) diff --git a/arch/mips/bcm47xx/buttons.c b/arch/mips/bcm47xx/buttons.c index 49a1ce06844b..913182bcafb8 100644 --- a/arch/mips/bcm47xx/buttons.c +++ b/arch/mips/bcm47xx/buttons.c @@ -56,6 +56,11 @@ bcm47xx_buttons_asus_wl330ge[] __initconst = { }; static const struct gpio_keys_button +bcm47xx_buttons_asus_wl500g[] __initconst = { + BCM47XX_GPIO_KEY(6, KEY_RESTART), +}; + +static const struct gpio_keys_button bcm47xx_buttons_asus_wl500gd[] __initconst = { BCM47XX_GPIO_KEY(6, KEY_RESTART), }; @@ -265,7 +270,7 @@ bcm47xx_buttons_linksys_wrt54g3gv2[] __initconst = { }; static const struct gpio_keys_button -bcm47xx_buttons_linksys_wrt54gsv1[] __initconst = { +bcm47xx_buttons_linksys_wrt54g_generic[] __initconst = { BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON), BCM47XX_GPIO_KEY(6, KEY_RESTART), }; @@ -288,6 +293,13 @@ bcm47xx_buttons_linksys_wrtsl54gs[] __initconst = { BCM47XX_GPIO_KEY(6, KEY_RESTART), }; +/* Microsoft */ + +static const struct gpio_keys_button +bcm47xx_buttons_microsoft_nm700[] __initconst = { + BCM47XX_GPIO_KEY(7, KEY_RESTART), +}; + /* Motorola */ static const struct gpio_keys_button @@ -329,6 +341,12 @@ bcm47xx_buttons_netgear_wndr4500v1[] __initconst = { }; static const struct gpio_keys_button +bcm47xx_buttons_netgear_wnr3500lv1[] __initconst = { + BCM47XX_GPIO_KEY(4, KEY_RESTART), + BCM47XX_GPIO_KEY(6, KEY_WPS_BUTTON), +}; + +static const struct gpio_keys_button bcm47xx_buttons_netgear_wnr834bv2[] __initconst = { BCM47XX_GPIO_KEY(6, KEY_RESTART), }; @@ -395,6 +413,9 @@ int __init bcm47xx_buttons_register(void) case BCM47XX_BOARD_ASUS_WL330GE: err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_wl330ge); break; + case BCM47XX_BOARD_ASUS_WL500G: + err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_wl500g); + break; case BCM47XX_BOARD_ASUS_WL500GD: err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_wl500gd); break; @@ -501,12 +522,14 @@ int __init bcm47xx_buttons_register(void) case BCM47XX_BOARD_LINKSYS_WRT310NV1: err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt310nv1); break; - case BCM47XX_BOARD_LINKSYS_WRT54G: - err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt54gsv1); - break; case BCM47XX_BOARD_LINKSYS_WRT54G3GV2: err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt54g3gv2); break; + case BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0101: + case BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0467: + case BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0708: + err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt54g_generic); + break; case BCM47XX_BOARD_LINKSYS_WRT610NV1: err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt610nv1); break; @@ -517,6 +540,10 @@ int __init bcm47xx_buttons_register(void) err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrtsl54gs); break; + case BCM47XX_BOARD_MICROSOFT_MN700: + err = bcm47xx_copy_bdata(bcm47xx_buttons_microsoft_nm700); + break; + case BCM47XX_BOARD_MOTOROLA_WE800G: err = bcm47xx_copy_bdata(bcm47xx_buttons_motorola_we800g); break; @@ -536,6 +563,9 @@ int __init bcm47xx_buttons_register(void) case BCM47XX_BOARD_NETGEAR_WNDR4500V1: err = bcm47xx_copy_bdata(bcm47xx_buttons_netgear_wndr4500v1); break; + case BCM47XX_BOARD_NETGEAR_WNR3500L: + 
err = bcm47xx_copy_bdata(bcm47xx_buttons_netgear_wnr3500lv1); + break; case BCM47XX_BOARD_NETGEAR_WNR834BV2: err = bcm47xx_copy_bdata(bcm47xx_buttons_netgear_wnr834bv2); break; diff --git a/arch/mips/bcm47xx/leds.c b/arch/mips/bcm47xx/leds.c index adcb547a91c3..903a656d4119 100644 --- a/arch/mips/bcm47xx/leds.c +++ b/arch/mips/bcm47xx/leds.c @@ -35,6 +35,15 @@ bcm47xx_leds_asus_rtn12[] __initconst = { }; static const struct gpio_led +bcm47xx_leds_asus_rtn15u[] __initconst = { + /* TODO: Add "wlan" LED */ + BCM47XX_GPIO_LED(3, "blue", "wan", 1, LEDS_GPIO_DEFSTATE_OFF), + BCM47XX_GPIO_LED(4, "blue", "lan", 1, LEDS_GPIO_DEFSTATE_OFF), + BCM47XX_GPIO_LED(6, "blue", "power", 1, LEDS_GPIO_DEFSTATE_ON), + BCM47XX_GPIO_LED(9, "blue", "usb", 0, LEDS_GPIO_DEFSTATE_OFF), +}; + +static const struct gpio_led bcm47xx_leds_asus_rtn16[] __initconst = { BCM47XX_GPIO_LED(1, "blue", "power", 1, LEDS_GPIO_DEFSTATE_ON), BCM47XX_GPIO_LED(7, "blue", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF), @@ -42,8 +51,8 @@ bcm47xx_leds_asus_rtn16[] __initconst = { static const struct gpio_led bcm47xx_leds_asus_rtn66u[] __initconst = { - BCM47XX_GPIO_LED(12, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON), - BCM47XX_GPIO_LED(15, "unk", "usb", 1, LEDS_GPIO_DEFSTATE_OFF), + BCM47XX_GPIO_LED(12, "blue", "power", 1, LEDS_GPIO_DEFSTATE_ON), + BCM47XX_GPIO_LED(15, "blue", "usb", 1, LEDS_GPIO_DEFSTATE_OFF), }; static const struct gpio_led @@ -64,6 +73,11 @@ bcm47xx_leds_asus_wl330ge[] __initconst = { }; static const struct gpio_led +bcm47xx_leds_asus_wl500g[] __initconst = { + BCM47XX_GPIO_LED(0, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON), +}; + +static const struct gpio_led bcm47xx_leds_asus_wl500gd[] __initconst = { BCM47XX_GPIO_LED(0, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON), }; @@ -216,8 +230,8 @@ bcm47xx_leds_linksys_e1000v1[] __initconst = { static const struct gpio_led bcm47xx_leds_linksys_e1000v21[] __initconst = { - BCM47XX_GPIO_LED(5, "unk", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF), - BCM47XX_GPIO_LED(6, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON), + BCM47XX_GPIO_LED(5, "blue", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF), + BCM47XX_GPIO_LED(6, "blue", "power", 1, LEDS_GPIO_DEFSTATE_ON), BCM47XX_GPIO_LED(7, "amber", "wps", 0, LEDS_GPIO_DEFSTATE_OFF), BCM47XX_GPIO_LED(8, "blue", "wps", 0, LEDS_GPIO_DEFSTATE_OFF), }; @@ -292,7 +306,7 @@ bcm47xx_leds_linksys_wrt310nv1[] __initconst = { }; static const struct gpio_led -bcm47xx_leds_linksys_wrt54gsv1[] __initconst = { +bcm47xx_leds_linksys_wrt54g_generic[] __initconst = { BCM47XX_GPIO_LED(0, "unk", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF), BCM47XX_GPIO_LED(1, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON), BCM47XX_GPIO_LED(5, "white", "wps", 1, LEDS_GPIO_DEFSTATE_OFF), @@ -306,6 +320,24 @@ bcm47xx_leds_linksys_wrt54g3gv2[] __initconst = { BCM47XX_GPIO_LED(3, "blue", "3g", 0, LEDS_GPIO_DEFSTATE_OFF), }; +/* Verified on: WRT54GS V1.0 */ +static const struct gpio_led +bcm47xx_leds_linksys_wrt54g_type_0101[] __initconst = { + BCM47XX_GPIO_LED(0, "green", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF), + BCM47XX_GPIO_LED(1, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON), + BCM47XX_GPIO_LED(7, "green", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF), +}; + +/* Verified on: WRT54GL V1.1 */ +static const struct gpio_led +bcm47xx_leds_linksys_wrt54g_type_0467[] __initconst = { + BCM47XX_GPIO_LED(0, "green", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF), + BCM47XX_GPIO_LED(1, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON), + BCM47XX_GPIO_LED(2, "white", "wps", 1, LEDS_GPIO_DEFSTATE_OFF), + BCM47XX_GPIO_LED(3, "orange", "wps", 1, LEDS_GPIO_DEFSTATE_OFF), + 
BCM47XX_GPIO_LED(7, "green", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF), +}; + static const struct gpio_led bcm47xx_leds_linksys_wrt610nv1[] __initconst = { BCM47XX_GPIO_LED(0, "unk", "usb", 1, LEDS_GPIO_DEFSTATE_OFF), @@ -325,11 +357,17 @@ bcm47xx_leds_linksys_wrt610nv2[] __initconst = { static const struct gpio_led bcm47xx_leds_linksys_wrtsl54gs[] __initconst = { - BCM47XX_GPIO_LED(0, "unk", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF), - BCM47XX_GPIO_LED(1, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON), - BCM47XX_GPIO_LED(2, "white", "wps", 1, LEDS_GPIO_DEFSTATE_OFF), - BCM47XX_GPIO_LED(3, "orange", "wps", 1, LEDS_GPIO_DEFSTATE_OFF), - BCM47XX_GPIO_LED(7, "unk", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF), + BCM47XX_GPIO_LED(0, "green", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF), + BCM47XX_GPIO_LED(1, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON), + BCM47XX_GPIO_LED(5, "white", "wps", 1, LEDS_GPIO_DEFSTATE_OFF), + BCM47XX_GPIO_LED(7, "orange", "wps", 1, LEDS_GPIO_DEFSTATE_OFF), +}; + +/* Microsoft */ + +static const struct gpio_led +bcm47xx_leds_microsoft_nm700[] __initconst = { + BCM47XX_GPIO_LED(6, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON), }; /* Motorola */ @@ -377,6 +415,15 @@ bcm47xx_leds_netgear_wndr4500v1[] __initconst = { }; static const struct gpio_led +bcm47xx_leds_netgear_wnr3500lv1[] __initconst = { + BCM47XX_GPIO_LED(0, "blue", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF), + BCM47XX_GPIO_LED(1, "green", "wps", 1, LEDS_GPIO_DEFSTATE_OFF), + BCM47XX_GPIO_LED(2, "green", "wan", 1, LEDS_GPIO_DEFSTATE_OFF), + BCM47XX_GPIO_LED(3, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON), + BCM47XX_GPIO_LED(7, "amber", "power", 0, LEDS_GPIO_DEFSTATE_OFF), +}; + +static const struct gpio_led bcm47xx_leds_netgear_wnr834bv2[] __initconst = { BCM47XX_GPIO_LED(2, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON), BCM47XX_GPIO_LED(3, "amber", "power", 0, LEDS_GPIO_DEFSTATE_OFF), @@ -417,6 +464,9 @@ void __init bcm47xx_leds_register(void) case BCM47XX_BOARD_ASUS_RTN12: bcm47xx_set_pdata(bcm47xx_leds_asus_rtn12); break; + case BCM47XX_BOARD_ASUS_RTN15U: + bcm47xx_set_pdata(bcm47xx_leds_asus_rtn15u); + break; case BCM47XX_BOARD_ASUS_RTN16: bcm47xx_set_pdata(bcm47xx_leds_asus_rtn16); break; @@ -432,6 +482,9 @@ void __init bcm47xx_leds_register(void) case BCM47XX_BOARD_ASUS_WL330GE: bcm47xx_set_pdata(bcm47xx_leds_asus_wl330ge); break; + case BCM47XX_BOARD_ASUS_WL500G: + bcm47xx_set_pdata(bcm47xx_leds_asus_wl500g); + break; case BCM47XX_BOARD_ASUS_WL500GD: bcm47xx_set_pdata(bcm47xx_leds_asus_wl500gd); break; @@ -538,12 +591,18 @@ void __init bcm47xx_leds_register(void) case BCM47XX_BOARD_LINKSYS_WRT310NV1: bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt310nv1); break; - case BCM47XX_BOARD_LINKSYS_WRT54G: - bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt54gsv1); - break; case BCM47XX_BOARD_LINKSYS_WRT54G3GV2: bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt54g3gv2); break; + case BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0101: + bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt54g_type_0101); + break; + case BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0467: + bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt54g_type_0467); + break; + case BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0708: + bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt54g_generic); + break; case BCM47XX_BOARD_LINKSYS_WRT610NV1: bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt610nv1); break; @@ -554,6 +613,10 @@ void __init bcm47xx_leds_register(void) bcm47xx_set_pdata(bcm47xx_leds_linksys_wrtsl54gs); break; + case BCM47XX_BOARD_MICROSOFT_MN700: + bcm47xx_set_pdata(bcm47xx_leds_microsoft_nm700); + break; + case BCM47XX_BOARD_MOTOROLA_WE800G: 
bcm47xx_set_pdata(bcm47xx_leds_motorola_we800g); break; @@ -570,6 +633,9 @@ void __init bcm47xx_leds_register(void) case BCM47XX_BOARD_NETGEAR_WNDR4500V1: bcm47xx_set_pdata(bcm47xx_leds_netgear_wndr4500v1); break; + case BCM47XX_BOARD_NETGEAR_WNR3500L: + bcm47xx_set_pdata(bcm47xx_leds_netgear_wnr3500lv1); + break; case BCM47XX_BOARD_NETGEAR_WNR834BV2: bcm47xx_set_pdata(bcm47xx_leds_netgear_wnr834bv2); break; diff --git a/arch/mips/bcm47xx/prom.c b/arch/mips/bcm47xx/prom.c index 1a03a2f43496..1b170bf5f7f0 100644 --- a/arch/mips/bcm47xx/prom.c +++ b/arch/mips/bcm47xx/prom.c @@ -51,6 +51,8 @@ __init void bcm47xx_set_system_type(u16 chip_id) chip_id); } +static unsigned long lowmem __initdata; + static __init void prom_init_mem(void) { unsigned long mem; @@ -87,6 +89,7 @@ static __init void prom_init_mem(void) if (!memcmp(prom_init, prom_init + mem, 32)) break; } + lowmem = mem; /* Ignoring the last page when ddr size is 128M. Cached * accesses to last page is causing the processor to prefetch @@ -95,7 +98,6 @@ static __init void prom_init_mem(void) */ if (c->cputype == CPU_74K && (mem == (128 << 20))) mem -= 0x1000; - add_memory_region(0, mem, BOOT_MEM_RAM); } @@ -114,3 +116,67 @@ void __init prom_init(void) void __init prom_free_prom_memory(void) { } + +#if defined(CONFIG_BCM47XX_BCMA) && defined(CONFIG_HIGHMEM) + +#define EXTVBASE 0xc0000000 +#define ENTRYLO(x) ((pte_val(pfn_pte((x) >> _PFN_SHIFT, PAGE_KERNEL_UNCACHED)) >> 6) | 1) + +#include <asm/tlbflush.h> + +/* Stripped version of tlb_init, with the call to build_tlb_refill_handler + * dropped. Calling it at this stage causes a hang. + */ +void __cpuinit early_tlb_init(void) +{ + write_c0_pagemask(PM_DEFAULT_MASK); + write_c0_wired(0); + temp_tlb_entry = current_cpu_data.tlbsize - 1; + local_flush_tlb_all(); +} + +void __init bcm47xx_prom_highmem_init(void) +{ + unsigned long off = (unsigned long)prom_init; + unsigned long extmem = 0; + bool highmem_region = false; + + if (WARN_ON(bcm47xx_bus_type != BCM47XX_BUS_TYPE_BCMA)) + return; + + if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706) + highmem_region = true; + + if (lowmem != 128 << 20 || !highmem_region) + return; + + early_tlb_init(); + + /* Add one temporary TLB entry to map SDRAM Region 2. 
+ * Physical Virtual + * 0x80000000 0xc0000000 (1st: 256MB) + * 0x90000000 0xd0000000 (2nd: 256MB) + */ + add_temporary_entry(ENTRYLO(0x80000000), + ENTRYLO(0x80000000 + (256 << 20)), + EXTVBASE, PM_256M); + + off = EXTVBASE + __pa(off); + for (extmem = 128 << 20; extmem < 512 << 20; extmem <<= 1) { + if (!memcmp(prom_init, (void *)(off + extmem), 16)) + break; + } + extmem -= lowmem; + + early_tlb_init(); + + if (!extmem) + return; + + pr_warn("Found %lu MiB of extra memory, but highmem is unsupported yet!\n", + extmem >> 20); + + /* TODO: Register extra memory */ +} + +#endif /* defined(CONFIG_BCM47XX_BCMA) && defined(CONFIG_HIGHMEM) */ diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c index 63a4b0e915dc..c00585d915bc 100644 --- a/arch/mips/bcm47xx/setup.c +++ b/arch/mips/bcm47xx/setup.c @@ -59,7 +59,16 @@ static void bcm47xx_machine_restart(char *command) switch (bcm47xx_bus_type) { #ifdef CONFIG_BCM47XX_SSB case BCM47XX_BUS_TYPE_SSB: + if (bcm47xx_bus.ssb.chip_id == 0x4785) + write_c0_diag4(1 << 22); ssb_watchdog_timer_set(&bcm47xx_bus.ssb, 1); + if (bcm47xx_bus.ssb.chip_id == 0x4785) { + __asm__ __volatile__( + ".set\tmips3\n\t" + "sync\n\t" + "wait\n\t" + ".set\tmips0"); + } break; #endif #ifdef CONFIG_BCM47XX_BCMA @@ -202,6 +211,10 @@ static void __init bcm47xx_register_bcma(void) err = bcma_host_soc_register(&bcm47xx_bus.bcma); if (err) + panic("Failed to register BCMA bus (err %d)", err); + + err = bcma_host_soc_init(&bcm47xx_bus.bcma); + if (err) panic("Failed to initialize BCMA bus (err %d)", err); bcm47xx_fill_bcma_boardinfo(&bcm47xx_bus.bcma.bus.boardinfo, NULL); @@ -218,6 +231,9 @@ void __init plat_mem_setup(void) bcm47xx_bus_type = BCM47XX_BUS_TYPE_BCMA; bcm47xx_register_bcma(); bcm47xx_set_system_type(bcm47xx_bus.bcma.bus.chipinfo.id); +#ifdef CONFIG_HIGHMEM + bcm47xx_prom_highmem_init(); +#endif #endif } else { printk(KERN_INFO "bcm47xx: using ssb bus\n"); diff --git a/arch/mips/bcm47xx/sprom.c b/arch/mips/bcm47xx/sprom.c index da4cdb16844e..41226b68de3d 100644 --- a/arch/mips/bcm47xx/sprom.c +++ b/arch/mips/bcm47xx/sprom.c @@ -28,6 +28,8 @@ #include <bcm47xx.h> #include <bcm47xx_nvram.h> +#include <linux/if_ether.h> +#include <linux/etherdevice.h> static void create_key(const char *prefix, const char *postfix, const char *name, char *buf, int len) @@ -631,6 +633,33 @@ static void bcm47xx_fill_sprom_path_r45(struct ssb_sprom *sprom, } } +static bool bcm47xx_is_valid_mac(u8 *mac) +{ + return mac && !(mac[0] == 0x00 && mac[1] == 0x90 && mac[2] == 0x4c); +} + +static int bcm47xx_increase_mac_addr(u8 *mac, u8 num) +{ + u8 *oui = mac + ETH_ALEN/2 - 1; + u8 *p = mac + ETH_ALEN - 1; + + do { + (*p) += num; + if (*p > num) + break; + p--; + num = 1; + } while (p != oui); + + if (p == oui) { + pr_err("unable to fetch mac address\n"); + return -ENOENT; + } + return 0; +} + +static int mac_addr_used = 2; + static void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom, const char *prefix, bool fallback) { @@ -648,6 +677,25 @@ static void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom, nvram_read_macaddr(prefix, "macaddr", sprom->il0mac, fallback); nvram_read_macaddr(prefix, "il0macaddr", sprom->il0mac, fallback); + + /* The address prefix 00:90:4C is used by Broadcom in their initial + configuration. When a mac address with the prefix 00:90:4C is used + all devices from the same series are sharing the same mac address. + To prevent mac address collisions we replace them with a mac address + based on the base address. 
*/ + if (!bcm47xx_is_valid_mac(sprom->il0mac)) { + u8 mac[6]; + + nvram_read_macaddr(NULL, "et0macaddr", mac, false); + if (bcm47xx_is_valid_mac(mac)) { + int err = bcm47xx_increase_mac_addr(mac, mac_addr_used); + + if (!err) { + ether_addr_copy(sprom->il0mac, mac); + mac_addr_used++; + } + } + } } static void bcm47xx_fill_board_data(struct ssb_sprom *sprom, const char *prefix, diff --git a/arch/mips/bcm63xx/cpu.c b/arch/mips/bcm63xx/cpu.c index fd4e76c00a42..536f64443031 100644 --- a/arch/mips/bcm63xx/cpu.c +++ b/arch/mips/bcm63xx/cpu.c @@ -24,7 +24,9 @@ EXPORT_SYMBOL(bcm63xx_regs_base); const int *bcm63xx_irqs; EXPORT_SYMBOL(bcm63xx_irqs); -static u16 bcm63xx_cpu_id; +u16 bcm63xx_cpu_id __read_mostly; +EXPORT_SYMBOL(bcm63xx_cpu_id); + static u8 bcm63xx_cpu_rev; static unsigned int bcm63xx_cpu_freq; static unsigned int bcm63xx_memory_size; @@ -97,13 +99,6 @@ static const int bcm6368_irqs[] = { }; -u16 __bcm63xx_get_cpu_id(void) -{ - return bcm63xx_cpu_id; -} - -EXPORT_SYMBOL(__bcm63xx_get_cpu_id); - u8 bcm63xx_get_cpu_rev(void) { return bcm63xx_cpu_rev; diff --git a/arch/mips/bcm63xx/dev-enet.c b/arch/mips/bcm63xx/dev-enet.c index 52bc01df9bfe..e8284771d620 100644 --- a/arch/mips/bcm63xx/dev-enet.c +++ b/arch/mips/bcm63xx/dev-enet.c @@ -14,7 +14,6 @@ #include <bcm63xx_io.h> #include <bcm63xx_regs.h> -#ifdef BCMCPU_RUNTIME_DETECT static const unsigned long bcm6348_regs_enetdmac[] = { [ENETDMAC_CHANCFG] = ENETDMAC_CHANCFG_REG, [ENETDMAC_IR] = ENETDMAC_IR_REG, @@ -43,9 +42,6 @@ static __init void bcm63xx_enetdmac_regs_init(void) else bcm63xx_regs_enetdmac = bcm6348_regs_enetdmac; } -#else -static __init void bcm63xx_enetdmac_regs_init(void) { } -#endif static struct resource shared_res[] = { { diff --git a/arch/mips/bcm63xx/dev-spi.c b/arch/mips/bcm63xx/dev-spi.c index d12daed749bc..ad448e41e3bd 100644 --- a/arch/mips/bcm63xx/dev-spi.c +++ b/arch/mips/bcm63xx/dev-spi.c @@ -18,7 +18,6 @@ #include <bcm63xx_dev_spi.h> #include <bcm63xx_regs.h> -#ifdef BCMCPU_RUNTIME_DETECT /* * register offsets */ @@ -41,9 +40,6 @@ static __init void bcm63xx_spi_regs_init(void) BCMCPU_IS_6362() || BCMCPU_IS_6368()) bcm63xx_regs_spi = bcm6358_regs_spi; } -#else -static __init void bcm63xx_spi_regs_init(void) { } -#endif static struct resource spi_resources[] = { { diff --git a/arch/mips/bcm63xx/gpio.c b/arch/mips/bcm63xx/gpio.c index a6c2135dbf38..468bc7b99cd3 100644 --- a/arch/mips/bcm63xx/gpio.c +++ b/arch/mips/bcm63xx/gpio.c @@ -18,19 +18,6 @@ #include <bcm63xx_io.h> #include <bcm63xx_regs.h> -#ifndef BCMCPU_RUNTIME_DETECT -#define gpio_out_low_reg GPIO_DATA_LO_REG -#ifdef CONFIG_BCM63XX_CPU_6345 -#ifdef gpio_out_low_reg -#undef gpio_out_low_reg -#define gpio_out_low_reg GPIO_DATA_LO_REG_6345 -#endif /* gpio_out_low_reg */ -#endif /* CONFIG_BCM63XX_CPU_6345 */ - -static inline void bcm63xx_gpio_out_low_reg_init(void) -{ -} -#else /* ! BCMCPU_RUNTIME_DETECT */ static u32 gpio_out_low_reg; static void bcm63xx_gpio_out_low_reg_init(void) @@ -44,7 +31,6 @@ static void bcm63xx_gpio_out_low_reg_init(void) break; } } -#endif /* ! 
BCMCPU_RUNTIME_DETECT */ static DEFINE_SPINLOCK(bcm63xx_gpio_lock); static u32 gpio_out_low, gpio_out_high; diff --git a/arch/mips/bcm63xx/irq.c b/arch/mips/bcm63xx/irq.c index 1525f8a3841b..b94bf44d8d8e 100644 --- a/arch/mips/bcm63xx/irq.c +++ b/arch/mips/bcm63xx/irq.c @@ -12,6 +12,7 @@ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/irq.h> +#include <linux/spinlock.h> #include <asm/irq_cpu.h> #include <asm/mipsregs.h> #include <bcm63xx_cpu.h> @@ -19,222 +20,20 @@ #include <bcm63xx_io.h> #include <bcm63xx_irq.h> -static void __dispatch_internal(void) __maybe_unused; -static void __dispatch_internal_64(void) __maybe_unused; -static void __internal_irq_mask_32(unsigned int irq) __maybe_unused; -static void __internal_irq_mask_64(unsigned int irq) __maybe_unused; -static void __internal_irq_unmask_32(unsigned int irq) __maybe_unused; -static void __internal_irq_unmask_64(unsigned int irq) __maybe_unused; - -#ifndef BCMCPU_RUNTIME_DETECT -#ifdef CONFIG_BCM63XX_CPU_3368 -#define irq_stat_reg PERF_IRQSTAT_3368_REG -#define irq_mask_reg PERF_IRQMASK_3368_REG -#define irq_bits 32 -#define is_ext_irq_cascaded 0 -#define ext_irq_start 0 -#define ext_irq_end 0 -#define ext_irq_count 4 -#define ext_irq_cfg_reg1 PERF_EXTIRQ_CFG_REG_3368 -#define ext_irq_cfg_reg2 0 -#endif -#ifdef CONFIG_BCM63XX_CPU_6328 -#define irq_stat_reg PERF_IRQSTAT_6328_REG -#define irq_mask_reg PERF_IRQMASK_6328_REG -#define irq_bits 64 -#define is_ext_irq_cascaded 1 -#define ext_irq_start (BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE) -#define ext_irq_end (BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE) -#define ext_irq_count 4 -#define ext_irq_cfg_reg1 PERF_EXTIRQ_CFG_REG_6328 -#define ext_irq_cfg_reg2 0 -#endif -#ifdef CONFIG_BCM63XX_CPU_6338 -#define irq_stat_reg PERF_IRQSTAT_6338_REG -#define irq_mask_reg PERF_IRQMASK_6338_REG -#define irq_bits 32 -#define is_ext_irq_cascaded 0 -#define ext_irq_start 0 -#define ext_irq_end 0 -#define ext_irq_count 4 -#define ext_irq_cfg_reg1 PERF_EXTIRQ_CFG_REG_6338 -#define ext_irq_cfg_reg2 0 -#endif -#ifdef CONFIG_BCM63XX_CPU_6345 -#define irq_stat_reg PERF_IRQSTAT_6345_REG -#define irq_mask_reg PERF_IRQMASK_6345_REG -#define irq_bits 32 -#define is_ext_irq_cascaded 0 -#define ext_irq_start 0 -#define ext_irq_end 0 -#define ext_irq_count 4 -#define ext_irq_cfg_reg1 PERF_EXTIRQ_CFG_REG_6345 -#define ext_irq_cfg_reg2 0 -#endif -#ifdef CONFIG_BCM63XX_CPU_6348 -#define irq_stat_reg PERF_IRQSTAT_6348_REG -#define irq_mask_reg PERF_IRQMASK_6348_REG -#define irq_bits 32 -#define is_ext_irq_cascaded 0 -#define ext_irq_start 0 -#define ext_irq_end 0 -#define ext_irq_count 4 -#define ext_irq_cfg_reg1 PERF_EXTIRQ_CFG_REG_6348 -#define ext_irq_cfg_reg2 0 -#endif -#ifdef CONFIG_BCM63XX_CPU_6358 -#define irq_stat_reg PERF_IRQSTAT_6358_REG -#define irq_mask_reg PERF_IRQMASK_6358_REG -#define irq_bits 32 -#define is_ext_irq_cascaded 1 -#define ext_irq_start (BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE) -#define ext_irq_end (BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE) -#define ext_irq_count 4 -#define ext_irq_cfg_reg1 PERF_EXTIRQ_CFG_REG_6358 -#define ext_irq_cfg_reg2 0 -#endif -#ifdef CONFIG_BCM63XX_CPU_6362 -#define irq_stat_reg PERF_IRQSTAT_6362_REG -#define irq_mask_reg PERF_IRQMASK_6362_REG -#define irq_bits 64 -#define is_ext_irq_cascaded 1 -#define ext_irq_start (BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE) -#define ext_irq_end (BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE) -#define ext_irq_count 4 -#define ext_irq_cfg_reg1 PERF_EXTIRQ_CFG_REG_6362 -#define ext_irq_cfg_reg2 0 -#endif -#ifdef 
CONFIG_BCM63XX_CPU_6368 -#define irq_stat_reg PERF_IRQSTAT_6368_REG -#define irq_mask_reg PERF_IRQMASK_6368_REG -#define irq_bits 64 -#define is_ext_irq_cascaded 1 -#define ext_irq_start (BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE) -#define ext_irq_end (BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE) -#define ext_irq_count 6 -#define ext_irq_cfg_reg1 PERF_EXTIRQ_CFG_REG_6368 -#define ext_irq_cfg_reg2 PERF_EXTIRQ_CFG_REG2_6368 -#endif - -#if irq_bits == 32 -#define dispatch_internal __dispatch_internal -#define internal_irq_mask __internal_irq_mask_32 -#define internal_irq_unmask __internal_irq_unmask_32 -#else -#define dispatch_internal __dispatch_internal_64 -#define internal_irq_mask __internal_irq_mask_64 -#define internal_irq_unmask __internal_irq_unmask_64 -#endif -#define irq_stat_addr (bcm63xx_regset_address(RSET_PERF) + irq_stat_reg) -#define irq_mask_addr (bcm63xx_regset_address(RSET_PERF) + irq_mask_reg) +static DEFINE_SPINLOCK(ipic_lock); +static DEFINE_SPINLOCK(epic_lock); -static inline void bcm63xx_init_irq(void) -{ -} -#else /* ! BCMCPU_RUNTIME_DETECT */ - -static u32 irq_stat_addr, irq_mask_addr; -static void (*dispatch_internal)(void); +static u32 irq_stat_addr[2]; +static u32 irq_mask_addr[2]; +static void (*dispatch_internal)(int cpu); static int is_ext_irq_cascaded; static unsigned int ext_irq_count; static unsigned int ext_irq_start, ext_irq_end; static unsigned int ext_irq_cfg_reg1, ext_irq_cfg_reg2; -static void (*internal_irq_mask)(unsigned int irq); -static void (*internal_irq_unmask)(unsigned int irq); +static void (*internal_irq_mask)(struct irq_data *d); +static void (*internal_irq_unmask)(struct irq_data *d, const struct cpumask *m); -static void bcm63xx_init_irq(void) -{ - int irq_bits; - - irq_stat_addr = bcm63xx_regset_address(RSET_PERF); - irq_mask_addr = bcm63xx_regset_address(RSET_PERF); - - switch (bcm63xx_get_cpu_id()) { - case BCM3368_CPU_ID: - irq_stat_addr += PERF_IRQSTAT_3368_REG; - irq_mask_addr += PERF_IRQMASK_3368_REG; - irq_bits = 32; - ext_irq_count = 4; - ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368; - break; - case BCM6328_CPU_ID: - irq_stat_addr += PERF_IRQSTAT_6328_REG; - irq_mask_addr += PERF_IRQMASK_6328_REG; - irq_bits = 64; - ext_irq_count = 4; - is_ext_irq_cascaded = 1; - ext_irq_start = BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE; - ext_irq_end = BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE; - ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6328; - break; - case BCM6338_CPU_ID: - irq_stat_addr += PERF_IRQSTAT_6338_REG; - irq_mask_addr += PERF_IRQMASK_6338_REG; - irq_bits = 32; - ext_irq_count = 4; - ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6338; - break; - case BCM6345_CPU_ID: - irq_stat_addr += PERF_IRQSTAT_6345_REG; - irq_mask_addr += PERF_IRQMASK_6345_REG; - irq_bits = 32; - ext_irq_count = 4; - ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6345; - break; - case BCM6348_CPU_ID: - irq_stat_addr += PERF_IRQSTAT_6348_REG; - irq_mask_addr += PERF_IRQMASK_6348_REG; - irq_bits = 32; - ext_irq_count = 4; - ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348; - break; - case BCM6358_CPU_ID: - irq_stat_addr += PERF_IRQSTAT_6358_REG; - irq_mask_addr += PERF_IRQMASK_6358_REG; - irq_bits = 32; - ext_irq_count = 4; - is_ext_irq_cascaded = 1; - ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE; - ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE; - ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358; - break; - case BCM6362_CPU_ID: - irq_stat_addr += PERF_IRQSTAT_6362_REG; - irq_mask_addr += PERF_IRQMASK_6362_REG; - irq_bits = 64; - ext_irq_count = 4; - is_ext_irq_cascaded = 1; - ext_irq_start = 
BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE; - ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE; - ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362; - break; - case BCM6368_CPU_ID: - irq_stat_addr += PERF_IRQSTAT_6368_REG; - irq_mask_addr += PERF_IRQMASK_6368_REG; - irq_bits = 64; - ext_irq_count = 6; - is_ext_irq_cascaded = 1; - ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE; - ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE; - ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368; - ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368; - break; - default: - BUG(); - } - - if (irq_bits == 32) { - dispatch_internal = __dispatch_internal; - internal_irq_mask = __internal_irq_mask_32; - internal_irq_unmask = __internal_irq_unmask_32; - } else { - dispatch_internal = __dispatch_internal_64; - internal_irq_mask = __internal_irq_mask_64; - internal_irq_unmask = __internal_irq_unmask_64; - } -} -#endif /* ! BCMCPU_RUNTIME_DETECT */ static inline u32 get_ext_irq_perf_reg(int irq) { @@ -252,53 +51,113 @@ static inline void handle_internal(int intbit) do_IRQ(intbit + IRQ_INTERNAL_BASE); } +static inline int enable_irq_for_cpu(int cpu, struct irq_data *d, + const struct cpumask *m) +{ + bool enable = cpu_online(cpu); + +#ifdef CONFIG_SMP + if (m) + enable &= cpu_isset(cpu, *m); + else if (irqd_affinity_was_set(d)) + enable &= cpu_isset(cpu, *d->affinity); +#endif + return enable; +} + /* * dispatch internal devices IRQ (uart, enet, watchdog, ...). do not * prioritize any interrupt relatively to another. the static counter * will resume the loop where it ended the last time we left this * function. */ -static void __dispatch_internal(void) -{ - u32 pending; - static int i; - - pending = bcm_readl(irq_stat_addr) & bcm_readl(irq_mask_addr); - if (!pending) - return ; - - while (1) { - int to_call = i; - - i = (i + 1) & 0x1f; - if (pending & (1 << to_call)) { - handle_internal(to_call); - break; - } - } +#define BUILD_IPIC_INTERNAL(width) \ +void __dispatch_internal_##width(int cpu) \ +{ \ + u32 pending[width / 32]; \ + unsigned int src, tgt; \ + bool irqs_pending = false; \ + static unsigned int i[2]; \ + unsigned int *next = &i[cpu]; \ + unsigned long flags; \ + \ + /* read registers in reverse order */ \ + spin_lock_irqsave(&ipic_lock, flags); \ + for (src = 0, tgt = (width / 32); src < (width / 32); src++) { \ + u32 val; \ + \ + val = bcm_readl(irq_stat_addr[cpu] + src * sizeof(u32)); \ + val &= bcm_readl(irq_mask_addr[cpu] + src * sizeof(u32)); \ + pending[--tgt] = val; \ + \ + if (val) \ + irqs_pending = true; \ + } \ + spin_unlock_irqrestore(&ipic_lock, flags); \ + \ + if (!irqs_pending) \ + return; \ + \ + while (1) { \ + unsigned int to_call = *next; \ + \ + *next = (*next + 1) & (width - 1); \ + if (pending[to_call / 32] & (1 << (to_call & 0x1f))) { \ + handle_internal(to_call); \ + break; \ + } \ + } \ +} \ + \ +static void __internal_irq_mask_##width(struct irq_data *d) \ +{ \ + u32 val; \ + unsigned irq = d->irq - IRQ_INTERNAL_BASE; \ + unsigned reg = (irq / 32) ^ (width/32 - 1); \ + unsigned bit = irq & 0x1f; \ + unsigned long flags; \ + int cpu; \ + \ + spin_lock_irqsave(&ipic_lock, flags); \ + for_each_present_cpu(cpu) { \ + if (!irq_mask_addr[cpu]) \ + break; \ + \ + val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\ + val &= ~(1 << bit); \ + bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\ + } \ + spin_unlock_irqrestore(&ipic_lock, flags); \ +} \ + \ +static void __internal_irq_unmask_##width(struct irq_data *d, \ + const struct cpumask *m) \ +{ \ + u32 val; \ + unsigned irq = d->irq 
- IRQ_INTERNAL_BASE; \ + unsigned reg = (irq / 32) ^ (width/32 - 1); \ + unsigned bit = irq & 0x1f; \ + unsigned long flags; \ + int cpu; \ + \ + spin_lock_irqsave(&ipic_lock, flags); \ + for_each_present_cpu(cpu) { \ + if (!irq_mask_addr[cpu]) \ + break; \ + \ + val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\ + if (enable_irq_for_cpu(cpu, d, m)) \ + val |= (1 << bit); \ + else \ + val &= ~(1 << bit); \ + bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\ + } \ + spin_unlock_irqrestore(&ipic_lock, flags); \ } -static void __dispatch_internal_64(void) -{ - u64 pending; - static int i; - - pending = bcm_readq(irq_stat_addr) & bcm_readq(irq_mask_addr); - - if (!pending) - return ; - - while (1) { - int to_call = i; - - i = (i + 1) & 0x3f; - if (pending & (1ull << to_call)) { - handle_internal(to_call); - break; - } - } -} +BUILD_IPIC_INTERNAL(32); +BUILD_IPIC_INTERNAL(64); asmlinkage void plat_irq_dispatch(void) { @@ -317,8 +176,11 @@ asmlinkage void plat_irq_dispatch(void) if (cause & CAUSEF_IP1) do_IRQ(1); if (cause & CAUSEF_IP2) - dispatch_internal(); - if (!is_ext_irq_cascaded) { + dispatch_internal(0); + if (is_ext_irq_cascaded) { + if (cause & CAUSEF_IP3) + dispatch_internal(1); + } else { if (cause & CAUSEF_IP3) do_IRQ(IRQ_EXT_0); if (cause & CAUSEF_IP4) @@ -335,50 +197,14 @@ asmlinkage void plat_irq_dispatch(void) * internal IRQs operations: only mask/unmask on PERF irq mask * register. */ -static void __internal_irq_mask_32(unsigned int irq) -{ - u32 mask; - - mask = bcm_readl(irq_mask_addr); - mask &= ~(1 << irq); - bcm_writel(mask, irq_mask_addr); -} - -static void __internal_irq_mask_64(unsigned int irq) -{ - u64 mask; - - mask = bcm_readq(irq_mask_addr); - mask &= ~(1ull << irq); - bcm_writeq(mask, irq_mask_addr); -} - -static void __internal_irq_unmask_32(unsigned int irq) -{ - u32 mask; - - mask = bcm_readl(irq_mask_addr); - mask |= (1 << irq); - bcm_writel(mask, irq_mask_addr); -} - -static void __internal_irq_unmask_64(unsigned int irq) -{ - u64 mask; - - mask = bcm_readq(irq_mask_addr); - mask |= (1ull << irq); - bcm_writeq(mask, irq_mask_addr); -} - static void bcm63xx_internal_irq_mask(struct irq_data *d) { - internal_irq_mask(d->irq - IRQ_INTERNAL_BASE); + internal_irq_mask(d); } static void bcm63xx_internal_irq_unmask(struct irq_data *d) { - internal_irq_unmask(d->irq - IRQ_INTERNAL_BASE); + internal_irq_unmask(d, NULL); } /* @@ -389,8 +215,10 @@ static void bcm63xx_external_irq_mask(struct irq_data *d) { unsigned int irq = d->irq - IRQ_EXTERNAL_BASE; u32 reg, regaddr; + unsigned long flags; regaddr = get_ext_irq_perf_reg(irq); + spin_lock_irqsave(&epic_lock, flags); reg = bcm_perf_readl(regaddr); if (BCMCPU_IS_6348()) @@ -399,16 +227,20 @@ static void bcm63xx_external_irq_mask(struct irq_data *d) reg &= ~EXTIRQ_CFG_MASK(irq % 4); bcm_perf_writel(reg, regaddr); + spin_unlock_irqrestore(&epic_lock, flags); + if (is_ext_irq_cascaded) - internal_irq_mask(irq + ext_irq_start); + internal_irq_mask(irq_get_irq_data(irq + ext_irq_start)); } static void bcm63xx_external_irq_unmask(struct irq_data *d) { unsigned int irq = d->irq - IRQ_EXTERNAL_BASE; u32 reg, regaddr; + unsigned long flags; regaddr = get_ext_irq_perf_reg(irq); + spin_lock_irqsave(&epic_lock, flags); reg = bcm_perf_readl(regaddr); if (BCMCPU_IS_6348()) @@ -417,17 +249,21 @@ static void bcm63xx_external_irq_unmask(struct irq_data *d) reg |= EXTIRQ_CFG_MASK(irq % 4); bcm_perf_writel(reg, regaddr); + spin_unlock_irqrestore(&epic_lock, flags); if (is_ext_irq_cascaded) - internal_irq_unmask(irq + 
ext_irq_start); + internal_irq_unmask(irq_get_irq_data(irq + ext_irq_start), + NULL); } static void bcm63xx_external_irq_clear(struct irq_data *d) { unsigned int irq = d->irq - IRQ_EXTERNAL_BASE; u32 reg, regaddr; + unsigned long flags; regaddr = get_ext_irq_perf_reg(irq); + spin_lock_irqsave(&epic_lock, flags); reg = bcm_perf_readl(regaddr); if (BCMCPU_IS_6348()) @@ -436,6 +272,7 @@ static void bcm63xx_external_irq_clear(struct irq_data *d) reg |= EXTIRQ_CFG_CLEAR(irq % 4); bcm_perf_writel(reg, regaddr); + spin_unlock_irqrestore(&epic_lock, flags); } static int bcm63xx_external_irq_set_type(struct irq_data *d, @@ -444,6 +281,7 @@ static int bcm63xx_external_irq_set_type(struct irq_data *d, unsigned int irq = d->irq - IRQ_EXTERNAL_BASE; u32 reg, regaddr; int levelsense, sense, bothedge; + unsigned long flags; flow_type &= IRQ_TYPE_SENSE_MASK; @@ -478,6 +316,7 @@ static int bcm63xx_external_irq_set_type(struct irq_data *d, } regaddr = get_ext_irq_perf_reg(irq); + spin_lock_irqsave(&epic_lock, flags); reg = bcm_perf_readl(regaddr); irq %= 4; @@ -522,6 +361,7 @@ static int bcm63xx_external_irq_set_type(struct irq_data *d, } bcm_perf_writel(reg, regaddr); + spin_unlock_irqrestore(&epic_lock, flags); irqd_set_trigger_type(d, flow_type); if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) @@ -532,6 +372,18 @@ static int bcm63xx_external_irq_set_type(struct irq_data *d, return IRQ_SET_MASK_OK_NOCOPY; } +#ifdef CONFIG_SMP +static int bcm63xx_internal_set_affinity(struct irq_data *data, + const struct cpumask *dest, + bool force) +{ + if (!irqd_irq_disabled(data)) + internal_irq_unmask(data, dest); + + return 0; +} +#endif + static struct irq_chip bcm63xx_internal_irq_chip = { .name = "bcm63xx_ipic", .irq_mask = bcm63xx_internal_irq_mask, @@ -554,12 +406,130 @@ static struct irqaction cpu_ip2_cascade_action = { .flags = IRQF_NO_THREAD, }; +#ifdef CONFIG_SMP +static struct irqaction cpu_ip3_cascade_action = { + .handler = no_action, + .name = "cascade_ip3", + .flags = IRQF_NO_THREAD, +}; +#endif + static struct irqaction cpu_ext_cascade_action = { .handler = no_action, .name = "cascade_extirq", .flags = IRQF_NO_THREAD, }; +static void bcm63xx_init_irq(void) +{ + int irq_bits; + + irq_stat_addr[0] = bcm63xx_regset_address(RSET_PERF); + irq_mask_addr[0] = bcm63xx_regset_address(RSET_PERF); + irq_stat_addr[1] = bcm63xx_regset_address(RSET_PERF); + irq_mask_addr[1] = bcm63xx_regset_address(RSET_PERF); + + switch (bcm63xx_get_cpu_id()) { + case BCM3368_CPU_ID: + irq_stat_addr[0] += PERF_IRQSTAT_3368_REG; + irq_mask_addr[0] += PERF_IRQMASK_3368_REG; + irq_stat_addr[1] = 0; + irq_mask_addr[1] = 0; + irq_bits = 32; + ext_irq_count = 4; + ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368; + break; + case BCM6328_CPU_ID: + irq_stat_addr[0] += PERF_IRQSTAT_6328_REG(0); + irq_mask_addr[0] += PERF_IRQMASK_6328_REG(0); + irq_stat_addr[1] += PERF_IRQSTAT_6328_REG(1); + irq_mask_addr[1] += PERF_IRQMASK_6328_REG(1); + irq_bits = 64; + ext_irq_count = 4; + is_ext_irq_cascaded = 1; + ext_irq_start = BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE; + ext_irq_end = BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE; + ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6328; + break; + case BCM6338_CPU_ID: + irq_stat_addr[0] += PERF_IRQSTAT_6338_REG; + irq_mask_addr[0] += PERF_IRQMASK_6338_REG; + irq_stat_addr[1] = 0; + irq_mask_addr[1] = 0; + irq_bits = 32; + ext_irq_count = 4; + ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6338; + break; + case BCM6345_CPU_ID: + irq_stat_addr[0] += PERF_IRQSTAT_6345_REG; + irq_mask_addr[0] += PERF_IRQMASK_6345_REG; + 
irq_stat_addr[1] = 0; + irq_mask_addr[1] = 0; + irq_bits = 32; + ext_irq_count = 4; + ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6345; + break; + case BCM6348_CPU_ID: + irq_stat_addr[0] += PERF_IRQSTAT_6348_REG; + irq_mask_addr[0] += PERF_IRQMASK_6348_REG; + irq_stat_addr[1] = 0; + irq_mask_addr[1] = 0; + irq_bits = 32; + ext_irq_count = 4; + ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348; + break; + case BCM6358_CPU_ID: + irq_stat_addr[0] += PERF_IRQSTAT_6358_REG(0); + irq_mask_addr[0] += PERF_IRQMASK_6358_REG(0); + irq_stat_addr[1] += PERF_IRQSTAT_6358_REG(1); + irq_mask_addr[1] += PERF_IRQMASK_6358_REG(1); + irq_bits = 32; + ext_irq_count = 4; + is_ext_irq_cascaded = 1; + ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE; + ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE; + ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358; + break; + case BCM6362_CPU_ID: + irq_stat_addr[0] += PERF_IRQSTAT_6362_REG(0); + irq_mask_addr[0] += PERF_IRQMASK_6362_REG(0); + irq_stat_addr[1] += PERF_IRQSTAT_6362_REG(1); + irq_mask_addr[1] += PERF_IRQMASK_6362_REG(1); + irq_bits = 64; + ext_irq_count = 4; + is_ext_irq_cascaded = 1; + ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE; + ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE; + ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362; + break; + case BCM6368_CPU_ID: + irq_stat_addr[0] += PERF_IRQSTAT_6368_REG(0); + irq_mask_addr[0] += PERF_IRQMASK_6368_REG(0); + irq_stat_addr[1] += PERF_IRQSTAT_6368_REG(1); + irq_mask_addr[1] += PERF_IRQMASK_6368_REG(1); + irq_bits = 64; + ext_irq_count = 6; + is_ext_irq_cascaded = 1; + ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE; + ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE; + ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368; + ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368; + break; + default: + BUG(); + } + + if (irq_bits == 32) { + dispatch_internal = __dispatch_internal_32; + internal_irq_mask = __internal_irq_mask_32; + internal_irq_unmask = __internal_irq_unmask_32; + } else { + dispatch_internal = __dispatch_internal_64; + internal_irq_mask = __internal_irq_mask_64; + internal_irq_unmask = __internal_irq_unmask_64; + } +} + void __init arch_init_irq(void) { int i; @@ -580,4 +550,14 @@ void __init arch_init_irq(void) } setup_irq(MIPS_CPU_IRQ_BASE + 2, &cpu_ip2_cascade_action); +#ifdef CONFIG_SMP + if (is_ext_irq_cascaded) { + setup_irq(MIPS_CPU_IRQ_BASE + 3, &cpu_ip3_cascade_action); + bcm63xx_internal_irq_chip.irq_set_affinity = + bcm63xx_internal_set_affinity; + + cpumask_clear(irq_default_affinity); + cpumask_set_cpu(smp_processor_id(), irq_default_affinity); + } +#endif } diff --git a/arch/mips/bcm63xx/reset.c b/arch/mips/bcm63xx/reset.c index acbeb1fe7c57..d1fe51edf5e6 100644 --- a/arch/mips/bcm63xx/reset.c +++ b/arch/mips/bcm63xx/reset.c @@ -125,8 +125,6 @@ #define BCM6368_RESET_PCIE 0 #define BCM6368_RESET_PCIE_EXT 0 -#ifdef BCMCPU_RUNTIME_DETECT - /* * core reset bits */ @@ -188,64 +186,6 @@ static int __init bcm63xx_reset_bits_init(void) return 0; } -#else - -#ifdef CONFIG_BCM63XX_CPU_3368 -static const u32 bcm63xx_reset_bits[] = { - __GEN_RESET_BITS_TABLE(3368) -}; -#define reset_reg PERF_SOFTRESET_6358_REG -#endif - -#ifdef CONFIG_BCM63XX_CPU_6328 -static const u32 bcm63xx_reset_bits[] = { - __GEN_RESET_BITS_TABLE(6328) -}; -#define reset_reg PERF_SOFTRESET_6328_REG -#endif - -#ifdef CONFIG_BCM63XX_CPU_6338 -static const u32 bcm63xx_reset_bits[] = { - __GEN_RESET_BITS_TABLE(6338) -}; -#define reset_reg PERF_SOFTRESET_REG -#endif - -#ifdef CONFIG_BCM63XX_CPU_6345 -static const u32 bcm63xx_reset_bits[] 
= { }; -#define reset_reg 0 -#endif - -#ifdef CONFIG_BCM63XX_CPU_6348 -static const u32 bcm63xx_reset_bits[] = { - __GEN_RESET_BITS_TABLE(6348) -}; -#define reset_reg PERF_SOFTRESET_REG -#endif - -#ifdef CONFIG_BCM63XX_CPU_6358 -static const u32 bcm63xx_reset_bits[] = { - __GEN_RESET_BITS_TABLE(6358) -}; -#define reset_reg PERF_SOFTRESET_6358_REG -#endif - -#ifdef CONFIG_BCM63XX_CPU_6362 -static const u32 bcm63xx_reset_bits[] = { - __GEN_RESET_BITS_TABLE(6362) -}; -#define reset_reg PERF_SOFTRESET_6362_REG -#endif - -#ifdef CONFIG_BCM63XX_CPU_6368 -static const u32 bcm63xx_reset_bits[] = { - __GEN_RESET_BITS_TABLE(6368) -}; -#define reset_reg PERF_SOFTRESET_6368_REG -#endif - -static int __init bcm63xx_reset_bits_init(void) { return 0; } -#endif static DEFINE_SPINLOCK(reset_mutex); diff --git a/arch/mips/boot/.gitignore b/arch/mips/boot/.gitignore index a73d6e2c4f64..d3962cd5ce0c 100644 --- a/arch/mips/boot/.gitignore +++ b/arch/mips/boot/.gitignore @@ -5,3 +5,4 @@ zImage zImage.tmp calc_vmlinuz_load_addr uImage +*.dtb diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c index c00c4ddf4514..31903cf9709d 100644 --- a/arch/mips/boot/compressed/decompress.c +++ b/arch/mips/boot/compressed/decompress.c @@ -13,6 +13,7 @@ #include <linux/types.h> #include <linux/kernel.h> +#include <linux/string.h> #include <asm/addrspace.h> @@ -67,10 +68,24 @@ void error(char *x) #include "../../../../lib/decompress_unxz.c" #endif +unsigned long __stack_chk_guard; + +void __stack_chk_guard_setup(void) +{ + __stack_chk_guard = 0x000a0dff; +} + +void __stack_chk_fail(void) +{ + error("stack-protector: Kernel stack is corrupted\n"); +} + void decompress_kernel(unsigned long boot_heap_start) { unsigned long zimage_start, zimage_size; + __stack_chk_guard_setup(); + zimage_start = (unsigned long)(&__image_begin); zimage_size = (unsigned long)(&__image_end) - (unsigned long)(&__image_begin); diff --git a/arch/mips/boot/dts/Makefile b/arch/mips/boot/dts/Makefile new file mode 100644 index 000000000000..ca9c90e2cabf --- /dev/null +++ b/arch/mips/boot/dts/Makefile @@ -0,0 +1,20 @@ +dtb-$(CONFIG_CAVIUM_OCTEON_SOC) += octeon_3xxx.dtb octeon_68xx.dtb +dtb-$(CONFIG_DT_EASY50712) += easy50712.dtb +dtb-$(CONFIG_DT_XLP_EVP) += xlp_evp.dtb +dtb-$(CONFIG_DT_XLP_SVP) += xlp_svp.dtb +dtb-$(CONFIG_DT_XLP_FVP) += xlp_fvp.dtb +dtb-$(CONFIG_DT_XLP_GVP) += xlp_gvp.dtb +dtb-$(CONFIG_DTB_RT2880_EVAL) += rt2880_eval.dtb +dtb-$(CONFIG_DTB_RT305X_EVAL) += rt3052_eval.dtb +dtb-$(CONFIG_DTB_RT3883_EVAL) += rt3883_eval.dtb +dtb-$(CONFIG_DTB_MT7620A_EVAL) += mt7620a_eval.dtb +dtb-$(CONFIG_MIPS_SEAD3) += sead3.dtb + +obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y)) + +targets += dtbs +targets += $(dtb-y) + +dtbs: $(addprefix $(obj)/, $(dtb-y)) + +clean-files += *.dtb *.dtb.S diff --git a/arch/mips/lantiq/dts/danube.dtsi b/arch/mips/boot/dts/danube.dtsi index d4c59e003708..d4c59e003708 100644 --- a/arch/mips/lantiq/dts/danube.dtsi +++ b/arch/mips/boot/dts/danube.dtsi diff --git a/arch/mips/lantiq/dts/easy50712.dts b/arch/mips/boot/dts/easy50712.dts index 143b8a37b5e4..143b8a37b5e4 100644 --- a/arch/mips/lantiq/dts/easy50712.dts +++ b/arch/mips/boot/dts/easy50712.dts diff --git a/arch/mips/ralink/dts/mt7620a.dtsi b/arch/mips/boot/dts/mt7620a.dtsi index 08bf24fefe9f..08bf24fefe9f 100644 --- a/arch/mips/ralink/dts/mt7620a.dtsi +++ b/arch/mips/boot/dts/mt7620a.dtsi diff --git a/arch/mips/ralink/dts/mt7620a_eval.dts b/arch/mips/boot/dts/mt7620a_eval.dts index 709f58132f5c..709f58132f5c 100644 --- 
a/arch/mips/ralink/dts/mt7620a_eval.dts +++ b/arch/mips/boot/dts/mt7620a_eval.dts diff --git a/arch/mips/cavium-octeon/octeon_3xxx.dts b/arch/mips/boot/dts/octeon_3xxx.dts index fa33115bde33..fa33115bde33 100644 --- a/arch/mips/cavium-octeon/octeon_3xxx.dts +++ b/arch/mips/boot/dts/octeon_3xxx.dts diff --git a/arch/mips/cavium-octeon/octeon_68xx.dts b/arch/mips/boot/dts/octeon_68xx.dts index 79b46fcb0a11..79b46fcb0a11 100644 --- a/arch/mips/cavium-octeon/octeon_68xx.dts +++ b/arch/mips/boot/dts/octeon_68xx.dts diff --git a/arch/mips/ralink/dts/rt2880.dtsi b/arch/mips/boot/dts/rt2880.dtsi index 182afde2f2e1..182afde2f2e1 100644 --- a/arch/mips/ralink/dts/rt2880.dtsi +++ b/arch/mips/boot/dts/rt2880.dtsi diff --git a/arch/mips/ralink/dts/rt2880_eval.dts b/arch/mips/boot/dts/rt2880_eval.dts index 0a685db093d4..0a685db093d4 100644 --- a/arch/mips/ralink/dts/rt2880_eval.dts +++ b/arch/mips/boot/dts/rt2880_eval.dts diff --git a/arch/mips/ralink/dts/rt3050.dtsi b/arch/mips/boot/dts/rt3050.dtsi index e3203d414fee..e3203d414fee 100644 --- a/arch/mips/ralink/dts/rt3050.dtsi +++ b/arch/mips/boot/dts/rt3050.dtsi diff --git a/arch/mips/ralink/dts/rt3052_eval.dts b/arch/mips/boot/dts/rt3052_eval.dts index ec9e9a035541..ec9e9a035541 100644 --- a/arch/mips/ralink/dts/rt3052_eval.dts +++ b/arch/mips/boot/dts/rt3052_eval.dts diff --git a/arch/mips/ralink/dts/rt3883.dtsi b/arch/mips/boot/dts/rt3883.dtsi index 3b131dd0d5ac..3b131dd0d5ac 100644 --- a/arch/mips/ralink/dts/rt3883.dtsi +++ b/arch/mips/boot/dts/rt3883.dtsi diff --git a/arch/mips/ralink/dts/rt3883_eval.dts b/arch/mips/boot/dts/rt3883_eval.dts index e8df21a5d10d..e8df21a5d10d 100644 --- a/arch/mips/ralink/dts/rt3883_eval.dts +++ b/arch/mips/boot/dts/rt3883_eval.dts diff --git a/arch/mips/mti-sead3/sead3.dts b/arch/mips/boot/dts/sead3.dts index e4b317d414f1..e4b317d414f1 100644 --- a/arch/mips/mti-sead3/sead3.dts +++ b/arch/mips/boot/dts/sead3.dts diff --git a/arch/mips/netlogic/dts/xlp_evp.dts b/arch/mips/boot/dts/xlp_evp.dts index 89ad04808c02..89ad04808c02 100644 --- a/arch/mips/netlogic/dts/xlp_evp.dts +++ b/arch/mips/boot/dts/xlp_evp.dts diff --git a/arch/mips/netlogic/dts/xlp_fvp.dts b/arch/mips/boot/dts/xlp_fvp.dts index 63e62b7bd758..63e62b7bd758 100644 --- a/arch/mips/netlogic/dts/xlp_fvp.dts +++ b/arch/mips/boot/dts/xlp_fvp.dts diff --git a/arch/mips/netlogic/dts/xlp_gvp.dts b/arch/mips/boot/dts/xlp_gvp.dts index bb4ecd1d47fc..bb4ecd1d47fc 100644 --- a/arch/mips/netlogic/dts/xlp_gvp.dts +++ b/arch/mips/boot/dts/xlp_gvp.dts diff --git a/arch/mips/netlogic/dts/xlp_svp.dts b/arch/mips/boot/dts/xlp_svp.dts index 1ebd00edaacc..1ebd00edaacc 100644 --- a/arch/mips/netlogic/dts/xlp_svp.dts +++ b/arch/mips/boot/dts/xlp_svp.dts diff --git a/arch/mips/cavium-octeon/.gitignore b/arch/mips/cavium-octeon/.gitignore deleted file mode 100644 index 39c968605ff6..000000000000 --- a/arch/mips/cavium-octeon/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*.dtb.S -*.dtb diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig index 602866657938..c370426a7322 100644 --- a/arch/mips/cavium-octeon/Kconfig +++ b/arch/mips/cavium-octeon/Kconfig @@ -1,7 +1,7 @@ if CPU_CAVIUM_OCTEON config CAVIUM_CN63XXP1 - bool "Enable CN63XXP1 errata worarounds" + bool "Enable CN63XXP1 errata workarounds" default "n" help The CN63XXP1 chip requires build time workarounds to diff --git a/arch/mips/cavium-octeon/Makefile b/arch/mips/cavium-octeon/Makefile index 4e952043c922..42f5f1a4b40a 100644 --- a/arch/mips/cavium-octeon/Makefile +++ 
b/arch/mips/cavium-octeon/Makefile @@ -20,13 +20,3 @@ obj-y += executive/ obj-$(CONFIG_MTD) += flash_setup.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_OCTEON_ILM) += oct_ilm.o - -DTS_FILES = octeon_3xxx.dts octeon_68xx.dts -DTB_FILES = $(patsubst %.dts, %.dtb, $(DTS_FILES)) - -obj-y += $(patsubst %.dts, %.dtb.o, $(DTS_FILES)) - -# Let's keep the .dtb files around in case we want to look at them. -.SECONDARY: $(addprefix $(obj)/, $(DTB_FILES)) - -clean-files += $(DTB_FILES) $(patsubst %.dtb, %.dtb.S, $(DTB_FILES)) diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c index b764df64be40..5dfef84b9576 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c +++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c @@ -186,6 +186,15 @@ int cvmx_helper_board_get_mii_address(int ipd_port) return 7 - ipd_port; else return -1; + case CVMX_BOARD_TYPE_CUST_DSR1000N: + /* + * Port 2 connects to Broadcom PHY (B5081). Other ports (0-1) + * connect to a switch (BCM53115). + */ + if (ipd_port == 2) + return 8; + else + return -1; } /* Some unknown board. Somebody forgot to update this function... */ @@ -274,6 +283,18 @@ cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port) return result; } break; + case CVMX_BOARD_TYPE_CUST_DSR1000N: + if (ipd_port == 0 || ipd_port == 1) { + /* Ports 0 and 1 connect to a switch (BCM53115). */ + result.s.link_up = 1; + result.s.full_duplex = 1; + result.s.speed = 1000; + return result; + } else { + /* Port 2 uses a Broadcom PHY (B5081). */ + is_broadcom_phy = 1; + } + break; } phy_addr = cvmx_helper_board_get_mii_address(ipd_port); @@ -738,6 +759,7 @@ enum cvmx_helper_board_usb_clock_types __cvmx_helper_board_usb_get_clock_type(vo case CVMX_BOARD_TYPE_LANAI2_G: case CVMX_BOARD_TYPE_NIC10E_66: case CVMX_BOARD_TYPE_UBNT_E100: + case CVMX_BOARD_TYPE_CUST_DSR1000N: return USB_CLOCK_TYPE_CRYSTAL_12; case CVMX_BOARD_TYPE_NIC10E: return USB_CLOCK_TYPE_REF_12; diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c b/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c index 45f18cce31a9..6f9609e63a65 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c +++ b/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c @@ -317,10 +317,14 @@ static int __cvmx_helper_sgmii_hardware_init(int interface, int num_ports) for (index = 0; index < num_ports; index++) { int ipd_port = cvmx_helper_get_ipd_port(interface, index); __cvmx_helper_sgmii_hardware_init_one_time(interface, index); - __cvmx_helper_sgmii_link_set(ipd_port, - __cvmx_helper_sgmii_link_get - (ipd_port)); - + /* Linux kernel driver will call ....link_set with the + * proper link state. In the simulator there is no + * link state polling and hence it is set from + * here. 
+ */ + if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM) + __cvmx_helper_sgmii_link_set(ipd_port, + __cvmx_helper_sgmii_link_get(ipd_port)); } return 0; diff --git a/arch/mips/cavium-octeon/oct_ilm.c b/arch/mips/cavium-octeon/oct_ilm.c index 71b213dbb621..2d68a39f1443 100644 --- a/arch/mips/cavium-octeon/oct_ilm.c +++ b/arch/mips/cavium-octeon/oct_ilm.c @@ -194,8 +194,7 @@ err_irq: static __exit void oct_ilm_module_exit(void) { disable_timer(TIMER_NUM); - if (dir) - debugfs_remove_recursive(dir); + debugfs_remove_recursive(dir); free_irq(OCTEON_IRQ_TIMER0 + TIMER_NUM, 0); } diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index 1b82ac6921e0..741734049675 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c @@ -264,13 +264,13 @@ static void octeon_irq_ciu_enable_local(struct irq_data *data) unsigned long *pen; unsigned long flags; union octeon_ciu_chip_data cd; - raw_spinlock_t *lock = &__get_cpu_var(octeon_irq_ciu_spinlock); + raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock); cd.p = irq_data_get_irq_chip_data(data); raw_spin_lock_irqsave(lock, flags); if (cd.s.line == 0) { - pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror); + pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror); __set_bit(cd.s.bit, pen); /* * Must be visible to octeon_irq_ip{2,3}_ciu() before @@ -279,7 +279,7 @@ static void octeon_irq_ciu_enable_local(struct irq_data *data) wmb(); cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); } else { - pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror); + pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror); __set_bit(cd.s.bit, pen); /* * Must be visible to octeon_irq_ip{2,3}_ciu() before @@ -296,13 +296,13 @@ static void octeon_irq_ciu_disable_local(struct irq_data *data) unsigned long *pen; unsigned long flags; union octeon_ciu_chip_data cd; - raw_spinlock_t *lock = &__get_cpu_var(octeon_irq_ciu_spinlock); + raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock); cd.p = irq_data_get_irq_chip_data(data); raw_spin_lock_irqsave(lock, flags); if (cd.s.line == 0) { - pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror); + pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror); __clear_bit(cd.s.bit, pen); /* * Must be visible to octeon_irq_ip{2,3}_ciu() before @@ -311,7 +311,7 @@ static void octeon_irq_ciu_disable_local(struct irq_data *data) wmb(); cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); } else { - pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror); + pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror); __clear_bit(cd.s.bit, pen); /* * Must be visible to octeon_irq_ip{2,3}_ciu() before @@ -431,11 +431,11 @@ static void octeon_irq_ciu_enable_local_v2(struct irq_data *data) if (cd.s.line == 0) { int index = cvmx_get_core_num() * 2; - set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror)); + set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror)); cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); } else { int index = cvmx_get_core_num() * 2 + 1; - set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror)); + set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror)); cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); } } @@ -450,11 +450,11 @@ static void octeon_irq_ciu_disable_local_v2(struct irq_data *data) if (cd.s.line == 0) { int index = cvmx_get_core_num() * 2; - clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror)); + clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror)); cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); } else { int 
index = cvmx_get_core_num() * 2 + 1; - clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror)); + clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror)); cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); } } @@ -1063,7 +1063,7 @@ static void octeon_irq_ip2_ciu(void) const unsigned long core_id = cvmx_get_core_num(); u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2)); - ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror); + ciu_sum &= __this_cpu_read(octeon_irq_ciu0_en_mirror); if (likely(ciu_sum)) { int bit = fls64(ciu_sum) - 1; int irq = octeon_irq_ciu_to_irq[0][bit]; @@ -1080,7 +1080,7 @@ static void octeon_irq_ip3_ciu(void) { u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1); - ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror); + ciu_sum &= __this_cpu_read(octeon_irq_ciu1_en_mirror); if (likely(ciu_sum)) { int bit = fls64(ciu_sum) - 1; int irq = octeon_irq_ciu_to_irq[1][bit]; @@ -1129,10 +1129,10 @@ static void octeon_irq_init_ciu_percpu(void) int coreid = cvmx_get_core_num(); - __get_cpu_var(octeon_irq_ciu0_en_mirror) = 0; - __get_cpu_var(octeon_irq_ciu1_en_mirror) = 0; + __this_cpu_write(octeon_irq_ciu0_en_mirror, 0); + __this_cpu_write(octeon_irq_ciu1_en_mirror, 0); wmb(); - raw_spin_lock_init(&__get_cpu_var(octeon_irq_ciu_spinlock)); + raw_spin_lock_init(this_cpu_ptr(&octeon_irq_ciu_spinlock)); /* * Disable All CIU Interrupts. The ones we need will be * enabled later. Read the SUM register so we know the write diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index 008e9c8b8eac..38f4c32e2816 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -263,7 +263,6 @@ static uint64_t crashk_size, crashk_base; static int octeon_uart; extern asmlinkage void handle_int(void); -extern asmlinkage void plat_irq_dispatch(void); /** * Return non zero if we are currently running in the Octeon simulator @@ -458,6 +457,18 @@ static void octeon_halt(void) octeon_kill_core(NULL); } +static char __read_mostly octeon_system_type[80]; + +static int __init init_octeon_system_type(void) +{ + snprintf(octeon_system_type, sizeof(octeon_system_type), "%s (%s)", + cvmx_board_type_to_string(octeon_bootinfo->board_type), + octeon_model_get_string(read_c0_prid())); + + return 0; +} +early_initcall(init_octeon_system_type); + /** * Return a string representing the system type * @@ -465,11 +476,7 @@ static void octeon_halt(void) */ const char *octeon_board_type_string(void) { - static char name[80]; - sprintf(name, "%s (%s)", - cvmx_board_type_to_string(octeon_bootinfo->board_type), - octeon_model_get_string(read_c0_prid())); - return name; + return octeon_system_type; } const char *get_system_type(void) diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c index a7b3ae104d8c..ecd903dd1c45 100644 --- a/arch/mips/cavium-octeon/smp.c +++ b/arch/mips/cavium-octeon/smp.c @@ -84,9 +84,14 @@ static void octeon_smp_hotplug_setup(void) #ifdef CONFIG_HOTPLUG_CPU struct linux_app_boot_info *labi; + if (!setup_max_cpus) + return; + labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER); - if (labi->labi_signature != LABI_SIGNATURE) - panic("The bootloader version on this board is incorrect."); + if (labi->labi_signature != LABI_SIGNATURE) { + pr_info("The bootloader on this board does not support HOTPLUG_CPU."); + return; + } octeon_bootloader_entry_addr = labi->InitTLBStart_addr; #endif @@ -129,7 +134,8 @@ static void octeon_smp_setup(void) * will assign CPU numbers for possible cores as well. 
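
The octeon-irq.c hunks above replace the old __get_cpu_var() accessor with this_cpu_ptr(), __this_cpu_read() and __this_cpu_write(). A minimal sketch of that pattern, using a hypothetical per-CPU enable mirror rather than the real Octeon state (callers are assumed to have preemption disabled, as the real code does under its raw spinlock):

	#include <linux/percpu.h>
	#include <linux/bitops.h>

	static DEFINE_PER_CPU(unsigned long, en_mirror);	/* hypothetical mirror */

	static void example_enable_local(int bit)
	{
		/* was: unsigned long *pen = &__get_cpu_var(en_mirror); */
		unsigned long *pen = this_cpu_ptr(&en_mirror);

		__set_bit(bit, pen);
	}

	static unsigned long example_pending(unsigned long sum)
	{
		/* was: sum & __get_cpu_var(en_mirror); */
		return sum & __this_cpu_read(en_mirror);
	}
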
Cores * are always consecutively numberd from 0. */ - for (id = 0; id < num_cores && id < NR_CPUS; id++) { + for (id = 0; setup_max_cpus && octeon_bootloader_entry_addr && + id < num_cores && id < NR_CPUS; id++) { if (!(core_mask & (1 << id))) { set_cpu_possible(cpus, true); __cpu_number_map[id] = cpus; @@ -192,14 +198,6 @@ static void octeon_init_secondary(void) */ void octeon_prepare_cpus(unsigned int max_cpus) { -#ifdef CONFIG_HOTPLUG_CPU - struct linux_app_boot_info *labi; - - labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER); - - if (labi->labi_signature != LABI_SIGNATURE) - panic("The bootloader version on this board is incorrect."); -#endif /* * Only the low order mailbox bits are used for IPIs, leave * the other bits alone. @@ -237,6 +235,9 @@ static int octeon_cpu_disable(void) if (cpu == 0) return -EBUSY; + if (!octeon_bootloader_entry_addr) + return -ENOTSUPP; + set_cpu_online(cpu, false); cpu_clear(cpu, cpu_callin_map); local_irq_disable(); diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig index dace58268ce1..b2476a1c4aaa 100644 --- a/arch/mips/configs/cavium_octeon_defconfig +++ b/arch/mips/configs/cavium_octeon_defconfig @@ -124,7 +124,6 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_DS1307=y CONFIG_STAGING=y CONFIG_OCTEON_ETHERNET=y -# CONFIG_NET_VENDOR_SILICOM is not set # CONFIG_IOMMU_SUPPORT is not set CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y diff --git a/arch/mips/configs/db1xxx_defconfig b/arch/mips/configs/db1xxx_defconfig index a64b30b96a0d..46e8f7676a15 100644 --- a/arch/mips/configs/db1xxx_defconfig +++ b/arch/mips/configs/db1xxx_defconfig @@ -116,7 +116,6 @@ CONFIG_MTD_NAND_PLATFORM=y CONFIG_MTD_SPI_NOR=y CONFIG_EEPROM_AT24=y CONFIG_EEPROM_AT25=y -CONFIG_SCSI_TGT=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_SG=y CONFIG_SCSI_MULTI_LUN=y diff --git a/arch/mips/configs/decstation_defconfig b/arch/mips/configs/decstation_defconfig index 87d0340837aa..ebc011c51e5a 100644 --- a/arch/mips/configs/decstation_defconfig +++ b/arch/mips/configs/decstation_defconfig @@ -45,7 +45,6 @@ CONFIG_VLAN_8021Q=m CONFIG_CONNECTOR=m CONFIG_BLK_DEV_LOOP=m CONFIG_SCSI=y -CONFIG_SCSI_TGT=m CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=m diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig index 8f219dac9598..e24feb0633aa 100644 --- a/arch/mips/configs/gpr_defconfig +++ b/arch/mips/configs/gpr_defconfig @@ -19,6 +19,7 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set CONFIG_PCI=y CONFIG_BINFMT_MISC=m +CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y CONFIG_INET=y diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig index 936ec5a5ed8d..57ed466e00db 100644 --- a/arch/mips/configs/ip22_defconfig +++ b/arch/mips/configs/ip22_defconfig @@ -219,7 +219,6 @@ CONFIG_ATA_OVER_ETH=m # CONFIG_MISC_DEVICES is not set CONFIG_RAID_ATTRS=m CONFIG_SCSI=y -CONFIG_SCSI_TGT=m CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=y diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig index 0e36abcd39cc..48e16d98b2cc 100644 --- a/arch/mips/configs/ip27_defconfig +++ b/arch/mips/configs/ip27_defconfig @@ -28,6 +28,7 @@ CONFIG_MIPS32_COMPAT=y CONFIG_MIPS32_O32=y CONFIG_MIPS32_N32=y CONFIG_PM=y +CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y CONFIG_XFRM_USER=m @@ -106,7 +107,6 @@ CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m # CONFIG_MISC_DEVICES is not set CONFIG_SCSI=y -CONFIG_SCSI_TGT=m CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=m diff 
--git a/arch/mips/configs/ip32_defconfig b/arch/mips/configs/ip32_defconfig index 7bbd52194fc3..70ffe9b55829 100644 --- a/arch/mips/configs/ip32_defconfig +++ b/arch/mips/configs/ip32_defconfig @@ -54,7 +54,6 @@ CONFIG_BLK_DEV_NBD=m CONFIG_SGI_IOC4=y CONFIG_RAID_ATTRS=y CONFIG_SCSI=y -CONFIG_SCSI_TGT=y CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SR=y CONFIG_BLK_DEV_SR_VENDOR=y diff --git a/arch/mips/configs/jazz_defconfig b/arch/mips/configs/jazz_defconfig index 0315ee37a20b..4f37a5985459 100644 --- a/arch/mips/configs/jazz_defconfig +++ b/arch/mips/configs/jazz_defconfig @@ -18,6 +18,7 @@ CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_BINFMT_MISC=m CONFIG_PM=y +CONFIG_NET=y CONFIG_PACKET=m CONFIG_UNIX=y CONFIG_NET_KEY=m @@ -208,7 +209,6 @@ CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y -CONFIG_SCSI_TGT=m CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=m diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig index ea1761f0f917..1c6191ebd583 100644 --- a/arch/mips/configs/loongson3_defconfig +++ b/arch/mips/configs/loongson3_defconfig @@ -1,6 +1,6 @@ CONFIG_MACH_LOONGSON=y CONFIG_SWIOTLB=y -CONFIG_LEMOTE_MACH3A=y +CONFIG_LOONGSON_MACH3X=y CONFIG_CPU_LOONGSON3=y CONFIG_64BIT=y CONFIG_PAGE_SIZE_16KB=y @@ -59,6 +59,7 @@ CONFIG_MIPS32_COMPAT=y CONFIG_MIPS32_O32=y CONFIG_MIPS32_N32=y CONFIG_PM_RUNTIME=y +CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y CONFIG_XFRM_USER=y @@ -120,7 +121,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=8192 CONFIG_RAID_ATTRS=m -CONFIG_SCSI_TGT=y CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SR=y CONFIG_CHR_DEV_SG=y diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig index b745b6a9f322..f57b96dcf7df 100644 --- a/arch/mips/configs/malta_defconfig +++ b/arch/mips/configs/malta_defconfig @@ -19,6 +19,7 @@ CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y CONFIG_PCI=y +CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y CONFIG_XFRM_USER=m @@ -253,7 +254,6 @@ CONFIG_BLK_DEV_IT8213=m CONFIG_BLK_DEV_TC86C001=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=m -CONFIG_SCSI_TGT=m CONFIG_BLK_DEV_SD=m CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig index 4f7d952d8517..d41742dd26c8 100644 --- a/arch/mips/configs/malta_kvm_defconfig +++ b/arch/mips/configs/malta_kvm_defconfig @@ -20,6 +20,7 @@ CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y CONFIG_PCI=y +CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y CONFIG_XFRM_USER=m @@ -254,7 +255,6 @@ CONFIG_BLK_DEV_IT8213=m CONFIG_BLK_DEV_TC86C001=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=m -CONFIG_SCSI_TGT=m CONFIG_BLK_DEV_SD=m CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig index e36681c24ddc..a7806e83ea0f 100644 --- a/arch/mips/configs/malta_kvm_guest_defconfig +++ b/arch/mips/configs/malta_kvm_guest_defconfig @@ -19,6 +19,7 @@ CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y CONFIG_PCI=y +CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y CONFIG_XFRM_USER=m @@ -254,7 +255,6 @@ CONFIG_BLK_DEV_IT8213=m CONFIG_BLK_DEV_TC86C001=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=m -CONFIG_SCSI_TGT=m CONFIG_BLK_DEV_SD=m CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m diff --git a/arch/mips/configs/markeins_defconfig b/arch/mips/configs/markeins_defconfig index 4c2c0c4b9bb1..0f08e4623ee4 100644 --- a/arch/mips/configs/markeins_defconfig +++ 
b/arch/mips/configs/markeins_defconfig @@ -134,7 +134,6 @@ CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_SGI_IOC4=m CONFIG_SCSI=m -CONFIG_SCSI_TGT=m # CONFIG_SCSI_PROC_FS is not set CONFIG_BLK_DEV_SD=m CONFIG_CHR_DEV_SG=m diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig index d269a5326a30..9b6926d6bb32 100644 --- a/arch/mips/configs/mtx1_defconfig +++ b/arch/mips/configs/mtx1_defconfig @@ -27,6 +27,7 @@ CONFIG_PD6729=m CONFIG_I82092=m CONFIG_BINFMT_MISC=m CONFIG_PM=y +CONFIG_NET=y CONFIG_PACKET=m CONFIG_UNIX=y CONFIG_XFRM_USER=m diff --git a/arch/mips/configs/nlm_xlp_defconfig b/arch/mips/configs/nlm_xlp_defconfig index 5468b1c7b2a5..70509a48df82 100644 --- a/arch/mips/configs/nlm_xlp_defconfig +++ b/arch/mips/configs/nlm_xlp_defconfig @@ -63,6 +63,7 @@ CONFIG_MIPS32_O32=y CONFIG_MIPS32_N32=y CONFIG_PM_RUNTIME=y CONFIG_PM_DEBUG=y +CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y CONFIG_XFRM_USER=m @@ -334,7 +335,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=65536 CONFIG_CDROM_PKTCDVD=y CONFIG_RAID_ATTRS=m -CONFIG_SCSI_TGT=m CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -346,10 +346,8 @@ CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_SPI_ATTRS=m -CONFIG_SCSI_FC_TGT_ATTRS=y CONFIG_SCSI_SAS_LIBSAS=m CONFIG_SCSI_SRP_ATTRS=m -CONFIG_SCSI_SRP_TGT_ATTRS=y CONFIG_ISCSI_TCP=m CONFIG_LIBFCOE=m CONFIG_SCSI_DEBUG=m diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig index 44b473420d51..82207e8079f3 100644 --- a/arch/mips/configs/nlm_xlr_defconfig +++ b/arch/mips/configs/nlm_xlr_defconfig @@ -43,6 +43,7 @@ CONFIG_PCI_DEBUG=y CONFIG_BINFMT_MISC=m CONFIG_PM_RUNTIME=y CONFIG_PM_DEBUG=y +CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y CONFIG_XFRM_USER=m @@ -311,7 +312,6 @@ CONFIG_CDROM_PKTCDVD=y CONFIG_MISC_DEVICES=y CONFIG_RAID_ATTRS=m CONFIG_SCSI=y -CONFIG_SCSI_TGT=m CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -323,10 +323,8 @@ CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_SPI_ATTRS=m -CONFIG_SCSI_FC_TGT_ATTRS=y CONFIG_SCSI_SAS_LIBSAS=m CONFIG_SCSI_SRP_ATTRS=m -CONFIG_SCSI_SRP_TGT_ATTRS=y CONFIG_ISCSI_TCP=m CONFIG_LIBFCOE=m CONFIG_SCSI_DEBUG=m diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig index 73e7bf49461c..db029f4ff759 100644 --- a/arch/mips/configs/rm200_defconfig +++ b/arch/mips/configs/rm200_defconfig @@ -20,6 +20,7 @@ CONFIG_MODVERSIONS=y CONFIG_PCI=y CONFIG_BINFMT_MISC=m CONFIG_PM=y +CONFIG_NET=y CONFIG_PACKET=m CONFIG_UNIX=y CONFIG_NET_KEY=m @@ -221,7 +222,6 @@ CONFIG_ATA_OVER_ETH=m CONFIG_SGI_IOC4=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y -CONFIG_SCSI_TGT=m CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=m diff --git a/arch/mips/configs/sead3_defconfig b/arch/mips/configs/sead3_defconfig index 0abe681c11a0..dae9354b6256 100644 --- a/arch/mips/configs/sead3_defconfig +++ b/arch/mips/configs/sead3_defconfig @@ -31,8 +31,8 @@ CONFIG_IP_PNP_BOOTP=y # CONFIG_IPV6 is not set # CONFIG_WIRELESS is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y CONFIG_MTD=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_INTELEXT=y diff --git a/arch/mips/configs/sead3micro_defconfig b/arch/mips/configs/sead3micro_defconfig index 2a0da5bf4b64..cd91a775c74e 100644 --- a/arch/mips/configs/sead3micro_defconfig +++ b/arch/mips/configs/sead3micro_defconfig @@ -32,8 +32,8 @@ CONFIG_IP_PNP_BOOTP=y # CONFIG_IPV6 is not set # CONFIG_WIRELESS is not set 
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y CONFIG_MTD=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_INTELEXT=y diff --git a/arch/mips/configs/tb0226_defconfig b/arch/mips/configs/tb0226_defconfig index d99b1905a1ba..9327b3af32cd 100644 --- a/arch/mips/configs/tb0226_defconfig +++ b/arch/mips/configs/tb0226_defconfig @@ -39,7 +39,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_XIP=y # CONFIG_MISC_DEVICES is not set CONFIG_SCSI=y -CONFIG_SCSI_TGT=m CONFIG_BLK_DEV_SD=y CONFIG_SCSI_MULTI_LUN=y CONFIG_SCSI_SCAN_ASYNC=y diff --git a/arch/mips/configs/tb0287_defconfig b/arch/mips/configs/tb0287_defconfig index c415c4f0e5c2..a967289b7970 100644 --- a/arch/mips/configs/tb0287_defconfig +++ b/arch/mips/configs/tb0287_defconfig @@ -44,7 +44,6 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_XIP=y # CONFIG_MISC_DEVICES is not set -CONFIG_SCSI_TGT=m CONFIG_BLK_DEV_SD=y CONFIG_SCSI_SCAN_ASYNC=y # CONFIG_SCSI_LOWLEVEL is not set diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index 05439187891d..72e1cf1cab00 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ -1,8 +1,10 @@ # MIPS headers generic-y += cputime.h generic-y += current.h +generic-y += dma-contiguous.h generic-y += emergency-restart.h generic-y += hash.h +generic-y += irq_work.h generic-y += local64.h generic-y += mcs_spinlock.h generic-y += mutex.h @@ -15,4 +17,5 @@ generic-y += segment.h generic-y += serial.h generic-y += trace_clock.h generic-y += ucontext.h +generic-y += user.h generic-y += xor.h diff --git a/arch/mips/include/asm/abi.h b/arch/mips/include/asm/abi.h index 909bb6984866..7186bb51b89b 100644 --- a/arch/mips/include/asm/abi.h +++ b/arch/mips/include/asm/abi.h @@ -13,13 +13,11 @@ #include <asm/siginfo.h> struct mips_abi { - int (* const setup_frame)(void *sig_return, struct k_sigaction *ka, - struct pt_regs *regs, int signr, - sigset_t *set); + int (* const setup_frame)(void *sig_return, struct ksignal *ksig, + struct pt_regs *regs, sigset_t *set); const unsigned long signal_return_offset; - int (* const setup_rt_frame)(void *sig_return, struct k_sigaction *ka, - struct pt_regs *regs, int signr, - sigset_t *set, siginfo_t *info); + int (* const setup_rt_frame)(void *sig_return, struct ksignal *ksig, + struct pt_regs *regs, sigset_t *set); const unsigned long rt_signal_return_offset; const unsigned long restart; }; diff --git a/arch/mips/include/asm/addrspace.h b/arch/mips/include/asm/addrspace.h index 3f745459fdb5..3b0e51d5a613 100644 --- a/arch/mips/include/asm/addrspace.h +++ b/arch/mips/include/asm/addrspace.h @@ -52,7 +52,7 @@ */ #define CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff) #define XPHYSADDR(a) ((_ACAST64_(a)) & \ - _CONST64_(0x000000ffffffffff)) + _CONST64_(0x0000ffffffffffff)) #ifdef CONFIG_64BIT diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h index 935543f14538..cd9a98bc8f60 100644 --- a/arch/mips/include/asm/asmmacro.h +++ b/arch/mips/include/asm/asmmacro.h @@ -10,6 +10,7 @@ #include <asm/hazards.h> #include <asm/asm-offsets.h> +#include <asm/msa.h> #ifdef CONFIG_32BIT #include <asm/asmmacro-32.h> @@ -378,9 +379,19 @@ st_d 29, THREAD_FPR29, \thread st_d 30, THREAD_FPR30, \thread st_d 31, THREAD_FPR31, \thread + .set push + .set noat + cfcmsa $1, MSA_CSR + sw $1, THREAD_MSA_CSR(\thread) + .set pop .endm .macro msa_restore_all thread + .set push + .set noat + lw $1, THREAD_MSA_CSR(\thread) + ctcmsa MSA_CSR, $1 + .set pop ld_d 0, THREAD_FPR0, \thread ld_d 1, THREAD_FPR1, \thread ld_d 
2, THREAD_FPR2, \thread @@ -415,4 +426,24 @@ ld_d 31, THREAD_FPR31, \thread .endm + .macro msa_init_upper wd +#ifdef CONFIG_64BIT + insert_d \wd, 1 +#else + insert_w \wd, 2 + insert_w \wd, 3 +#endif + .if 31-\wd + msa_init_upper (\wd+1) + .endif + .endm + + .macro msa_init_all_upper + .set push + .set noat + not $1, zero + msa_init_upper 0 + .set pop + .endm + #endif /* _ASM_ASMMACRO_H */ diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index 37b2befe651a..6dd6bfc607e9 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -29,7 +29,7 @@ * * Atomically reads the value of @v. */ -#define atomic_read(v) (*(volatile int *)&(v)->counter) +#define atomic_read(v) ACCESS_ONCE((v)->counter) /* * atomic_set - set atomic variable @@ -40,195 +40,103 @@ */ #define atomic_set(v, i) ((v)->counter = (i)) -/* - * atomic_add - add integer to atomic variable - * @i: integer value to add - * @v: pointer of type atomic_t - * - * Atomically adds @i to @v. - */ -static __inline__ void atomic_add(int i, atomic_t * v) -{ - if (kernel_uses_llsc && R10000_LLSC_WAR) { - int temp; - - __asm__ __volatile__( - " .set arch=r4000 \n" - "1: ll %0, %1 # atomic_add \n" - " addu %0, %2 \n" - " sc %0, %1 \n" - " beqzl %0, 1b \n" - " .set mips0 \n" - : "=&r" (temp), "+m" (v->counter) - : "Ir" (i)); - } else if (kernel_uses_llsc) { - int temp; - - do { - __asm__ __volatile__( - " .set arch=r4000 \n" - " ll %0, %1 # atomic_add \n" - " addu %0, %2 \n" - " sc %0, %1 \n" - " .set mips0 \n" - : "=&r" (temp), "+m" (v->counter) - : "Ir" (i)); - } while (unlikely(!temp)); - } else { - unsigned long flags; - - raw_local_irq_save(flags); - v->counter += i; - raw_local_irq_restore(flags); - } -} - -/* - * atomic_sub - subtract the atomic variable - * @i: integer value to subtract - * @v: pointer of type atomic_t - * - * Atomically subtracts @i from @v. 
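
The atomic_read()/atomic64_read() hunks above switch from a cast through a volatile pointer to ACCESS_ONCE(). A small illustrative use of the same primitive, assuming a caller polling a counter (not code from this patch):

	#include <linux/atomic.h>
	#include <linux/compiler.h>
	#include <asm/processor.h>	/* cpu_relax() */

	static void wait_for_nonzero(atomic_t *v)
	{
		/*
		 * ACCESS_ONCE() forces a fresh load on every iteration; a
		 * plain read could legally be hoisted out of the loop by
		 * the compiler.
		 */
		while (!ACCESS_ONCE(v->counter))
			cpu_relax();
	}
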
- */ -static __inline__ void atomic_sub(int i, atomic_t * v) -{ - if (kernel_uses_llsc && R10000_LLSC_WAR) { - int temp; - - __asm__ __volatile__( - " .set arch=r4000 \n" - "1: ll %0, %1 # atomic_sub \n" - " subu %0, %2 \n" - " sc %0, %1 \n" - " beqzl %0, 1b \n" - " .set mips0 \n" - : "=&r" (temp), "+m" (v->counter) - : "Ir" (i)); - } else if (kernel_uses_llsc) { - int temp; - - do { - __asm__ __volatile__( - " .set arch=r4000 \n" - " ll %0, %1 # atomic_sub \n" - " subu %0, %2 \n" - " sc %0, %1 \n" - " .set mips0 \n" - : "=&r" (temp), "+m" (v->counter) - : "Ir" (i)); - } while (unlikely(!temp)); - } else { - unsigned long flags; - - raw_local_irq_save(flags); - v->counter -= i; - raw_local_irq_restore(flags); - } -} - -/* - * Same as above, but return the result value - */ -static __inline__ int atomic_add_return(int i, atomic_t * v) -{ - int result; - - smp_mb__before_llsc(); - - if (kernel_uses_llsc && R10000_LLSC_WAR) { - int temp; - - __asm__ __volatile__( - " .set arch=r4000 \n" - "1: ll %1, %2 # atomic_add_return \n" - " addu %0, %1, %3 \n" - " sc %0, %2 \n" - " beqzl %0, 1b \n" - " addu %0, %1, %3 \n" - " .set mips0 \n" - : "=&r" (result), "=&r" (temp), "+m" (v->counter) - : "Ir" (i)); - } else if (kernel_uses_llsc) { - int temp; - - do { - __asm__ __volatile__( - " .set arch=r4000 \n" - " ll %1, %2 # atomic_add_return \n" - " addu %0, %1, %3 \n" - " sc %0, %2 \n" - " .set mips0 \n" - : "=&r" (result), "=&r" (temp), "+m" (v->counter) - : "Ir" (i)); - } while (unlikely(!result)); - - result = temp + i; - } else { - unsigned long flags; - - raw_local_irq_save(flags); - result = v->counter; - result += i; - v->counter = result; - raw_local_irq_restore(flags); - } - - smp_llsc_mb(); - - return result; +#define ATOMIC_OP(op, c_op, asm_op) \ +static __inline__ void atomic_##op(int i, atomic_t * v) \ +{ \ + if (kernel_uses_llsc && R10000_LLSC_WAR) { \ + int temp; \ + \ + __asm__ __volatile__( \ + " .set arch=r4000 \n" \ + "1: ll %0, %1 # atomic_" #op " \n" \ + " " #asm_op " %0, %2 \n" \ + " sc %0, %1 \n" \ + " beqzl %0, 1b \n" \ + " .set mips0 \n" \ + : "=&r" (temp), "+m" (v->counter) \ + : "Ir" (i)); \ + } else if (kernel_uses_llsc) { \ + int temp; \ + \ + do { \ + __asm__ __volatile__( \ + " .set arch=r4000 \n" \ + " ll %0, %1 # atomic_" #op "\n" \ + " " #asm_op " %0, %2 \n" \ + " sc %0, %1 \n" \ + " .set mips0 \n" \ + : "=&r" (temp), "+m" (v->counter) \ + : "Ir" (i)); \ + } while (unlikely(!temp)); \ + } else { \ + unsigned long flags; \ + \ + raw_local_irq_save(flags); \ + v->counter c_op i; \ + raw_local_irq_restore(flags); \ + } \ +} \ + +#define ATOMIC_OP_RETURN(op, c_op, asm_op) \ +static __inline__ int atomic_##op##_return(int i, atomic_t * v) \ +{ \ + int result; \ + \ + smp_mb__before_llsc(); \ + \ + if (kernel_uses_llsc && R10000_LLSC_WAR) { \ + int temp; \ + \ + __asm__ __volatile__( \ + " .set arch=r4000 \n" \ + "1: ll %1, %2 # atomic_" #op "_return \n" \ + " " #asm_op " %0, %1, %3 \n" \ + " sc %0, %2 \n" \ + " beqzl %0, 1b \n" \ + " " #asm_op " %0, %1, %3 \n" \ + " .set mips0 \n" \ + : "=&r" (result), "=&r" (temp), "+m" (v->counter) \ + : "Ir" (i)); \ + } else if (kernel_uses_llsc) { \ + int temp; \ + \ + do { \ + __asm__ __volatile__( \ + " .set arch=r4000 \n" \ + " ll %1, %2 # atomic_" #op "_return \n" \ + " " #asm_op " %0, %1, %3 \n" \ + " sc %0, %2 \n" \ + " .set mips0 \n" \ + : "=&r" (result), "=&r" (temp), "+m" (v->counter) \ + : "Ir" (i)); \ + } while (unlikely(!result)); \ + \ + result = temp; result c_op i; \ + } else { \ + unsigned long flags; \ + \ + 
raw_local_irq_save(flags); \ + result = v->counter; \ + result c_op i; \ + v->counter = result; \ + raw_local_irq_restore(flags); \ + } \ + \ + smp_llsc_mb(); \ + \ + return result; \ } -static __inline__ int atomic_sub_return(int i, atomic_t * v) -{ - int result; +#define ATOMIC_OPS(op, c_op, asm_op) \ + ATOMIC_OP(op, c_op, asm_op) \ + ATOMIC_OP_RETURN(op, c_op, asm_op) - smp_mb__before_llsc(); +ATOMIC_OPS(add, +=, addu) +ATOMIC_OPS(sub, -=, subu) - if (kernel_uses_llsc && R10000_LLSC_WAR) { - int temp; - - __asm__ __volatile__( - " .set arch=r4000 \n" - "1: ll %1, %2 # atomic_sub_return \n" - " subu %0, %1, %3 \n" - " sc %0, %2 \n" - " beqzl %0, 1b \n" - " subu %0, %1, %3 \n" - " .set mips0 \n" - : "=&r" (result), "=&r" (temp), "=m" (v->counter) - : "Ir" (i), "m" (v->counter) - : "memory"); - - result = temp - i; - } else if (kernel_uses_llsc) { - int temp; - - do { - __asm__ __volatile__( - " .set arch=r4000 \n" - " ll %1, %2 # atomic_sub_return \n" - " subu %0, %1, %3 \n" - " sc %0, %2 \n" - " .set mips0 \n" - : "=&r" (result), "=&r" (temp), "+m" (v->counter) - : "Ir" (i)); - } while (unlikely(!result)); - - result = temp - i; - } else { - unsigned long flags; - - raw_local_irq_save(flags); - result = v->counter; - result -= i; - v->counter = result; - raw_local_irq_restore(flags); - } - - smp_llsc_mb(); - - return result; -} +#undef ATOMIC_OPS +#undef ATOMIC_OP_RETURN +#undef ATOMIC_OP /* * atomic_sub_if_positive - conditionally subtract integer from atomic variable @@ -398,7 +306,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) * @v: pointer of type atomic64_t * */ -#define atomic64_read(v) (*(volatile long *)&(v)->counter) +#define atomic64_read(v) ACCESS_ONCE((v)->counter) /* * atomic64_set - set atomic variable @@ -407,195 +315,104 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) */ #define atomic64_set(v, i) ((v)->counter = (i)) -/* - * atomic64_add - add integer to atomic variable - * @i: integer value to add - * @v: pointer of type atomic64_t - * - * Atomically adds @i to @v. 
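
The new ATOMIC_OP()/ATOMIC_OP_RETURN() macros stamp out the add and sub variants from a single template instead of maintaining four near-identical functions. A simplified sketch of the same generation technique, showing only the interrupt-disable fallback path and using simple_ names to avoid clashing with the real API:

	#define SIMPLE_ATOMIC_OP(op, c_op)					\
	static inline void simple_atomic_##op(int i, atomic_t *v)		\
	{									\
		unsigned long flags;						\
										\
		raw_local_irq_save(flags);					\
		v->counter c_op i;						\
		raw_local_irq_restore(flags);					\
	}

	SIMPLE_ATOMIC_OP(add, +=)	/* expands to simple_atomic_add() */
	SIMPLE_ATOMIC_OP(sub, -=)	/* expands to simple_atomic_sub() */

	#undef SIMPLE_ATOMIC_OP
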
- */ -static __inline__ void atomic64_add(long i, atomic64_t * v) -{ - if (kernel_uses_llsc && R10000_LLSC_WAR) { - long temp; - - __asm__ __volatile__( - " .set arch=r4000 \n" - "1: lld %0, %1 # atomic64_add \n" - " daddu %0, %2 \n" - " scd %0, %1 \n" - " beqzl %0, 1b \n" - " .set mips0 \n" - : "=&r" (temp), "+m" (v->counter) - : "Ir" (i)); - } else if (kernel_uses_llsc) { - long temp; - - do { - __asm__ __volatile__( - " .set arch=r4000 \n" - " lld %0, %1 # atomic64_add \n" - " daddu %0, %2 \n" - " scd %0, %1 \n" - " .set mips0 \n" - : "=&r" (temp), "+m" (v->counter) - : "Ir" (i)); - } while (unlikely(!temp)); - } else { - unsigned long flags; - - raw_local_irq_save(flags); - v->counter += i; - raw_local_irq_restore(flags); - } +#define ATOMIC64_OP(op, c_op, asm_op) \ +static __inline__ void atomic64_##op(long i, atomic64_t * v) \ +{ \ + if (kernel_uses_llsc && R10000_LLSC_WAR) { \ + long temp; \ + \ + __asm__ __volatile__( \ + " .set arch=r4000 \n" \ + "1: lld %0, %1 # atomic64_" #op " \n" \ + " " #asm_op " %0, %2 \n" \ + " scd %0, %1 \n" \ + " beqzl %0, 1b \n" \ + " .set mips0 \n" \ + : "=&r" (temp), "+m" (v->counter) \ + : "Ir" (i)); \ + } else if (kernel_uses_llsc) { \ + long temp; \ + \ + do { \ + __asm__ __volatile__( \ + " .set arch=r4000 \n" \ + " lld %0, %1 # atomic64_" #op "\n" \ + " " #asm_op " %0, %2 \n" \ + " scd %0, %1 \n" \ + " .set mips0 \n" \ + : "=&r" (temp), "+m" (v->counter) \ + : "Ir" (i)); \ + } while (unlikely(!temp)); \ + } else { \ + unsigned long flags; \ + \ + raw_local_irq_save(flags); \ + v->counter c_op i; \ + raw_local_irq_restore(flags); \ + } \ +} \ + +#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \ +static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \ +{ \ + long result; \ + \ + smp_mb__before_llsc(); \ + \ + if (kernel_uses_llsc && R10000_LLSC_WAR) { \ + long temp; \ + \ + __asm__ __volatile__( \ + " .set arch=r4000 \n" \ + "1: lld %1, %2 # atomic64_" #op "_return\n" \ + " " #asm_op " %0, %1, %3 \n" \ + " scd %0, %2 \n" \ + " beqzl %0, 1b \n" \ + " " #asm_op " %0, %1, %3 \n" \ + " .set mips0 \n" \ + : "=&r" (result), "=&r" (temp), "+m" (v->counter) \ + : "Ir" (i)); \ + } else if (kernel_uses_llsc) { \ + long temp; \ + \ + do { \ + __asm__ __volatile__( \ + " .set arch=r4000 \n" \ + " lld %1, %2 # atomic64_" #op "_return\n" \ + " " #asm_op " %0, %1, %3 \n" \ + " scd %0, %2 \n" \ + " .set mips0 \n" \ + : "=&r" (result), "=&r" (temp), "=m" (v->counter) \ + : "Ir" (i), "m" (v->counter) \ + : "memory"); \ + } while (unlikely(!result)); \ + \ + result = temp; result c_op i; \ + } else { \ + unsigned long flags; \ + \ + raw_local_irq_save(flags); \ + result = v->counter; \ + result c_op i; \ + v->counter = result; \ + raw_local_irq_restore(flags); \ + } \ + \ + smp_llsc_mb(); \ + \ + return result; \ } -/* - * atomic64_sub - subtract the atomic variable - * @i: integer value to subtract - * @v: pointer of type atomic64_t - * - * Atomically subtracts @i from @v. 
- */ -static __inline__ void atomic64_sub(long i, atomic64_t * v) -{ - if (kernel_uses_llsc && R10000_LLSC_WAR) { - long temp; - - __asm__ __volatile__( - " .set arch=r4000 \n" - "1: lld %0, %1 # atomic64_sub \n" - " dsubu %0, %2 \n" - " scd %0, %1 \n" - " beqzl %0, 1b \n" - " .set mips0 \n" - : "=&r" (temp), "+m" (v->counter) - : "Ir" (i)); - } else if (kernel_uses_llsc) { - long temp; - - do { - __asm__ __volatile__( - " .set arch=r4000 \n" - " lld %0, %1 # atomic64_sub \n" - " dsubu %0, %2 \n" - " scd %0, %1 \n" - " .set mips0 \n" - : "=&r" (temp), "+m" (v->counter) - : "Ir" (i)); - } while (unlikely(!temp)); - } else { - unsigned long flags; - - raw_local_irq_save(flags); - v->counter -= i; - raw_local_irq_restore(flags); - } -} - -/* - * Same as above, but return the result value - */ -static __inline__ long atomic64_add_return(long i, atomic64_t * v) -{ - long result; +#define ATOMIC64_OPS(op, c_op, asm_op) \ + ATOMIC64_OP(op, c_op, asm_op) \ + ATOMIC64_OP_RETURN(op, c_op, asm_op) - smp_mb__before_llsc(); +ATOMIC64_OPS(add, +=, daddu) +ATOMIC64_OPS(sub, -=, dsubu) - if (kernel_uses_llsc && R10000_LLSC_WAR) { - long temp; - - __asm__ __volatile__( - " .set arch=r4000 \n" - "1: lld %1, %2 # atomic64_add_return \n" - " daddu %0, %1, %3 \n" - " scd %0, %2 \n" - " beqzl %0, 1b \n" - " daddu %0, %1, %3 \n" - " .set mips0 \n" - : "=&r" (result), "=&r" (temp), "+m" (v->counter) - : "Ir" (i)); - } else if (kernel_uses_llsc) { - long temp; - - do { - __asm__ __volatile__( - " .set arch=r4000 \n" - " lld %1, %2 # atomic64_add_return \n" - " daddu %0, %1, %3 \n" - " scd %0, %2 \n" - " .set mips0 \n" - : "=&r" (result), "=&r" (temp), "=m" (v->counter) - : "Ir" (i), "m" (v->counter) - : "memory"); - } while (unlikely(!result)); - - result = temp + i; - } else { - unsigned long flags; - - raw_local_irq_save(flags); - result = v->counter; - result += i; - v->counter = result; - raw_local_irq_restore(flags); - } - - smp_llsc_mb(); - - return result; -} - -static __inline__ long atomic64_sub_return(long i, atomic64_t * v) -{ - long result; - - smp_mb__before_llsc(); - - if (kernel_uses_llsc && R10000_LLSC_WAR) { - long temp; - - __asm__ __volatile__( - " .set arch=r4000 \n" - "1: lld %1, %2 # atomic64_sub_return \n" - " dsubu %0, %1, %3 \n" - " scd %0, %2 \n" - " beqzl %0, 1b \n" - " dsubu %0, %1, %3 \n" - " .set mips0 \n" - : "=&r" (result), "=&r" (temp), "=m" (v->counter) - : "Ir" (i), "m" (v->counter) - : "memory"); - } else if (kernel_uses_llsc) { - long temp; - - do { - __asm__ __volatile__( - " .set arch=r4000 \n" - " lld %1, %2 # atomic64_sub_return \n" - " dsubu %0, %1, %3 \n" - " scd %0, %2 \n" - " .set mips0 \n" - : "=&r" (result), "=&r" (temp), "=m" (v->counter) - : "Ir" (i), "m" (v->counter) - : "memory"); - } while (unlikely(!result)); - - result = temp - i; - } else { - unsigned long flags; - - raw_local_irq_save(flags); - result = v->counter; - result -= i; - v->counter = result; - raw_local_irq_restore(flags); - } - - smp_llsc_mb(); - - return result; -} +#undef ATOMIC64_OPS +#undef ATOMIC64_OP_RETURN +#undef ATOMIC64_OP /* * atomic64_sub_if_positive - conditionally subtract integer from atomic variable diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h index 7c8816f7b7c4..bae6b0fa8ab5 100644 --- a/arch/mips/include/asm/bitops.h +++ b/arch/mips/include/asm/bitops.h @@ -559,7 +559,13 @@ static inline int fls(int x) int r; if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) { - __asm__("clz %0, %1" : "=r" (x) : "r" (x)); + __asm__( + " .set push \n" + 
" .set mips32 \n" + " clz %0, %1 \n" + " .set pop \n" + : "=r" (x) + : "r" (x)); return 32 - x; } diff --git a/arch/mips/include/asm/cop2.h b/arch/mips/include/asm/cop2.h index c1516cc0285f..51f80bd36fcc 100644 --- a/arch/mips/include/asm/cop2.h +++ b/arch/mips/include/asm/cop2.h @@ -16,8 +16,8 @@ extern void octeon_cop2_save(struct octeon_cop2_state *); extern void octeon_cop2_restore(struct octeon_cop2_state *); -#define cop2_save(r) octeon_cop2_save(r) -#define cop2_restore(r) octeon_cop2_restore(r) +#define cop2_save(r) octeon_cop2_save(&(r)->thread.cp2) +#define cop2_restore(r) octeon_cop2_restore(&(r)->thread.cp2) #define cop2_present 1 #define cop2_lazy_restore 1 @@ -26,18 +26,26 @@ extern void octeon_cop2_restore(struct octeon_cop2_state *); extern void nlm_cop2_save(struct nlm_cop2_state *); extern void nlm_cop2_restore(struct nlm_cop2_state *); -#define cop2_save(r) nlm_cop2_save(r) -#define cop2_restore(r) nlm_cop2_restore(r) + +#define cop2_save(r) nlm_cop2_save(&(r)->thread.cp2) +#define cop2_restore(r) nlm_cop2_restore(&(r)->thread.cp2) #define cop2_present 1 #define cop2_lazy_restore 0 +#elif defined(CONFIG_CPU_LOONGSON3) + +#define cop2_present 1 +#define cop2_lazy_restore 1 +#define cop2_save(r) do { (r); } while (0) +#define cop2_restore(r) do { (r); } while (0) + #else #define cop2_present 0 #define cop2_lazy_restore 0 -#define cop2_save(r) -#define cop2_restore(r) +#define cop2_save(r) do { (r); } while (0) +#define cop2_restore(r) do { (r); } while (0) #endif enum cu2_ops { diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index c7d8c997d93e..3325f3eb248c 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -29,6 +29,15 @@ #ifndef cpu_has_eva #define cpu_has_eva (cpu_data[0].options & MIPS_CPU_EVA) #endif +#ifndef cpu_has_htw +#define cpu_has_htw (cpu_data[0].options & MIPS_CPU_HTW) +#endif +#ifndef cpu_has_rixiex +#define cpu_has_rixiex (cpu_data[0].options & MIPS_CPU_RIXIEX) +#endif +#ifndef cpu_has_maar +#define cpu_has_maar (cpu_data[0].options & MIPS_CPU_MAAR) +#endif /* * For the moment we don't consider R6000 and R8000 so we can assume that @@ -222,6 +231,16 @@ #define cpu_has_clo_clz cpu_has_mips_r #endif +/* + * MIPS32 R2, MIPS64 R2, Loongson 3A and Octeon have WSBH. + * MIPS64 R2, Loongson 3A and Octeon have WSBH, DSBH and DSHD. + * This indicates the availability of WSBH and in case of 64 bit CPUs also + * DSBH and DSHD. 
+ */ +#ifndef cpu_has_wsbh +#define cpu_has_wsbh cpu_has_mips_r2 +#endif + #ifndef cpu_has_dsp #define cpu_has_dsp (cpu_data[0].ases & MIPS_ASE_DSP) #endif diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h index 47d5967ce7ef..a6c9ccb33c5c 100644 --- a/arch/mips/include/asm/cpu-info.h +++ b/arch/mips/include/asm/cpu-info.h @@ -44,8 +44,8 @@ struct cpuinfo_mips { /* * Capability and feature descriptor structure for MIPS CPU */ - unsigned long options; unsigned long ases; + unsigned long long options; unsigned int udelay_val; unsigned int processor_id; unsigned int fpu_id; @@ -61,6 +61,7 @@ struct cpuinfo_mips { struct cache_desc scache; /* Secondary cache */ struct cache_desc tcache; /* Tertiary/split secondary cache */ int srsets; /* Shadow register sets */ + int package;/* physical package number */ int core; /* physical core number */ #ifdef CONFIG_64BIT int vmbits; /* Virtual memory size in bits */ @@ -78,6 +79,11 @@ struct cpuinfo_mips { #define NUM_WATCH_REGS 4 u16 watch_reg_masks[NUM_WATCH_REGS]; unsigned int kscratch_mask; /* Usable KScratch mask. */ + /* + * Cache Coherency attribute for write-combine memory writes. + * (shifted by _CACHE_SHIFT) + */ + unsigned int writecombine; } __attribute__((aligned(SMP_CACHE_BYTES))); extern struct cpuinfo_mips cpu_data[]; @@ -115,7 +121,7 @@ struct proc_cpuinfo_notifier_args { #ifdef CONFIG_MIPS_MT_SMP # define cpu_vpe_id(cpuinfo) ((cpuinfo)->vpe_id) #else -# define cpu_vpe_id(cpuinfo) 0 +# define cpu_vpe_id(cpuinfo) ({ (void)cpuinfo; 0; }) #endif #endif /* __ASM_CPU_INFO_H */ diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h index 129d08701e91..dfdc77ed1839 100644 --- a/arch/mips/include/asm/cpu.h +++ b/arch/mips/include/asm/cpu.h @@ -233,6 +233,8 @@ #define PRID_REV_LOONGSON2E 0x0002 #define PRID_REV_LOONGSON2F 0x0003 #define PRID_REV_LOONGSON3A 0x0005 +#define PRID_REV_LOONGSON3B_R1 0x0006 +#define PRID_REV_LOONGSON3B_R2 0x0007 /* * Older processors used to encode processor version and revision in two @@ -335,34 +337,37 @@ enum cpu_type_enum { /* * CPU Option encodings */ -#define MIPS_CPU_TLB 0x00000001 /* CPU has TLB */ -#define MIPS_CPU_4KEX 0x00000002 /* "R4K" exception model */ -#define MIPS_CPU_3K_CACHE 0x00000004 /* R3000-style caches */ -#define MIPS_CPU_4K_CACHE 0x00000008 /* R4000-style caches */ -#define MIPS_CPU_TX39_CACHE 0x00000010 /* TX3900-style caches */ -#define MIPS_CPU_FPU 0x00000020 /* CPU has FPU */ -#define MIPS_CPU_32FPR 0x00000040 /* 32 dbl. prec. FP registers */ -#define MIPS_CPU_COUNTER 0x00000080 /* Cycle count/compare */ -#define MIPS_CPU_WATCH 0x00000100 /* watchpoint registers */ -#define MIPS_CPU_DIVEC 0x00000200 /* dedicated interrupt vector */ -#define MIPS_CPU_VCE 0x00000400 /* virt. coherence conflict possible */ -#define MIPS_CPU_CACHE_CDEX_P 0x00000800 /* Create_Dirty_Exclusive CACHE op */ -#define MIPS_CPU_CACHE_CDEX_S 0x00001000 /* ... same for seconary cache ... 
*/ -#define MIPS_CPU_MCHECK 0x00002000 /* Machine check exception */ -#define MIPS_CPU_EJTAG 0x00004000 /* EJTAG exception */ -#define MIPS_CPU_NOFPUEX 0x00008000 /* no FPU exception */ -#define MIPS_CPU_LLSC 0x00010000 /* CPU has ll/sc instructions */ -#define MIPS_CPU_INCLUSIVE_CACHES 0x00020000 /* P-cache subset enforced */ -#define MIPS_CPU_PREFETCH 0x00040000 /* CPU has usable prefetch */ -#define MIPS_CPU_VINT 0x00080000 /* CPU supports MIPSR2 vectored interrupts */ -#define MIPS_CPU_VEIC 0x00100000 /* CPU supports MIPSR2 external interrupt controller mode */ -#define MIPS_CPU_ULRI 0x00200000 /* CPU has ULRI feature */ -#define MIPS_CPU_PCI 0x00400000 /* CPU has Perf Ctr Int indicator */ -#define MIPS_CPU_RIXI 0x00800000 /* CPU has TLB Read/eXec Inhibit */ -#define MIPS_CPU_MICROMIPS 0x01000000 /* CPU has microMIPS capability */ -#define MIPS_CPU_TLBINV 0x02000000 /* CPU supports TLBINV/F */ -#define MIPS_CPU_SEGMENTS 0x04000000 /* CPU supports Segmentation Control registers */ -#define MIPS_CPU_EVA 0x80000000 /* CPU supports Enhanced Virtual Addressing */ +#define MIPS_CPU_TLB 0x00000001ull /* CPU has TLB */ +#define MIPS_CPU_4KEX 0x00000002ull /* "R4K" exception model */ +#define MIPS_CPU_3K_CACHE 0x00000004ull /* R3000-style caches */ +#define MIPS_CPU_4K_CACHE 0x00000008ull /* R4000-style caches */ +#define MIPS_CPU_TX39_CACHE 0x00000010ull /* TX3900-style caches */ +#define MIPS_CPU_FPU 0x00000020ull /* CPU has FPU */ +#define MIPS_CPU_32FPR 0x00000040ull /* 32 dbl. prec. FP registers */ +#define MIPS_CPU_COUNTER 0x00000080ull /* Cycle count/compare */ +#define MIPS_CPU_WATCH 0x00000100ull /* watchpoint registers */ +#define MIPS_CPU_DIVEC 0x00000200ull /* dedicated interrupt vector */ +#define MIPS_CPU_VCE 0x00000400ull /* virt. coherence conflict possible */ +#define MIPS_CPU_CACHE_CDEX_P 0x00000800ull /* Create_Dirty_Exclusive CACHE op */ +#define MIPS_CPU_CACHE_CDEX_S 0x00001000ull /* ... same for seconary cache ... 
*/ +#define MIPS_CPU_MCHECK 0x00002000ull /* Machine check exception */ +#define MIPS_CPU_EJTAG 0x00004000ull /* EJTAG exception */ +#define MIPS_CPU_NOFPUEX 0x00008000ull /* no FPU exception */ +#define MIPS_CPU_LLSC 0x00010000ull /* CPU has ll/sc instructions */ +#define MIPS_CPU_INCLUSIVE_CACHES 0x00020000ull /* P-cache subset enforced */ +#define MIPS_CPU_PREFETCH 0x00040000ull /* CPU has usable prefetch */ +#define MIPS_CPU_VINT 0x00080000ull /* CPU supports MIPSR2 vectored interrupts */ +#define MIPS_CPU_VEIC 0x00100000ull /* CPU supports MIPSR2 external interrupt controller mode */ +#define MIPS_CPU_ULRI 0x00200000ull /* CPU has ULRI feature */ +#define MIPS_CPU_PCI 0x00400000ull /* CPU has Perf Ctr Int indicator */ +#define MIPS_CPU_RIXI 0x00800000ull /* CPU has TLB Read/eXec Inhibit */ +#define MIPS_CPU_MICROMIPS 0x01000000ull /* CPU has microMIPS capability */ +#define MIPS_CPU_TLBINV 0x02000000ull /* CPU supports TLBINV/F */ +#define MIPS_CPU_SEGMENTS 0x04000000ull /* CPU supports Segmentation Control registers */ +#define MIPS_CPU_EVA 0x80000000ull /* CPU supports Enhanced Virtual Addressing */ +#define MIPS_CPU_HTW 0x100000000ull /* CPU support Hardware Page Table Walker */ +#define MIPS_CPU_RIXIEX 0x200000000ull /* CPU has unique exception codes for {Read, Execute}-Inhibit exceptions */ +#define MIPS_CPU_MAAR 0x400000000ull /* MAAR(I) registers are present */ /* * CPU ASE encodings diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h index d4144056e928..1d38fe0edd2d 100644 --- a/arch/mips/include/asm/elf.h +++ b/arch/mips/include/asm/elf.h @@ -339,23 +339,6 @@ do { \ #endif /* CONFIG_64BIT */ -struct pt_regs; -struct task_struct; - -extern void elf_dump_regs(elf_greg_t *, struct pt_regs *regs); -extern int dump_task_regs(struct task_struct *, elf_gregset_t *); -extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *); - -#ifndef ELF_CORE_COPY_REGS -#define ELF_CORE_COPY_REGS(elf_regs, regs) \ - elf_dump_regs((elf_greg_t *)&(elf_regs), regs); -#endif -#ifndef ELF_CORE_COPY_TASK_REGS -#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs) -#endif -#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) \ - dump_task_fpu(tsk, elf_fpregs) - #define CORE_DUMP_USE_REGSET #define ELF_EXEC_PAGESIZE PAGE_SIZE diff --git a/arch/mips/include/asm/eva.h b/arch/mips/include/asm/eva.h new file mode 100644 index 000000000000..a3d1807f227c --- /dev/null +++ b/arch/mips/include/asm/eva.h @@ -0,0 +1,43 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2014, Imagination Technologies Ltd. + * + * EVA functions for generic code + */ + +#ifndef _ASM_EVA_H +#define _ASM_EVA_H + +#include <kernel-entry-init.h> + +#ifdef __ASSEMBLY__ + +#ifdef CONFIG_EVA + +/* + * EVA early init code + * + * Platforms must define their own 'platform_eva_init' macro in + * their kernel-entry-init.h header. This macro usually does the + * platform specific configuration of the segmentation registers, + * and it is normally called from assembly code. 
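
The asm/cpu.h and cpu-info.h hunks above widen the options field and every MIPS_CPU_* flag to unsigned long long because the flag space has outgrown 32 bits (MIPS_CPU_HTW is bit 32). A tiny illustration, with a hypothetical flag name, of why both the field and the masks must be 64-bit:

	/*
	 * Hypothetical flag occupying bit 32.  If the options field were
	 * still a 32-bit unsigned long, storing this flag would silently
	 * truncate it and the test below would always be false on 32-bit
	 * kernels.
	 */
	#define EXAMPLE_CPU_HTW		0x100000000ull

	static inline int example_cpu_has_htw(unsigned long long options)
	{
		return (options & EXAMPLE_CPU_HTW) != 0;
	}
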
+ * + */ + +.macro eva_init +platform_eva_init +.endm + +#else + +.macro eva_init +.endm + +#endif /* CONFIG_EVA */ + +#endif /* __ASSEMBLY__ */ + +#endif diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h index a939574f8293..4d0aeda68397 100644 --- a/arch/mips/include/asm/fpu.h +++ b/arch/mips/include/asm/fpu.h @@ -21,6 +21,7 @@ #include <asm/hazards.h> #include <asm/processor.h> #include <asm/current.h> +#include <asm/msa.h> #ifdef CONFIG_MIPS_MT_FPAFF #include <asm/mips_mt.h> @@ -141,13 +142,21 @@ static inline int own_fpu(int restore) static inline void lose_fpu(int save) { preempt_disable(); - if (is_fpu_owner()) { + if (is_msa_enabled()) { + if (save) { + save_msa(current); + asm volatile("cfc1 %0, $31" + : "=r"(current->thread.fpu.fcr31)); + } + disable_msa(); + clear_thread_flag(TIF_USEDMSA); + } else if (is_fpu_owner()) { if (save) _save_fp(current); - KSTK_STATUS(current) &= ~ST0_CU1; - clear_thread_flag(TIF_USEDFPU); __disable_fpu(); } + KSTK_STATUS(current) &= ~ST0_CU1; + clear_thread_flag(TIF_USEDFPU); preempt_enable(); } @@ -155,8 +164,6 @@ static inline int init_fpu(void) { int ret = 0; - preempt_disable(); - if (cpu_has_fpu) { ret = __own_fpu(); if (!ret) @@ -164,8 +171,6 @@ static inline int init_fpu(void) } else fpu_emulator_init_fpu(); - preempt_enable(); - return ret; } diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h index 0195745b4b1b..3ee347713307 100644 --- a/arch/mips/include/asm/fpu_emulator.h +++ b/arch/mips/include/asm/fpu_emulator.h @@ -33,17 +33,17 @@ #ifdef CONFIG_DEBUG_FS struct mips_fpu_emulator_stats { - local_t emulated; - local_t loads; - local_t stores; - local_t cp1ops; - local_t cp1xops; - local_t errors; - local_t ieee754_inexact; - local_t ieee754_underflow; - local_t ieee754_overflow; - local_t ieee754_zerodiv; - local_t ieee754_invalidop; + unsigned long emulated; + unsigned long loads; + unsigned long stores; + unsigned long cp1ops; + unsigned long cp1xops; + unsigned long errors; + unsigned long ieee754_inexact; + unsigned long ieee754_underflow; + unsigned long ieee754_overflow; + unsigned long ieee754_zerodiv; + unsigned long ieee754_invalidop; }; DECLARE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats); @@ -51,7 +51,7 @@ DECLARE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats); #define MIPS_FPU_EMU_INC_STATS(M) \ do { \ preempt_disable(); \ - __local_inc(&__get_cpu_var(fpuemustats).M); \ + __this_cpu_inc(fpuemustats.M); \ preempt_enable(); \ } while (0) diff --git a/arch/mips/include/asm/gic.h b/arch/mips/include/asm/gic.h index 10f6a99f92c2..d7699cf7e135 100644 --- a/arch/mips/include/asm/gic.h +++ b/arch/mips/include/asm/gic.h @@ -14,6 +14,8 @@ #include <linux/bitmap.h> #include <linux/threads.h> +#include <irq.h> + #undef GICISBYTELITTLEENDIAN /* Constants */ @@ -22,8 +24,6 @@ #define GIC_TRIG_EDGE 1 #define GIC_TRIG_LEVEL 0 -#define GIC_NUM_INTRS (24 + NR_CPUS * 2) - #define MSK(n) ((1 << (n)) - 1) #define REG32(addr) (*(volatile unsigned int *) (addr)) #define REG(base, offs) REG32((unsigned long)(base) + offs##_##OFS) @@ -43,18 +43,17 @@ #ifdef GICISBYTELITTLEENDIAN #define GICREAD(reg, data) ((data) = (reg), (data) = le32_to_cpu(data)) #define GICWRITE(reg, data) ((reg) = cpu_to_le32(data)) -#define GICBIS(reg, bits) \ - ({unsigned int data; \ - GICREAD(reg, data); \ - data |= bits; \ - GICWRITE(reg, data); \ - }) - #else #define GICREAD(reg, data) ((data) = (reg)) #define GICWRITE(reg, data) ((reg) = (data)) -#define GICBIS(reg, bits) ((reg) |= (bits)) #endif +#define 
GICBIS(reg, mask, bits) \ + do { u32 data; \ + GICREAD(reg, data); \ + data &= ~(mask); \ + data |= ((bits) & (mask)); \ + GICWRITE((reg), data); \ + } while (0) /* GIC Address Space */ @@ -170,13 +169,15 @@ #define GIC_SH_SET_POLARITY_OFS 0x0100 #define GIC_SET_POLARITY(intr, pol) \ GICBIS(GIC_REG_ADDR(SHARED, GIC_SH_SET_POLARITY_OFS + \ - GIC_INTR_OFS(intr)), (pol) << GIC_INTR_BIT(intr)) + GIC_INTR_OFS(intr)), (1 << GIC_INTR_BIT(intr)), \ + (pol) << GIC_INTR_BIT(intr)) /* Triggering : Reset Value is always 0 */ #define GIC_SH_SET_TRIGGER_OFS 0x0180 #define GIC_SET_TRIGGER(intr, trig) \ GICBIS(GIC_REG_ADDR(SHARED, GIC_SH_SET_TRIGGER_OFS + \ - GIC_INTR_OFS(intr)), (trig) << GIC_INTR_BIT(intr)) + GIC_INTR_OFS(intr)), (1 << GIC_INTR_BIT(intr)), \ + (trig) << GIC_INTR_BIT(intr)) /* Mask manipulation */ #define GIC_SH_SMASK_OFS 0x0380 @@ -306,18 +307,6 @@ GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe)), \ GIC_SH_MAP_TO_VPE_REG_BIT(vpe)) -struct gic_pcpu_mask { - DECLARE_BITMAP(pcpu_mask, GIC_NUM_INTRS); -}; - -struct gic_pending_regs { - DECLARE_BITMAP(pending, GIC_NUM_INTRS); -}; - -struct gic_intrmask_regs { - DECLARE_BITMAP(intrmask, GIC_NUM_INTRS); -}; - /* * Interrupt Meta-data specification. The ipiflag helps * in building ipi_map. @@ -329,8 +318,7 @@ struct gic_intr_map { unsigned int polarity; /* Polarity : +/- */ unsigned int trigtype; /* Trigger : Edge/Levl */ unsigned int flags; /* Misc flags */ -#define GIC_FLAG_IPI 0x01 -#define GIC_FLAG_TRANSPARENT 0x02 +#define GIC_FLAG_TRANSPARENT 0x01 }; /* @@ -386,6 +374,7 @@ extern unsigned int plat_ipi_call_int_xlate(unsigned int); extern unsigned int plat_ipi_resched_int_xlate(unsigned int); extern void gic_bind_eic_interrupt(int irq, int set); extern unsigned int gic_get_timer_pending(void); +extern void gic_get_int_mask(unsigned long *dst, const unsigned long *src); extern unsigned int gic_get_int(void); extern void gic_enable_interrupt(int irq_vec); extern void gic_disable_interrupt(int irq_vec); diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h index ae1f7b24dd1a..39f07aec640c 100644 --- a/arch/mips/include/asm/irq.h +++ b/arch/mips/include/asm/irq.h @@ -26,6 +26,8 @@ static inline int irq_canonicalize(int irq) #define irq_canonicalize(irq) (irq) /* Sane hardware, sane code ... */ #endif +asmlinkage void plat_irq_dispatch(void); + extern void do_IRQ(unsigned int irq); extern void arch_init_irq(void); diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index b0aa95565752..f2c249796ea8 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -96,11 +96,6 @@ #define CAUSEB_DC 27 #define CAUSEF_DC (_ULCAST_(1) << 27) -struct kvm; -struct kvm_run; -struct kvm_vcpu; -struct kvm_interrupt; - extern atomic_t kvm_mips_instance; extern pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn); extern void (*kvm_mips_release_pfn_clean) (pfn_t pfn); @@ -359,13 +354,17 @@ enum emulation_result { #define MIPS3_PG_FRAME 0x3fffffc0 #define VPN2_MASK 0xffffe000 -#define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && \ +#define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && \ ((x).tlb_lo1 & MIPS3_PG_G)) #define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK) #define TLB_ASID(x) ((x).tlb_hi & ASID_MASK) -#define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) \ - ? ((x).tlb_lo1 & MIPS3_PG_V) \ +#define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) \ + ? 
((x).tlb_lo1 & MIPS3_PG_V) \ : ((x).tlb_lo0 & MIPS3_PG_V)) +#define TLB_HI_VPN2_HIT(x, y) ((TLB_VPN2(x) & ~(x).tlb_mask) == \ + ((y) & VPN2_MASK & ~(x).tlb_mask)) +#define TLB_HI_ASID_HIT(x, y) (TLB_IS_GLOBAL(x) || \ + TLB_ASID(x) == ((y) & ASID_MASK)) struct kvm_mips_tlb { long tlb_mask; @@ -760,8 +759,19 @@ extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu); /* Misc */ -extern int kvm_mips_dump_stats(struct kvm_vcpu *vcpu); +extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu); extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm); +static inline void kvm_arch_hardware_disable(void) {} +static inline void kvm_arch_hardware_unsetup(void) {} +static inline void kvm_arch_sync_events(struct kvm *kvm) {} +static inline void kvm_arch_free_memslot(struct kvm *kvm, + struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {} +static inline void kvm_arch_memslots_updated(struct kvm *kvm) {} +static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {} +static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot) {} +static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} +static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} #endif /* __MIPS_KVM_HOST_H__ */ diff --git a/arch/mips/include/asm/maar.h b/arch/mips/include/asm/maar.h new file mode 100644 index 000000000000..6c62b0f899c0 --- /dev/null +++ b/arch/mips/include/asm/maar.h @@ -0,0 +1,109 @@ +/* + * Copyright (C) 2014 Imagination Technologies + * Author: Paul Burton <paul.burton@imgtec.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __MIPS_ASM_MIPS_MAAR_H__ +#define __MIPS_ASM_MIPS_MAAR_H__ + +#include <asm/hazards.h> +#include <asm/mipsregs.h> + +/** + * platform_maar_init() - perform platform-level MAAR configuration + * @num_pairs: The number of MAAR pairs present in the system. + * + * Platforms should implement this function such that it configures as many + * MAAR pairs as required, from 0 up to the maximum of num_pairs-1, and returns + * the number that were used. Any further MAARs will be configured to be + * invalid. The default implementation of this function will simply indicate + * that it has configured 0 MAAR pairs. + * + * Return: The number of MAAR pairs configured. + */ +unsigned __weak platform_maar_init(unsigned num_pairs); + +/** + * write_maar_pair() - write to a pair of MAARs + * @idx: The index of the pair (ie. use MAARs idx*2 & (idx*2)+1). + * @lower: The lowest address that the MAAR pair will affect. Must be + * aligned to a 2^16 byte boundary. + * @upper: The highest address that the MAAR pair will affect. Must be + * aligned to one byte before a 2^16 byte boundary. + * @attrs: The accessibility attributes to program, eg. MIPS_MAAR_S. The + * MIPS_MAAR_V attribute will automatically be set. + * + * Program the pair of MAAR registers specified by idx to apply the attributes + * specified by attrs to the range of addresses from lower to higher. 
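Illustrative sketch (not part of the patch): the reworked GICBIS() above now takes an explicit mask and performs a read-modify-write, so GIC_SET_POLARITY()/GIC_SET_TRIGGER() clear the target bit before writing the new value instead of only ORing bits in. A minimal open-coded equivalent, with a made-up helper name:

	/* What GICBIS(reg, mask, bits) boils down to (sketch only). */
	static inline void gic_update_bits(volatile unsigned int *reg,
					   unsigned int mask, unsigned int bits)
	{
		unsigned int data = *reg;	/* GICREAD */

		data &= ~mask;			/* clear the field first */
		data |= bits & mask;		/* set only bits within the mask */
		*reg = data;			/* GICWRITE */
	}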
+ */ +static inline void write_maar_pair(unsigned idx, phys_addr_t lower, + phys_addr_t upper, unsigned attrs) +{ + /* Addresses begin at bit 16, but are shifted right 4 bits */ + BUG_ON(lower & (0xffff | ~(MIPS_MAAR_ADDR << 4))); + BUG_ON(((upper & 0xffff) != 0xffff) + || ((upper & ~0xffffull) & ~(MIPS_MAAR_ADDR << 4))); + + /* Automatically set MIPS_MAAR_V */ + attrs |= MIPS_MAAR_V; + + /* Write the upper address & attributes (only MIPS_MAAR_V matters) */ + write_c0_maari(idx << 1); + back_to_back_c0_hazard(); + write_c0_maar(((upper >> 4) & MIPS_MAAR_ADDR) | attrs); + back_to_back_c0_hazard(); + + /* Write the lower address & attributes */ + write_c0_maari((idx << 1) | 0x1); + back_to_back_c0_hazard(); + write_c0_maar((lower >> 4) | attrs); + back_to_back_c0_hazard(); +} + +/** + * struct maar_config - MAAR configuration data + * @lower: The lowest address that the MAAR pair will affect. Must be + * aligned to a 2^16 byte boundary. + * @upper: The highest address that the MAAR pair will affect. Must be + * aligned to one byte before a 2^16 byte boundary. + * @attrs: The accessibility attributes to program, eg. MIPS_MAAR_S. The + * MIPS_MAAR_V attribute will automatically be set. + * + * Describes the configuration of a pair of Memory Accessibility Attribute + * Registers - applying attributes from attrs to the range of physical + * addresses from lower to upper inclusive. + */ +struct maar_config { + phys_addr_t lower; + phys_addr_t upper; + unsigned attrs; +}; + +/** + * maar_config() - configure MAARs according to provided data + * @cfg: Pointer to an array of struct maar_config. + * @num_cfg: The number of structs in the cfg array. + * @num_pairs: The number of MAAR pairs present in the system. + * + * Configures as many MAARs as are present and specified in the cfg + * array with the values taken from the cfg array. + * + * Return: The number of MAAR pairs configured. + */ +static inline unsigned maar_config(const struct maar_config *cfg, + unsigned num_cfg, unsigned num_pairs) +{ + unsigned i; + + for (i = 0; i < min(num_cfg, num_pairs); i++) + write_maar_pair(i, cfg[i].lower, cfg[i].upper, cfg[i].attrs); + + return i; +} + +#endif /* __MIPS_ASM_MIPS_MAAR_H__ */ diff --git a/arch/mips/include/asm/mach-au1x00/au1000.h b/arch/mips/include/asm/mach-au1x00/au1000.h index b4c3ecb17d48..a7eec3364a64 100644 --- a/arch/mips/include/asm/mach-au1x00/au1000.h +++ b/arch/mips/include/asm/mach-au1x00/au1000.h @@ -34,6 +34,558 @@ #ifndef _AU1000_H_ #define _AU1000_H_ +/* SOC Interrupt numbers */ +/* Au1000-style (IC0/1): 2 controllers with 32 sources each */ +#define AU1000_INTC0_INT_BASE (MIPS_CPU_IRQ_BASE + 8) +#define AU1000_INTC0_INT_LAST (AU1000_INTC0_INT_BASE + 31) +#define AU1000_INTC1_INT_BASE (AU1000_INTC0_INT_LAST + 1) +#define AU1000_INTC1_INT_LAST (AU1000_INTC1_INT_BASE + 31) +#define AU1000_MAX_INTR AU1000_INTC1_INT_LAST + +/* Au1300-style (GPIC): 1 controller with up to 128 sources */ +#define ALCHEMY_GPIC_INT_BASE (MIPS_CPU_IRQ_BASE + 8) +#define ALCHEMY_GPIC_INT_NUM 128 +#define ALCHEMY_GPIC_INT_LAST (ALCHEMY_GPIC_INT_BASE + ALCHEMY_GPIC_INT_NUM - 1) + +/* common clock names, shared among all variants. 
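For illustration (not from the patch): a minimal sketch of how a platform could implement platform_maar_init() on top of the maar_config() helper above. The address range and attribute are made-up example values, not taken from any real board:

	#include <linux/kernel.h>
	#include <asm/maar.h>

	/* Example only: allow speculation on the first 256MB of DRAM. */
	unsigned platform_maar_init(unsigned num_pairs)
	{
		static const struct maar_config cfg[] = {
			{
				.lower = 0x00000000,	/* 2^16-byte aligned */
				.upper = 0x0fffffff,	/* one byte below a 2^16 boundary */
				.attrs = MIPS_MAAR_S,
			},
		};

		return maar_config(cfg, ARRAY_SIZE(cfg), num_pairs);
	}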
AUXPLL2 is Au1300 */ +#define ALCHEMY_ROOT_CLK "root_clk" +#define ALCHEMY_CPU_CLK "cpu_clk" +#define ALCHEMY_AUXPLL_CLK "auxpll_clk" +#define ALCHEMY_AUXPLL2_CLK "auxpll2_clk" +#define ALCHEMY_SYSBUS_CLK "sysbus_clk" +#define ALCHEMY_PERIPH_CLK "periph_clk" +#define ALCHEMY_MEM_CLK "mem_clk" +#define ALCHEMY_LR_CLK "lr_clk" +#define ALCHEMY_FG0_CLK "fg0_clk" +#define ALCHEMY_FG1_CLK "fg1_clk" +#define ALCHEMY_FG2_CLK "fg2_clk" +#define ALCHEMY_FG3_CLK "fg3_clk" +#define ALCHEMY_FG4_CLK "fg4_clk" +#define ALCHEMY_FG5_CLK "fg5_clk" + +/* Au1300 peripheral interrupt numbers */ +#define AU1300_FIRST_INT (ALCHEMY_GPIC_INT_BASE) +#define AU1300_UART1_INT (AU1300_FIRST_INT + 17) +#define AU1300_UART2_INT (AU1300_FIRST_INT + 25) +#define AU1300_UART3_INT (AU1300_FIRST_INT + 27) +#define AU1300_SD1_INT (AU1300_FIRST_INT + 32) +#define AU1300_SD2_INT (AU1300_FIRST_INT + 38) +#define AU1300_PSC0_INT (AU1300_FIRST_INT + 48) +#define AU1300_PSC1_INT (AU1300_FIRST_INT + 52) +#define AU1300_PSC2_INT (AU1300_FIRST_INT + 56) +#define AU1300_PSC3_INT (AU1300_FIRST_INT + 60) +#define AU1300_NAND_INT (AU1300_FIRST_INT + 62) +#define AU1300_DDMA_INT (AU1300_FIRST_INT + 75) +#define AU1300_MMU_INT (AU1300_FIRST_INT + 76) +#define AU1300_MPU_INT (AU1300_FIRST_INT + 77) +#define AU1300_GPU_INT (AU1300_FIRST_INT + 78) +#define AU1300_UDMA_INT (AU1300_FIRST_INT + 79) +#define AU1300_TOY_INT (AU1300_FIRST_INT + 80) +#define AU1300_TOY_MATCH0_INT (AU1300_FIRST_INT + 81) +#define AU1300_TOY_MATCH1_INT (AU1300_FIRST_INT + 82) +#define AU1300_TOY_MATCH2_INT (AU1300_FIRST_INT + 83) +#define AU1300_RTC_INT (AU1300_FIRST_INT + 84) +#define AU1300_RTC_MATCH0_INT (AU1300_FIRST_INT + 85) +#define AU1300_RTC_MATCH1_INT (AU1300_FIRST_INT + 86) +#define AU1300_RTC_MATCH2_INT (AU1300_FIRST_INT + 87) +#define AU1300_UART0_INT (AU1300_FIRST_INT + 88) +#define AU1300_SD0_INT (AU1300_FIRST_INT + 89) +#define AU1300_USB_INT (AU1300_FIRST_INT + 90) +#define AU1300_LCD_INT (AU1300_FIRST_INT + 91) +#define AU1300_BSA_INT (AU1300_FIRST_INT + 92) +#define AU1300_MPE_INT (AU1300_FIRST_INT + 93) +#define AU1300_ITE_INT (AU1300_FIRST_INT + 94) +#define AU1300_AES_INT (AU1300_FIRST_INT + 95) +#define AU1300_CIM_INT (AU1300_FIRST_INT + 96) + +/**********************************************************************/ + +/* + * Physical base addresses for integrated peripherals + * 0..au1000 1..au1500 2..au1100 3..au1550 4..au1200 5..au1300 + */ + +#define AU1000_AC97_PHYS_ADDR 0x10000000 /* 012 */ +#define AU1300_ROM_PHYS_ADDR 0x10000000 /* 5 */ +#define AU1300_OTP_PHYS_ADDR 0x10002000 /* 5 */ +#define AU1300_VSS_PHYS_ADDR 0x10003000 /* 5 */ +#define AU1300_UART0_PHYS_ADDR 0x10100000 /* 5 */ +#define AU1300_UART1_PHYS_ADDR 0x10101000 /* 5 */ +#define AU1300_UART2_PHYS_ADDR 0x10102000 /* 5 */ +#define AU1300_UART3_PHYS_ADDR 0x10103000 /* 5 */ +#define AU1000_USB_OHCI_PHYS_ADDR 0x10100000 /* 012 */ +#define AU1000_USB_UDC_PHYS_ADDR 0x10200000 /* 0123 */ +#define AU1300_GPIC_PHYS_ADDR 0x10200000 /* 5 */ +#define AU1000_IRDA_PHYS_ADDR 0x10300000 /* 02 */ +#define AU1200_AES_PHYS_ADDR 0x10300000 /* 45 */ +#define AU1000_IC0_PHYS_ADDR 0x10400000 /* 01234 */ +#define AU1300_GPU_PHYS_ADDR 0x10500000 /* 5 */ +#define AU1000_MAC0_PHYS_ADDR 0x10500000 /* 023 */ +#define AU1000_MAC1_PHYS_ADDR 0x10510000 /* 023 */ +#define AU1000_MACEN_PHYS_ADDR 0x10520000 /* 023 */ +#define AU1100_SD0_PHYS_ADDR 0x10600000 /* 245 */ +#define AU1300_SD1_PHYS_ADDR 0x10601000 /* 5 */ +#define AU1300_SD2_PHYS_ADDR 0x10602000 /* 5 */ +#define AU1100_SD1_PHYS_ADDR 0x10680000 /* 24 
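As an aside (illustrative only): the ALCHEMY_*_CLK strings above are the names the Alchemy clocks are registered under, so they can be looked up through the common clk API. A hedged sketch with a hypothetical function name and minimal error handling:

	#include <linux/clk.h>

	/* Query the AUXPLL rate by its well-known name (sketch only). */
	static unsigned long example_auxpll_rate(void)
	{
		struct clk *c = clk_get(NULL, ALCHEMY_AUXPLL_CLK);
		unsigned long rate;

		if (IS_ERR(c))
			return 0;

		rate = clk_get_rate(c);
		clk_put(c);
		return rate;
	}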
*/ +#define AU1300_SYS_PHYS_ADDR 0x10900000 /* 5 */ +#define AU1550_PSC2_PHYS_ADDR 0x10A00000 /* 3 */ +#define AU1550_PSC3_PHYS_ADDR 0x10B00000 /* 3 */ +#define AU1300_PSC0_PHYS_ADDR 0x10A00000 /* 5 */ +#define AU1300_PSC1_PHYS_ADDR 0x10A01000 /* 5 */ +#define AU1300_PSC2_PHYS_ADDR 0x10A02000 /* 5 */ +#define AU1300_PSC3_PHYS_ADDR 0x10A03000 /* 5 */ +#define AU1000_I2S_PHYS_ADDR 0x11000000 /* 02 */ +#define AU1500_MAC0_PHYS_ADDR 0x11500000 /* 1 */ +#define AU1500_MAC1_PHYS_ADDR 0x11510000 /* 1 */ +#define AU1500_MACEN_PHYS_ADDR 0x11520000 /* 1 */ +#define AU1000_UART0_PHYS_ADDR 0x11100000 /* 01234 */ +#define AU1200_SWCNT_PHYS_ADDR 0x1110010C /* 4 */ +#define AU1000_UART1_PHYS_ADDR 0x11200000 /* 0234 */ +#define AU1000_UART2_PHYS_ADDR 0x11300000 /* 0 */ +#define AU1000_UART3_PHYS_ADDR 0x11400000 /* 0123 */ +#define AU1000_SSI0_PHYS_ADDR 0x11600000 /* 02 */ +#define AU1000_SSI1_PHYS_ADDR 0x11680000 /* 02 */ +#define AU1500_GPIO2_PHYS_ADDR 0x11700000 /* 1234 */ +#define AU1000_IC1_PHYS_ADDR 0x11800000 /* 01234 */ +#define AU1000_SYS_PHYS_ADDR 0x11900000 /* 012345 */ +#define AU1550_PSC0_PHYS_ADDR 0x11A00000 /* 34 */ +#define AU1550_PSC1_PHYS_ADDR 0x11B00000 /* 34 */ +#define AU1000_MEM_PHYS_ADDR 0x14000000 /* 01234 */ +#define AU1000_STATIC_MEM_PHYS_ADDR 0x14001000 /* 01234 */ +#define AU1300_UDMA_PHYS_ADDR 0x14001800 /* 5 */ +#define AU1000_DMA_PHYS_ADDR 0x14002000 /* 012 */ +#define AU1550_DBDMA_PHYS_ADDR 0x14002000 /* 345 */ +#define AU1550_DBDMA_CONF_PHYS_ADDR 0x14003000 /* 345 */ +#define AU1000_MACDMA0_PHYS_ADDR 0x14004000 /* 0123 */ +#define AU1000_MACDMA1_PHYS_ADDR 0x14004200 /* 0123 */ +#define AU1200_CIM_PHYS_ADDR 0x14004000 /* 45 */ +#define AU1500_PCI_PHYS_ADDR 0x14005000 /* 13 */ +#define AU1550_PE_PHYS_ADDR 0x14008000 /* 3 */ +#define AU1200_MAEBE_PHYS_ADDR 0x14010000 /* 4 */ +#define AU1200_MAEFE_PHYS_ADDR 0x14012000 /* 4 */ +#define AU1300_MAEITE_PHYS_ADDR 0x14010000 /* 5 */ +#define AU1300_MAEMPE_PHYS_ADDR 0x14014000 /* 5 */ +#define AU1550_USB_OHCI_PHYS_ADDR 0x14020000 /* 3 */ +#define AU1200_USB_CTL_PHYS_ADDR 0x14020000 /* 4 */ +#define AU1200_USB_OTG_PHYS_ADDR 0x14020020 /* 4 */ +#define AU1200_USB_OHCI_PHYS_ADDR 0x14020100 /* 4 */ +#define AU1200_USB_EHCI_PHYS_ADDR 0x14020200 /* 4 */ +#define AU1200_USB_UDC_PHYS_ADDR 0x14022000 /* 4 */ +#define AU1300_USB_EHCI_PHYS_ADDR 0x14020000 /* 5 */ +#define AU1300_USB_OHCI0_PHYS_ADDR 0x14020400 /* 5 */ +#define AU1300_USB_OHCI1_PHYS_ADDR 0x14020800 /* 5 */ +#define AU1300_USB_CTL_PHYS_ADDR 0x14021000 /* 5 */ +#define AU1300_USB_OTG_PHYS_ADDR 0x14022000 /* 5 */ +#define AU1300_MAEBSA_PHYS_ADDR 0x14030000 /* 5 */ +#define AU1100_LCD_PHYS_ADDR 0x15000000 /* 2 */ +#define AU1200_LCD_PHYS_ADDR 0x15000000 /* 45 */ +#define AU1500_PCI_MEM_PHYS_ADDR 0x400000000ULL /* 13 */ +#define AU1500_PCI_IO_PHYS_ADDR 0x500000000ULL /* 13 */ +#define AU1500_PCI_CONFIG0_PHYS_ADDR 0x600000000ULL /* 13 */ +#define AU1500_PCI_CONFIG1_PHYS_ADDR 0x680000000ULL /* 13 */ +#define AU1000_PCMCIA_IO_PHYS_ADDR 0xF00000000ULL /* 012345 */ +#define AU1000_PCMCIA_ATTR_PHYS_ADDR 0xF40000000ULL /* 012345 */ +#define AU1000_PCMCIA_MEM_PHYS_ADDR 0xF80000000ULL /* 012345 */ + +/**********************************************************************/ + + +/* + * Au1300 GPIO+INT controller (GPIC) register offsets and bits + * Registers are 128bits (0x10 bytes), divided into 4 "banks". 
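Illustrative sketch (not part of the patch): the physical bases above are normally reached through an uncached KSEG1 mapping, the same way the UART and SYS helpers later in this header do it. The function name and the use of the MAC0 block are hypothetical:

	#include <linux/types.h>
	#include <asm/addrspace.h>
	#include <asm/io.h>

	/* Read a 32bit register at offset 'offs' within the MAC0 block (sketch). */
	static inline u32 example_mac0_read(unsigned long offs)
	{
		void __iomem *base =
			(void __iomem *)KSEG1ADDR(AU1000_MAC0_PHYS_ADDR);

		return __raw_readl(base + offs);
	}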
+ */ +#define AU1300_GPIC_PINVAL 0x0000 +#define AU1300_GPIC_PINVALCLR 0x0010 +#define AU1300_GPIC_IPEND 0x0020 +#define AU1300_GPIC_PRIENC 0x0030 +#define AU1300_GPIC_IEN 0x0040 /* int_mask in manual */ +#define AU1300_GPIC_IDIS 0x0050 /* int_maskclr in manual */ +#define AU1300_GPIC_DMASEL 0x0060 +#define AU1300_GPIC_DEVSEL 0x0080 +#define AU1300_GPIC_DEVCLR 0x0090 +#define AU1300_GPIC_RSTVAL 0x00a0 +/* pin configuration space. one 32bit register for up to 128 IRQs */ +#define AU1300_GPIC_PINCFG 0x1000 + +#define GPIC_GPIO_TO_BIT(gpio) \ + (1 << ((gpio) & 0x1f)) + +#define GPIC_GPIO_BANKOFF(gpio) \ + (((gpio) >> 5) * 4) + +/* Pin Control bits: who owns the pin, what does it do */ +#define GPIC_CFG_PC_GPIN 0 +#define GPIC_CFG_PC_DEV 1 +#define GPIC_CFG_PC_GPOLOW 2 +#define GPIC_CFG_PC_GPOHIGH 3 +#define GPIC_CFG_PC_MASK 3 + +/* assign pin to MIPS IRQ line */ +#define GPIC_CFG_IL_SET(x) (((x) & 3) << 2) +#define GPIC_CFG_IL_MASK (3 << 2) + +/* pin interrupt type setup */ +#define GPIC_CFG_IC_OFF (0 << 4) +#define GPIC_CFG_IC_LEVEL_LOW (1 << 4) +#define GPIC_CFG_IC_LEVEL_HIGH (2 << 4) +#define GPIC_CFG_IC_EDGE_FALL (5 << 4) +#define GPIC_CFG_IC_EDGE_RISE (6 << 4) +#define GPIC_CFG_IC_EDGE_BOTH (7 << 4) +#define GPIC_CFG_IC_MASK (7 << 4) + +/* allow interrupt to wake cpu from 'wait' */ +#define GPIC_CFG_IDLEWAKE (1 << 7) + +/***********************************************************************/ + +/* Au1000 SDRAM memory controller register offsets */ +#define AU1000_MEM_SDMODE0 0x0000 +#define AU1000_MEM_SDMODE1 0x0004 +#define AU1000_MEM_SDMODE2 0x0008 +#define AU1000_MEM_SDADDR0 0x000C +#define AU1000_MEM_SDADDR1 0x0010 +#define AU1000_MEM_SDADDR2 0x0014 +#define AU1000_MEM_SDREFCFG 0x0018 +#define AU1000_MEM_SDPRECMD 0x001C +#define AU1000_MEM_SDAUTOREF 0x0020 +#define AU1000_MEM_SDWRMD0 0x0024 +#define AU1000_MEM_SDWRMD1 0x0028 +#define AU1000_MEM_SDWRMD2 0x002C +#define AU1000_MEM_SDSLEEP 0x0030 +#define AU1000_MEM_SDSMCKE 0x0034 + +/* MEM_SDMODE register content definitions */ +#define MEM_SDMODE_F (1 << 22) +#define MEM_SDMODE_SR (1 << 21) +#define MEM_SDMODE_BS (1 << 20) +#define MEM_SDMODE_RS (3 << 18) +#define MEM_SDMODE_CS (7 << 15) +#define MEM_SDMODE_TRAS (15 << 11) +#define MEM_SDMODE_TMRD (3 << 9) +#define MEM_SDMODE_TWR (3 << 7) +#define MEM_SDMODE_TRP (3 << 5) +#define MEM_SDMODE_TRCD (3 << 3) +#define MEM_SDMODE_TCL (7 << 0) + +#define MEM_SDMODE_BS_2Bank (0 << 20) +#define MEM_SDMODE_BS_4Bank (1 << 20) +#define MEM_SDMODE_RS_11Row (0 << 18) +#define MEM_SDMODE_RS_12Row (1 << 18) +#define MEM_SDMODE_RS_13Row (2 << 18) +#define MEM_SDMODE_RS_N(N) ((N) << 18) +#define MEM_SDMODE_CS_7Col (0 << 15) +#define MEM_SDMODE_CS_8Col (1 << 15) +#define MEM_SDMODE_CS_9Col (2 << 15) +#define MEM_SDMODE_CS_10Col (3 << 15) +#define MEM_SDMODE_CS_11Col (4 << 15) +#define MEM_SDMODE_CS_N(N) ((N) << 15) +#define MEM_SDMODE_TRAS_N(N) ((N) << 11) +#define MEM_SDMODE_TMRD_N(N) ((N) << 9) +#define MEM_SDMODE_TWR_N(N) ((N) << 7) +#define MEM_SDMODE_TRP_N(N) ((N) << 5) +#define MEM_SDMODE_TRCD_N(N) ((N) << 3) +#define MEM_SDMODE_TCL_N(N) ((N) << 0) + +/* MEM_SDADDR register contents definitions */ +#define MEM_SDADDR_E (1 << 20) +#define MEM_SDADDR_CSBA (0x03FF << 10) +#define MEM_SDADDR_CSMASK (0x03FF << 0) +#define MEM_SDADDR_CSBA_N(N) ((N) & (0x03FF << 22) >> 12) +#define MEM_SDADDR_CSMASK_N(N) ((N)&(0x03FF << 22) >> 22) + +/* MEM_SDREFCFG register content definitions */ +#define MEM_SDREFCFG_TRC (15 << 28) +#define MEM_SDREFCFG_TRPM (3 << 26) +#define MEM_SDREFCFG_E (1 << 25) +#define 
MEM_SDREFCFG_RE (0x1ffffff << 0) +#define MEM_SDREFCFG_TRC_N(N) ((N) << MEM_SDREFCFG_TRC) +#define MEM_SDREFCFG_TRPM_N(N) ((N) << MEM_SDREFCFG_TRPM) +#define MEM_SDREFCFG_REF_N(N) (N) + +/* Au1550 SDRAM Register Offsets */ +#define AU1550_MEM_SDMODE0 0x0800 +#define AU1550_MEM_SDMODE1 0x0808 +#define AU1550_MEM_SDMODE2 0x0810 +#define AU1550_MEM_SDADDR0 0x0820 +#define AU1550_MEM_SDADDR1 0x0828 +#define AU1550_MEM_SDADDR2 0x0830 +#define AU1550_MEM_SDCONFIGA 0x0840 +#define AU1550_MEM_SDCONFIGB 0x0848 +#define AU1550_MEM_SDSTAT 0x0850 +#define AU1550_MEM_SDERRADDR 0x0858 +#define AU1550_MEM_SDSTRIDE0 0x0860 +#define AU1550_MEM_SDSTRIDE1 0x0868 +#define AU1550_MEM_SDSTRIDE2 0x0870 +#define AU1550_MEM_SDWRMD0 0x0880 +#define AU1550_MEM_SDWRMD1 0x0888 +#define AU1550_MEM_SDWRMD2 0x0890 +#define AU1550_MEM_SDPRECMD 0x08C0 +#define AU1550_MEM_SDAUTOREF 0x08C8 +#define AU1550_MEM_SDSREF 0x08D0 +#define AU1550_MEM_SDSLEEP MEM_SDSREF + +/* Static Bus Controller register offsets */ +#define AU1000_MEM_STCFG0 0x000 +#define AU1000_MEM_STTIME0 0x004 +#define AU1000_MEM_STADDR0 0x008 +#define AU1000_MEM_STCFG1 0x010 +#define AU1000_MEM_STTIME1 0x014 +#define AU1000_MEM_STADDR1 0x018 +#define AU1000_MEM_STCFG2 0x020 +#define AU1000_MEM_STTIME2 0x024 +#define AU1000_MEM_STADDR2 0x028 +#define AU1000_MEM_STCFG3 0x030 +#define AU1000_MEM_STTIME3 0x034 +#define AU1000_MEM_STADDR3 0x038 +#define AU1000_MEM_STNDCTL 0x100 +#define AU1000_MEM_STSTAT 0x104 + +#define MEM_STNAND_CMD 0x0 +#define MEM_STNAND_ADDR 0x4 +#define MEM_STNAND_DATA 0x20 + + +/* Programmable Counters 0 and 1 */ +#define AU1000_SYS_CNTRCTRL 0x14 +# define SYS_CNTRL_E1S (1 << 23) +# define SYS_CNTRL_T1S (1 << 20) +# define SYS_CNTRL_M21 (1 << 19) +# define SYS_CNTRL_M11 (1 << 18) +# define SYS_CNTRL_M01 (1 << 17) +# define SYS_CNTRL_C1S (1 << 16) +# define SYS_CNTRL_BP (1 << 14) +# define SYS_CNTRL_EN1 (1 << 13) +# define SYS_CNTRL_BT1 (1 << 12) +# define SYS_CNTRL_EN0 (1 << 11) +# define SYS_CNTRL_BT0 (1 << 10) +# define SYS_CNTRL_E0 (1 << 8) +# define SYS_CNTRL_E0S (1 << 7) +# define SYS_CNTRL_32S (1 << 5) +# define SYS_CNTRL_T0S (1 << 4) +# define SYS_CNTRL_M20 (1 << 3) +# define SYS_CNTRL_M10 (1 << 2) +# define SYS_CNTRL_M00 (1 << 1) +# define SYS_CNTRL_C0S (1 << 0) + +/* Programmable Counter 0 Registers */ +#define AU1000_SYS_TOYTRIM 0x00 +#define AU1000_SYS_TOYWRITE 0x04 +#define AU1000_SYS_TOYMATCH0 0x08 +#define AU1000_SYS_TOYMATCH1 0x0c +#define AU1000_SYS_TOYMATCH2 0x10 +#define AU1000_SYS_TOYREAD 0x40 + +/* Programmable Counter 1 Registers */ +#define AU1000_SYS_RTCTRIM 0x44 +#define AU1000_SYS_RTCWRITE 0x48 +#define AU1000_SYS_RTCMATCH0 0x4c +#define AU1000_SYS_RTCMATCH1 0x50 +#define AU1000_SYS_RTCMATCH2 0x54 +#define AU1000_SYS_RTCREAD 0x58 + + +/* GPIO */ +#define AU1000_SYS_PINFUNC 0x2C +# define SYS_PF_USB (1 << 15) /* 2nd USB device/host */ +# define SYS_PF_U3 (1 << 14) /* GPIO23/U3TXD */ +# define SYS_PF_U2 (1 << 13) /* GPIO22/U2TXD */ +# define SYS_PF_U1 (1 << 12) /* GPIO21/U1TXD */ +# define SYS_PF_SRC (1 << 11) /* GPIO6/SROMCKE */ +# define SYS_PF_CK5 (1 << 10) /* GPIO3/CLK5 */ +# define SYS_PF_CK4 (1 << 9) /* GPIO2/CLK4 */ +# define SYS_PF_IRF (1 << 8) /* GPIO15/IRFIRSEL */ +# define SYS_PF_UR3 (1 << 7) /* GPIO[14:9]/UART3 */ +# define SYS_PF_I2D (1 << 6) /* GPIO8/I2SDI */ +# define SYS_PF_I2S (1 << 5) /* I2S/GPIO[29:31] */ +# define SYS_PF_NI2 (1 << 4) /* NI2/GPIO[24:28] */ +# define SYS_PF_U0 (1 << 3) /* U0TXD/GPIO20 */ +# define SYS_PF_RD (1 << 2) /* IRTXD/GPIO19 */ +# define SYS_PF_A97 (1 << 1) /* AC97/SSL1 */ +# 
define SYS_PF_S0 (1 << 0) /* SSI_0/GPIO[16:18] */ + +/* Au1100 only */ +# define SYS_PF_PC (1 << 18) /* PCMCIA/GPIO[207:204] */ +# define SYS_PF_LCD (1 << 17) /* extern lcd/GPIO[203:200] */ +# define SYS_PF_CS (1 << 16) /* EXTCLK0/32KHz to gpio2 */ +# define SYS_PF_EX0 (1 << 9) /* GPIO2/clock */ + +/* Au1550 only. Redefines lots of pins */ +# define SYS_PF_PSC2_MASK (7 << 17) +# define SYS_PF_PSC2_AC97 0 +# define SYS_PF_PSC2_SPI 0 +# define SYS_PF_PSC2_I2S (1 << 17) +# define SYS_PF_PSC2_SMBUS (3 << 17) +# define SYS_PF_PSC2_GPIO (7 << 17) +# define SYS_PF_PSC3_MASK (7 << 20) +# define SYS_PF_PSC3_AC97 0 +# define SYS_PF_PSC3_SPI 0 +# define SYS_PF_PSC3_I2S (1 << 20) +# define SYS_PF_PSC3_SMBUS (3 << 20) +# define SYS_PF_PSC3_GPIO (7 << 20) +# define SYS_PF_PSC1_S1 (1 << 1) +# define SYS_PF_MUST_BE_SET ((1 << 5) | (1 << 2)) + +/* Au1200 only */ +#define SYS_PINFUNC_DMA (1 << 31) +#define SYS_PINFUNC_S0A (1 << 30) +#define SYS_PINFUNC_S1A (1 << 29) +#define SYS_PINFUNC_LP0 (1 << 28) +#define SYS_PINFUNC_LP1 (1 << 27) +#define SYS_PINFUNC_LD16 (1 << 26) +#define SYS_PINFUNC_LD8 (1 << 25) +#define SYS_PINFUNC_LD1 (1 << 24) +#define SYS_PINFUNC_LD0 (1 << 23) +#define SYS_PINFUNC_P1A (3 << 21) +#define SYS_PINFUNC_P1B (1 << 20) +#define SYS_PINFUNC_FS3 (1 << 19) +#define SYS_PINFUNC_P0A (3 << 17) +#define SYS_PINFUNC_CS (1 << 16) +#define SYS_PINFUNC_CIM (1 << 15) +#define SYS_PINFUNC_P1C (1 << 14) +#define SYS_PINFUNC_U1T (1 << 12) +#define SYS_PINFUNC_U1R (1 << 11) +#define SYS_PINFUNC_EX1 (1 << 10) +#define SYS_PINFUNC_EX0 (1 << 9) +#define SYS_PINFUNC_U0R (1 << 8) +#define SYS_PINFUNC_MC (1 << 7) +#define SYS_PINFUNC_S0B (1 << 6) +#define SYS_PINFUNC_S0C (1 << 5) +#define SYS_PINFUNC_P0B (1 << 4) +#define SYS_PINFUNC_U0T (1 << 3) +#define SYS_PINFUNC_S1B (1 << 2) + +/* Power Management */ +#define AU1000_SYS_SCRATCH0 0x18 +#define AU1000_SYS_SCRATCH1 0x1c +#define AU1000_SYS_WAKEMSK 0x34 +#define AU1000_SYS_ENDIAN 0x38 +#define AU1000_SYS_POWERCTRL 0x3c +#define AU1000_SYS_WAKESRC 0x5c +#define AU1000_SYS_SLPPWR 0x78 +#define AU1000_SYS_SLEEP 0x7c + +#define SYS_WAKEMSK_D2 (1 << 9) +#define SYS_WAKEMSK_M2 (1 << 8) +#define SYS_WAKEMSK_GPIO(x) (1 << (x)) + +/* Clock Controller */ +#define AU1000_SYS_FREQCTRL0 0x20 +#define AU1000_SYS_FREQCTRL1 0x24 +#define AU1000_SYS_CLKSRC 0x28 +#define AU1000_SYS_CPUPLL 0x60 +#define AU1000_SYS_AUXPLL 0x64 +#define AU1300_SYS_AUXPLL2 0x68 + + +/**********************************************************************/ + + +/* The PCI chip selects are outside the 32bit space, and since we can't + * just program the 36bit addresses into BARs, we have to take a chunk + * out of the 32bit space and reserve it for PCI. When these addresses + * are ioremap()ed, they'll be fixed up to the real 36bit address before + * being passed to the real ioremap function. + */ +#define ALCHEMY_PCI_MEMWIN_START (AU1500_PCI_MEM_PHYS_ADDR >> 4) +#define ALCHEMY_PCI_MEMWIN_END (ALCHEMY_PCI_MEMWIN_START + 0x0FFFFFFF) + +/* for PCI IO it's simpler because we get to do the ioremap ourselves and then + * adjust the device's resources. 
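For illustration (not from the patch): a sketch of how the Au1300 GPIC pin-configuration bits above combine into one pin's config word. The function name is hypothetical, and the layout of one 32bit config register per pin starting at AU1300_GPIC_PINCFG is an assumption following the comment above; the authoritative code is the Au1300 GPIC irq/gpio support:

	#include <asm/io.h>

	/* Sketch: GPIO 'pin' as an active-low level interrupt on IRQ line 0,
	 * allowed to wake the core from 'wait'. gpic_base is a KSEG1/ioremap
	 * mapping of AU1300_GPIC_PHYS_ADDR.
	 */
	static void example_gpic_pin_setup(void __iomem *gpic_base, unsigned pin)
	{
		u32 cfg = GPIC_CFG_PC_GPIN |		/* pin owned by GPIO, input */
			  GPIC_CFG_IL_SET(0) |		/* route to MIPS IRQ line 0 */
			  GPIC_CFG_IC_LEVEL_LOW |	/* level triggered, active low */
			  GPIC_CFG_IDLEWAKE;		/* wake from 'wait' */

		__raw_writel(cfg, gpic_base + AU1300_GPIC_PINCFG + 4 * pin);
		wmb();		/* drain writebuffer */
	}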
+ */ +#define ALCHEMY_PCI_IOWIN_START 0x00001000 +#define ALCHEMY_PCI_IOWIN_END 0x0000FFFF + +#ifdef CONFIG_PCI + +#define IOPORT_RESOURCE_START 0x00001000 /* skip legacy probing */ +#define IOPORT_RESOURCE_END 0xffffffff +#define IOMEM_RESOURCE_START 0x10000000 +#define IOMEM_RESOURCE_END 0xfffffffffULL + +#else + +/* Don't allow any legacy ports probing */ +#define IOPORT_RESOURCE_START 0x10000000 +#define IOPORT_RESOURCE_END 0xffffffff +#define IOMEM_RESOURCE_START 0x10000000 +#define IOMEM_RESOURCE_END 0xfffffffffULL + +#endif + +/* PCI controller block register offsets */ +#define PCI_REG_CMEM 0x0000 +#define PCI_REG_CONFIG 0x0004 +#define PCI_REG_B2BMASK_CCH 0x0008 +#define PCI_REG_B2BBASE0_VID 0x000C +#define PCI_REG_B2BBASE1_SID 0x0010 +#define PCI_REG_MWMASK_DEV 0x0014 +#define PCI_REG_MWBASE_REV_CCL 0x0018 +#define PCI_REG_ERR_ADDR 0x001C +#define PCI_REG_SPEC_INTACK 0x0020 +#define PCI_REG_ID 0x0100 +#define PCI_REG_STATCMD 0x0104 +#define PCI_REG_CLASSREV 0x0108 +#define PCI_REG_PARAM 0x010C +#define PCI_REG_MBAR 0x0110 +#define PCI_REG_TIMEOUT 0x0140 + +/* PCI controller block register bits */ +#define PCI_CMEM_E (1 << 28) /* enable cacheable memory */ +#define PCI_CMEM_CMBASE(x) (((x) & 0x3fff) << 14) +#define PCI_CMEM_CMMASK(x) ((x) & 0x3fff) +#define PCI_CONFIG_ERD (1 << 27) /* pci error during R/W */ +#define PCI_CONFIG_ET (1 << 26) /* error in target mode */ +#define PCI_CONFIG_EF (1 << 25) /* fatal error */ +#define PCI_CONFIG_EP (1 << 24) /* parity error */ +#define PCI_CONFIG_EM (1 << 23) /* multiple errors */ +#define PCI_CONFIG_BM (1 << 22) /* bad master error */ +#define PCI_CONFIG_PD (1 << 20) /* PCI Disable */ +#define PCI_CONFIG_BME (1 << 19) /* Byte Mask Enable for reads */ +#define PCI_CONFIG_NC (1 << 16) /* mark mem access non-coherent */ +#define PCI_CONFIG_IA (1 << 15) /* INTA# enabled (target mode) */ +#define PCI_CONFIG_IP (1 << 13) /* int on PCI_PERR# */ +#define PCI_CONFIG_IS (1 << 12) /* int on PCI_SERR# */ +#define PCI_CONFIG_IMM (1 << 11) /* int on master abort */ +#define PCI_CONFIG_ITM (1 << 10) /* int on target abort (as master) */ +#define PCI_CONFIG_ITT (1 << 9) /* int on target abort (as target) */ +#define PCI_CONFIG_IPB (1 << 8) /* int on PERR# in bus master acc */ +#define PCI_CONFIG_SIC_NO (0 << 6) /* no byte mask changes */ +#define PCI_CONFIG_SIC_BA_ADR (1 << 6) /* on byte/hw acc, invert adr bits */ +#define PCI_CONFIG_SIC_HWA_DAT (2 << 6) /* on halfword acc, swap data */ +#define PCI_CONFIG_SIC_ALL (3 << 6) /* swap data bytes on all accesses */ +#define PCI_CONFIG_ST (1 << 5) /* swap data by target transactions */ +#define PCI_CONFIG_SM (1 << 4) /* swap data from PCI ctl */ +#define PCI_CONFIG_AEN (1 << 3) /* enable internal arbiter */ +#define PCI_CONFIG_R2H (1 << 2) /* REQ2# to hi-prio arbiter */ +#define PCI_CONFIG_R1H (1 << 1) /* REQ1# to hi-prio arbiter */ +#define PCI_CONFIG_CH (1 << 0) /* PCI ctl to hi-prio arbiter */ +#define PCI_B2BMASK_B2BMASK(x) (((x) & 0xffff) << 16) +#define PCI_B2BMASK_CCH(x) ((x) & 0xffff) /* 16 upper bits of class code */ +#define PCI_B2BBASE0_VID_B0(x) (((x) & 0xffff) << 16) +#define PCI_B2BBASE0_VID_SV(x) ((x) & 0xffff) +#define PCI_B2BBASE1_SID_B1(x) (((x) & 0xffff) << 16) +#define PCI_B2BBASE1_SID_SI(x) ((x) & 0xffff) +#define PCI_MWMASKDEV_MWMASK(x) (((x) & 0xffff) << 16) +#define PCI_MWMASKDEV_DEVID(x) ((x) & 0xffff) +#define PCI_MWBASEREVCCL_BASE(x) (((x) & 0xffff) << 16) +#define PCI_MWBASEREVCCL_REV(x) (((x) & 0xff) << 8) +#define PCI_MWBASEREVCCL_CCL(x) ((x) & 0xff) +#define PCI_ID_DID(x) (((x) & 
0xffff) << 16) +#define PCI_ID_VID(x) ((x) & 0xffff) +#define PCI_STATCMD_STATUS(x) (((x) & 0xffff) << 16) +#define PCI_STATCMD_CMD(x) ((x) & 0xffff) +#define PCI_CLASSREV_CLASS(x) (((x) & 0x00ffffff) << 8) +#define PCI_CLASSREV_REV(x) ((x) & 0xff) +#define PCI_PARAM_BIST(x) (((x) & 0xff) << 24) +#define PCI_PARAM_HT(x) (((x) & 0xff) << 16) +#define PCI_PARAM_LT(x) (((x) & 0xff) << 8) +#define PCI_PARAM_CLS(x) ((x) & 0xff) +#define PCI_TIMEOUT_RETRIES(x) (((x) & 0xff) << 8) /* max retries */ +#define PCI_TIMEOUT_TO(x) ((x) & 0xff) /* target ready timeout */ + + +/**********************************************************************/ + #ifndef _LANGUAGE_ASSEMBLY @@ -45,52 +597,36 @@ #include <asm/cpu.h> -/* cpu pipeline flush */ -void static inline au_sync(void) +/* helpers to access the SYS_* registers */ +static inline unsigned long alchemy_rdsys(int regofs) { - __asm__ volatile ("sync"); -} + void __iomem *b = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR); -void static inline au_sync_udelay(int us) -{ - __asm__ volatile ("sync"); - udelay(us); + return __raw_readl(b + regofs); } -void static inline au_sync_delay(int ms) +static inline void alchemy_wrsys(unsigned long v, int regofs) { - __asm__ volatile ("sync"); - mdelay(ms); -} + void __iomem *b = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR); -void static inline au_writeb(u8 val, unsigned long reg) -{ - *(volatile u8 *)reg = val; + __raw_writel(v, b + regofs); + wmb(); /* drain writebuffer */ } -void static inline au_writew(u16 val, unsigned long reg) +/* helpers to access static memctrl registers */ +static inline unsigned long alchemy_rdsmem(int regofs) { - *(volatile u16 *)reg = val; -} + void __iomem *b = (void __iomem *)KSEG1ADDR(AU1000_STATIC_MEM_PHYS_ADDR); -void static inline au_writel(u32 val, unsigned long reg) -{ - *(volatile u32 *)reg = val; + return __raw_readl(b + regofs); } -static inline u8 au_readb(unsigned long reg) +static inline void alchemy_wrsmem(unsigned long v, int regofs) { - return *(volatile u8 *)reg; -} + void __iomem *b = (void __iomem *)KSEG1ADDR(AU1000_STATIC_MEM_PHYS_ADDR); -static inline u16 au_readw(unsigned long reg) -{ - return *(volatile u16 *)reg; -} - -static inline u32 au_readl(unsigned long reg) -{ - return *(volatile u32 *)reg; + __raw_writel(v, b + regofs); + wmb(); /* drain writebuffer */ } /* Early Au1000 have a write-only SYS_CPUPLL register. 
*/ @@ -192,19 +728,20 @@ static inline void alchemy_uart_enable(u32 uart_phys) /* reset, enable clock, deassert reset */ if ((__raw_readl(addr + 0x100) & 3) != 3) { __raw_writel(0, addr + 0x100); - wmb(); + wmb(); /* drain writebuffer */ __raw_writel(1, addr + 0x100); - wmb(); + wmb(); /* drain writebuffer */ } __raw_writel(3, addr + 0x100); - wmb(); + wmb(); /* drain writebuffer */ } static inline void alchemy_uart_disable(u32 uart_phys) { void __iomem *addr = (void __iomem *)KSEG1ADDR(uart_phys); + __raw_writel(0, addr + 0x100); /* UART_MOD_CNTRL */ - wmb(); + wmb(); /* drain writebuffer */ } static inline void alchemy_uart_putchar(u32 uart_phys, u8 c) @@ -223,7 +760,7 @@ static inline void alchemy_uart_putchar(u32 uart_phys, u8 c) } while (--timeout); __raw_writel(c, base + 0x04); /* tx */ - wmb(); + wmb(); /* drain writebuffer */ } /* return number of ethernet MACs on a given cputype */ @@ -240,20 +777,13 @@ static inline int alchemy_get_macs(int type) return 0; } -/* arch/mips/au1000/common/clocks.c */ -extern void set_au1x00_speed(unsigned int new_freq); -extern unsigned int get_au1x00_speed(void); -extern void set_au1x00_uart_baud_base(unsigned long new_baud_base); -extern unsigned long get_au1x00_uart_baud_base(void); -extern unsigned long au1xxx_calc_clock(void); - /* PM: arch/mips/alchemy/common/sleeper.S, power.c, irq.c */ void alchemy_sleep_au1000(void); void alchemy_sleep_au1550(void); void alchemy_sleep_au1300(void); void au_sleep(void); -/* USB: drivers/usb/host/alchemy-common.c */ +/* USB: arch/mips/alchemy/common/usb.c */ enum alchemy_usb_block { ALCHEMY_USB_OHCI0, ALCHEMY_USB_UDC0, @@ -272,6 +802,20 @@ struct alchemy_pci_platdata { unsigned long pci_cfg_clr; }; +/* The IrDA peripheral has an IRFIRSEL pin, but on the DB/PB boards it's + * not used to select FIR/SIR mode on the transceiver but as a GPIO. + * Instead a CPLD has to be told about the mode. The driver calls the + * set_phy_mode() function in addition to driving the IRFIRSEL pin. + */ +#define AU1000_IRDA_PHY_MODE_OFF 0 +#define AU1000_IRDA_PHY_MODE_SIR 1 +#define AU1000_IRDA_PHY_MODE_FIR 2 + +struct au1k_irda_platform_data { + void (*set_phy_mode)(int mode); +}; + + /* Multifunction pins: Each of these pins can either be assigned to the * GPIO controller or a on-chip peripheral. 
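A minimal sketch (illustrative, not part of the patch) of the new offset-based alchemy_rdsys()/alchemy_wrsys() accessors standing in for the removed au_readl()/au_writel() calls on absolute KSEG1 addresses; the wakeup-mask update and function name are only an example:

	/* Old style (removed): au_writel(au_readl(SYS_WAKEMSK) | bit, SYS_WAKEMSK); */
	static void example_enable_gpio_wakeup(int gpio)
	{
		unsigned long msk = alchemy_rdsys(AU1000_SYS_WAKEMSK);

		msk |= SYS_WAKEMSK_GPIO(gpio);
		alchemy_wrsys(msk, AU1000_SYS_WAKEMSK);
	}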
* Call "au1300_pinfunc_to_dev()" or "au1300_pinfunc_to_gpio()" to @@ -344,20 +888,6 @@ enum au1300_vss_block { extern void au1300_vss_block_control(int block, int enable); - -/* SOC Interrupt numbers */ -/* Au1000-style (IC0/1): 2 controllers with 32 sources each */ -#define AU1000_INTC0_INT_BASE (MIPS_CPU_IRQ_BASE + 8) -#define AU1000_INTC0_INT_LAST (AU1000_INTC0_INT_BASE + 31) -#define AU1000_INTC1_INT_BASE (AU1000_INTC0_INT_LAST + 1) -#define AU1000_INTC1_INT_LAST (AU1000_INTC1_INT_BASE + 31) -#define AU1000_MAX_INTR AU1000_INTC1_INT_LAST - -/* Au1300-style (GPIC): 1 controller with up to 128 sources */ -#define ALCHEMY_GPIC_INT_BASE (MIPS_CPU_IRQ_BASE + 8) -#define ALCHEMY_GPIC_INT_NUM 128 -#define ALCHEMY_GPIC_INT_LAST (ALCHEMY_GPIC_INT_BASE + ALCHEMY_GPIC_INT_NUM - 1) - enum soc_au1000_ints { AU1000_FIRST_INT = AU1000_INTC0_INT_BASE, AU1000_UART0_INT = AU1000_FIRST_INT, @@ -678,885 +1208,4 @@ enum soc_au1200_ints { #endif /* !defined (_LANGUAGE_ASSEMBLY) */ -/* Au1300 peripheral interrupt numbers */ -#define AU1300_FIRST_INT (ALCHEMY_GPIC_INT_BASE) -#define AU1300_UART1_INT (AU1300_FIRST_INT + 17) -#define AU1300_UART2_INT (AU1300_FIRST_INT + 25) -#define AU1300_UART3_INT (AU1300_FIRST_INT + 27) -#define AU1300_SD1_INT (AU1300_FIRST_INT + 32) -#define AU1300_SD2_INT (AU1300_FIRST_INT + 38) -#define AU1300_PSC0_INT (AU1300_FIRST_INT + 48) -#define AU1300_PSC1_INT (AU1300_FIRST_INT + 52) -#define AU1300_PSC2_INT (AU1300_FIRST_INT + 56) -#define AU1300_PSC3_INT (AU1300_FIRST_INT + 60) -#define AU1300_NAND_INT (AU1300_FIRST_INT + 62) -#define AU1300_DDMA_INT (AU1300_FIRST_INT + 75) -#define AU1300_MMU_INT (AU1300_FIRST_INT + 76) -#define AU1300_MPU_INT (AU1300_FIRST_INT + 77) -#define AU1300_GPU_INT (AU1300_FIRST_INT + 78) -#define AU1300_UDMA_INT (AU1300_FIRST_INT + 79) -#define AU1300_TOY_INT (AU1300_FIRST_INT + 80) -#define AU1300_TOY_MATCH0_INT (AU1300_FIRST_INT + 81) -#define AU1300_TOY_MATCH1_INT (AU1300_FIRST_INT + 82) -#define AU1300_TOY_MATCH2_INT (AU1300_FIRST_INT + 83) -#define AU1300_RTC_INT (AU1300_FIRST_INT + 84) -#define AU1300_RTC_MATCH0_INT (AU1300_FIRST_INT + 85) -#define AU1300_RTC_MATCH1_INT (AU1300_FIRST_INT + 86) -#define AU1300_RTC_MATCH2_INT (AU1300_FIRST_INT + 87) -#define AU1300_UART0_INT (AU1300_FIRST_INT + 88) -#define AU1300_SD0_INT (AU1300_FIRST_INT + 89) -#define AU1300_USB_INT (AU1300_FIRST_INT + 90) -#define AU1300_LCD_INT (AU1300_FIRST_INT + 91) -#define AU1300_BSA_INT (AU1300_FIRST_INT + 92) -#define AU1300_MPE_INT (AU1300_FIRST_INT + 93) -#define AU1300_ITE_INT (AU1300_FIRST_INT + 94) -#define AU1300_AES_INT (AU1300_FIRST_INT + 95) -#define AU1300_CIM_INT (AU1300_FIRST_INT + 96) - -/**********************************************************************/ - -/* - * Physical base addresses for integrated peripherals - * 0..au1000 1..au1500 2..au1100 3..au1550 4..au1200 5..au1300 - */ - -#define AU1000_AC97_PHYS_ADDR 0x10000000 /* 012 */ -#define AU1300_ROM_PHYS_ADDR 0x10000000 /* 5 */ -#define AU1300_OTP_PHYS_ADDR 0x10002000 /* 5 */ -#define AU1300_VSS_PHYS_ADDR 0x10003000 /* 5 */ -#define AU1300_UART0_PHYS_ADDR 0x10100000 /* 5 */ -#define AU1300_UART1_PHYS_ADDR 0x10101000 /* 5 */ -#define AU1300_UART2_PHYS_ADDR 0x10102000 /* 5 */ -#define AU1300_UART3_PHYS_ADDR 0x10103000 /* 5 */ -#define AU1000_USB_OHCI_PHYS_ADDR 0x10100000 /* 012 */ -#define AU1000_USB_UDC_PHYS_ADDR 0x10200000 /* 0123 */ -#define AU1300_GPIC_PHYS_ADDR 0x10200000 /* 5 */ -#define AU1000_IRDA_PHYS_ADDR 0x10300000 /* 02 */ -#define AU1200_AES_PHYS_ADDR 0x10300000 /* 45 */ -#define 
AU1000_IC0_PHYS_ADDR 0x10400000 /* 01234 */ -#define AU1300_GPU_PHYS_ADDR 0x10500000 /* 5 */ -#define AU1000_MAC0_PHYS_ADDR 0x10500000 /* 023 */ -#define AU1000_MAC1_PHYS_ADDR 0x10510000 /* 023 */ -#define AU1000_MACEN_PHYS_ADDR 0x10520000 /* 023 */ -#define AU1100_SD0_PHYS_ADDR 0x10600000 /* 245 */ -#define AU1300_SD1_PHYS_ADDR 0x10601000 /* 5 */ -#define AU1300_SD2_PHYS_ADDR 0x10602000 /* 5 */ -#define AU1100_SD1_PHYS_ADDR 0x10680000 /* 24 */ -#define AU1300_SYS_PHYS_ADDR 0x10900000 /* 5 */ -#define AU1550_PSC2_PHYS_ADDR 0x10A00000 /* 3 */ -#define AU1550_PSC3_PHYS_ADDR 0x10B00000 /* 3 */ -#define AU1300_PSC0_PHYS_ADDR 0x10A00000 /* 5 */ -#define AU1300_PSC1_PHYS_ADDR 0x10A01000 /* 5 */ -#define AU1300_PSC2_PHYS_ADDR 0x10A02000 /* 5 */ -#define AU1300_PSC3_PHYS_ADDR 0x10A03000 /* 5 */ -#define AU1000_I2S_PHYS_ADDR 0x11000000 /* 02 */ -#define AU1500_MAC0_PHYS_ADDR 0x11500000 /* 1 */ -#define AU1500_MAC1_PHYS_ADDR 0x11510000 /* 1 */ -#define AU1500_MACEN_PHYS_ADDR 0x11520000 /* 1 */ -#define AU1000_UART0_PHYS_ADDR 0x11100000 /* 01234 */ -#define AU1200_SWCNT_PHYS_ADDR 0x1110010C /* 4 */ -#define AU1000_UART1_PHYS_ADDR 0x11200000 /* 0234 */ -#define AU1000_UART2_PHYS_ADDR 0x11300000 /* 0 */ -#define AU1000_UART3_PHYS_ADDR 0x11400000 /* 0123 */ -#define AU1000_SSI0_PHYS_ADDR 0x11600000 /* 02 */ -#define AU1000_SSI1_PHYS_ADDR 0x11680000 /* 02 */ -#define AU1500_GPIO2_PHYS_ADDR 0x11700000 /* 1234 */ -#define AU1000_IC1_PHYS_ADDR 0x11800000 /* 01234 */ -#define AU1000_SYS_PHYS_ADDR 0x11900000 /* 012345 */ -#define AU1550_PSC0_PHYS_ADDR 0x11A00000 /* 34 */ -#define AU1550_PSC1_PHYS_ADDR 0x11B00000 /* 34 */ -#define AU1000_MEM_PHYS_ADDR 0x14000000 /* 01234 */ -#define AU1000_STATIC_MEM_PHYS_ADDR 0x14001000 /* 01234 */ -#define AU1300_UDMA_PHYS_ADDR 0x14001800 /* 5 */ -#define AU1000_DMA_PHYS_ADDR 0x14002000 /* 012 */ -#define AU1550_DBDMA_PHYS_ADDR 0x14002000 /* 345 */ -#define AU1550_DBDMA_CONF_PHYS_ADDR 0x14003000 /* 345 */ -#define AU1000_MACDMA0_PHYS_ADDR 0x14004000 /* 0123 */ -#define AU1000_MACDMA1_PHYS_ADDR 0x14004200 /* 0123 */ -#define AU1200_CIM_PHYS_ADDR 0x14004000 /* 45 */ -#define AU1500_PCI_PHYS_ADDR 0x14005000 /* 13 */ -#define AU1550_PE_PHYS_ADDR 0x14008000 /* 3 */ -#define AU1200_MAEBE_PHYS_ADDR 0x14010000 /* 4 */ -#define AU1200_MAEFE_PHYS_ADDR 0x14012000 /* 4 */ -#define AU1300_MAEITE_PHYS_ADDR 0x14010000 /* 5 */ -#define AU1300_MAEMPE_PHYS_ADDR 0x14014000 /* 5 */ -#define AU1550_USB_OHCI_PHYS_ADDR 0x14020000 /* 3 */ -#define AU1200_USB_CTL_PHYS_ADDR 0x14020000 /* 4 */ -#define AU1200_USB_OTG_PHYS_ADDR 0x14020020 /* 4 */ -#define AU1200_USB_OHCI_PHYS_ADDR 0x14020100 /* 4 */ -#define AU1200_USB_EHCI_PHYS_ADDR 0x14020200 /* 4 */ -#define AU1200_USB_UDC_PHYS_ADDR 0x14022000 /* 4 */ -#define AU1300_USB_EHCI_PHYS_ADDR 0x14020000 /* 5 */ -#define AU1300_USB_OHCI0_PHYS_ADDR 0x14020400 /* 5 */ -#define AU1300_USB_OHCI1_PHYS_ADDR 0x14020800 /* 5 */ -#define AU1300_USB_CTL_PHYS_ADDR 0x14021000 /* 5 */ -#define AU1300_USB_OTG_PHYS_ADDR 0x14022000 /* 5 */ -#define AU1300_MAEBSA_PHYS_ADDR 0x14030000 /* 5 */ -#define AU1100_LCD_PHYS_ADDR 0x15000000 /* 2 */ -#define AU1200_LCD_PHYS_ADDR 0x15000000 /* 45 */ -#define AU1500_PCI_MEM_PHYS_ADDR 0x400000000ULL /* 13 */ -#define AU1500_PCI_IO_PHYS_ADDR 0x500000000ULL /* 13 */ -#define AU1500_PCI_CONFIG0_PHYS_ADDR 0x600000000ULL /* 13 */ -#define AU1500_PCI_CONFIG1_PHYS_ADDR 0x680000000ULL /* 13 */ -#define AU1000_PCMCIA_IO_PHYS_ADDR 0xF00000000ULL /* 012345 */ -#define AU1000_PCMCIA_ATTR_PHYS_ADDR 0xF40000000ULL /* 012345 */ -#define 
AU1000_PCMCIA_MEM_PHYS_ADDR 0xF80000000ULL /* 012345 */ - -/**********************************************************************/ - - -/* - * Au1300 GPIO+INT controller (GPIC) register offsets and bits - * Registers are 128bits (0x10 bytes), divided into 4 "banks". - */ -#define AU1300_GPIC_PINVAL 0x0000 -#define AU1300_GPIC_PINVALCLR 0x0010 -#define AU1300_GPIC_IPEND 0x0020 -#define AU1300_GPIC_PRIENC 0x0030 -#define AU1300_GPIC_IEN 0x0040 /* int_mask in manual */ -#define AU1300_GPIC_IDIS 0x0050 /* int_maskclr in manual */ -#define AU1300_GPIC_DMASEL 0x0060 -#define AU1300_GPIC_DEVSEL 0x0080 -#define AU1300_GPIC_DEVCLR 0x0090 -#define AU1300_GPIC_RSTVAL 0x00a0 -/* pin configuration space. one 32bit register for up to 128 IRQs */ -#define AU1300_GPIC_PINCFG 0x1000 - -#define GPIC_GPIO_TO_BIT(gpio) \ - (1 << ((gpio) & 0x1f)) - -#define GPIC_GPIO_BANKOFF(gpio) \ - (((gpio) >> 5) * 4) - -/* Pin Control bits: who owns the pin, what does it do */ -#define GPIC_CFG_PC_GPIN 0 -#define GPIC_CFG_PC_DEV 1 -#define GPIC_CFG_PC_GPOLOW 2 -#define GPIC_CFG_PC_GPOHIGH 3 -#define GPIC_CFG_PC_MASK 3 - -/* assign pin to MIPS IRQ line */ -#define GPIC_CFG_IL_SET(x) (((x) & 3) << 2) -#define GPIC_CFG_IL_MASK (3 << 2) - -/* pin interrupt type setup */ -#define GPIC_CFG_IC_OFF (0 << 4) -#define GPIC_CFG_IC_LEVEL_LOW (1 << 4) -#define GPIC_CFG_IC_LEVEL_HIGH (2 << 4) -#define GPIC_CFG_IC_EDGE_FALL (5 << 4) -#define GPIC_CFG_IC_EDGE_RISE (6 << 4) -#define GPIC_CFG_IC_EDGE_BOTH (7 << 4) -#define GPIC_CFG_IC_MASK (7 << 4) - -/* allow interrupt to wake cpu from 'wait' */ -#define GPIC_CFG_IDLEWAKE (1 << 7) - -/***********************************************************************/ - -/* Au1000 SDRAM memory controller register offsets */ -#define AU1000_MEM_SDMODE0 0x0000 -#define AU1000_MEM_SDMODE1 0x0004 -#define AU1000_MEM_SDMODE2 0x0008 -#define AU1000_MEM_SDADDR0 0x000C -#define AU1000_MEM_SDADDR1 0x0010 -#define AU1000_MEM_SDADDR2 0x0014 -#define AU1000_MEM_SDREFCFG 0x0018 -#define AU1000_MEM_SDPRECMD 0x001C -#define AU1000_MEM_SDAUTOREF 0x0020 -#define AU1000_MEM_SDWRMD0 0x0024 -#define AU1000_MEM_SDWRMD1 0x0028 -#define AU1000_MEM_SDWRMD2 0x002C -#define AU1000_MEM_SDSLEEP 0x0030 -#define AU1000_MEM_SDSMCKE 0x0034 - -/* MEM_SDMODE register content definitions */ -#define MEM_SDMODE_F (1 << 22) -#define MEM_SDMODE_SR (1 << 21) -#define MEM_SDMODE_BS (1 << 20) -#define MEM_SDMODE_RS (3 << 18) -#define MEM_SDMODE_CS (7 << 15) -#define MEM_SDMODE_TRAS (15 << 11) -#define MEM_SDMODE_TMRD (3 << 9) -#define MEM_SDMODE_TWR (3 << 7) -#define MEM_SDMODE_TRP (3 << 5) -#define MEM_SDMODE_TRCD (3 << 3) -#define MEM_SDMODE_TCL (7 << 0) - -#define MEM_SDMODE_BS_2Bank (0 << 20) -#define MEM_SDMODE_BS_4Bank (1 << 20) -#define MEM_SDMODE_RS_11Row (0 << 18) -#define MEM_SDMODE_RS_12Row (1 << 18) -#define MEM_SDMODE_RS_13Row (2 << 18) -#define MEM_SDMODE_RS_N(N) ((N) << 18) -#define MEM_SDMODE_CS_7Col (0 << 15) -#define MEM_SDMODE_CS_8Col (1 << 15) -#define MEM_SDMODE_CS_9Col (2 << 15) -#define MEM_SDMODE_CS_10Col (3 << 15) -#define MEM_SDMODE_CS_11Col (4 << 15) -#define MEM_SDMODE_CS_N(N) ((N) << 15) -#define MEM_SDMODE_TRAS_N(N) ((N) << 11) -#define MEM_SDMODE_TMRD_N(N) ((N) << 9) -#define MEM_SDMODE_TWR_N(N) ((N) << 7) -#define MEM_SDMODE_TRP_N(N) ((N) << 5) -#define MEM_SDMODE_TRCD_N(N) ((N) << 3) -#define MEM_SDMODE_TCL_N(N) ((N) << 0) - -/* MEM_SDADDR register contents definitions */ -#define MEM_SDADDR_E (1 << 20) -#define MEM_SDADDR_CSBA (0x03FF << 10) -#define MEM_SDADDR_CSMASK (0x03FF << 0) -#define 
MEM_SDADDR_CSBA_N(N) ((N) & (0x03FF << 22) >> 12) -#define MEM_SDADDR_CSMASK_N(N) ((N)&(0x03FF << 22) >> 22) - -/* MEM_SDREFCFG register content definitions */ -#define MEM_SDREFCFG_TRC (15 << 28) -#define MEM_SDREFCFG_TRPM (3 << 26) -#define MEM_SDREFCFG_E (1 << 25) -#define MEM_SDREFCFG_RE (0x1ffffff << 0) -#define MEM_SDREFCFG_TRC_N(N) ((N) << MEM_SDREFCFG_TRC) -#define MEM_SDREFCFG_TRPM_N(N) ((N) << MEM_SDREFCFG_TRPM) -#define MEM_SDREFCFG_REF_N(N) (N) - -/* Au1550 SDRAM Register Offsets */ -#define AU1550_MEM_SDMODE0 0x0800 -#define AU1550_MEM_SDMODE1 0x0808 -#define AU1550_MEM_SDMODE2 0x0810 -#define AU1550_MEM_SDADDR0 0x0820 -#define AU1550_MEM_SDADDR1 0x0828 -#define AU1550_MEM_SDADDR2 0x0830 -#define AU1550_MEM_SDCONFIGA 0x0840 -#define AU1550_MEM_SDCONFIGB 0x0848 -#define AU1550_MEM_SDSTAT 0x0850 -#define AU1550_MEM_SDERRADDR 0x0858 -#define AU1550_MEM_SDSTRIDE0 0x0860 -#define AU1550_MEM_SDSTRIDE1 0x0868 -#define AU1550_MEM_SDSTRIDE2 0x0870 -#define AU1550_MEM_SDWRMD0 0x0880 -#define AU1550_MEM_SDWRMD1 0x0888 -#define AU1550_MEM_SDWRMD2 0x0890 -#define AU1550_MEM_SDPRECMD 0x08C0 -#define AU1550_MEM_SDAUTOREF 0x08C8 -#define AU1550_MEM_SDSREF 0x08D0 -#define AU1550_MEM_SDSLEEP MEM_SDSREF - -/* Static Bus Controller */ -#define MEM_STCFG0 0xB4001000 -#define MEM_STTIME0 0xB4001004 -#define MEM_STADDR0 0xB4001008 - -#define MEM_STCFG1 0xB4001010 -#define MEM_STTIME1 0xB4001014 -#define MEM_STADDR1 0xB4001018 - -#define MEM_STCFG2 0xB4001020 -#define MEM_STTIME2 0xB4001024 -#define MEM_STADDR2 0xB4001028 - -#define MEM_STCFG3 0xB4001030 -#define MEM_STTIME3 0xB4001034 -#define MEM_STADDR3 0xB4001038 - -#define MEM_STNDCTL 0xB4001100 -#define MEM_STSTAT 0xB4001104 - -#define MEM_STNAND_CMD 0x0 -#define MEM_STNAND_ADDR 0x4 -#define MEM_STNAND_DATA 0x20 - - -/* Programmable Counters 0 and 1 */ -#define SYS_BASE 0xB1900000 -#define SYS_COUNTER_CNTRL (SYS_BASE + 0x14) -# define SYS_CNTRL_E1S (1 << 23) -# define SYS_CNTRL_T1S (1 << 20) -# define SYS_CNTRL_M21 (1 << 19) -# define SYS_CNTRL_M11 (1 << 18) -# define SYS_CNTRL_M01 (1 << 17) -# define SYS_CNTRL_C1S (1 << 16) -# define SYS_CNTRL_BP (1 << 14) -# define SYS_CNTRL_EN1 (1 << 13) -# define SYS_CNTRL_BT1 (1 << 12) -# define SYS_CNTRL_EN0 (1 << 11) -# define SYS_CNTRL_BT0 (1 << 10) -# define SYS_CNTRL_E0 (1 << 8) -# define SYS_CNTRL_E0S (1 << 7) -# define SYS_CNTRL_32S (1 << 5) -# define SYS_CNTRL_T0S (1 << 4) -# define SYS_CNTRL_M20 (1 << 3) -# define SYS_CNTRL_M10 (1 << 2) -# define SYS_CNTRL_M00 (1 << 1) -# define SYS_CNTRL_C0S (1 << 0) - -/* Programmable Counter 0 Registers */ -#define SYS_TOYTRIM (SYS_BASE + 0) -#define SYS_TOYWRITE (SYS_BASE + 4) -#define SYS_TOYMATCH0 (SYS_BASE + 8) -#define SYS_TOYMATCH1 (SYS_BASE + 0xC) -#define SYS_TOYMATCH2 (SYS_BASE + 0x10) -#define SYS_TOYREAD (SYS_BASE + 0x40) - -/* Programmable Counter 1 Registers */ -#define SYS_RTCTRIM (SYS_BASE + 0x44) -#define SYS_RTCWRITE (SYS_BASE + 0x48) -#define SYS_RTCMATCH0 (SYS_BASE + 0x4C) -#define SYS_RTCMATCH1 (SYS_BASE + 0x50) -#define SYS_RTCMATCH2 (SYS_BASE + 0x54) -#define SYS_RTCREAD (SYS_BASE + 0x58) - -/* I2S Controller */ -#define I2S_DATA 0xB1000000 -# define I2S_DATA_MASK 0xffffff -#define I2S_CONFIG 0xB1000004 -# define I2S_CONFIG_XU (1 << 25) -# define I2S_CONFIG_XO (1 << 24) -# define I2S_CONFIG_RU (1 << 23) -# define I2S_CONFIG_RO (1 << 22) -# define I2S_CONFIG_TR (1 << 21) -# define I2S_CONFIG_TE (1 << 20) -# define I2S_CONFIG_TF (1 << 19) -# define I2S_CONFIG_RR (1 << 18) -# define I2S_CONFIG_RE (1 << 17) -# define I2S_CONFIG_RF (1 << 16) -# 
define I2S_CONFIG_PD (1 << 11) -# define I2S_CONFIG_LB (1 << 10) -# define I2S_CONFIG_IC (1 << 9) -# define I2S_CONFIG_FM_BIT 7 -# define I2S_CONFIG_FM_MASK (0x3 << I2S_CONFIG_FM_BIT) -# define I2S_CONFIG_FM_I2S (0x0 << I2S_CONFIG_FM_BIT) -# define I2S_CONFIG_FM_LJ (0x1 << I2S_CONFIG_FM_BIT) -# define I2S_CONFIG_FM_RJ (0x2 << I2S_CONFIG_FM_BIT) -# define I2S_CONFIG_TN (1 << 6) -# define I2S_CONFIG_RN (1 << 5) -# define I2S_CONFIG_SZ_BIT 0 -# define I2S_CONFIG_SZ_MASK (0x1F << I2S_CONFIG_SZ_BIT) - -#define I2S_CONTROL 0xB1000008 -# define I2S_CONTROL_D (1 << 1) -# define I2S_CONTROL_CE (1 << 0) - - -/* Ethernet Controllers */ - -/* 4 byte offsets from AU1000_ETH_BASE */ -#define MAC_CONTROL 0x0 -# define MAC_RX_ENABLE (1 << 2) -# define MAC_TX_ENABLE (1 << 3) -# define MAC_DEF_CHECK (1 << 5) -# define MAC_SET_BL(X) (((X) & 0x3) << 6) -# define MAC_AUTO_PAD (1 << 8) -# define MAC_DISABLE_RETRY (1 << 10) -# define MAC_DISABLE_BCAST (1 << 11) -# define MAC_LATE_COL (1 << 12) -# define MAC_HASH_MODE (1 << 13) -# define MAC_HASH_ONLY (1 << 15) -# define MAC_PASS_ALL (1 << 16) -# define MAC_INVERSE_FILTER (1 << 17) -# define MAC_PROMISCUOUS (1 << 18) -# define MAC_PASS_ALL_MULTI (1 << 19) -# define MAC_FULL_DUPLEX (1 << 20) -# define MAC_NORMAL_MODE 0 -# define MAC_INT_LOOPBACK (1 << 21) -# define MAC_EXT_LOOPBACK (1 << 22) -# define MAC_DISABLE_RX_OWN (1 << 23) -# define MAC_BIG_ENDIAN (1 << 30) -# define MAC_RX_ALL (1 << 31) -#define MAC_ADDRESS_HIGH 0x4 -#define MAC_ADDRESS_LOW 0x8 -#define MAC_MCAST_HIGH 0xC -#define MAC_MCAST_LOW 0x10 -#define MAC_MII_CNTRL 0x14 -# define MAC_MII_BUSY (1 << 0) -# define MAC_MII_READ 0 -# define MAC_MII_WRITE (1 << 1) -# define MAC_SET_MII_SELECT_REG(X) (((X) & 0x1f) << 6) -# define MAC_SET_MII_SELECT_PHY(X) (((X) & 0x1f) << 11) -#define MAC_MII_DATA 0x18 -#define MAC_FLOW_CNTRL 0x1C -# define MAC_FLOW_CNTRL_BUSY (1 << 0) -# define MAC_FLOW_CNTRL_ENABLE (1 << 1) -# define MAC_PASS_CONTROL (1 << 2) -# define MAC_SET_PAUSE(X) (((X) & 0xffff) << 16) -#define MAC_VLAN1_TAG 0x20 -#define MAC_VLAN2_TAG 0x24 - -/* Ethernet Controller Enable */ - -# define MAC_EN_CLOCK_ENABLE (1 << 0) -# define MAC_EN_RESET0 (1 << 1) -# define MAC_EN_TOSS (0 << 2) -# define MAC_EN_CACHEABLE (1 << 3) -# define MAC_EN_RESET1 (1 << 4) -# define MAC_EN_RESET2 (1 << 5) -# define MAC_DMA_RESET (1 << 6) - -/* Ethernet Controller DMA Channels */ - -#define MAC0_TX_DMA_ADDR 0xB4004000 -#define MAC1_TX_DMA_ADDR 0xB4004200 -/* offsets from MAC_TX_RING_ADDR address */ -#define MAC_TX_BUFF0_STATUS 0x0 -# define TX_FRAME_ABORTED (1 << 0) -# define TX_JAB_TIMEOUT (1 << 1) -# define TX_NO_CARRIER (1 << 2) -# define TX_LOSS_CARRIER (1 << 3) -# define TX_EXC_DEF (1 << 4) -# define TX_LATE_COLL_ABORT (1 << 5) -# define TX_EXC_COLL (1 << 6) -# define TX_UNDERRUN (1 << 7) -# define TX_DEFERRED (1 << 8) -# define TX_LATE_COLL (1 << 9) -# define TX_COLL_CNT_MASK (0xF << 10) -# define TX_PKT_RETRY (1 << 31) -#define MAC_TX_BUFF0_ADDR 0x4 -# define TX_DMA_ENABLE (1 << 0) -# define TX_T_DONE (1 << 1) -# define TX_GET_DMA_BUFFER(X) (((X) >> 2) & 0x3) -#define MAC_TX_BUFF0_LEN 0x8 -#define MAC_TX_BUFF1_STATUS 0x10 -#define MAC_TX_BUFF1_ADDR 0x14 -#define MAC_TX_BUFF1_LEN 0x18 -#define MAC_TX_BUFF2_STATUS 0x20 -#define MAC_TX_BUFF2_ADDR 0x24 -#define MAC_TX_BUFF2_LEN 0x28 -#define MAC_TX_BUFF3_STATUS 0x30 -#define MAC_TX_BUFF3_ADDR 0x34 -#define MAC_TX_BUFF3_LEN 0x38 - -#define MAC0_RX_DMA_ADDR 0xB4004100 -#define MAC1_RX_DMA_ADDR 0xB4004300 -/* offsets from MAC_RX_RING_ADDR */ -#define MAC_RX_BUFF0_STATUS 0x0 
-# define RX_FRAME_LEN_MASK 0x3fff -# define RX_WDOG_TIMER (1 << 14) -# define RX_RUNT (1 << 15) -# define RX_OVERLEN (1 << 16) -# define RX_COLL (1 << 17) -# define RX_ETHER (1 << 18) -# define RX_MII_ERROR (1 << 19) -# define RX_DRIBBLING (1 << 20) -# define RX_CRC_ERROR (1 << 21) -# define RX_VLAN1 (1 << 22) -# define RX_VLAN2 (1 << 23) -# define RX_LEN_ERROR (1 << 24) -# define RX_CNTRL_FRAME (1 << 25) -# define RX_U_CNTRL_FRAME (1 << 26) -# define RX_MCAST_FRAME (1 << 27) -# define RX_BCAST_FRAME (1 << 28) -# define RX_FILTER_FAIL (1 << 29) -# define RX_PACKET_FILTER (1 << 30) -# define RX_MISSED_FRAME (1 << 31) - -# define RX_ERROR (RX_WDOG_TIMER | RX_RUNT | RX_OVERLEN | \ - RX_COLL | RX_MII_ERROR | RX_CRC_ERROR | \ - RX_LEN_ERROR | RX_U_CNTRL_FRAME | RX_MISSED_FRAME) -#define MAC_RX_BUFF0_ADDR 0x4 -# define RX_DMA_ENABLE (1 << 0) -# define RX_T_DONE (1 << 1) -# define RX_GET_DMA_BUFFER(X) (((X) >> 2) & 0x3) -# define RX_SET_BUFF_ADDR(X) ((X) & 0xffffffc0) -#define MAC_RX_BUFF1_STATUS 0x10 -#define MAC_RX_BUFF1_ADDR 0x14 -#define MAC_RX_BUFF2_STATUS 0x20 -#define MAC_RX_BUFF2_ADDR 0x24 -#define MAC_RX_BUFF3_STATUS 0x30 -#define MAC_RX_BUFF3_ADDR 0x34 - -/* SSIO */ -#define SSI0_STATUS 0xB1600000 -# define SSI_STATUS_BF (1 << 4) -# define SSI_STATUS_OF (1 << 3) -# define SSI_STATUS_UF (1 << 2) -# define SSI_STATUS_D (1 << 1) -# define SSI_STATUS_B (1 << 0) -#define SSI0_INT 0xB1600004 -# define SSI_INT_OI (1 << 3) -# define SSI_INT_UI (1 << 2) -# define SSI_INT_DI (1 << 1) -#define SSI0_INT_ENABLE 0xB1600008 -# define SSI_INTE_OIE (1 << 3) -# define SSI_INTE_UIE (1 << 2) -# define SSI_INTE_DIE (1 << 1) -#define SSI0_CONFIG 0xB1600020 -# define SSI_CONFIG_AO (1 << 24) -# define SSI_CONFIG_DO (1 << 23) -# define SSI_CONFIG_ALEN_BIT 20 -# define SSI_CONFIG_ALEN_MASK (0x7 << 20) -# define SSI_CONFIG_DLEN_BIT 16 -# define SSI_CONFIG_DLEN_MASK (0x7 << 16) -# define SSI_CONFIG_DD (1 << 11) -# define SSI_CONFIG_AD (1 << 10) -# define SSI_CONFIG_BM_BIT 8 -# define SSI_CONFIG_BM_MASK (0x3 << 8) -# define SSI_CONFIG_CE (1 << 7) -# define SSI_CONFIG_DP (1 << 6) -# define SSI_CONFIG_DL (1 << 5) -# define SSI_CONFIG_EP (1 << 4) -#define SSI0_ADATA 0xB1600024 -# define SSI_AD_D (1 << 24) -# define SSI_AD_ADDR_BIT 16 -# define SSI_AD_ADDR_MASK (0xff << 16) -# define SSI_AD_DATA_BIT 0 -# define SSI_AD_DATA_MASK (0xfff << 0) -#define SSI0_CLKDIV 0xB1600028 -#define SSI0_CONTROL 0xB1600100 -# define SSI_CONTROL_CD (1 << 1) -# define SSI_CONTROL_E (1 << 0) - -/* SSI1 */ -#define SSI1_STATUS 0xB1680000 -#define SSI1_INT 0xB1680004 -#define SSI1_INT_ENABLE 0xB1680008 -#define SSI1_CONFIG 0xB1680020 -#define SSI1_ADATA 0xB1680024 -#define SSI1_CLKDIV 0xB1680028 -#define SSI1_ENABLE 0xB1680100 - -/* - * Register content definitions - */ -#define SSI_STATUS_BF (1 << 4) -#define SSI_STATUS_OF (1 << 3) -#define SSI_STATUS_UF (1 << 2) -#define SSI_STATUS_D (1 << 1) -#define SSI_STATUS_B (1 << 0) - -/* SSI_INT */ -#define SSI_INT_OI (1 << 3) -#define SSI_INT_UI (1 << 2) -#define SSI_INT_DI (1 << 1) - -/* SSI_INTEN */ -#define SSI_INTEN_OIE (1 << 3) -#define SSI_INTEN_UIE (1 << 2) -#define SSI_INTEN_DIE (1 << 1) - -#define SSI_CONFIG_AO (1 << 24) -#define SSI_CONFIG_DO (1 << 23) -#define SSI_CONFIG_ALEN (7 << 20) -#define SSI_CONFIG_DLEN (15 << 16) -#define SSI_CONFIG_DD (1 << 11) -#define SSI_CONFIG_AD (1 << 10) -#define SSI_CONFIG_BM (3 << 8) -#define SSI_CONFIG_CE (1 << 7) -#define SSI_CONFIG_DP (1 << 6) -#define SSI_CONFIG_DL (1 << 5) -#define SSI_CONFIG_EP (1 << 4) -#define SSI_CONFIG_ALEN_N(N) ((N-1) << 20) 
-#define SSI_CONFIG_DLEN_N(N) ((N-1) << 16) -#define SSI_CONFIG_BM_HI (0 << 8) -#define SSI_CONFIG_BM_LO (1 << 8) -#define SSI_CONFIG_BM_CY (2 << 8) - -#define SSI_ADATA_D (1 << 24) -#define SSI_ADATA_ADDR (0xFF << 16) -#define SSI_ADATA_DATA 0x0FFF -#define SSI_ADATA_ADDR_N(N) (N << 16) - -#define SSI_ENABLE_CD (1 << 1) -#define SSI_ENABLE_E (1 << 0) - - -/* - * The IrDA peripheral has an IRFIRSEL pin, but on the DB/PB boards it's not - * used to select FIR/SIR mode on the transceiver but as a GPIO. Instead a - * CPLD has to be told about the mode. - */ -#define AU1000_IRDA_PHY_MODE_OFF 0 -#define AU1000_IRDA_PHY_MODE_SIR 1 -#define AU1000_IRDA_PHY_MODE_FIR 2 - -struct au1k_irda_platform_data { - void(*set_phy_mode)(int mode); -}; - - -/* GPIO */ -#define SYS_PINFUNC 0xB190002C -# define SYS_PF_USB (1 << 15) /* 2nd USB device/host */ -# define SYS_PF_U3 (1 << 14) /* GPIO23/U3TXD */ -# define SYS_PF_U2 (1 << 13) /* GPIO22/U2TXD */ -# define SYS_PF_U1 (1 << 12) /* GPIO21/U1TXD */ -# define SYS_PF_SRC (1 << 11) /* GPIO6/SROMCKE */ -# define SYS_PF_CK5 (1 << 10) /* GPIO3/CLK5 */ -# define SYS_PF_CK4 (1 << 9) /* GPIO2/CLK4 */ -# define SYS_PF_IRF (1 << 8) /* GPIO15/IRFIRSEL */ -# define SYS_PF_UR3 (1 << 7) /* GPIO[14:9]/UART3 */ -# define SYS_PF_I2D (1 << 6) /* GPIO8/I2SDI */ -# define SYS_PF_I2S (1 << 5) /* I2S/GPIO[29:31] */ -# define SYS_PF_NI2 (1 << 4) /* NI2/GPIO[24:28] */ -# define SYS_PF_U0 (1 << 3) /* U0TXD/GPIO20 */ -# define SYS_PF_RD (1 << 2) /* IRTXD/GPIO19 */ -# define SYS_PF_A97 (1 << 1) /* AC97/SSL1 */ -# define SYS_PF_S0 (1 << 0) /* SSI_0/GPIO[16:18] */ - -/* Au1100 only */ -# define SYS_PF_PC (1 << 18) /* PCMCIA/GPIO[207:204] */ -# define SYS_PF_LCD (1 << 17) /* extern lcd/GPIO[203:200] */ -# define SYS_PF_CS (1 << 16) /* EXTCLK0/32KHz to gpio2 */ -# define SYS_PF_EX0 (1 << 9) /* GPIO2/clock */ - -/* Au1550 only. 
Redefines lots of pins */ -# define SYS_PF_PSC2_MASK (7 << 17) -# define SYS_PF_PSC2_AC97 0 -# define SYS_PF_PSC2_SPI 0 -# define SYS_PF_PSC2_I2S (1 << 17) -# define SYS_PF_PSC2_SMBUS (3 << 17) -# define SYS_PF_PSC2_GPIO (7 << 17) -# define SYS_PF_PSC3_MASK (7 << 20) -# define SYS_PF_PSC3_AC97 0 -# define SYS_PF_PSC3_SPI 0 -# define SYS_PF_PSC3_I2S (1 << 20) -# define SYS_PF_PSC3_SMBUS (3 << 20) -# define SYS_PF_PSC3_GPIO (7 << 20) -# define SYS_PF_PSC1_S1 (1 << 1) -# define SYS_PF_MUST_BE_SET ((1 << 5) | (1 << 2)) - -/* Au1200 only */ -#define SYS_PINFUNC_DMA (1 << 31) -#define SYS_PINFUNC_S0A (1 << 30) -#define SYS_PINFUNC_S1A (1 << 29) -#define SYS_PINFUNC_LP0 (1 << 28) -#define SYS_PINFUNC_LP1 (1 << 27) -#define SYS_PINFUNC_LD16 (1 << 26) -#define SYS_PINFUNC_LD8 (1 << 25) -#define SYS_PINFUNC_LD1 (1 << 24) -#define SYS_PINFUNC_LD0 (1 << 23) -#define SYS_PINFUNC_P1A (3 << 21) -#define SYS_PINFUNC_P1B (1 << 20) -#define SYS_PINFUNC_FS3 (1 << 19) -#define SYS_PINFUNC_P0A (3 << 17) -#define SYS_PINFUNC_CS (1 << 16) -#define SYS_PINFUNC_CIM (1 << 15) -#define SYS_PINFUNC_P1C (1 << 14) -#define SYS_PINFUNC_U1T (1 << 12) -#define SYS_PINFUNC_U1R (1 << 11) -#define SYS_PINFUNC_EX1 (1 << 10) -#define SYS_PINFUNC_EX0 (1 << 9) -#define SYS_PINFUNC_U0R (1 << 8) -#define SYS_PINFUNC_MC (1 << 7) -#define SYS_PINFUNC_S0B (1 << 6) -#define SYS_PINFUNC_S0C (1 << 5) -#define SYS_PINFUNC_P0B (1 << 4) -#define SYS_PINFUNC_U0T (1 << 3) -#define SYS_PINFUNC_S1B (1 << 2) - -/* Power Management */ -#define SYS_SCRATCH0 0xB1900018 -#define SYS_SCRATCH1 0xB190001C -#define SYS_WAKEMSK 0xB1900034 -#define SYS_ENDIAN 0xB1900038 -#define SYS_POWERCTRL 0xB190003C -#define SYS_WAKESRC 0xB190005C -#define SYS_SLPPWR 0xB1900078 -#define SYS_SLEEP 0xB190007C - -#define SYS_WAKEMSK_D2 (1 << 9) -#define SYS_WAKEMSK_M2 (1 << 8) -#define SYS_WAKEMSK_GPIO(x) (1 << (x)) - -/* Clock Controller */ -#define SYS_FREQCTRL0 0xB1900020 -# define SYS_FC_FRDIV2_BIT 22 -# define SYS_FC_FRDIV2_MASK (0xff << SYS_FC_FRDIV2_BIT) -# define SYS_FC_FE2 (1 << 21) -# define SYS_FC_FS2 (1 << 20) -# define SYS_FC_FRDIV1_BIT 12 -# define SYS_FC_FRDIV1_MASK (0xff << SYS_FC_FRDIV1_BIT) -# define SYS_FC_FE1 (1 << 11) -# define SYS_FC_FS1 (1 << 10) -# define SYS_FC_FRDIV0_BIT 2 -# define SYS_FC_FRDIV0_MASK (0xff << SYS_FC_FRDIV0_BIT) -# define SYS_FC_FE0 (1 << 1) -# define SYS_FC_FS0 (1 << 0) -#define SYS_FREQCTRL1 0xB1900024 -# define SYS_FC_FRDIV5_BIT 22 -# define SYS_FC_FRDIV5_MASK (0xff << SYS_FC_FRDIV5_BIT) -# define SYS_FC_FE5 (1 << 21) -# define SYS_FC_FS5 (1 << 20) -# define SYS_FC_FRDIV4_BIT 12 -# define SYS_FC_FRDIV4_MASK (0xff << SYS_FC_FRDIV4_BIT) -# define SYS_FC_FE4 (1 << 11) -# define SYS_FC_FS4 (1 << 10) -# define SYS_FC_FRDIV3_BIT 2 -# define SYS_FC_FRDIV3_MASK (0xff << SYS_FC_FRDIV3_BIT) -# define SYS_FC_FE3 (1 << 1) -# define SYS_FC_FS3 (1 << 0) -#define SYS_CLKSRC 0xB1900028 -# define SYS_CS_ME1_BIT 27 -# define SYS_CS_ME1_MASK (0x7 << SYS_CS_ME1_BIT) -# define SYS_CS_DE1 (1 << 26) -# define SYS_CS_CE1 (1 << 25) -# define SYS_CS_ME0_BIT 22 -# define SYS_CS_ME0_MASK (0x7 << SYS_CS_ME0_BIT) -# define SYS_CS_DE0 (1 << 21) -# define SYS_CS_CE0 (1 << 20) -# define SYS_CS_MI2_BIT 17 -# define SYS_CS_MI2_MASK (0x7 << SYS_CS_MI2_BIT) -# define SYS_CS_DI2 (1 << 16) -# define SYS_CS_CI2 (1 << 15) - -# define SYS_CS_ML_BIT 7 -# define SYS_CS_ML_MASK (0x7 << SYS_CS_ML_BIT) -# define SYS_CS_DL (1 << 6) -# define SYS_CS_CL (1 << 5) - -# define SYS_CS_MUH_BIT 12 -# define SYS_CS_MUH_MASK (0x7 << SYS_CS_MUH_BIT) -# define SYS_CS_DUH (1 << 11) -# 
define SYS_CS_CUH (1 << 10) -# define SYS_CS_MUD_BIT 7 -# define SYS_CS_MUD_MASK (0x7 << SYS_CS_MUD_BIT) -# define SYS_CS_DUD (1 << 6) -# define SYS_CS_CUD (1 << 5) - -# define SYS_CS_MIR_BIT 2 -# define SYS_CS_MIR_MASK (0x7 << SYS_CS_MIR_BIT) -# define SYS_CS_DIR (1 << 1) -# define SYS_CS_CIR (1 << 0) - -# define SYS_CS_MUX_AUX 0x1 -# define SYS_CS_MUX_FQ0 0x2 -# define SYS_CS_MUX_FQ1 0x3 -# define SYS_CS_MUX_FQ2 0x4 -# define SYS_CS_MUX_FQ3 0x5 -# define SYS_CS_MUX_FQ4 0x6 -# define SYS_CS_MUX_FQ5 0x7 -#define SYS_CPUPLL 0xB1900060 -#define SYS_AUXPLL 0xB1900064 - -/* AC97 Controller */ -#define AC97C_CONFIG 0xB0000000 -# define AC97C_RECV_SLOTS_BIT 13 -# define AC97C_RECV_SLOTS_MASK (0x3ff << AC97C_RECV_SLOTS_BIT) -# define AC97C_XMIT_SLOTS_BIT 3 -# define AC97C_XMIT_SLOTS_MASK (0x3ff << AC97C_XMIT_SLOTS_BIT) -# define AC97C_SG (1 << 2) -# define AC97C_SYNC (1 << 1) -# define AC97C_RESET (1 << 0) -#define AC97C_STATUS 0xB0000004 -# define AC97C_XU (1 << 11) -# define AC97C_XO (1 << 10) -# define AC97C_RU (1 << 9) -# define AC97C_RO (1 << 8) -# define AC97C_READY (1 << 7) -# define AC97C_CP (1 << 6) -# define AC97C_TR (1 << 5) -# define AC97C_TE (1 << 4) -# define AC97C_TF (1 << 3) -# define AC97C_RR (1 << 2) -# define AC97C_RE (1 << 1) -# define AC97C_RF (1 << 0) -#define AC97C_DATA 0xB0000008 -#define AC97C_CMD 0xB000000C -# define AC97C_WD_BIT 16 -# define AC97C_READ (1 << 7) -# define AC97C_INDEX_MASK 0x7f -#define AC97C_CNTRL 0xB0000010 -# define AC97C_RS (1 << 1) -# define AC97C_CE (1 << 0) - - -/* The PCI chip selects are outside the 32bit space, and since we can't - * just program the 36bit addresses into BARs, we have to take a chunk - * out of the 32bit space and reserve it for PCI. When these addresses - * are ioremap()ed, they'll be fixed up to the real 36bit address before - * being passed to the real ioremap function. - */ -#define ALCHEMY_PCI_MEMWIN_START (AU1500_PCI_MEM_PHYS_ADDR >> 4) -#define ALCHEMY_PCI_MEMWIN_END (ALCHEMY_PCI_MEMWIN_START + 0x0FFFFFFF) - -/* for PCI IO it's simpler because we get to do the ioremap ourselves and then - * adjust the device's resources. 
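
As an aside on the window scheme described in the removed comment above: the 36-bit Alchemy PCI memory range is packed into 32-bit resource space by shifting the base right by four (ALCHEMY_PCI_MEMWIN_START is AU1500_PCI_MEM_PHYS_ADDR >> 4), and the ioremap path is expected to undo that packing. A minimal sketch of such a fixup, assuming window offsets map one-to-one onto the real PCI memory region; the in-tree implementation lives outside this header and may differ:

/* Hypothetical helper, for illustration only. */
static inline u64 example_fixup_pci_memwin(u32 win_addr)
{
	/* Add the offset within the 32-bit window back onto the
	 * real 36-bit PCI memory base. */
	return (u64)AU1500_PCI_MEM_PHYS_ADDR +
	       ((u64)win_addr - ALCHEMY_PCI_MEMWIN_START);
}
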
- */ -#define ALCHEMY_PCI_IOWIN_START 0x00001000 -#define ALCHEMY_PCI_IOWIN_END 0x0000FFFF - -#ifdef CONFIG_PCI - -#define IOPORT_RESOURCE_START 0x00001000 /* skip legacy probing */ -#define IOPORT_RESOURCE_END 0xffffffff -#define IOMEM_RESOURCE_START 0x10000000 -#define IOMEM_RESOURCE_END 0xfffffffffULL - -#else - -/* Don't allow any legacy ports probing */ -#define IOPORT_RESOURCE_START 0x10000000 -#define IOPORT_RESOURCE_END 0xffffffff -#define IOMEM_RESOURCE_START 0x10000000 -#define IOMEM_RESOURCE_END 0xfffffffffULL - -#endif - -/* PCI controller block register offsets */ -#define PCI_REG_CMEM 0x0000 -#define PCI_REG_CONFIG 0x0004 -#define PCI_REG_B2BMASK_CCH 0x0008 -#define PCI_REG_B2BBASE0_VID 0x000C -#define PCI_REG_B2BBASE1_SID 0x0010 -#define PCI_REG_MWMASK_DEV 0x0014 -#define PCI_REG_MWBASE_REV_CCL 0x0018 -#define PCI_REG_ERR_ADDR 0x001C -#define PCI_REG_SPEC_INTACK 0x0020 -#define PCI_REG_ID 0x0100 -#define PCI_REG_STATCMD 0x0104 -#define PCI_REG_CLASSREV 0x0108 -#define PCI_REG_PARAM 0x010C -#define PCI_REG_MBAR 0x0110 -#define PCI_REG_TIMEOUT 0x0140 - -/* PCI controller block register bits */ -#define PCI_CMEM_E (1 << 28) /* enable cacheable memory */ -#define PCI_CMEM_CMBASE(x) (((x) & 0x3fff) << 14) -#define PCI_CMEM_CMMASK(x) ((x) & 0x3fff) -#define PCI_CONFIG_ERD (1 << 27) /* pci error during R/W */ -#define PCI_CONFIG_ET (1 << 26) /* error in target mode */ -#define PCI_CONFIG_EF (1 << 25) /* fatal error */ -#define PCI_CONFIG_EP (1 << 24) /* parity error */ -#define PCI_CONFIG_EM (1 << 23) /* multiple errors */ -#define PCI_CONFIG_BM (1 << 22) /* bad master error */ -#define PCI_CONFIG_PD (1 << 20) /* PCI Disable */ -#define PCI_CONFIG_BME (1 << 19) /* Byte Mask Enable for reads */ -#define PCI_CONFIG_NC (1 << 16) /* mark mem access non-coherent */ -#define PCI_CONFIG_IA (1 << 15) /* INTA# enabled (target mode) */ -#define PCI_CONFIG_IP (1 << 13) /* int on PCI_PERR# */ -#define PCI_CONFIG_IS (1 << 12) /* int on PCI_SERR# */ -#define PCI_CONFIG_IMM (1 << 11) /* int on master abort */ -#define PCI_CONFIG_ITM (1 << 10) /* int on target abort (as master) */ -#define PCI_CONFIG_ITT (1 << 9) /* int on target abort (as target) */ -#define PCI_CONFIG_IPB (1 << 8) /* int on PERR# in bus master acc */ -#define PCI_CONFIG_SIC_NO (0 << 6) /* no byte mask changes */ -#define PCI_CONFIG_SIC_BA_ADR (1 << 6) /* on byte/hw acc, invert adr bits */ -#define PCI_CONFIG_SIC_HWA_DAT (2 << 6) /* on halfword acc, swap data */ -#define PCI_CONFIG_SIC_ALL (3 << 6) /* swap data bytes on all accesses */ -#define PCI_CONFIG_ST (1 << 5) /* swap data by target transactions */ -#define PCI_CONFIG_SM (1 << 4) /* swap data from PCI ctl */ -#define PCI_CONFIG_AEN (1 << 3) /* enable internal arbiter */ -#define PCI_CONFIG_R2H (1 << 2) /* REQ2# to hi-prio arbiter */ -#define PCI_CONFIG_R1H (1 << 1) /* REQ1# to hi-prio arbiter */ -#define PCI_CONFIG_CH (1 << 0) /* PCI ctl to hi-prio arbiter */ -#define PCI_B2BMASK_B2BMASK(x) (((x) & 0xffff) << 16) -#define PCI_B2BMASK_CCH(x) ((x) & 0xffff) /* 16 upper bits of class code */ -#define PCI_B2BBASE0_VID_B0(x) (((x) & 0xffff) << 16) -#define PCI_B2BBASE0_VID_SV(x) ((x) & 0xffff) -#define PCI_B2BBASE1_SID_B1(x) (((x) & 0xffff) << 16) -#define PCI_B2BBASE1_SID_SI(x) ((x) & 0xffff) -#define PCI_MWMASKDEV_MWMASK(x) (((x) & 0xffff) << 16) -#define PCI_MWMASKDEV_DEVID(x) ((x) & 0xffff) -#define PCI_MWBASEREVCCL_BASE(x) (((x) & 0xffff) << 16) -#define PCI_MWBASEREVCCL_REV(x) (((x) & 0xff) << 8) -#define PCI_MWBASEREVCCL_CCL(x) ((x) & 0xff) -#define PCI_ID_DID(x) (((x) & 
0xffff) << 16) -#define PCI_ID_VID(x) ((x) & 0xffff) -#define PCI_STATCMD_STATUS(x) (((x) & 0xffff) << 16) -#define PCI_STATCMD_CMD(x) ((x) & 0xffff) -#define PCI_CLASSREV_CLASS(x) (((x) & 0x00ffffff) << 8) -#define PCI_CLASSREV_REV(x) ((x) & 0xff) -#define PCI_PARAM_BIST(x) (((x) & 0xff) << 24) -#define PCI_PARAM_HT(x) (((x) & 0xff) << 16) -#define PCI_PARAM_LT(x) (((x) & 0xff) << 8) -#define PCI_PARAM_CLS(x) ((x) & 0xff) -#define PCI_TIMEOUT_RETRIES(x) (((x) & 0xff) << 8) /* max retries */ -#define PCI_TIMEOUT_TO(x) ((x) & 0xff) /* target ready timeout */ - #endif diff --git a/arch/mips/include/asm/mach-au1x00/au1000_dma.h b/arch/mips/include/asm/mach-au1x00/au1000_dma.h index 7cedca5a305c..0a0cd4270c6f 100644 --- a/arch/mips/include/asm/mach-au1x00/au1000_dma.h +++ b/arch/mips/include/asm/mach-au1x00/au1000_dma.h @@ -106,7 +106,7 @@ enum { struct dma_chan { int dev_id; /* this channel is allocated if >= 0, */ /* free otherwise */ - unsigned int io; + void __iomem *io; const char *dev_str; int irq; void *irq_dev; @@ -157,7 +157,7 @@ static inline void enable_dma_buffer0(unsigned int dmanr) if (!chan) return; - au_writel(DMA_BE0, chan->io + DMA_MODE_SET); + __raw_writel(DMA_BE0, chan->io + DMA_MODE_SET); } static inline void enable_dma_buffer1(unsigned int dmanr) @@ -166,7 +166,7 @@ static inline void enable_dma_buffer1(unsigned int dmanr) if (!chan) return; - au_writel(DMA_BE1, chan->io + DMA_MODE_SET); + __raw_writel(DMA_BE1, chan->io + DMA_MODE_SET); } static inline void enable_dma_buffers(unsigned int dmanr) { @@ -174,7 +174,7 @@ static inline void enable_dma_buffers(unsigned int dmanr) if (!chan) return; - au_writel(DMA_BE0 | DMA_BE1, chan->io + DMA_MODE_SET); + __raw_writel(DMA_BE0 | DMA_BE1, chan->io + DMA_MODE_SET); } static inline void start_dma(unsigned int dmanr) @@ -183,7 +183,7 @@ static inline void start_dma(unsigned int dmanr) if (!chan) return; - au_writel(DMA_GO, chan->io + DMA_MODE_SET); + __raw_writel(DMA_GO, chan->io + DMA_MODE_SET); } #define DMA_HALT_POLL 0x5000 @@ -195,11 +195,11 @@ static inline void halt_dma(unsigned int dmanr) if (!chan) return; - au_writel(DMA_GO, chan->io + DMA_MODE_CLEAR); + __raw_writel(DMA_GO, chan->io + DMA_MODE_CLEAR); /* Poll the halt bit */ for (i = 0; i < DMA_HALT_POLL; i++) - if (au_readl(chan->io + DMA_MODE_READ) & DMA_HALT) + if (__raw_readl(chan->io + DMA_MODE_READ) & DMA_HALT) break; if (i == DMA_HALT_POLL) printk(KERN_INFO "halt_dma: HALT poll expired!\n"); @@ -215,7 +215,7 @@ static inline void disable_dma(unsigned int dmanr) halt_dma(dmanr); /* Now we can disable the buffers */ - au_writel(~DMA_GO, chan->io + DMA_MODE_CLEAR); + __raw_writel(~DMA_GO, chan->io + DMA_MODE_CLEAR); } static inline int dma_halted(unsigned int dmanr) @@ -224,7 +224,7 @@ static inline int dma_halted(unsigned int dmanr) if (!chan) return 1; - return (au_readl(chan->io + DMA_MODE_READ) & DMA_HALT) ? 1 : 0; + return (__raw_readl(chan->io + DMA_MODE_READ) & DMA_HALT) ? 1 : 0; } /* Initialize a DMA channel. 
*/ @@ -239,14 +239,14 @@ static inline void init_dma(unsigned int dmanr) disable_dma(dmanr); /* Set device FIFO address */ - au_writel(CPHYSADDR(chan->fifo_addr), chan->io + DMA_PERIPHERAL_ADDR); + __raw_writel(CPHYSADDR(chan->fifo_addr), chan->io + DMA_PERIPHERAL_ADDR); mode = chan->mode | (chan->dev_id << DMA_DID_BIT); if (chan->irq) mode |= DMA_IE; - au_writel(~mode, chan->io + DMA_MODE_CLEAR); - au_writel(mode, chan->io + DMA_MODE_SET); + __raw_writel(~mode, chan->io + DMA_MODE_CLEAR); + __raw_writel(mode, chan->io + DMA_MODE_SET); } /* @@ -283,7 +283,7 @@ static inline int get_dma_active_buffer(unsigned int dmanr) if (!chan) return -1; - return (au_readl(chan->io + DMA_MODE_READ) & DMA_AB) ? 1 : 0; + return (__raw_readl(chan->io + DMA_MODE_READ) & DMA_AB) ? 1 : 0; } /* @@ -304,7 +304,7 @@ static inline void set_dma_fifo_addr(unsigned int dmanr, unsigned int a) if (chan->dev_id != DMA_ID_GP04 && chan->dev_id != DMA_ID_GP05) return; - au_writel(CPHYSADDR(a), chan->io + DMA_PERIPHERAL_ADDR); + __raw_writel(CPHYSADDR(a), chan->io + DMA_PERIPHERAL_ADDR); } /* @@ -316,7 +316,7 @@ static inline void clear_dma_done0(unsigned int dmanr) if (!chan) return; - au_writel(DMA_D0, chan->io + DMA_MODE_CLEAR); + __raw_writel(DMA_D0, chan->io + DMA_MODE_CLEAR); } static inline void clear_dma_done1(unsigned int dmanr) @@ -325,7 +325,7 @@ static inline void clear_dma_done1(unsigned int dmanr) if (!chan) return; - au_writel(DMA_D1, chan->io + DMA_MODE_CLEAR); + __raw_writel(DMA_D1, chan->io + DMA_MODE_CLEAR); } /* @@ -344,7 +344,7 @@ static inline void set_dma_addr0(unsigned int dmanr, unsigned int a) if (!chan) return; - au_writel(a, chan->io + DMA_BUFFER0_START); + __raw_writel(a, chan->io + DMA_BUFFER0_START); } /* @@ -356,7 +356,7 @@ static inline void set_dma_addr1(unsigned int dmanr, unsigned int a) if (!chan) return; - au_writel(a, chan->io + DMA_BUFFER1_START); + __raw_writel(a, chan->io + DMA_BUFFER1_START); } @@ -370,7 +370,7 @@ static inline void set_dma_count0(unsigned int dmanr, unsigned int count) if (!chan) return; count &= DMA_COUNT_MASK; - au_writel(count, chan->io + DMA_BUFFER0_COUNT); + __raw_writel(count, chan->io + DMA_BUFFER0_COUNT); } /* @@ -383,7 +383,7 @@ static inline void set_dma_count1(unsigned int dmanr, unsigned int count) if (!chan) return; count &= DMA_COUNT_MASK; - au_writel(count, chan->io + DMA_BUFFER1_COUNT); + __raw_writel(count, chan->io + DMA_BUFFER1_COUNT); } /* @@ -396,8 +396,8 @@ static inline void set_dma_count(unsigned int dmanr, unsigned int count) if (!chan) return; count &= DMA_COUNT_MASK; - au_writel(count, chan->io + DMA_BUFFER0_COUNT); - au_writel(count, chan->io + DMA_BUFFER1_COUNT); + __raw_writel(count, chan->io + DMA_BUFFER0_COUNT); + __raw_writel(count, chan->io + DMA_BUFFER1_COUNT); } /* @@ -410,7 +410,7 @@ static inline unsigned int get_dma_buffer_done(unsigned int dmanr) if (!chan) return 0; - return au_readl(chan->io + DMA_MODE_READ) & (DMA_D0 | DMA_D1); + return __raw_readl(chan->io + DMA_MODE_READ) & (DMA_D0 | DMA_D1); } @@ -437,10 +437,10 @@ static inline int get_dma_residue(unsigned int dmanr) if (!chan) return 0; - curBufCntReg = (au_readl(chan->io + DMA_MODE_READ) & DMA_AB) ? + curBufCntReg = (__raw_readl(chan->io + DMA_MODE_READ) & DMA_AB) ? 
DMA_BUFFER1_COUNT : DMA_BUFFER0_COUNT; - count = au_readl(chan->io + curBufCntReg) & DMA_COUNT_MASK; + count = __raw_readl(chan->io + curBufCntReg) & DMA_COUNT_MASK; if ((chan->mode & DMA_DW_MASK) == DMA_DW16) count <<= 1; diff --git a/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h b/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h index 09f45e6afade..c5b6eef0efa7 100644 --- a/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h @@ -8,6 +8,12 @@ #define __ASM_MACH_AU1X00_CPU_FEATURE_OVERRIDES_H #define cpu_has_tlb 1 +#define cpu_has_tlbinv 0 +#define cpu_has_segments 0 +#define cpu_has_eva 0 +#define cpu_has_htw 0 +#define cpu_has_rixiex 0 +#define cpu_has_maar 0 #define cpu_has_4kex 1 #define cpu_has_3k_cache 0 #define cpu_has_4k_cache 1 @@ -28,6 +34,8 @@ #define cpu_has_mdmx 0 #define cpu_has_mips3d 0 #define cpu_has_smartmips 0 +#define cpu_has_rixi 0 +#define cpu_has_mmips 0 #define cpu_has_vtag_icache 0 #define cpu_has_dc_aliases 0 #define cpu_has_ic_fills_f_dc 1 @@ -50,4 +58,8 @@ #define cpu_dcache_line_size() 32 #define cpu_icache_line_size() 32 +#define cpu_has_perf_cntr_intr_bit 0 +#define cpu_has_vz 0 +#define cpu_has_msa 0 + #endif /* __ASM_MACH_AU1X00_CPU_FEATURE_OVERRIDES_H */ diff --git a/arch/mips/include/asm/mach-au1x00/gpio-au1000.h b/arch/mips/include/asm/mach-au1x00/gpio-au1000.h index 796afd051c35..9785e4ebb450 100644 --- a/arch/mips/include/asm/mach-au1x00/gpio-au1000.h +++ b/arch/mips/include/asm/mach-au1x00/gpio-au1000.h @@ -25,20 +25,20 @@ #define MAKE_IRQ(intc, off) (AU1000_INTC##intc##_INT_BASE + (off)) /* GPIO1 registers within SYS_ area */ -#define SYS_TRIOUTRD 0x100 -#define SYS_TRIOUTCLR 0x100 -#define SYS_OUTPUTRD 0x108 -#define SYS_OUTPUTSET 0x108 -#define SYS_OUTPUTCLR 0x10C -#define SYS_PINSTATERD 0x110 -#define SYS_PININPUTEN 0x110 +#define AU1000_SYS_TRIOUTRD 0x100 +#define AU1000_SYS_TRIOUTCLR 0x100 +#define AU1000_SYS_OUTPUTRD 0x108 +#define AU1000_SYS_OUTPUTSET 0x108 +#define AU1000_SYS_OUTPUTCLR 0x10C +#define AU1000_SYS_PINSTATERD 0x110 +#define AU1000_SYS_PININPUTEN 0x110 /* register offsets within GPIO2 block */ -#define GPIO2_DIR 0x00 -#define GPIO2_OUTPUT 0x08 -#define GPIO2_PINSTATE 0x0C -#define GPIO2_INTENABLE 0x10 -#define GPIO2_ENABLE 0x14 +#define AU1000_GPIO2_DIR 0x00 +#define AU1000_GPIO2_OUTPUT 0x08 +#define AU1000_GPIO2_PINSTATE 0x0C +#define AU1000_GPIO2_INTENABLE 0x10 +#define AU1000_GPIO2_ENABLE 0x14 struct gpio; @@ -217,26 +217,21 @@ static inline int au1200_irq_to_gpio(int irq) */ static inline void alchemy_gpio1_set_value(int gpio, int v) { - void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR); unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE); - unsigned long r = v ? SYS_OUTPUTSET : SYS_OUTPUTCLR; - __raw_writel(mask, base + r); - wmb(); + unsigned long r = v ? 
AU1000_SYS_OUTPUTSET : AU1000_SYS_OUTPUTCLR; + alchemy_wrsys(mask, r); } static inline int alchemy_gpio1_get_value(int gpio) { - void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR); unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE); - return __raw_readl(base + SYS_PINSTATERD) & mask; + return alchemy_rdsys(AU1000_SYS_PINSTATERD) & mask; } static inline int alchemy_gpio1_direction_input(int gpio) { - void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR); unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE); - __raw_writel(mask, base + SYS_TRIOUTCLR); - wmb(); + alchemy_wrsys(mask, AU1000_SYS_TRIOUTCLR); return 0; } @@ -279,13 +274,13 @@ static inline void __alchemy_gpio2_mod_dir(int gpio, int to_out) { void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR); unsigned long mask = 1 << (gpio - ALCHEMY_GPIO2_BASE); - unsigned long d = __raw_readl(base + GPIO2_DIR); + unsigned long d = __raw_readl(base + AU1000_GPIO2_DIR); if (to_out) d |= mask; else d &= ~mask; - __raw_writel(d, base + GPIO2_DIR); + __raw_writel(d, base + AU1000_GPIO2_DIR); wmb(); } @@ -294,14 +289,15 @@ static inline void alchemy_gpio2_set_value(int gpio, int v) void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR); unsigned long mask; mask = ((v) ? 0x00010001 : 0x00010000) << (gpio - ALCHEMY_GPIO2_BASE); - __raw_writel(mask, base + GPIO2_OUTPUT); + __raw_writel(mask, base + AU1000_GPIO2_OUTPUT); wmb(); } static inline int alchemy_gpio2_get_value(int gpio) { void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR); - return __raw_readl(base + GPIO2_PINSTATE) & (1 << (gpio - ALCHEMY_GPIO2_BASE)); + return __raw_readl(base + AU1000_GPIO2_PINSTATE) & + (1 << (gpio - ALCHEMY_GPIO2_BASE)); } static inline int alchemy_gpio2_direction_input(int gpio) @@ -352,12 +348,12 @@ static inline int alchemy_gpio2_to_irq(int gpio) static inline void __alchemy_gpio2_mod_int(int gpio2, int en) { void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR); - unsigned long r = __raw_readl(base + GPIO2_INTENABLE); + unsigned long r = __raw_readl(base + AU1000_GPIO2_INTENABLE); if (en) r |= 1 << gpio2; else r &= ~(1 << gpio2); - __raw_writel(r, base + GPIO2_INTENABLE); + __raw_writel(r, base + AU1000_GPIO2_INTENABLE); wmb(); } @@ -434,9 +430,9 @@ static inline void alchemy_gpio2_disable_int(int gpio2) static inline void alchemy_gpio2_enable(void) { void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR); - __raw_writel(3, base + GPIO2_ENABLE); /* reset, clock enabled */ + __raw_writel(3, base + AU1000_GPIO2_ENABLE); /* reset, clock enabled */ wmb(); - __raw_writel(1, base + GPIO2_ENABLE); /* clock enabled */ + __raw_writel(1, base + AU1000_GPIO2_ENABLE); /* clock enabled */ wmb(); } @@ -448,7 +444,7 @@ static inline void alchemy_gpio2_enable(void) static inline void alchemy_gpio2_disable(void) { void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR); - __raw_writel(2, base + GPIO2_ENABLE); /* reset, clock disabled */ + __raw_writel(2, base + AU1000_GPIO2_ENABLE); /* reset, clock disabled */ wmb(); } diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h index bba7399a49a3..1f5643b89a91 100644 --- a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h +++ b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h @@ -18,6 +18,7 @@ enum bcm47xx_board { BCM47XX_BOARD_ASUS_WL300G, BCM47XX_BOARD_ASUS_WL320GE, BCM47XX_BOARD_ASUS_WL330GE, + BCM47XX_BOARD_ASUS_WL500G, 
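
Returning to the gpio-au1000.h hunk above: the open-coded KSEG1 base, __raw_writel() and wmb() sequences are replaced by alchemy_wrsys()/alchemy_rdsys(), which are defined elsewhere in this series. As a rough sketch, assuming those helpers keep the same mapping and ordering guarantees as the code they replace, the write path presumably amounts to something like:

/* Illustrative only; the real alchemy_wrsys() may differ. */
static inline void example_wrsys(unsigned long val, unsigned int reg)
{
	void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR);

	__raw_writel(val, base + reg);
	wmb();	/* preserve the ordering the old open-coded version enforced */
}
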
BCM47XX_BOARD_ASUS_WL500GD, BCM47XX_BOARD_ASUS_WL500GPV1, BCM47XX_BOARD_ASUS_WL500GPV2, @@ -70,11 +71,15 @@ enum bcm47xx_board { BCM47XX_BOARD_LINKSYS_WRT310NV1, BCM47XX_BOARD_LINKSYS_WRT310NV2, BCM47XX_BOARD_LINKSYS_WRT54G3GV2, - BCM47XX_BOARD_LINKSYS_WRT54G, + BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0101, + BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0467, + BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0708, BCM47XX_BOARD_LINKSYS_WRT610NV1, BCM47XX_BOARD_LINKSYS_WRT610NV2, BCM47XX_BOARD_LINKSYS_WRTSL54GS, + BCM47XX_BOARD_MICROSOFT_MN700, + BCM47XX_BOARD_MOTOROLA_WE800G, BCM47XX_BOARD_MOTOROLA_WR850GP, BCM47XX_BOARD_MOTOROLA_WR850GV2V3, diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h index 3112f08f0c72..56bb19219d48 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h @@ -19,118 +19,68 @@ #define BCM6368_CPU_ID 0x6368 void __init bcm63xx_cpu_init(void); -u16 __bcm63xx_get_cpu_id(void); u8 bcm63xx_get_cpu_rev(void); unsigned int bcm63xx_get_cpu_freq(void); +static inline u16 __pure __bcm63xx_get_cpu_id(const u16 cpu_id) +{ + switch (cpu_id) { #ifdef CONFIG_BCM63XX_CPU_3368 -# ifdef bcm63xx_get_cpu_id -# undef bcm63xx_get_cpu_id -# define bcm63xx_get_cpu_id() __bcm63xx_get_cpu_id() -# define BCMCPU_RUNTIME_DETECT -# else -# define bcm63xx_get_cpu_id() BCM3368_CPU_ID -# endif -# define BCMCPU_IS_3368() (bcm63xx_get_cpu_id() == BCM3368_CPU_ID) -#else -# define BCMCPU_IS_3368() (0) + case BCM3368_CPU_ID: #endif #ifdef CONFIG_BCM63XX_CPU_6328 -# ifdef bcm63xx_get_cpu_id -# undef bcm63xx_get_cpu_id -# define bcm63xx_get_cpu_id() __bcm63xx_get_cpu_id() -# define BCMCPU_RUNTIME_DETECT -# else -# define bcm63xx_get_cpu_id() BCM6328_CPU_ID -# endif -# define BCMCPU_IS_6328() (bcm63xx_get_cpu_id() == BCM6328_CPU_ID) -#else -# define BCMCPU_IS_6328() (0) + case BCM6328_CPU_ID: #endif #ifdef CONFIG_BCM63XX_CPU_6338 -# ifdef bcm63xx_get_cpu_id -# undef bcm63xx_get_cpu_id -# define bcm63xx_get_cpu_id() __bcm63xx_get_cpu_id() -# define BCMCPU_RUNTIME_DETECT -# else -# define bcm63xx_get_cpu_id() BCM6338_CPU_ID -# endif -# define BCMCPU_IS_6338() (bcm63xx_get_cpu_id() == BCM6338_CPU_ID) -#else -# define BCMCPU_IS_6338() (0) + case BCM6338_CPU_ID: #endif #ifdef CONFIG_BCM63XX_CPU_6345 -# ifdef bcm63xx_get_cpu_id -# undef bcm63xx_get_cpu_id -# define bcm63xx_get_cpu_id() __bcm63xx_get_cpu_id() -# define BCMCPU_RUNTIME_DETECT -# else -# define bcm63xx_get_cpu_id() BCM6345_CPU_ID -# endif -# define BCMCPU_IS_6345() (bcm63xx_get_cpu_id() == BCM6345_CPU_ID) -#else -# define BCMCPU_IS_6345() (0) + case BCM6345_CPU_ID: #endif #ifdef CONFIG_BCM63XX_CPU_6348 -# ifdef bcm63xx_get_cpu_id -# undef bcm63xx_get_cpu_id -# define bcm63xx_get_cpu_id() __bcm63xx_get_cpu_id() -# define BCMCPU_RUNTIME_DETECT -# else -# define bcm63xx_get_cpu_id() BCM6348_CPU_ID -# endif -# define BCMCPU_IS_6348() (bcm63xx_get_cpu_id() == BCM6348_CPU_ID) -#else -# define BCMCPU_IS_6348() (0) + case BCM6348_CPU_ID: #endif #ifdef CONFIG_BCM63XX_CPU_6358 -# ifdef bcm63xx_get_cpu_id -# undef bcm63xx_get_cpu_id -# define bcm63xx_get_cpu_id() __bcm63xx_get_cpu_id() -# define BCMCPU_RUNTIME_DETECT -# else -# define bcm63xx_get_cpu_id() BCM6358_CPU_ID -# endif -# define BCMCPU_IS_6358() (bcm63xx_get_cpu_id() == BCM6358_CPU_ID) -#else -# define BCMCPU_IS_6358() (0) + case BCM6358_CPU_ID: #endif #ifdef CONFIG_BCM63XX_CPU_6362 -# ifdef bcm63xx_get_cpu_id -# undef bcm63xx_get_cpu_id -# define bcm63xx_get_cpu_id() __bcm63xx_get_cpu_id() -# define 
BCMCPU_RUNTIME_DETECT -# else -# define bcm63xx_get_cpu_id() BCM6362_CPU_ID -# endif -# define BCMCPU_IS_6362() (bcm63xx_get_cpu_id() == BCM6362_CPU_ID) -#else -# define BCMCPU_IS_6362() (0) + case BCM6362_CPU_ID: #endif - #ifdef CONFIG_BCM63XX_CPU_6368 -# ifdef bcm63xx_get_cpu_id -# undef bcm63xx_get_cpu_id -# define bcm63xx_get_cpu_id() __bcm63xx_get_cpu_id() -# define BCMCPU_RUNTIME_DETECT -# else -# define bcm63xx_get_cpu_id() BCM6368_CPU_ID -# endif -# define BCMCPU_IS_6368() (bcm63xx_get_cpu_id() == BCM6368_CPU_ID) -#else -# define BCMCPU_IS_6368() (0) + case BCM6368_CPU_ID: #endif + break; + default: + unreachable(); + } -#ifndef bcm63xx_get_cpu_id -#error "No CPU support configured" -#endif + return cpu_id; +} + +extern u16 bcm63xx_cpu_id; + +static inline u16 __pure bcm63xx_get_cpu_id(void) +{ + const u16 cpu_id = bcm63xx_cpu_id; + + return __bcm63xx_get_cpu_id(cpu_id); +} + +#define BCMCPU_IS_3368() (bcm63xx_get_cpu_id() == BCM3368_CPU_ID) +#define BCMCPU_IS_6328() (bcm63xx_get_cpu_id() == BCM6328_CPU_ID) +#define BCMCPU_IS_6338() (bcm63xx_get_cpu_id() == BCM6338_CPU_ID) +#define BCMCPU_IS_6345() (bcm63xx_get_cpu_id() == BCM6345_CPU_ID) +#define BCMCPU_IS_6348() (bcm63xx_get_cpu_id() == BCM6348_CPU_ID) +#define BCMCPU_IS_6358() (bcm63xx_get_cpu_id() == BCM6358_CPU_ID) +#define BCMCPU_IS_6362() (bcm63xx_get_cpu_id() == BCM6362_CPU_ID) +#define BCMCPU_IS_6368() (bcm63xx_get_cpu_id() == BCM6368_CPU_ID) /* * While registers sets are (mostly) the same across 63xx CPU, base @@ -598,55 +548,6 @@ enum bcm63xx_regs_set { extern const unsigned long *bcm63xx_regs_base; -#define __GEN_RSET_BASE(__cpu, __rset) \ - case RSET_## __rset : \ - return BCM_## __cpu ##_## __rset ##_BASE; - -#define __GEN_RSET(__cpu) \ - switch (set) { \ - __GEN_RSET_BASE(__cpu, DSL_LMEM) \ - __GEN_RSET_BASE(__cpu, PERF) \ - __GEN_RSET_BASE(__cpu, TIMER) \ - __GEN_RSET_BASE(__cpu, WDT) \ - __GEN_RSET_BASE(__cpu, UART0) \ - __GEN_RSET_BASE(__cpu, UART1) \ - __GEN_RSET_BASE(__cpu, GPIO) \ - __GEN_RSET_BASE(__cpu, SPI) \ - __GEN_RSET_BASE(__cpu, HSSPI) \ - __GEN_RSET_BASE(__cpu, UDC0) \ - __GEN_RSET_BASE(__cpu, OHCI0) \ - __GEN_RSET_BASE(__cpu, OHCI_PRIV) \ - __GEN_RSET_BASE(__cpu, USBH_PRIV) \ - __GEN_RSET_BASE(__cpu, USBD) \ - __GEN_RSET_BASE(__cpu, USBDMA) \ - __GEN_RSET_BASE(__cpu, MPI) \ - __GEN_RSET_BASE(__cpu, PCMCIA) \ - __GEN_RSET_BASE(__cpu, PCIE) \ - __GEN_RSET_BASE(__cpu, DSL) \ - __GEN_RSET_BASE(__cpu, ENET0) \ - __GEN_RSET_BASE(__cpu, ENET1) \ - __GEN_RSET_BASE(__cpu, ENETDMA) \ - __GEN_RSET_BASE(__cpu, ENETDMAC) \ - __GEN_RSET_BASE(__cpu, ENETDMAS) \ - __GEN_RSET_BASE(__cpu, ENETSW) \ - __GEN_RSET_BASE(__cpu, EHCI0) \ - __GEN_RSET_BASE(__cpu, SDRAM) \ - __GEN_RSET_BASE(__cpu, MEMC) \ - __GEN_RSET_BASE(__cpu, DDR) \ - __GEN_RSET_BASE(__cpu, M2M) \ - __GEN_RSET_BASE(__cpu, ATM) \ - __GEN_RSET_BASE(__cpu, XTM) \ - __GEN_RSET_BASE(__cpu, XTMDMA) \ - __GEN_RSET_BASE(__cpu, XTMDMAC) \ - __GEN_RSET_BASE(__cpu, XTMDMAS) \ - __GEN_RSET_BASE(__cpu, PCM) \ - __GEN_RSET_BASE(__cpu, PCMDMA) \ - __GEN_RSET_BASE(__cpu, PCMDMAC) \ - __GEN_RSET_BASE(__cpu, PCMDMAS) \ - __GEN_RSET_BASE(__cpu, RNG) \ - __GEN_RSET_BASE(__cpu, MISC) \ - } - #define __GEN_CPU_REGS_TABLE(__cpu) \ [RSET_DSL_LMEM] = BCM_## __cpu ##_DSL_LMEM_BASE, \ [RSET_PERF] = BCM_## __cpu ##_PERF_BASE, \ @@ -693,36 +594,7 @@ extern const unsigned long *bcm63xx_regs_base; static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set) { -#ifdef BCMCPU_RUNTIME_DETECT return bcm63xx_regs_base[set]; -#else -#ifdef CONFIG_BCM63XX_CPU_3368 - 
__GEN_RSET(3368) -#endif -#ifdef CONFIG_BCM63XX_CPU_6328 - __GEN_RSET(6328) -#endif -#ifdef CONFIG_BCM63XX_CPU_6338 - __GEN_RSET(6338) -#endif -#ifdef CONFIG_BCM63XX_CPU_6345 - __GEN_RSET(6345) -#endif -#ifdef CONFIG_BCM63XX_CPU_6348 - __GEN_RSET(6348) -#endif -#ifdef CONFIG_BCM63XX_CPU_6358 - __GEN_RSET(6358) -#endif -#ifdef CONFIG_BCM63XX_CPU_6362 - __GEN_RSET(6362) -#endif -#ifdef CONFIG_BCM63XX_CPU_6368 - __GEN_RSET(6368) -#endif -#endif - /* unreached */ - return 0; } /* diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h index 753953e86242..466fc85899f4 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h @@ -112,55 +112,9 @@ enum bcm63xx_regs_enetdmac { static inline unsigned long bcm63xx_enetdmacreg(enum bcm63xx_regs_enetdmac reg) { -#ifdef BCMCPU_RUNTIME_DETECT extern const unsigned long *bcm63xx_regs_enetdmac; return bcm63xx_regs_enetdmac[reg]; -#else -#ifdef CONFIG_BCM63XX_CPU_6345 - switch (reg) { - case ENETDMAC_CHANCFG: - return ENETDMA_6345_CHANCFG_REG; - case ENETDMAC_IR: - return ENETDMA_6345_IR_REG; - case ENETDMAC_IRMASK: - return ENETDMA_6345_IRMASK_REG; - case ENETDMAC_MAXBURST: - return ENETDMA_6345_MAXBURST_REG; - case ENETDMAC_BUFALLOC: - return ENETDMA_6345_BUFALLOC_REG; - case ENETDMAC_RSTART: - return ENETDMA_6345_RSTART_REG; - case ENETDMAC_FC: - return ENETDMA_6345_FC_REG; - case ENETDMAC_LEN: - return ENETDMA_6345_LEN_REG; - } -#endif -#if defined(CONFIG_BCM63XX_CPU_6328) || \ - defined(CONFIG_BCM63XX_CPU_6338) || \ - defined(CONFIG_BCM63XX_CPU_6348) || \ - defined(CONFIG_BCM63XX_CPU_6358) || \ - defined(CONFIG_BCM63XX_CPU_6362) || \ - defined(CONFIG_BCM63XX_CPU_6368) - switch (reg) { - case ENETDMAC_CHANCFG: - return ENETDMAC_CHANCFG_REG; - case ENETDMAC_IR: - return ENETDMAC_IR_REG; - case ENETDMAC_IRMASK: - return ENETDMAC_IRMASK_REG; - case ENETDMAC_MAXBURST: - return ENETDMAC_MAXBURST_REG; - case ENETDMAC_BUFALLOC: - case ENETDMAC_RSTART: - case ENETDMAC_FC: - case ENETDMAC_LEN: - return 0; - } -#endif -#endif - return 0; } diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h index c426cabc620a..25737655d141 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h @@ -30,26 +30,6 @@ enum bcm63xx_regs_spi { SPI_RX_DATA, }; -#define __GEN_SPI_RSET_BASE(__cpu, __rset) \ - case SPI_## __rset: \ - return SPI_## __cpu ##_## __rset; - -#define __GEN_SPI_RSET(__cpu) \ - switch (reg) { \ - __GEN_SPI_RSET_BASE(__cpu, CMD) \ - __GEN_SPI_RSET_BASE(__cpu, INT_STATUS) \ - __GEN_SPI_RSET_BASE(__cpu, INT_MASK_ST) \ - __GEN_SPI_RSET_BASE(__cpu, INT_MASK) \ - __GEN_SPI_RSET_BASE(__cpu, ST) \ - __GEN_SPI_RSET_BASE(__cpu, CLK_CFG) \ - __GEN_SPI_RSET_BASE(__cpu, FILL_BYTE) \ - __GEN_SPI_RSET_BASE(__cpu, MSG_TAIL) \ - __GEN_SPI_RSET_BASE(__cpu, RX_TAIL) \ - __GEN_SPI_RSET_BASE(__cpu, MSG_CTL) \ - __GEN_SPI_RSET_BASE(__cpu, MSG_DATA) \ - __GEN_SPI_RSET_BASE(__cpu, RX_DATA) \ - } - #define __GEN_SPI_REGS_TABLE(__cpu) \ [SPI_CMD] = SPI_## __cpu ##_CMD, \ [SPI_INT_STATUS] = SPI_## __cpu ##_INT_STATUS, \ @@ -66,20 +46,9 @@ enum bcm63xx_regs_spi { static inline unsigned long bcm63xx_spireg(enum bcm63xx_regs_spi reg) { -#ifdef BCMCPU_RUNTIME_DETECT extern const unsigned long *bcm63xx_regs_spi; return bcm63xx_regs_spi[reg]; -#else -#if defined(CONFIG_BCM63XX_CPU_6338) || defined(CONFIG_BCM63XX_CPU_6348) - 
__GEN_SPI_RSET(6348) -#endif -#if defined(CONFIG_BCM63XX_CPU_6358) || defined(CONFIG_BCM63XX_CPU_6362) || \ - defined(CONFIG_BCM63XX_CPU_6368) - __GEN_SPI_RSET(6358) -#endif -#endif - return 0; } #endif /* BCM63XX_DEV_SPI_H */ diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h index ab427f8814e6..4794067cb5a7 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h @@ -215,23 +215,23 @@ /* Interrupt Mask register */ #define PERF_IRQMASK_3368_REG 0xc -#define PERF_IRQMASK_6328_REG 0x20 +#define PERF_IRQMASK_6328_REG(x) (0x20 + (x) * 0x10) #define PERF_IRQMASK_6338_REG 0xc #define PERF_IRQMASK_6345_REG 0xc #define PERF_IRQMASK_6348_REG 0xc -#define PERF_IRQMASK_6358_REG 0xc -#define PERF_IRQMASK_6362_REG 0x20 -#define PERF_IRQMASK_6368_REG 0x20 +#define PERF_IRQMASK_6358_REG(x) (0xc + (x) * 0x2c) +#define PERF_IRQMASK_6362_REG(x) (0x20 + (x) * 0x10) +#define PERF_IRQMASK_6368_REG(x) (0x20 + (x) * 0x10) /* Interrupt Status register */ #define PERF_IRQSTAT_3368_REG 0x10 -#define PERF_IRQSTAT_6328_REG 0x28 +#define PERF_IRQSTAT_6328_REG(x) (0x28 + (x) * 0x10) #define PERF_IRQSTAT_6338_REG 0x10 #define PERF_IRQSTAT_6345_REG 0x10 #define PERF_IRQSTAT_6348_REG 0x10 -#define PERF_IRQSTAT_6358_REG 0x10 -#define PERF_IRQSTAT_6362_REG 0x28 -#define PERF_IRQSTAT_6368_REG 0x28 +#define PERF_IRQSTAT_6358_REG(x) (0x10 + (x) * 0x2c) +#define PERF_IRQSTAT_6362_REG(x) (0x28 + (x) * 0x10) +#define PERF_IRQSTAT_6368_REG(x) (0x28 + (x) * 0x10) /* External Interrupt Configuration register */ #define PERF_EXTIRQ_CFG_REG_3368 0x14 diff --git a/arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h b/arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h index e9c408e8ff4c..bc1167dbd4e3 100644 --- a/arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h @@ -24,7 +24,7 @@ #define cpu_has_smartmips 0 #define cpu_has_vtag_icache 0 -#if !defined(BCMCPU_RUNTIME_DETECT) && (defined(CONFIG_BCM63XX_CPU_6348) || defined(CONFIG_BCM63XX_CPU_6345) || defined(CONFIG_BCM63XX_CPU_6338)) +#if !defined(CONFIG_SYS_HAS_CPU_BMIPS4350) #define cpu_has_dc_aliases 0 #endif diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h index cf8022872892..fa1f3cfbae8d 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h @@ -57,6 +57,7 @@ #define cpu_has_vint 0 #define cpu_has_veic 0 #define cpu_hwrena_impl_bits 0xc0000000 +#define cpu_has_wsbh 1 #define cpu_has_rixi (cpu_data[0].cputype != CPU_CAVIUM_OCTEON) diff --git a/arch/mips/include/asm/mach-ip28/spaces.h b/arch/mips/include/asm/mach-ip28/spaces.h index 5d6a76434d00..c4a912733b65 100644 --- a/arch/mips/include/asm/mach-ip28/spaces.h +++ b/arch/mips/include/asm/mach-ip28/spaces.h @@ -11,15 +11,8 @@ #ifndef _ASM_MACH_IP28_SPACES_H #define _ASM_MACH_IP28_SPACES_H -#define CAC_BASE _AC(0xa800000000000000, UL) - -#define HIGHMEM_START (~0UL) - #define PHYS_OFFSET _AC(0x20000000, UL) -#define UNCAC_BASE _AC(0xc0000000, UL) /* 0xa0000000 + PHYS_OFFSET */ -#define IO_BASE UNCAC_BASE - #include <asm/mach-generic/spaces.h> #endif /* _ASM_MACH_IP28_SPACES_H */ diff --git a/arch/mips/include/asm/mach-loongson/boot_param.h b/arch/mips/include/asm/mach-loongson/boot_param.h index 829a7ec185fb..3388fc53599e 100644 --- 
a/arch/mips/include/asm/mach-loongson/boot_param.h +++ b/arch/mips/include/asm/mach-loongson/boot_param.h @@ -146,6 +146,9 @@ struct boot_params { struct loongson_system_configuration { u32 nr_cpus; + u32 nr_nodes; + int cores_per_node; + int cores_per_package; enum loongson_cpu_type cputype; u64 ht_control_base; u64 pci_mem_start_addr; @@ -160,4 +163,5 @@ struct loongson_system_configuration { extern struct efi_memory_map_loongson *loongson_memmap; extern struct loongson_system_configuration loongson_sysconf; +extern int cpuhotplug_workaround; #endif diff --git a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h index c0f3ef45c2c1..7d28f95b0512 100644 --- a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h @@ -59,4 +59,6 @@ #define cpu_has_watch 1 #define cpu_has_local_ebase 0 +#define cpu_has_wsbh IS_ENABLED(CONFIG_CPU_LOONGSON3) + #endif /* __ASM_MACH_LOONGSON_CPU_FEATURE_OVERRIDES_H */ diff --git a/arch/mips/include/asm/mach-loongson/kernel-entry-init.h b/arch/mips/include/asm/mach-loongson/kernel-entry-init.h new file mode 100644 index 000000000000..df5fca8eeb80 --- /dev/null +++ b/arch/mips/include/asm/mach-loongson/kernel-entry-init.h @@ -0,0 +1,52 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2005 Embedded Alley Solutions, Inc + * Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 2009 Jiajie Chen (chenjiajie@cse.buaa.edu.cn) + * Copyright (C) 2012 Huacai Chen (chenhc@lemote.com) + */ +#ifndef __ASM_MACH_LOONGSON_KERNEL_ENTRY_H +#define __ASM_MACH_LOONGSON_KERNEL_ENTRY_H + +/* + * Override macros used in arch/mips/kernel/head.S. + */ + .macro kernel_entry_setup +#ifdef CONFIG_CPU_LOONGSON3 + .set push + .set mips64 + /* Set LPA on LOONGSON3 config3 */ + mfc0 t0, $16, 3 + or t0, (0x1 << 7) + mtc0 t0, $16, 3 + /* Set ELPA on LOONGSON3 pagegrain */ + li t0, (0x1 << 29) + mtc0 t0, $5, 1 + _ehb + .set pop +#endif + .endm + +/* + * Do SMP slave processor setup. 
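
For readability, the CP0 accesses in the kernel_entry_setup macro above (and repeated in smp_slave_setup below) are roughly equivalent to the following C, using the usual mipsregs.h accessors. This is shown only as a sketch; the real code must run as raw assembly this early in boot, before the C environment is set up:

/* Sketch only, not the actual boot code. */
static void example_enable_lpa(void)
{
	write_c0_config3(read_c0_config3() | MIPS_CONF3_LPA);	/* Config3.LPA */
	write_c0_pagegrain(PG_ELPA);				/* PageGrain.ELPA */
	back_to_back_c0_hazard();				/* matches the _ehb */
}
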
+ */ + .macro smp_slave_setup +#ifdef CONFIG_CPU_LOONGSON3 + .set push + .set mips64 + /* Set LPA on LOONGSON3 config3 */ + mfc0 t0, $16, 3 + or t0, (0x1 << 7) + mtc0 t0, $16, 3 + /* Set ELPA on LOONGSON3 pagegrain */ + li t0, (0x1 << 29) + mtc0 t0, $5, 1 + _ehb + .set pop +#endif + .endm + +#endif /* __ASM_MACH_LOONGSON_KERNEL_ENTRY_H */ diff --git a/arch/mips/include/asm/mach-loongson/loongson.h b/arch/mips/include/asm/mach-loongson/loongson.h index f3fd1eb8e3dd..92bf76c21441 100644 --- a/arch/mips/include/asm/mach-loongson/loongson.h +++ b/arch/mips/include/asm/mach-loongson/loongson.h @@ -249,8 +249,15 @@ static inline void do_perfcnt_IRQ(void) #define LOONGSON_PXARB_CFG LOONGSON_REG(LOONGSON_REGBASE + 0x68) #define LOONGSON_PXARB_STATUS LOONGSON_REG(LOONGSON_REGBASE + 0x6c) -/* Chip Config */ -#define LOONGSON_CHIPCFG0 LOONGSON_REG(LOONGSON_REGBASE + 0x80) +#define MAX_PACKAGES 4 + +/* Chip Config registor of each physical cpu package, PRid >= Loongson-2F */ +extern u64 loongson_chipcfg[MAX_PACKAGES]; +#define LOONGSON_CHIPCFG(id) (*(volatile u32 *)(loongson_chipcfg[id])) + +/* Freq Control register of each physical cpu package, PRid >= Loongson-3B */ +extern u64 loongson_freqctrl[MAX_PACKAGES]; +#define LOONGSON_FREQCTRL(id) (*(volatile u32 *)(loongson_freqctrl[id])) /* pcimap */ diff --git a/arch/mips/include/asm/mach-loongson/machine.h b/arch/mips/include/asm/mach-loongson/machine.h index 1b1f592fa2be..228e37847a36 100644 --- a/arch/mips/include/asm/mach-loongson/machine.h +++ b/arch/mips/include/asm/mach-loongson/machine.h @@ -24,10 +24,10 @@ #endif -#ifdef CONFIG_LEMOTE_MACH3A +#ifdef CONFIG_LOONGSON_MACH3X #define LOONGSON_MACHTYPE MACH_LEMOTE_A1101 -#endif /* CONFIG_LEMOTE_MACH3A */ +#endif /* CONFIG_LOONGSON_MACH3X */ #endif /* __ASM_MACH_LOONGSON_MACHINE_H */ diff --git a/arch/mips/include/asm/mach-loongson/mmzone.h b/arch/mips/include/asm/mach-loongson/mmzone.h new file mode 100644 index 000000000000..37c08a27b4f0 --- /dev/null +++ b/arch/mips/include/asm/mach-loongson/mmzone.h @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2010 Loongson Inc. & Lemote Inc. & + * Insititute of Computing Technology + * Author: Xiang Gao, gaoxiang@ict.ac.cn + * Huacai Chen, chenhc@lemote.com + * Xiaofu Meng, Shuangshuang Zhang + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ +#ifndef _ASM_MACH_MMZONE_H +#define _ASM_MACH_MMZONE_H + +#include <boot_param.h> +#define NODE_ADDRSPACE_SHIFT 44 +#define NODE0_ADDRSPACE_OFFSET 0x000000000000UL +#define NODE1_ADDRSPACE_OFFSET 0x100000000000UL +#define NODE2_ADDRSPACE_OFFSET 0x200000000000UL +#define NODE3_ADDRSPACE_OFFSET 0x300000000000UL + +#define pa_to_nid(addr) (((addr) & 0xf00000000000) >> NODE_ADDRSPACE_SHIFT) + +#define LEVELS_PER_SLICE 128 + +struct slice_data { + unsigned long irq_enable_mask[2]; + int level_to_irq[LEVELS_PER_SLICE]; +}; + +struct hub_data { + cpumask_t h_cpus; + unsigned long slice_map; + unsigned long irq_alloc_mask[2]; + struct slice_data slice[2]; +}; + +struct node_data { + struct pglist_data pglist; + struct hub_data hub; + cpumask_t cpumask; +}; + +extern struct node_data *__node_data[]; + +#define NODE_DATA(n) (&__node_data[(n)]->pglist) +#define hub_data(n) (&__node_data[(n)]->hub) + +extern void setup_zero_pages(void); +extern void __init prom_init_numa_memory(void); + +#endif /* _ASM_MACH_MMZONE_H */ diff --git a/arch/mips/include/asm/mach-loongson/topology.h b/arch/mips/include/asm/mach-loongson/topology.h new file mode 100644 index 000000000000..5598ba77d2ef --- /dev/null +++ b/arch/mips/include/asm/mach-loongson/topology.h @@ -0,0 +1,23 @@ +#ifndef _ASM_MACH_TOPOLOGY_H +#define _ASM_MACH_TOPOLOGY_H + +#ifdef CONFIG_NUMA + +#define cpu_to_node(cpu) ((cpu) >> 2) +#define parent_node(node) (node) +#define cpumask_of_node(node) (&__node_data[(node)]->cpumask) + +struct pci_bus; +extern int pcibus_to_node(struct pci_bus *); + +#define cpumask_of_pcibus(bus) (cpu_online_mask) + +extern unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES]; + +#define node_distance(from, to) (__node_distances[(from)][(to)]) + +#endif + +#include <asm-generic/topology.h> + +#endif /* _ASM_MACH_TOPOLOGY_H */ diff --git a/arch/mips/include/asm/mach-malta/irq.h b/arch/mips/include/asm/mach-malta/irq.h index 47cfe64efbb0..f2c13d211abb 100644 --- a/arch/mips/include/asm/mach-malta/irq.h +++ b/arch/mips/include/asm/mach-malta/irq.h @@ -2,6 +2,7 @@ #define __ASM_MACH_MIPS_IRQ_H +#define GIC_NUM_INTRS (24 + NR_CPUS * 2) #define NR_IRQS 256 #include_next <irq.h> diff --git a/arch/mips/include/asm/mach-malta/kernel-entry-init.h b/arch/mips/include/asm/mach-malta/kernel-entry-init.h index 77eeda77e73c..0cf8622db27f 100644 --- a/arch/mips/include/asm/mach-malta/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-malta/kernel-entry-init.h @@ -10,14 +10,15 @@ #ifndef __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H #define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H +#include <asm/regdef.h> +#include <asm/mipsregs.h> + /* * Prepare segments for EVA boot: * * This is in case the processor boots in legacy configuration * (SI_EVAReset is de-asserted and CONFIG5.K == 0) * - * On entry, t1 is loaded with CP0_CONFIG - * * ========================= Mappings ============================= * Virtual memory Physical memory Mapping * 0x00000000 - 0x7fffffff 0x80000000 - 0xfffffffff MUSUK (kuseg) @@ -30,12 +31,20 @@ * * * Lowmem is expanded to 2GB + * + * The following code uses the t0, t1, t2 and ra registers without + * previously preserving them. 
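
Tying the two new Loongson NUMA headers above together with a small worked example; the values follow directly from the macros shown (four logical CPUs per node, one 1 << 44 byte physical window per node):

/* Worked example of the layout above; purely illustrative. */
static int example_numa_lookup(void)
{
	int nid = cpu_to_node(5);			/* 5 >> 2  ->  node 1 */
	unsigned long pa = NODE1_ADDRSPACE_OFFSET + 0x200000;

	return pa_to_nid(pa) == nid;			/* true: both are node 1 */
}
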
+ * */ - .macro eva_entry + .macro platform_eva_init + + .set push + .set reorder /* * Get Config.K0 value and use it to program * the segmentation registers */ + mfc0 t1, CP0_CONFIG andi t1, 0x7 /* CCA */ move t2, t1 ins t2, t1, 16, 3 @@ -77,6 +86,8 @@ mtc0 t0, $16, 5 sync jal mips_ihb + + .set pop .endm .macro kernel_entry_setup @@ -95,7 +106,7 @@ sll t0, t0, 6 /* SC bit */ bgez t0, 9f - eva_entry + platform_eva_init b 0f 9: /* Assume we came from YAMON... */ @@ -127,8 +138,7 @@ nonsc_processor: #ifdef CONFIG_EVA sync ehb - mfc0 t1, CP0_CONFIG - eva_entry + platform_eva_init #endif .endm diff --git a/arch/mips/include/asm/mach-netlogic/topology.h b/arch/mips/include/asm/mach-netlogic/topology.h index ceeb1f5e7129..0eb43c832b25 100644 --- a/arch/mips/include/asm/mach-netlogic/topology.h +++ b/arch/mips/include/asm/mach-netlogic/topology.h @@ -10,13 +10,6 @@ #include <asm/mach-netlogic/multi-node.h> -#ifdef CONFIG_SMP -#define topology_physical_package_id(cpu) cpu_to_node(cpu) -#define topology_core_id(cpu) (cpu_logical_map(cpu) / NLM_THREADS_PER_CORE) -#define topology_thread_cpumask(cpu) (&cpu_sibling_map[cpu]) -#define topology_core_cpumask(cpu) cpumask_of_node(cpu_to_node(cpu)) -#endif - #include <asm-generic/topology.h> #endif /* _ASM_MACH_NETLOGIC_TOPOLOGY_H */ diff --git a/arch/mips/include/asm/mach-sead3/irq.h b/arch/mips/include/asm/mach-sead3/irq.h index 5d154cfbcf4c..d8106f75b9af 100644 --- a/arch/mips/include/asm/mach-sead3/irq.h +++ b/arch/mips/include/asm/mach-sead3/irq.h @@ -1,6 +1,7 @@ #ifndef __ASM_MACH_MIPS_IRQ_H #define __ASM_MACH_MIPS_IRQ_H +#define GIC_NUM_INTRS (24 + NR_CPUS * 2) #define NR_IRQS 256 diff --git a/arch/mips/include/asm/mips-boards/bonito64.h b/arch/mips/include/asm/mips-boards/bonito64.h index b2048d1bcc1c..5368891d424b 100644 --- a/arch/mips/include/asm/mips-boards/bonito64.h +++ b/arch/mips/include/asm/mips-boards/bonito64.h @@ -414,7 +414,6 @@ extern unsigned long _pcictrl_bonito_pcicfg; #define BONITO_PCIMEMBASECFG_ADDRMASK(WIN, CFG) ((((CFG) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK) >> BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK_SHIFT) << BONITO_PCIMEMBASECFG_ASHIFT) -#define BONITO_PCIMEMBASECFG_ADDRMASK(WIN, CFG) ((((CFG) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK) >> BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK_SHIFT) << BONITO_PCIMEMBASECFG_ASHIFT) #define BONITO_PCIMEMBASECFG_ADDRTRANS(WIN, CFG) ((((CFG) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_TRANS) >> BONITO_PCIMEMBASECFG_MEMBASE##WIN##_TRANS_SHIFT) << BONITO_PCIMEMBASECFG_ASHIFT) #define BONITO_PCITOPHYS(WIN, ADDR, CFG) ( \ diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 98e9754a4b6b..cf3b580c3df6 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -265,6 +265,7 @@ #define PG_XIE (_ULCAST_(1) << 30) #define PG_ELPA (_ULCAST_(1) << 29) #define PG_ESP (_ULCAST_(1) << 28) +#define PG_IEC (_ULCAST_(1) << 27) /* * R4x00 interrupt enable / cause bits @@ -630,7 +631,6 @@ #define MIPS_CONF4_MMUSIZEEXT_SHIFT (0) #define MIPS_CONF4_MMUSIZEEXT (_ULCAST_(255) << 0) #define MIPS_CONF4_FTLBSETS_SHIFT (0) -#define MIPS_CONF4_FTLBSETS_SHIFT (0) #define MIPS_CONF4_FTLBSETS (_ULCAST_(15) << MIPS_CONF4_FTLBSETS_SHIFT) #define MIPS_CONF4_FTLBWAYS_SHIFT (4) #define MIPS_CONF4_FTLBWAYS (_ULCAST_(15) << MIPS_CONF4_FTLBWAYS_SHIFT) @@ -652,6 +652,7 @@ #define MIPS_CONF5_NF (_ULCAST_(1) << 0) #define MIPS_CONF5_UFR (_ULCAST_(1) << 2) +#define MIPS_CONF5_MRP (_ULCAST_(1) << 3) #define MIPS_CONF5_MSAEN (_ULCAST_(1) << 27) #define MIPS_CONF5_EVA 
(_ULCAST_(1) << 28) #define MIPS_CONF5_CV (_ULCAST_(1) << 29) @@ -668,6 +669,12 @@ #define MIPS_CONF7_IAR (_ULCAST_(1) << 10) #define MIPS_CONF7_AR (_ULCAST_(1) << 16) +/* MAAR bit definitions */ +#define MIPS_MAAR_ADDR ((BIT_ULL(BITS_PER_LONG - 12) - 1) << 12) +#define MIPS_MAAR_ADDR_SHIFT 12 +#define MIPS_MAAR_S (_ULCAST_(1) << 1) +#define MIPS_MAAR_V (_ULCAST_(1) << 0) + /* EntryHI bit definition */ #define MIPS_ENTRYHI_EHINV (_ULCAST_(1) << 10) @@ -706,6 +713,37 @@ #define MIPS_SEGCFG_MK _ULCAST_(1) #define MIPS_SEGCFG_UK _ULCAST_(0) +#define MIPS_PWFIELD_GDI_SHIFT 24 +#define MIPS_PWFIELD_GDI_MASK 0x3f000000 +#define MIPS_PWFIELD_UDI_SHIFT 18 +#define MIPS_PWFIELD_UDI_MASK 0x00fc0000 +#define MIPS_PWFIELD_MDI_SHIFT 12 +#define MIPS_PWFIELD_MDI_MASK 0x0003f000 +#define MIPS_PWFIELD_PTI_SHIFT 6 +#define MIPS_PWFIELD_PTI_MASK 0x00000fc0 +#define MIPS_PWFIELD_PTEI_SHIFT 0 +#define MIPS_PWFIELD_PTEI_MASK 0x0000003f + +#define MIPS_PWSIZE_GDW_SHIFT 24 +#define MIPS_PWSIZE_GDW_MASK 0x3f000000 +#define MIPS_PWSIZE_UDW_SHIFT 18 +#define MIPS_PWSIZE_UDW_MASK 0x00fc0000 +#define MIPS_PWSIZE_MDW_SHIFT 12 +#define MIPS_PWSIZE_MDW_MASK 0x0003f000 +#define MIPS_PWSIZE_PTW_SHIFT 6 +#define MIPS_PWSIZE_PTW_MASK 0x00000fc0 +#define MIPS_PWSIZE_PTEW_SHIFT 0 +#define MIPS_PWSIZE_PTEW_MASK 0x0000003f + +#define MIPS_PWCTL_PWEN_SHIFT 31 +#define MIPS_PWCTL_PWEN_MASK 0x80000000 +#define MIPS_PWCTL_DPH_SHIFT 7 +#define MIPS_PWCTL_DPH_MASK 0x00000080 +#define MIPS_PWCTL_HUGEPG_SHIFT 6 +#define MIPS_PWCTL_HUGEPG_MASK 0x00000060 +#define MIPS_PWCTL_PSN_SHIFT 0 +#define MIPS_PWCTL_PSN_MASK 0x0000003f + #ifndef __ASSEMBLY__ /* @@ -1044,6 +1082,11 @@ do { \ #define write_c0_config6(val) __write_32bit_c0_register($16, 6, val) #define write_c0_config7(val) __write_32bit_c0_register($16, 7, val) +#define read_c0_maar() __read_ulong_c0_register($17, 1) +#define write_c0_maar(val) __write_ulong_c0_register($17, 1, val) +#define read_c0_maari() __read_32bit_c0_register($17, 2) +#define write_c0_maari(val) __write_32bit_c0_register($17, 2, val) + /* * The WatchLo register. There may be up to 8 of them. 
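
The MAAR field definitions and the read/write_c0_maar()/maari() accessors added above could be used along the following lines to mark a physical range as speculation-safe. This is an illustrative sketch only; the pair/index assignment, attribute choice and helper name are assumptions, not the kernel's actual MAAR setup code:

#include <linux/types.h>
#include <asm/mipsregs.h>

static void example_maar_speculate(unsigned int pair, phys_addr_t lo,
				   phys_addr_t hi)
{
	/* upper bound of the range goes into the even MAAR of the pair */
	write_c0_maari(pair * 2);
	back_to_back_c0_hazard();
	write_c0_maar(((hi >> 4) & MIPS_MAAR_ADDR) | MIPS_MAAR_S | MIPS_MAAR_V);

	/* lower bound goes into the odd MAAR */
	write_c0_maari(pair * 2 + 1);
	back_to_back_c0_hazard();
	write_c0_maar(((lo >> 4) & MIPS_MAAR_ADDR) | MIPS_MAAR_S | MIPS_MAAR_V);
}
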
*/ @@ -1201,6 +1244,19 @@ do { \ #define read_c0_segctl2() __read_32bit_c0_register($5, 4) #define write_c0_segctl2(val) __write_32bit_c0_register($5, 4, val) +/* Hardware Page Table Walker */ +#define read_c0_pwbase() __read_ulong_c0_register($5, 5) +#define write_c0_pwbase(val) __write_ulong_c0_register($5, 5, val) + +#define read_c0_pwfield() __read_ulong_c0_register($5, 6) +#define write_c0_pwfield(val) __write_ulong_c0_register($5, 6, val) + +#define read_c0_pwsize() __read_ulong_c0_register($5, 7) +#define write_c0_pwsize(val) __write_ulong_c0_register($5, 7, val) + +#define read_c0_pwctl() __read_32bit_c0_register($6, 6) +#define write_c0_pwctl(val) __write_32bit_c0_register($6, 6, val) + /* Cavium OCTEON (cnMIPS) */ #define read_c0_cvmcount() __read_ulong_c0_register($9, 6) #define write_c0_cvmcount(val) __write_ulong_c0_register($9, 6, val) diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h index 2e373da5f8e9..2f82568a3ee4 100644 --- a/arch/mips/include/asm/mmu_context.h +++ b/arch/mips/include/asm/mmu_context.h @@ -20,10 +20,20 @@ #include <asm/tlbflush.h> #include <asm-generic/mm_hooks.h> +#define htw_set_pwbase(pgd) \ +do { \ + if (cpu_has_htw) { \ + write_c0_pwbase(pgd); \ + back_to_back_c0_hazard(); \ + htw_reset(); \ + } \ +} while (0) + #define TLBMISS_HANDLER_SETUP_PGD(pgd) \ do { \ extern void tlbmiss_handler_setup_pgd(unsigned long); \ tlbmiss_handler_setup_pgd((unsigned long)(pgd)); \ + htw_set_pwbase((unsigned long)pgd); \ } while (0) #ifdef CONFIG_MIPS_PGD_C0_CONTEXT diff --git a/arch/mips/include/asm/msa.h b/arch/mips/include/asm/msa.h index 538f6d482db8..af5638b12c75 100644 --- a/arch/mips/include/asm/msa.h +++ b/arch/mips/include/asm/msa.h @@ -12,8 +12,11 @@ #include <asm/mipsregs.h> +#ifndef __ASSEMBLY__ + extern void _save_msa(struct task_struct *); extern void _restore_msa(struct task_struct *); +extern void _init_msa_upper(void); static inline void enable_msa(void) { @@ -112,10 +115,10 @@ static inline unsigned int read_msa_##name(void) \ " .set push\n" \ " .set noat\n" \ " .insn\n" \ - " .word #CFC_MSA_INSN | (" #cs " << 11)\n" \ + " .word %1 | (" #cs " << 11)\n" \ " move %0, $1\n" \ " .set pop\n" \ - : "=r"(reg)); \ + : "=r"(reg) : "i"(CFC_MSA_INSN)); \ return reg; \ } \ \ @@ -126,22 +129,13 @@ static inline void write_msa_##name(unsigned int val) \ " .set noat\n" \ " move $1, %0\n" \ " .insn\n" \ - " .word #CTC_MSA_INSN | (" #cs " << 6)\n" \ + " .word %1 | (" #cs " << 6)\n" \ " .set pop\n" \ - : : "r"(val)); \ + : : "r"(val), "i"(CTC_MSA_INSN)); \ } #endif /* !TOOLCHAIN_SUPPORTS_MSA */ -#define MSA_IR 0 -#define MSA_CSR 1 -#define MSA_ACCESS 2 -#define MSA_SAVE 3 -#define MSA_MODIFY 4 -#define MSA_REQUEST 5 -#define MSA_MAP 6 -#define MSA_UNMAP 7 - __BUILD_MSA_CTL_REG(ir, 0) __BUILD_MSA_CTL_REG(csr, 1) __BUILD_MSA_CTL_REG(access, 2) @@ -151,6 +145,17 @@ __BUILD_MSA_CTL_REG(request, 5) __BUILD_MSA_CTL_REG(map, 6) __BUILD_MSA_CTL_REG(unmap, 7) +#endif /* !__ASSEMBLY__ */ + +#define MSA_IR 0 +#define MSA_CSR 1 +#define MSA_ACCESS 2 +#define MSA_SAVE 3 +#define MSA_MODIFY 4 +#define MSA_REQUEST 5 +#define MSA_MAP 6 +#define MSA_UNMAP 7 + /* MSA Implementation Register (MSAIR) */ #define MSA_IR_REVB 0 #define MSA_IR_REVF (_ULCAST_(0xff) << MSA_IR_REVB) diff --git a/arch/mips/include/asm/octeon/cvmx-bootinfo.h b/arch/mips/include/asm/octeon/cvmx-bootinfo.h index 7b7818d1e4d5..2298199a287e 100644 --- a/arch/mips/include/asm/octeon/cvmx-bootinfo.h +++ b/arch/mips/include/asm/octeon/cvmx-bootinfo.h @@ -228,6 +228,7 @@ enum 
cvmx_board_types_enum { */ CVMX_BOARD_TYPE_CUST_PRIVATE_MIN = 20001, CVMX_BOARD_TYPE_UBNT_E100 = 20002, + CVMX_BOARD_TYPE_CUST_DSR1000N = 20006, CVMX_BOARD_TYPE_CUST_PRIVATE_MAX = 30000, /* The remaining range is reserved for future use. */ @@ -327,6 +328,7 @@ static inline const char *cvmx_board_type_to_string(enum /* Customer private range */ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MIN) ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_UBNT_E100) + ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DSR1000N) ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MAX) } return "Unsupported Board"; diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index 5699ec3a71af..3be81803595d 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h @@ -37,7 +37,7 @@ /* * This is used for calculating the real page sizes - * for FTLB or VTLB + FTLB confugrations. + * for FTLB or VTLB + FTLB configurations. */ static inline unsigned int page_size_ftlb(unsigned int mmuextdef) { @@ -223,7 +223,8 @@ static inline int pfn_valid(unsigned long pfn) #endif -#define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(virt_to_phys(kaddr))) +#define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(virt_to_phys((void *) \ + (kaddr)))) extern int __virt_addr_valid(const volatile void *kaddr); #define virt_addr_valid(kaddr) \ diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h index b4204c179b97..cd7d6064bcbe 100644 --- a/arch/mips/include/asm/pgtable-32.h +++ b/arch/mips/include/asm/pgtable-32.h @@ -18,6 +18,18 @@ #include <asm-generic/pgtable-nopmd.h> +extern int temp_tlb_entry __cpuinitdata; + +/* + * - add_temporary_entry() add a temporary TLB entry. We use TLB entries + * starting at the top and working down. This is for populating the + * TLB before trap_init() puts the TLB miss handler in place. It + * should be used only for entries matching the actual page tables, + * to prevent inconsistencies. + */ +extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, + unsigned long entryhi, unsigned long pagemask); + /* * Basically we have the same two-level (which is the logical three level * Linux page table layout folded) page tables as the i386. Some day diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h index e592f3687d6f..e747bfa0be7e 100644 --- a/arch/mips/include/asm/pgtable-bits.h +++ b/arch/mips/include/asm/pgtable-bits.h @@ -224,38 +224,52 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val) #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) #define _CACHE_CACHABLE_NONCOHERENT 0 +#define _CACHE_UNCACHED_ACCELERATED _CACHE_UNCACHED #elif defined(CONFIG_CPU_SB1) /* No penalty for being coherent on the SB1, so just use it for "noncoherent" spaces, too. Shouldn't hurt. */ -#define _CACHE_UNCACHED (2<<_CACHE_SHIFT) -#define _CACHE_CACHABLE_COW (5<<_CACHE_SHIFT) #define _CACHE_CACHABLE_NONCOHERENT (5<<_CACHE_SHIFT) -#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT) #elif defined(CONFIG_CPU_LOONGSON3) /* Using COHERENT flag for NONCOHERENT doesn't hurt. 
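
On the page.h change above: virt_to_phys() takes a pointer, so the added (void *) cast is what lets callers keep handing virt_to_page() an unsigned long kernel address without a conversion warning, e.g. (illustrative snippet, assumed caller):

#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *example_alloc(void)
{
	unsigned long kaddr = __get_free_page(GFP_KERNEL);

	if (!kaddr)
		return NULL;
	return virt_to_page(kaddr);	/* unsigned long is fine with the cast */
}
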
*/ -#define _CACHE_UNCACHED (2<<_CACHE_SHIFT) /* LOONGSON */ #define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT) /* LOONGSON */ #define _CACHE_CACHABLE_COHERENT (3<<_CACHE_SHIFT) /* LOONGSON-3 */ -#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT) /* LOONGSON */ -#else +#elif defined(CONFIG_MACH_JZ4740) + +/* Ingenic uses the WA bit to achieve write-combine memory writes */ +#define _CACHE_UNCACHED_ACCELERATED (1<<_CACHE_SHIFT) -#define _CACHE_CACHABLE_NO_WA (0<<_CACHE_SHIFT) /* R4600 only */ -#define _CACHE_CACHABLE_WA (1<<_CACHE_SHIFT) /* R4600 only */ -#define _CACHE_UNCACHED (2<<_CACHE_SHIFT) /* R4[0246]00 */ -#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT) /* R4[0246]00 */ -#define _CACHE_CACHABLE_CE (4<<_CACHE_SHIFT) /* R4[04]00MC only */ -#define _CACHE_CACHABLE_COW (5<<_CACHE_SHIFT) /* R4[04]00MC only */ -#define _CACHE_CACHABLE_COHERENT (5<<_CACHE_SHIFT) /* MIPS32R2 CMP */ -#define _CACHE_CACHABLE_CUW (6<<_CACHE_SHIFT) /* R4[04]00MC only */ -#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT) /* R10000 only */ +#endif +#ifndef _CACHE_CACHABLE_NO_WA +#define _CACHE_CACHABLE_NO_WA (0<<_CACHE_SHIFT) +#endif +#ifndef _CACHE_CACHABLE_WA +#define _CACHE_CACHABLE_WA (1<<_CACHE_SHIFT) +#endif +#ifndef _CACHE_UNCACHED +#define _CACHE_UNCACHED (2<<_CACHE_SHIFT) +#endif +#ifndef _CACHE_CACHABLE_NONCOHERENT +#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT) +#endif +#ifndef _CACHE_CACHABLE_CE +#define _CACHE_CACHABLE_CE (4<<_CACHE_SHIFT) +#endif +#ifndef _CACHE_CACHABLE_COW +#define _CACHE_CACHABLE_COW (5<<_CACHE_SHIFT) +#endif +#ifndef _CACHE_CACHABLE_CUW +#define _CACHE_CACHABLE_CUW (6<<_CACHE_SHIFT) +#endif +#ifndef _CACHE_UNCACHED_ACCELERATED +#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT) #endif #define __READABLE (_PAGE_SILENT_READ | _PAGE_ACCESSED | (cpu_has_rixi ? 
0 : _PAGE_READ)) diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 539ddd148bbb..d6d1928539b1 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -97,6 +97,34 @@ extern void paging_init(void); #define pmd_page_vaddr(pmd) pmd_val(pmd) +#define htw_stop() \ +do { \ + if (cpu_has_htw) \ + write_c0_pwctl(read_c0_pwctl() & \ + ~(1 << MIPS_PWCTL_PWEN_SHIFT)); \ +} while(0) + +#define htw_start() \ +do { \ + if (cpu_has_htw) \ + write_c0_pwctl(read_c0_pwctl() | \ + (1 << MIPS_PWCTL_PWEN_SHIFT)); \ +} while(0) + + +#define htw_reset() \ +do { \ + if (cpu_has_htw) { \ + htw_stop(); \ + back_to_back_c0_hazard(); \ + htw_start(); \ + back_to_back_c0_hazard(); \ + } \ +} while(0) + +extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, + pte_t pteval); + #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) #define pte_none(pte) (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL)) @@ -120,7 +148,6 @@ static inline void set_pte(pte_t *ptep, pte_t pte) } } } -#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { @@ -131,6 +158,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt null.pte_low = null.pte_high = _PAGE_GLOBAL; set_pte_at(mm, addr, ptep, null); + htw_reset(); } #else @@ -157,7 +185,6 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) } #endif } -#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { @@ -168,6 +195,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt else #endif set_pte_at(mm, addr, ptep, __pte(0)); + htw_reset(); } #endif @@ -338,6 +366,16 @@ static inline pgprot_t pgprot_noncached(pgprot_t _prot) return __pgprot(prot); } +static inline pgprot_t pgprot_writecombine(pgprot_t _prot) +{ + unsigned long prot = pgprot_val(_prot); + + /* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */ + prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine; + + return __pgprot(prot); +} + /* * Conversion functions: convert a page and protection to a page entry, * and a page entry and page directory to the page they refer to. @@ -363,15 +401,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) extern void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte); -extern void __update_cache(struct vm_area_struct *vma, unsigned long address, - pte_t pte); static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { pte_t pte = *ptep; __update_tlb(vma, address, pte); - __update_cache(vma, address, pte); } static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index ad70cba8daff..f1df4cb4a286 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h @@ -238,7 +238,13 @@ typedef struct { unsigned long seg; } mm_segment_t; -#define ARCH_MIN_TASKALIGN 8 +#ifdef CONFIG_CPU_HAS_MSA +# define ARCH_MIN_TASKALIGN 16 +# define FPU_ALIGN __aligned(16) +#else +# define ARCH_MIN_TASKALIGN 8 +# define FPU_ALIGN +#endif struct mips_abi; @@ -255,7 +261,7 @@ struct thread_struct { unsigned long cp0_status; /* Saved fpu/fpu emulator stuff. 
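
Returning to the pgtable.h hunk above: with pgprot_writecombine() now available (backed by the cpu_data[0].writecombine attribute noted in the comment), a driver mmap() path can request write-combined user mappings in the usual way. A minimal, hypothetical example; the device, physical base and function name are assumptions:

#include <linux/fs.h>
#include <linux/mm.h>

/* Illustrative only: map a hypothetical framebuffer write-combined. */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = 0x10000000UL >> PAGE_SHIFT;	/* assumed phys base */

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start, pfn,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
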
*/ - struct mips_fpu_struct fpu; + struct mips_fpu_struct fpu FPU_ALIGN; #ifdef CONFIG_MIPS_MT_FPAFF /* Emulated instruction count */ unsigned long emulated_fp; @@ -367,6 +373,7 @@ unsigned long get_wchan(struct task_struct *p); #define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status) #define cpu_relax() barrier() +#define cpu_relax_lowlatency() cpu_relax() /* * Return_address is a replacement for __builtin_return_address(count) @@ -390,12 +397,6 @@ unsigned long get_wchan(struct task_struct *p); #define ARCH_HAS_PREFETCHW #define prefetchw(x) __builtin_prefetch((x), 1, 1) -/* - * See Documentation/scheduler/sched-arch.txt; prevents deadlock on SMP - * systems. - */ -#define __ARCH_WANT_UNLOCKED_CTXSW - #endif #endif /* _ASM_PROCESSOR_H */ diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h index 7e6e682aece3..fc783f843bdc 100644 --- a/arch/mips/include/asm/ptrace.h +++ b/arch/mips/include/asm/ptrace.h @@ -23,7 +23,7 @@ struct pt_regs { #ifdef CONFIG_32BIT /* Pad bytes for argument save space on the stack. */ - unsigned long pad0[6]; + unsigned long pad0[8]; #endif /* Saved main processor registers. */ @@ -47,8 +47,10 @@ struct pt_regs { struct task_struct; -extern int ptrace_getregs(struct task_struct *child, __s64 __user *data); -extern int ptrace_setregs(struct task_struct *child, __s64 __user *data); +extern int ptrace_getregs(struct task_struct *child, + struct user_pt_regs __user *data); +extern int ptrace_setregs(struct task_struct *child, + struct user_pt_regs __user *data); extern int ptrace_getfpregs(struct task_struct *child, __u32 __user *data); extern int ptrace_setfpregs(struct task_struct *child, __u32 __user *data); diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h index 0b8bd28a0df1..4520adc8699b 100644 --- a/arch/mips/include/asm/r4kcache.h +++ b/arch/mips/include/asm/r4kcache.h @@ -19,6 +19,9 @@ #include <asm/mipsmtregs.h> #include <asm/uaccess.h> /* for segment_eq() */ +extern void (*r4k_blast_dcache)(void); +extern void (*r4k_blast_icache)(void); + /* * This macro return a properly sign-extended address suitable as base address * for indexed cache operations. Two issues here: diff --git a/arch/mips/include/asm/reg.h b/arch/mips/include/asm/reg.h index 910e71a12466..84dc7e2e27a8 100644 --- a/arch/mips/include/asm/reg.h +++ b/arch/mips/include/asm/reg.h @@ -1,128 +1 @@ -/* - * Various register offset definitions for debuggers, core file - * examiners and whatnot. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
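[Annotation] The processor.h change raises ARCH_MIN_TASKALIGN to 16 and marks the thread's FPU save area with __aligned(16) when CONFIG_CPU_HAS_MSA is enabled, since MSA vector registers are 128 bits wide and want a naturally aligned save area. A standalone illustration of the attribute (hypothetical structures, not the kernel's):

	#include <assert.h>
	#include <stdalign.h>

	/* Hypothetical 128-bit vector register image, aligned as FPU_ALIGN requests. */
	struct vreg {
		unsigned long long val64[2];
	} __attribute__((aligned(16)));

	struct vec_context {
		struct vreg vr[32];		/* 32 x 128-bit registers */
	};

	static_assert(alignof(struct vec_context) == 16,
		      "vector save area must be 16-byte aligned");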
- * - * Copyright (C) 1995, 1999 Ralf Baechle - * Copyright (C) 1995, 1999 Silicon Graphics - */ -#ifndef __ASM_MIPS_REG_H -#define __ASM_MIPS_REG_H - - -#if defined(CONFIG_32BIT) || defined(WANT_COMPAT_REG_H) - -#define EF_R0 6 -#define EF_R1 7 -#define EF_R2 8 -#define EF_R3 9 -#define EF_R4 10 -#define EF_R5 11 -#define EF_R6 12 -#define EF_R7 13 -#define EF_R8 14 -#define EF_R9 15 -#define EF_R10 16 -#define EF_R11 17 -#define EF_R12 18 -#define EF_R13 19 -#define EF_R14 20 -#define EF_R15 21 -#define EF_R16 22 -#define EF_R17 23 -#define EF_R18 24 -#define EF_R19 25 -#define EF_R20 26 -#define EF_R21 27 -#define EF_R22 28 -#define EF_R23 29 -#define EF_R24 30 -#define EF_R25 31 - -/* - * k0/k1 unsaved - */ -#define EF_R26 32 -#define EF_R27 33 - -#define EF_R28 34 -#define EF_R29 35 -#define EF_R30 36 -#define EF_R31 37 - -/* - * Saved special registers - */ -#define EF_LO 38 -#define EF_HI 39 - -#define EF_CP0_EPC 40 -#define EF_CP0_BADVADDR 41 -#define EF_CP0_STATUS 42 -#define EF_CP0_CAUSE 43 -#define EF_UNUSED0 44 - -#define EF_SIZE 180 - -#endif - -#if defined(CONFIG_64BIT) && !defined(WANT_COMPAT_REG_H) - -#define EF_R0 0 -#define EF_R1 1 -#define EF_R2 2 -#define EF_R3 3 -#define EF_R4 4 -#define EF_R5 5 -#define EF_R6 6 -#define EF_R7 7 -#define EF_R8 8 -#define EF_R9 9 -#define EF_R10 10 -#define EF_R11 11 -#define EF_R12 12 -#define EF_R13 13 -#define EF_R14 14 -#define EF_R15 15 -#define EF_R16 16 -#define EF_R17 17 -#define EF_R18 18 -#define EF_R19 19 -#define EF_R20 20 -#define EF_R21 21 -#define EF_R22 22 -#define EF_R23 23 -#define EF_R24 24 -#define EF_R25 25 - -/* - * k0/k1 unsaved - */ -#define EF_R26 26 -#define EF_R27 27 - - -#define EF_R28 28 -#define EF_R29 29 -#define EF_R30 30 -#define EF_R31 31 - -/* - * Saved special registers - */ -#define EF_LO 32 -#define EF_HI 33 - -#define EF_CP0_EPC 34 -#define EF_CP0_BADVADDR 35 -#define EF_CP0_STATUS 36 -#define EF_CP0_CAUSE 37 - -#define EF_SIZE 304 /* size in bytes */ - -#endif /* CONFIG_64BIT */ - -#endif /* __ASM_MIPS_REG_H */ +#include <uapi/asm/reg.h> diff --git a/arch/mips/include/asm/smp-cps.h b/arch/mips/include/asm/smp-cps.h index a06a08a9afc6..326c16ebd589 100644 --- a/arch/mips/include/asm/smp-cps.h +++ b/arch/mips/include/asm/smp-cps.h @@ -31,11 +31,19 @@ extern void mips_cps_core_init(void); extern struct vpe_boot_config *mips_cps_boot_vpes(void); -extern bool mips_cps_smp_in_use(void); - extern void mips_cps_pm_save(void); extern void mips_cps_pm_restore(void); +#ifdef CONFIG_MIPS_CPS + +extern bool mips_cps_smp_in_use(void); + +#else /* !CONFIG_MIPS_CPS */ + +static inline bool mips_cps_smp_in_use(void) { return false; } + +#endif /* !CONFIG_MIPS_CPS */ + #else /* __ASSEMBLY__ */ .extern mips_cps_bootcfg; diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h index b037334fca22..eacf865d21c2 100644 --- a/arch/mips/include/asm/smp.h +++ b/arch/mips/include/asm/smp.h @@ -22,6 +22,7 @@ extern int smp_num_siblings; extern cpumask_t cpu_sibling_map[]; +extern cpumask_t cpu_core_map[]; #define raw_smp_processor_id() (current_thread_info()->cpu) diff --git a/arch/mips/include/asm/sparsemem.h b/arch/mips/include/asm/sparsemem.h index d2da53c2c2f8..b1071c1e54f5 100644 --- a/arch/mips/include/asm/sparsemem.h +++ b/arch/mips/include/asm/sparsemem.h @@ -11,7 +11,7 @@ #else # define SECTION_SIZE_BITS 28 #endif -#define MAX_PHYSMEM_BITS 35 +#define MAX_PHYSMEM_BITS 48 #endif /* CONFIG_SPARSEMEM */ #endif /* _MIPS_SPARSEMEM_H */ diff --git a/arch/mips/include/asm/suspend.h 
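[Annotation] The smp-cps.h hunk swaps the bare declaration of mips_cps_smp_in_use() for a config-dependent pair: the real function when CONFIG_MIPS_CPS is set, and a static inline returning false otherwise, so callers never need their own #ifdefs. A sketch of how a caller benefits (the caller body is illustrative; the pm-cps.c hunk further down uses exactly this check to refuse power gating):

	#ifdef CONFIG_MIPS_CPS
	extern bool mips_cps_smp_in_use(void);
	#else
	static inline bool mips_cps_smp_in_use(void) { return false; }
	#endif

	/* Hypothetical caller: compiles the same either way, and the
	 * !CONFIG_MIPS_CPS build folds the check to a constant. */
	static int enter_power_gated_state(void)
	{
		if (!mips_cps_smp_in_use())
			return -EINVAL;		/* power gating relies on CPS SMP */
		/* ... proceed with the state entry ... */
		return 0;
	}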
b/arch/mips/include/asm/suspend.h deleted file mode 100644 index 3adac3b53d19..000000000000 --- a/arch/mips/include/asm/suspend.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef __ASM_SUSPEND_H -#define __ASM_SUSPEND_H - -/* References to section boundaries */ -extern const void __nosave_begin, __nosave_end; - -#endif /* __ASM_SUSPEND_H */ diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h index 495c1041a2cc..b928b6f898cd 100644 --- a/arch/mips/include/asm/switch_to.h +++ b/arch/mips/include/asm/switch_to.h @@ -92,7 +92,7 @@ do { \ KSTK_STATUS(prev) &= ~ST0_CU2; \ __c0_stat = read_c0_status(); \ write_c0_status(__c0_stat | ST0_CU2); \ - cop2_save(&prev->thread.cp2); \ + cop2_save(prev); \ write_c0_status(__c0_stat & ~ST0_CU2); \ } \ __clear_software_ll_bit(); \ @@ -111,7 +111,7 @@ do { \ (KSTK_STATUS(current) & ST0_CU2)) { \ __c0_stat = read_c0_status(); \ write_c0_status(__c0_stat | ST0_CU2); \ - cop2_restore(¤t->thread.cp2); \ + cop2_restore(current); \ write_c0_status(__c0_stat & ~ST0_CU2); \ } \ if (cpu_has_dsp) \ diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h index 93b3b86c293c..bb7963753730 100644 --- a/arch/mips/include/asm/syscall.h +++ b/arch/mips/include/asm/syscall.h @@ -131,10 +131,12 @@ static inline int syscall_get_arch(void) { int arch = AUDIT_ARCH_MIPS; #ifdef CONFIG_64BIT - if (!test_thread_flag(TIF_32BIT_REGS)) + if (!test_thread_flag(TIF_32BIT_REGS)) { arch |= __AUDIT_ARCH_64BIT; - if (test_thread_flag(TIF_32BIT_ADDR)) - arch |= __AUDIT_ARCH_CONVENTION_MIPS64_N32; + /* N32 sets only TIF_32BIT_ADDR */ + if (test_thread_flag(TIF_32BIT_ADDR)) + arch |= __AUDIT_ARCH_CONVENTION_MIPS64_N32; + } #endif #if defined(__LITTLE_ENDIAN) arch |= __AUDIT_ARCH_LE; diff --git a/arch/mips/include/asm/topology.h b/arch/mips/include/asm/topology.h index 20ea4859c822..3e307ec2afba 100644 --- a/arch/mips/include/asm/topology.h +++ b/arch/mips/include/asm/topology.h @@ -9,5 +9,13 @@ #define __ASM_TOPOLOGY_H #include <topology.h> +#include <linux/smp.h> + +#ifdef CONFIG_SMP +#define topology_physical_package_id(cpu) (cpu_data[cpu].package) +#define topology_core_id(cpu) (cpu_data[cpu].core) +#define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) +#define topology_thread_cpumask(cpu) (&cpu_sibling_map[cpu]) +#endif #endif /* __ASM_TOPOLOGY_H */ diff --git a/arch/mips/include/asm/user.h b/arch/mips/include/asm/user.h deleted file mode 100644 index 6bad61b0a53a..000000000000 --- a/arch/mips/include/asm/user.h +++ /dev/null @@ -1,58 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1994, 1995, 1996, 1999 by Ralf Baechle - */ -#ifndef _ASM_USER_H -#define _ASM_USER_H - -#include <asm/page.h> -#include <asm/reg.h> - -/* - * Core file format: The core file is written in such a way that gdb - * can understand it and provide useful information to the user (under - * linux we use the `trad-core' bfd, NOT the irix-core). The file - * contents are as follows: - * - * upage: 1 page consisting of a user struct that tells gdb - * what is present in the file. Directly after this is a - * copy of the task_struct, which is currently not used by gdb, - * but it may come in handy at some point. All of the registers - * are stored as part of the upage. The upage should always be - * only one page long. - * data: The data segment follows next. 
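[Annotation] The syscall.h fix nests the N32 test inside the 64-bit-register branch of syscall_get_arch(): N32 runs with 64-bit registers but 32-bit addresses, so TIF_32BIT_ADDR on its own (as under o32) must not add the N32 convention flag. Restated as a small function with the thread flags passed in as booleans (the helper name is illustrative, the constants are the kernel's):

	static int example_audit_arch(bool regs_are_32bit, bool addrs_are_32bit)
	{
		int arch = AUDIT_ARCH_MIPS;

		if (!regs_are_32bit) {				/* n64 or n32 */
			arch |= __AUDIT_ARCH_64BIT;
			if (addrs_are_32bit)			/* n32 only */
				arch |= __AUDIT_ARCH_CONVENTION_MIPS64_N32;
		}
		/* o32 keeps plain AUDIT_ARCH_MIPS (plus the endianness bit) */
		return arch;
	}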
We use current->end_text to - * current->brk to pick up all of the user variables, plus any memory - * that may have been sbrk'ed. No attempt is made to determine if a - * page is demand-zero or if a page is totally unused, we just cover - * the entire range. All of the addresses are rounded in such a way - * that an integral number of pages is written. - * stack: We need the stack information in order to get a meaningful - * backtrace. We need to write the data from usp to - * current->start_stack, so we round each of these in order to be able - * to write an integer number of pages. - */ -struct user { - unsigned long regs[EF_SIZE / /* integer and fp regs */ - sizeof(unsigned long) + 64]; - size_t u_tsize; /* text size (pages) */ - size_t u_dsize; /* data size (pages) */ - size_t u_ssize; /* stack size (pages) */ - unsigned long start_code; /* text starting address */ - unsigned long start_data; /* data starting address */ - unsigned long start_stack; /* stack starting address */ - long int signal; /* signal causing core dump */ - unsigned long u_ar0; /* help gdb find registers */ - unsigned long magic; /* identifies a core file */ - char u_comm[32]; /* user command name */ -}; - -#define NBPG PAGE_SIZE -#define UPAGES 1 -#define HOST_TEXT_START_ADDR (u.start_code) -#define HOST_DATA_START_ADDR (u.start_data) -#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) - -#endif /* _ASM_USER_H */ diff --git a/arch/mips/include/uapi/asm/ioctls.h b/arch/mips/include/uapi/asm/ioctls.h index b1e637757fe3..740219c2c894 100644 --- a/arch/mips/include/uapi/asm/ioctls.h +++ b/arch/mips/include/uapi/asm/ioctls.h @@ -81,6 +81,8 @@ #define TCSETS2 _IOW('T', 0x2B, struct termios2) #define TCSETSW2 _IOW('T', 0x2C, struct termios2) #define TCSETSF2 _IOW('T', 0x2D, struct termios2) +#define TIOCGRS485 _IOR('T', 0x2E, struct serial_rs485) +#define TIOCSRS485 _IOWR('T', 0x2F, struct serial_rs485) #define TIOCGPTN _IOR('T', 0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ #define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */ #define TIOCGDEV _IOR('T', 0x32, unsigned int) /* Get primary device node of /dev/console */ diff --git a/arch/mips/include/uapi/asm/ptrace.h b/arch/mips/include/uapi/asm/ptrace.h index b26f7e317279..bbcfb8ba8106 100644 --- a/arch/mips/include/uapi/asm/ptrace.h +++ b/arch/mips/include/uapi/asm/ptrace.h @@ -22,24 +22,27 @@ #define DSP_CONTROL 77 #define ACX 78 -#ifndef __KERNEL__ /* - * This struct defines the way the registers are stored on the stack during a - * system call/exception. As usual the registers k0/k1 aren't being saved. + * This struct defines the registers as used by PTRACE_{GET,SET}REGS. The + * format is the same for both 32- and 64-bit processes. Registers for 32-bit + * processes are sign extended. */ +#ifdef __KERNEL__ +struct user_pt_regs { +#else struct pt_regs { +#endif /* Saved main processor registers. */ - unsigned long regs[32]; + __u64 regs[32]; /* Saved special registers. */ - unsigned long cp0_status; - unsigned long hi; - unsigned long lo; - unsigned long cp0_badvaddr; - unsigned long cp0_cause; - unsigned long cp0_epc; + __u64 lo; + __u64 hi; + __u64 cp0_epc; + __u64 cp0_badvaddr; + __u64 cp0_status; + __u64 cp0_cause; } __attribute__ ((aligned (8))); -#endif /* __KERNEL__ */ /* Arbitrarily choose the same ptrace numbers as used by the Sparc code. 
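[Annotation] The uapi ptrace.h hunk fixes the PTRACE_GETREGS/PTRACE_SETREGS register dump at 64-bit fields for every ABI, with 32-bit tracees sign extended. A userspace sketch of reading it back (assumptions: the local struct mirrors the layout in the hunk, and the buffer travels in ptrace()'s data argument as glibc normally arranges; treat both as illustrative rather than guaranteed):

	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>

	struct mips_user_pt_regs {		/* mirrors the hunk's layout */
		uint64_t regs[32];		/* sign-extended for 32-bit tracees */
		uint64_t lo, hi;
		uint64_t cp0_epc;
		uint64_t cp0_badvaddr;
		uint64_t cp0_status;
		uint64_t cp0_cause;
	};

	int dump_tracee_pc(pid_t pid)
	{
		struct mips_user_pt_regs r;

		if (ptrace(PTRACE_GETREGS, pid, NULL, &r) == -1)
			return -1;

		printf("epc=%#llx sp=%#llx ra=%#llx\n",
		       (unsigned long long)r.cp0_epc,
		       (unsigned long long)r.regs[29],	/* $29 = sp */
		       (unsigned long long)r.regs[31]);	/* $31 = ra */
		return 0;
	}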
*/ #define PTRACE_GETREGS 12 diff --git a/arch/mips/include/uapi/asm/reg.h b/arch/mips/include/uapi/asm/reg.h new file mode 100644 index 000000000000..081e377f4f02 --- /dev/null +++ b/arch/mips/include/uapi/asm/reg.h @@ -0,0 +1,206 @@ +/* + * Various register offset definitions for debuggers, core file + * examiners and whatnot. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995, 1999 Ralf Baechle + * Copyright (C) 1995, 1999 Silicon Graphics + */ +#ifndef __UAPI_ASM_MIPS_REG_H +#define __UAPI_ASM_MIPS_REG_H + +#define MIPS32_EF_R0 6 +#define MIPS32_EF_R1 7 +#define MIPS32_EF_R2 8 +#define MIPS32_EF_R3 9 +#define MIPS32_EF_R4 10 +#define MIPS32_EF_R5 11 +#define MIPS32_EF_R6 12 +#define MIPS32_EF_R7 13 +#define MIPS32_EF_R8 14 +#define MIPS32_EF_R9 15 +#define MIPS32_EF_R10 16 +#define MIPS32_EF_R11 17 +#define MIPS32_EF_R12 18 +#define MIPS32_EF_R13 19 +#define MIPS32_EF_R14 20 +#define MIPS32_EF_R15 21 +#define MIPS32_EF_R16 22 +#define MIPS32_EF_R17 23 +#define MIPS32_EF_R18 24 +#define MIPS32_EF_R19 25 +#define MIPS32_EF_R20 26 +#define MIPS32_EF_R21 27 +#define MIPS32_EF_R22 28 +#define MIPS32_EF_R23 29 +#define MIPS32_EF_R24 30 +#define MIPS32_EF_R25 31 + +/* + * k0/k1 unsaved + */ +#define MIPS32_EF_R26 32 +#define MIPS32_EF_R27 33 + +#define MIPS32_EF_R28 34 +#define MIPS32_EF_R29 35 +#define MIPS32_EF_R30 36 +#define MIPS32_EF_R31 37 + +/* + * Saved special registers + */ +#define MIPS32_EF_LO 38 +#define MIPS32_EF_HI 39 + +#define MIPS32_EF_CP0_EPC 40 +#define MIPS32_EF_CP0_BADVADDR 41 +#define MIPS32_EF_CP0_STATUS 42 +#define MIPS32_EF_CP0_CAUSE 43 +#define MIPS32_EF_UNUSED0 44 + +#define MIPS32_EF_SIZE 180 + +#define MIPS64_EF_R0 0 +#define MIPS64_EF_R1 1 +#define MIPS64_EF_R2 2 +#define MIPS64_EF_R3 3 +#define MIPS64_EF_R4 4 +#define MIPS64_EF_R5 5 +#define MIPS64_EF_R6 6 +#define MIPS64_EF_R7 7 +#define MIPS64_EF_R8 8 +#define MIPS64_EF_R9 9 +#define MIPS64_EF_R10 10 +#define MIPS64_EF_R11 11 +#define MIPS64_EF_R12 12 +#define MIPS64_EF_R13 13 +#define MIPS64_EF_R14 14 +#define MIPS64_EF_R15 15 +#define MIPS64_EF_R16 16 +#define MIPS64_EF_R17 17 +#define MIPS64_EF_R18 18 +#define MIPS64_EF_R19 19 +#define MIPS64_EF_R20 20 +#define MIPS64_EF_R21 21 +#define MIPS64_EF_R22 22 +#define MIPS64_EF_R23 23 +#define MIPS64_EF_R24 24 +#define MIPS64_EF_R25 25 + +/* + * k0/k1 unsaved + */ +#define MIPS64_EF_R26 26 +#define MIPS64_EF_R27 27 + + +#define MIPS64_EF_R28 28 +#define MIPS64_EF_R29 29 +#define MIPS64_EF_R30 30 +#define MIPS64_EF_R31 31 + +/* + * Saved special registers + */ +#define MIPS64_EF_LO 32 +#define MIPS64_EF_HI 33 + +#define MIPS64_EF_CP0_EPC 34 +#define MIPS64_EF_CP0_BADVADDR 35 +#define MIPS64_EF_CP0_STATUS 36 +#define MIPS64_EF_CP0_CAUSE 37 + +#define MIPS64_EF_SIZE 304 /* size in bytes */ + +#if _MIPS_SIM == _MIPS_SIM_ABI32 + +#define EF_R0 MIPS32_EF_R0 +#define EF_R1 MIPS32_EF_R1 +#define EF_R2 MIPS32_EF_R2 +#define EF_R3 MIPS32_EF_R3 +#define EF_R4 MIPS32_EF_R4 +#define EF_R5 MIPS32_EF_R5 +#define EF_R6 MIPS32_EF_R6 +#define EF_R7 MIPS32_EF_R7 +#define EF_R8 MIPS32_EF_R8 +#define EF_R9 MIPS32_EF_R9 +#define EF_R10 MIPS32_EF_R10 +#define EF_R11 MIPS32_EF_R11 +#define EF_R12 MIPS32_EF_R12 +#define EF_R13 MIPS32_EF_R13 +#define EF_R14 MIPS32_EF_R14 +#define EF_R15 MIPS32_EF_R15 +#define EF_R16 MIPS32_EF_R16 +#define EF_R17 MIPS32_EF_R17 +#define EF_R18 MIPS32_EF_R18 +#define EF_R19 MIPS32_EF_R19 +#define EF_R20 
MIPS32_EF_R20 +#define EF_R21 MIPS32_EF_R21 +#define EF_R22 MIPS32_EF_R22 +#define EF_R23 MIPS32_EF_R23 +#define EF_R24 MIPS32_EF_R24 +#define EF_R25 MIPS32_EF_R25 +#define EF_R26 MIPS32_EF_R26 +#define EF_R27 MIPS32_EF_R27 +#define EF_R28 MIPS32_EF_R28 +#define EF_R29 MIPS32_EF_R29 +#define EF_R30 MIPS32_EF_R30 +#define EF_R31 MIPS32_EF_R31 +#define EF_LO MIPS32_EF_LO +#define EF_HI MIPS32_EF_HI +#define EF_CP0_EPC MIPS32_EF_CP0_EPC +#define EF_CP0_BADVADDR MIPS32_EF_CP0_BADVADDR +#define EF_CP0_STATUS MIPS32_EF_CP0_STATUS +#define EF_CP0_CAUSE MIPS32_EF_CP0_CAUSE +#define EF_UNUSED0 MIPS32_EF_UNUSED0 +#define EF_SIZE MIPS32_EF_SIZE + +#elif _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 + +#define EF_R0 MIPS64_EF_R0 +#define EF_R1 MIPS64_EF_R1 +#define EF_R2 MIPS64_EF_R2 +#define EF_R3 MIPS64_EF_R3 +#define EF_R4 MIPS64_EF_R4 +#define EF_R5 MIPS64_EF_R5 +#define EF_R6 MIPS64_EF_R6 +#define EF_R7 MIPS64_EF_R7 +#define EF_R8 MIPS64_EF_R8 +#define EF_R9 MIPS64_EF_R9 +#define EF_R10 MIPS64_EF_R10 +#define EF_R11 MIPS64_EF_R11 +#define EF_R12 MIPS64_EF_R12 +#define EF_R13 MIPS64_EF_R13 +#define EF_R14 MIPS64_EF_R14 +#define EF_R15 MIPS64_EF_R15 +#define EF_R16 MIPS64_EF_R16 +#define EF_R17 MIPS64_EF_R17 +#define EF_R18 MIPS64_EF_R18 +#define EF_R19 MIPS64_EF_R19 +#define EF_R20 MIPS64_EF_R20 +#define EF_R21 MIPS64_EF_R21 +#define EF_R22 MIPS64_EF_R22 +#define EF_R23 MIPS64_EF_R23 +#define EF_R24 MIPS64_EF_R24 +#define EF_R25 MIPS64_EF_R25 +#define EF_R26 MIPS64_EF_R26 +#define EF_R27 MIPS64_EF_R27 +#define EF_R28 MIPS64_EF_R28 +#define EF_R29 MIPS64_EF_R29 +#define EF_R30 MIPS64_EF_R30 +#define EF_R31 MIPS64_EF_R31 +#define EF_LO MIPS64_EF_LO +#define EF_HI MIPS64_EF_HI +#define EF_CP0_EPC MIPS64_EF_CP0_EPC +#define EF_CP0_BADVADDR MIPS64_EF_CP0_BADVADDR +#define EF_CP0_STATUS MIPS64_EF_CP0_STATUS +#define EF_CP0_CAUSE MIPS64_EF_CP0_CAUSE +#define EF_SIZE MIPS64_EF_SIZE + +#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */ + +#endif /* __UAPI_ASM_MIPS_REG_H */ diff --git a/arch/mips/include/uapi/asm/swab.h b/arch/mips/include/uapi/asm/swab.h index ac9a8f9cd1fb..8f2d184dbe9f 100644 --- a/arch/mips/include/uapi/asm/swab.h +++ b/arch/mips/include/uapi/asm/swab.h @@ -13,12 +13,16 @@ #define __SWAB_64_THRU_32__ -#if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) +#if (defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) || \ + defined(_MIPS_ARCH_LOONGSON3A) static inline __attribute_const__ __u16 __arch_swab16(__u16 x) { __asm__( + " .set push \n" + " .set arch=mips32r2 \n" " wsbh %0, %1 \n" + " .set pop \n" : "=r" (x) : "r" (x)); @@ -29,8 +33,11 @@ static inline __attribute_const__ __u16 __arch_swab16(__u16 x) static inline __attribute_const__ __u32 __arch_swab32(__u32 x) { __asm__( + " .set push \n" + " .set arch=mips32r2 \n" " wsbh %0, %1 \n" " rotr %0, %0, 16 \n" + " .set pop \n" : "=r" (x) : "r" (x)); @@ -46,8 +53,11 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x) static inline __attribute_const__ __u64 __arch_swab64(__u64 x) { __asm__( - " dsbh %0, %1\n" - " dshd %0, %0" + " .set push \n" + " .set arch=mips64r2 \n" + " dsbh %0, %1 \n" + " dshd %0, %0 \n" + " .set pop \n" : "=r" (x) : "r" (x)); @@ -55,5 +65,5 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 x) } #define __arch_swab64 __arch_swab64 #endif /* __mips64 */ -#endif /* MIPS R2 or newer */ +#endif /* MIPS R2 or newer or Loongson 3A */ #endif /* _ASM_SWAB_H */ diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h index 
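[Annotation] For reference, the wsbh/rotr pair that the swab.h hunk wraps in .set push/.set arch=mips32r2 computes a full 32-bit byte reversal: wsbh swaps the bytes inside each halfword, and the rotate by 16 then exchanges the halfwords. The same result in portable C (illustrative, not the kernel's fallback path):

	#include <stdint.h>

	static inline uint32_t swab32_portable(uint32_t x)
	{
		/* wsbh: swap bytes within each 16-bit halfword */
		x = ((x & 0x00ff00ffu) << 8) | ((x & 0xff00ff00u) >> 8);
		/* rotr 16: exchange the two halfwords */
		return (x >> 16) | (x << 16);
	}

	/* swab32_portable(0x11223344) == 0x44332211 */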
5805414777e0..fdb4923777d1 100644 --- a/arch/mips/include/uapi/asm/unistd.h +++ b/arch/mips/include/uapi/asm/unistd.h @@ -372,16 +372,19 @@ #define __NR_sched_setattr (__NR_Linux + 349) #define __NR_sched_getattr (__NR_Linux + 350) #define __NR_renameat2 (__NR_Linux + 351) +#define __NR_seccomp (__NR_Linux + 352) +#define __NR_getrandom (__NR_Linux + 353) +#define __NR_memfd_create (__NR_Linux + 354) /* * Offset of the last Linux o32 flavoured syscall */ -#define __NR_Linux_syscalls 351 +#define __NR_Linux_syscalls 354 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ #define __NR_O32_Linux 4000 -#define __NR_O32_Linux_syscalls 351 +#define __NR_O32_Linux_syscalls 354 #if _MIPS_SIM == _MIPS_SIM_ABI64 @@ -701,16 +704,19 @@ #define __NR_sched_setattr (__NR_Linux + 309) #define __NR_sched_getattr (__NR_Linux + 310) #define __NR_renameat2 (__NR_Linux + 311) +#define __NR_seccomp (__NR_Linux + 312) +#define __NR_getrandom (__NR_Linux + 313) +#define __NR_memfd_create (__NR_Linux + 314) /* * Offset of the last Linux 64-bit flavoured syscall */ -#define __NR_Linux_syscalls 311 +#define __NR_Linux_syscalls 314 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ #define __NR_64_Linux 5000 -#define __NR_64_Linux_syscalls 311 +#define __NR_64_Linux_syscalls 314 #if _MIPS_SIM == _MIPS_SIM_NABI32 @@ -1034,15 +1040,18 @@ #define __NR_sched_setattr (__NR_Linux + 313) #define __NR_sched_getattr (__NR_Linux + 314) #define __NR_renameat2 (__NR_Linux + 315) +#define __NR_seccomp (__NR_Linux + 316) +#define __NR_getrandom (__NR_Linux + 317) +#define __NR_memfd_create (__NR_Linux + 318) /* * Offset of the last N32 flavoured syscall */ -#define __NR_Linux_syscalls 315 +#define __NR_Linux_syscalls 318 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ #define __NR_N32_Linux 6000 -#define __NR_N32_Linux_syscalls 315 +#define __NR_N32_Linux_syscalls 318 #endif /* _UAPI_ASM_UNISTD_H */ diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c index 088e92a79ae6..c454525e7695 100644 --- a/arch/mips/jz4740/board-qi_lb60.c +++ b/arch/mips/jz4740/board-qi_lb60.c @@ -15,6 +15,7 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/gpio.h> +#include <linux/gpio/machine.h> #include <linux/input.h> #include <linux/gpio_keys.h> diff --git a/arch/mips/jz4740/clock-debugfs.c b/arch/mips/jz4740/clock-debugfs.c index a8acdeff267e..325422d0d453 100644 --- a/arch/mips/jz4740/clock-debugfs.c +++ b/arch/mips/jz4740/clock-debugfs.c @@ -87,8 +87,7 @@ void jz4740_clock_debugfs_add_clk(struct clk *clk) /* TODO: Locking */ void jz4740_clock_debugfs_update_parent(struct clk *clk) { - if (clk->debugfs_parent_entry) - debugfs_remove(clk->debugfs_parent_entry); + debugfs_remove(clk->debugfs_parent_entry); if (clk->parent) { char parent_path[100]; diff --git a/arch/mips/jz4740/platform.c b/arch/mips/jz4740/platform.c index a447101cf9f1..0b12f273cb2e 100644 --- a/arch/mips/jz4740/platform.c +++ b/arch/mips/jz4740/platform.c @@ -59,7 +59,7 @@ struct platform_device jz4740_usb_ohci_device = { /* USB Device Controller */ struct platform_device jz4740_udc_xceiv_device = { - .name = "usb_phy_gen_xceiv", + .name = "usb_phy_generic", .id = 0, }; diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index 4bb5107511e2..b1d84bd4efb3 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -234,6 +234,7 @@ void output_thread_fpu_defines(void) thread.fpu.fpr[31].val64[FPR_IDX(64, 0)]); OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31); + OFFSET(THREAD_MSA_CSR, task_struct, 
thread.fpu.msacsr); BLANK(); } diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c index 7faf5f2bee25..928767858b86 100644 --- a/arch/mips/kernel/binfmt_elfo32.c +++ b/arch/mips/kernel/binfmt_elfo32.c @@ -72,22 +72,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; #include <asm/processor.h> -/* - * When this file is selected, we are definitely running a 64bit kernel. - * So using the right regs define in asm/reg.h - */ -#define WANT_COMPAT_REG_H - -/* These MUST be defined before elf.h gets included */ -extern void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs); -#define ELF_CORE_COPY_REGS(_dest, _regs) elf32_core_copy_regs(_dest, _regs); -#define ELF_CORE_COPY_TASK_REGS(_tsk, _dest) \ -({ \ - int __res = 1; \ - elf32_core_copy_regs(*(_dest), task_pt_regs(_tsk)); \ - __res; \ -}) - #include <linux/module.h> #include <linux/elfcore.h> #include <linux/compat.h> @@ -145,28 +129,6 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value) value->tv_usec = rem / NSEC_PER_USEC; } -void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs) -{ - int i; - - for (i = 0; i < EF_R0; i++) - grp[i] = 0; - grp[EF_R0] = 0; - for (i = 1; i <= 31; i++) - grp[EF_R0 + i] = (elf_greg_t) regs->regs[i]; - grp[EF_R26] = 0; - grp[EF_R27] = 0; - grp[EF_LO] = (elf_greg_t) regs->lo; - grp[EF_HI] = (elf_greg_t) regs->hi; - grp[EF_CP0_EPC] = (elf_greg_t) regs->cp0_epc; - grp[EF_CP0_BADVADDR] = (elf_greg_t) regs->cp0_badvaddr; - grp[EF_CP0_STATUS] = (elf_greg_t) regs->cp0_status; - grp[EF_CP0_CAUSE] = (elf_greg_t) regs->cp0_cause; -#ifdef EF_UNUSED0 - grp[EF_UNUSED0] = 0; -#endif -} - MODULE_DESCRIPTION("Binary format loader for compatibility with o32 Linux/MIPS binaries"); MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)"); diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index 6f4f739dad96..e6e97d2a5c9e 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -13,6 +13,7 @@ #include <asm/asm-offsets.h> #include <asm/asmmacro.h> #include <asm/cacheops.h> +#include <asm/eva.h> #include <asm/mipsregs.h> #include <asm/mipsmtregs.h> #include <asm/pm.h> @@ -166,6 +167,9 @@ dcache_done: 1: jal mips_cps_core_init nop + /* Do any EVA initialization if necessary */ + eva_init + /* * Boot any other VPEs within this core that should be online, and * deactivate this VPE if it should be offline. 
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index d74f957c561e..94c4a0c0a577 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -27,6 +27,7 @@ #include <asm/msa.h> #include <asm/watch.h> #include <asm/elf.h> +#include <asm/pgtable-bits.h> #include <asm/spram.h> #include <asm/uaccess.h> @@ -54,6 +55,20 @@ static int __init dsp_disable(char *s) __setup("nodsp", dsp_disable); +static int mips_htw_disabled; + +static int __init htw_disable(char *s) +{ + mips_htw_disabled = 1; + cpu_data[0].options &= ~MIPS_CPU_HTW; + write_c0_pwctl(read_c0_pwctl() & + ~(1 << MIPS_PWCTL_PWEN_SHIFT)); + + return 1; +} + +__setup("nohtw", htw_disable); + static inline void check_errata(void) { struct cpuinfo_mips *c = ¤t_cpu_data; @@ -130,14 +145,13 @@ static inline int __cpu_has_fpu(void) static inline unsigned long cpu_get_msa_id(void) { - unsigned long status, conf5, msa_id; + unsigned long status, msa_id; status = read_c0_status(); __enable_fpu(FPU_64BIT); - conf5 = read_c0_config5(); enable_msa(); msa_id = read_msa_ir(); - write_c0_config5(conf5); + disable_msa(); write_c0_status(status); return msa_id; } @@ -321,6 +335,9 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c) c->options |= MIPS_CPU_SEGMENTS; if (config3 & MIPS_CONF3_MSA) c->ases |= MIPS_ASE_MSA; + /* Only tested on 32-bit cores */ + if ((config3 & MIPS_CONF3_PW) && config_enabled(CONFIG_32BIT)) + c->options |= MIPS_CPU_HTW; return config3 & MIPS_CONF_M; } @@ -389,6 +406,8 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c) if (config5 & MIPS_CONF5_EVA) c->options |= MIPS_CPU_EVA; + if (config5 & MIPS_CONF5_MRP) + c->options |= MIPS_CPU_MAAR; return config5 & MIPS_CONF_M; } @@ -421,6 +440,15 @@ static void decode_configs(struct cpuinfo_mips *c) mips_probe_watch_registers(c); + if (cpu_has_rixi) { + /* Enable the RIXI exceptions */ + write_c0_pagegrain(read_c0_pagegrain() | PG_IEC); + back_to_back_c0_hazard(); + /* Verify the IEC bit is set */ + if (read_c0_pagegrain() & PG_IEC) + c->options |= MIPS_CPU_RIXIEX; + } + #ifndef CONFIG_MIPS_CPS if (cpu_has_mips_r2) { c->core = get_ebase_cpunum(); @@ -737,9 +765,16 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) break; case PRID_REV_LOONGSON3A: c->cputype = CPU_LOONGSON3; + c->writecombine = _CACHE_UNCACHED_ACCELERATED; __cpu_name[cpu] = "ICT Loongson-3"; set_elf_platform(cpu, "loongson3a"); break; + case PRID_REV_LOONGSON3B_R1: + case PRID_REV_LOONGSON3B_R2: + c->cputype = CPU_LOONGSON3; + __cpu_name[cpu] = "ICT Loongson-3"; + set_elf_platform(cpu, "loongson3b"); + break; } set_isa(c, MIPS_CPU_ISA_III); @@ -765,67 +800,83 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu) { + c->writecombine = _CACHE_UNCACHED_ACCELERATED; switch (c->processor_id & PRID_IMP_MASK) { case PRID_IMP_4KC: c->cputype = CPU_4KC; + c->writecombine = _CACHE_UNCACHED; __cpu_name[cpu] = "MIPS 4Kc"; break; case PRID_IMP_4KEC: case PRID_IMP_4KECR2: c->cputype = CPU_4KEC; + c->writecombine = _CACHE_UNCACHED; __cpu_name[cpu] = "MIPS 4KEc"; break; case PRID_IMP_4KSC: case PRID_IMP_4KSD: c->cputype = CPU_4KSC; + c->writecombine = _CACHE_UNCACHED; __cpu_name[cpu] = "MIPS 4KSc"; break; case PRID_IMP_5KC: c->cputype = CPU_5KC; + c->writecombine = _CACHE_UNCACHED; __cpu_name[cpu] = "MIPS 5Kc"; break; case PRID_IMP_5KE: c->cputype = CPU_5KE; + c->writecombine = _CACHE_UNCACHED; __cpu_name[cpu] = "MIPS 5KE"; 
break; case PRID_IMP_20KC: c->cputype = CPU_20KC; + c->writecombine = _CACHE_UNCACHED; __cpu_name[cpu] = "MIPS 20Kc"; break; case PRID_IMP_24K: c->cputype = CPU_24K; + c->writecombine = _CACHE_UNCACHED; __cpu_name[cpu] = "MIPS 24Kc"; break; case PRID_IMP_24KE: c->cputype = CPU_24K; + c->writecombine = _CACHE_UNCACHED; __cpu_name[cpu] = "MIPS 24KEc"; break; case PRID_IMP_25KF: c->cputype = CPU_25KF; + c->writecombine = _CACHE_UNCACHED; __cpu_name[cpu] = "MIPS 25Kc"; break; case PRID_IMP_34K: c->cputype = CPU_34K; + c->writecombine = _CACHE_UNCACHED; __cpu_name[cpu] = "MIPS 34Kc"; break; case PRID_IMP_74K: c->cputype = CPU_74K; + c->writecombine = _CACHE_UNCACHED; __cpu_name[cpu] = "MIPS 74Kc"; break; case PRID_IMP_M14KC: c->cputype = CPU_M14KC; + c->writecombine = _CACHE_UNCACHED; __cpu_name[cpu] = "MIPS M14Kc"; break; case PRID_IMP_M14KEC: c->cputype = CPU_M14KEC; + c->writecombine = _CACHE_UNCACHED; __cpu_name[cpu] = "MIPS M14KEc"; break; case PRID_IMP_1004K: c->cputype = CPU_1004K; + c->writecombine = _CACHE_UNCACHED; __cpu_name[cpu] = "MIPS 1004Kc"; break; case PRID_IMP_1074K: c->cputype = CPU_1074K; + c->writecombine = _CACHE_UNCACHED; __cpu_name[cpu] = "MIPS 1074Kc"; break; case PRID_IMP_INTERAPTIV_UP: @@ -899,6 +950,7 @@ static inline void cpu_probe_sibyte(struct cpuinfo_mips *c, unsigned int cpu) { decode_configs(c); + c->writecombine = _CACHE_UNCACHED_ACCELERATED; switch (c->processor_id & PRID_IMP_MASK) { case PRID_IMP_SB1: c->cputype = CPU_SB1; @@ -1030,6 +1082,7 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu) switch (c->processor_id & PRID_IMP_MASK) { case PRID_IMP_JZRISC: c->cputype = CPU_JZRISC; + c->writecombine = _CACHE_UNCACHED_ACCELERATED; __cpu_name[cpu] = "Ingenic JZRISC"; break; default: @@ -1136,6 +1189,7 @@ void cpu_probe(void) c->processor_id = PRID_IMP_UNKNOWN; c->fpu_id = FPIR_IMP_NONE; c->cputype = CPU_UNKNOWN; + c->writecombine = _CACHE_UNCACHED; c->processor_id = read_c0_prid(); switch (c->processor_id & PRID_COMP_MASK) { @@ -1187,6 +1241,12 @@ void cpu_probe(void) if (mips_dsp_disabled) c->ases &= ~(MIPS_ASE_DSP | MIPS_ASE_DSP2P); + if (mips_htw_disabled) { + c->options &= ~MIPS_CPU_HTW; + write_c0_pwctl(read_c0_pwctl() & + ~(1 << MIPS_PWCTL_PWEN_SHIFT)); + } + if (c->options & MIPS_CPU_FPU) { c->fpu_id = cpu_get_fpu_id(); diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 60e7e5e45af1..937c54bc8ccc 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -63,7 +63,7 @@ static inline int in_kernel_space(unsigned long ip) ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK))) static unsigned int insn_jal_ftrace_caller __read_mostly; -static unsigned int insn_lui_v1_hi16_mcount __read_mostly; +static unsigned int insn_la_mcount[2] __read_mostly; static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly; static inline void ftrace_dyn_arch_init_insns(void) @@ -71,10 +71,10 @@ static inline void ftrace_dyn_arch_init_insns(void) u32 *buf; unsigned int v1; - /* lui v1, hi16_mcount */ + /* la v1, _mcount */ v1 = 3; - buf = (u32 *)&insn_lui_v1_hi16_mcount; - UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR); + buf = (u32 *)&insn_la_mcount[0]; + UASM_i_LA(&buf, v1, MCOUNT_ADDR); /* jal (ftrace_caller + 8), jump over the first two instruction */ buf = (u32 *)&insn_jal_ftrace_caller; @@ -111,14 +111,47 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1, unsigned int new_code2) { int faulted; + mm_segment_t old_fs; safe_store_code(new_code1, ip, faulted); if 
(unlikely(faulted)) return -EFAULT; - safe_store_code(new_code2, ip + 4, faulted); + + ip += 4; + safe_store_code(new_code2, ip, faulted); if (unlikely(faulted)) return -EFAULT; + + ip -= 4; + old_fs = get_fs(); + set_fs(get_ds()); flush_icache_range(ip, ip + 8); + set_fs(old_fs); + + return 0; +} + +static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1, + unsigned int new_code2) +{ + int faulted; + mm_segment_t old_fs; + + ip += 4; + safe_store_code(new_code2, ip, faulted); + if (unlikely(faulted)) + return -EFAULT; + + ip -= 4; + safe_store_code(new_code1, ip, faulted); + if (unlikely(faulted)) + return -EFAULT; + + old_fs = get_fs(); + set_fs(get_ds()); + flush_icache_range(ip, ip + 8); + set_fs(old_fs); + return 0; } #endif @@ -130,13 +163,14 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1, * * move at, ra * jal _mcount --> nop + * sub sp, sp, 8 --> nop (CONFIG_32BIT) * * 2. For modules: * * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT * * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005) - * addiu v1, v1, low_16bit_of_mcount + * addiu v1, v1, low_16bit_of_mcount --> nop (CONFIG_32BIT) * move at, ra * move $12, ra_address * jalr v1 @@ -145,7 +179,7 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1, * 2.2 For the Other situations * * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004) - * addiu v1, v1, low_16bit_of_mcount + * addiu v1, v1, low_16bit_of_mcount --> nop (CONFIG_32BIT) * move at, ra * jalr v1 * nop | move $12, ra_address | sub sp, sp, 8 @@ -184,10 +218,14 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) unsigned int new; unsigned long ip = rec->ip; - new = in_kernel_space(ip) ? insn_jal_ftrace_caller : - insn_lui_v1_hi16_mcount; + new = in_kernel_space(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0]; +#ifdef CONFIG_64BIT return ftrace_modify_code(ip, new); +#else + return ftrace_modify_code_2r(ip, new, in_kernel_space(ip) ? + INSN_NOP : insn_la_mcount[1]); +#endif } #define FTRACE_CALL_IP ((unsigned long)(&ftrace_call)) @@ -302,6 +340,9 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra, &return_to_handler; int faulted, insns; + if (unlikely(ftrace_graph_is_dead())) + return; + if (unlikely(atomic_read(¤t->tracing_graph_pause))) return; diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c index 88e4c323382c..9e9d8b9a5b97 100644 --- a/arch/mips/kernel/irq-gic.c +++ b/arch/mips/kernel/irq-gic.c @@ -28,6 +28,18 @@ unsigned int gic_irq_flags[GIC_NUM_INTRS]; /* The index into this array is the vector # of the interrupt. 
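[Annotation] ftrace_modify_code_2r() above commits the second instruction word before the first and only then flushes the icache range, which keeps a CPU racing through the call site from pairing the freshly written first word with the stale second one. The shape of that ordering, with a hypothetical patch_word() standing in for safe_store_code():

	static int patch_call_site(unsigned long ip, u32 first, u32 second)
	{
		if (patch_word(ip + 4, second))	/* commit the second word first */
			return -EFAULT;
		if (patch_word(ip, first))	/* then the word that arms the site */
			return -EFAULT;

		flush_icache_range(ip, ip + 8);	/* expose both words to instruction fetch */
		return 0;
	}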
*/ struct gic_shared_intr_map gic_shared_intr_map[GIC_NUM_INTRS]; +struct gic_pcpu_mask { + DECLARE_BITMAP(pcpu_mask, GIC_NUM_INTRS); +}; + +struct gic_pending_regs { + DECLARE_BITMAP(pending, GIC_NUM_INTRS); +}; + +struct gic_intrmask_regs { + DECLARE_BITMAP(intrmask, GIC_NUM_INTRS); +}; + static struct gic_pcpu_mask pcpu_masks[NR_CPUS]; static struct gic_pending_regs pending_regs[NR_CPUS]; static struct gic_intrmask_regs intrmask_regs[NR_CPUS]; @@ -177,7 +189,7 @@ unsigned int gic_compare_int(void) return 0; } -unsigned int gic_get_int(void) +void gic_get_int_mask(unsigned long *dst, const unsigned long *src) { unsigned int i; unsigned long *pending, *intrmask, *pcpu_mask; @@ -202,8 +214,17 @@ unsigned int gic_get_int(void) bitmap_and(pending, pending, intrmask, GIC_NUM_INTRS); bitmap_and(pending, pending, pcpu_mask, GIC_NUM_INTRS); + bitmap_and(dst, src, pending, GIC_NUM_INTRS); +} - return find_first_bit(pending, GIC_NUM_INTRS); +unsigned int gic_get_int(void) +{ + DECLARE_BITMAP(interrupts, GIC_NUM_INTRS); + + bitmap_fill(interrupts, GIC_NUM_INTRS); + gic_get_int_mask(interrupts, interrupts); + + return find_first_bit(interrupts, GIC_NUM_INTRS); } static void gic_mask_irq(struct irq_data *d) @@ -269,11 +290,13 @@ static void __init gic_setup_intr(unsigned int intr, unsigned int cpu, /* Setup Intr to Pin mapping */ if (pin & GIC_MAP_TO_NMI_MSK) { + int i; + GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin); /* FIXME: hack to route NMI to all cpu's */ - for (cpu = 0; cpu < NR_CPUS; cpu += 32) { + for (i = 0; i < NR_CPUS; i += 32) { GICWRITE(GIC_REG_ADDR(SHARED, - GIC_SH_MAP_TO_VPE_REG_OFF(intr, cpu)), + GIC_SH_MAP_TO_VPE_REG_OFF(intr, i)), 0xffffffff); } } else { @@ -299,9 +322,10 @@ static void __init gic_setup_intr(unsigned int intr, unsigned int cpu, /* Init Intr Masks */ GIC_CLR_INTR_MASK(intr); + /* Initialise per-cpu Interrupt software masks */ - if (flags & GIC_FLAG_IPI) - set_bit(intr, pcpu_masks[cpu].pcpu_mask); + set_bit(intr, pcpu_masks[cpu].pcpu_mask); + if ((flags & GIC_FLAG_TRANSPARENT) && (cpu_has_veic == 0)) GIC_SET_INTR_MASK(intr); if (trigtype == GIC_TRIG_EDGE) @@ -340,8 +364,6 @@ static void __init gic_basic_init(int numintrs, int numvpes, cpu = intrmap[i].cpunum; if (cpu == GIC_UNUSED) continue; - if (cpu == 0 && i != 0 && intrmap[i].flags == 0) - continue; gic_setup_intr(i, intrmap[i].cpunum, intrmap[i].pin + pin_offset, diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c index 1f8187ab0997..212f46f2014e 100644 --- a/arch/mips/kernel/kprobes.c +++ b/arch/mips/kernel/kprobes.c @@ -224,7 +224,7 @@ static void save_previous_kprobe(struct kprobe_ctlblk *kcb) static void restore_previous_kprobe(struct kprobe_ctlblk *kcb) { - __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp; + __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); kcb->kprobe_status = kcb->prev_kprobe.status; kcb->kprobe_old_SR = kcb->prev_kprobe.old_SR; kcb->kprobe_saved_SR = kcb->prev_kprobe.saved_SR; @@ -234,7 +234,7 @@ static void restore_previous_kprobe(struct kprobe_ctlblk *kcb) static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) { - __get_cpu_var(current_kprobe) = p; + __this_cpu_write(current_kprobe, p); kcb->kprobe_saved_SR = kcb->kprobe_old_SR = (regs->cp0_status & ST0_IE); kcb->kprobe_saved_epc = regs->cp0_epc; } @@ -385,7 +385,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) ret = 1; goto no_kprobe; } - p = __get_cpu_var(current_kprobe); + p = __this_cpu_read(current_kprobe); if (p->break_handler && 
p->break_handler(p, regs)) goto ss_probe; } diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c index 992e18474da5..50980bf3983e 100644 --- a/arch/mips/kernel/machine_kexec.c +++ b/arch/mips/kernel/machine_kexec.c @@ -71,8 +71,12 @@ machine_kexec(struct kimage *image) kexec_start_address = (unsigned long) phys_to_virt(image->start); - kexec_indirection_page = - (unsigned long) phys_to_virt(image->head & PAGE_MASK); + if (image->type == KEXEC_TYPE_DEFAULT) { + kexec_indirection_page = + (unsigned long) phys_to_virt(image->head & PAGE_MASK); + } else { + kexec_indirection_page = (unsigned long)&image->head; + } memcpy((void*)reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size); diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S index 539b6294b613..2f7c734771f4 100644 --- a/arch/mips/kernel/mcount.S +++ b/arch/mips/kernel/mcount.S @@ -74,16 +74,25 @@ _mcount: #endif /* When tracing is activated, it calls ftrace_caller+8 (aka here) */ - lw t1, function_trace_stop - bnez t1, ftrace_stub - nop - MCOUNT_SAVE_REGS #ifdef KBUILD_MCOUNT_RA_ADDRESS PTR_S MCOUNT_RA_ADDRESS_REG, PT_R12(sp) #endif PTR_SUBU a0, ra, 8 /* arg1: self address */ + PTR_LA t1, _stext + sltu t2, a0, t1 /* t2 = (a0 < _stext) */ + PTR_LA t1, _etext + sltu t3, t1, a0 /* t3 = (a0 > _etext) */ + or t1, t2, t3 + beqz t1, ftrace_call + nop +#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT) + PTR_SUBU a0, a0, 16 /* arg1: adjust to module's recorded callsite */ +#else + PTR_SUBU a0, a0, 12 +#endif + .globl ftrace_call ftrace_call: nop /* a placeholder for the call to a real tracing function */ @@ -105,9 +114,6 @@ ftrace_stub: #else /* ! CONFIG_DYNAMIC_FTRACE */ NESTED(_mcount, PT_SIZE, ra) - lw t1, function_trace_stop - bnez t1, ftrace_stub - nop PTR_LA t1, ftrace_stub PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */ bne t1, t2, static_trace @@ -123,7 +129,11 @@ NESTED(_mcount, PT_SIZE, ra) nop #endif b ftrace_stub +#ifdef CONFIG_32BIT + addiu sp, sp, 8 +#else nop +#endif static_trace: MCOUNT_SAVE_REGS @@ -133,6 +143,9 @@ static_trace: move a1, AT /* arg2: parent's return address */ MCOUNT_RESTORE_REGS +#ifdef CONFIG_32BIT + addiu sp, sp, 8 +#endif .globl ftrace_stub ftrace_stub: RETURN_BACK @@ -177,6 +190,11 @@ NESTED(ftrace_graph_caller, PT_SIZE, ra) jal prepare_ftrace_return nop MCOUNT_RESTORE_REGS +#ifndef CONFIG_DYNAMIC_FTRACE +#ifdef CONFIG_32BIT + addiu sp, sp, 8 +#endif +#endif RETURN_BACK END(ftrace_graph_caller) diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index 4f2d9dece7ab..a8f9cdc6f8b0 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -340,7 +340,7 @@ static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc, static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); WARN_ON(idx < 0 || idx >= mipspmu.num_counters); @@ -360,7 +360,7 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx) static void mipsxx_pmu_disable_event(int idx) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); unsigned long flags; WARN_ON(idx < 0 || idx >= mipspmu.num_counters); @@ -460,7 +460,7 @@ static void mipspmu_stop(struct perf_event *event, int flags) static int mipspmu_add(struct perf_event *event, int flags) { - struct cpu_hw_events *cpuc = 
&__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct hw_perf_event *hwc = &event->hw; int idx; int err = 0; @@ -496,7 +496,7 @@ out: static void mipspmu_del(struct perf_event *event, int flags) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; @@ -558,7 +558,7 @@ static int mipspmu_get_irq(void) if (mipspmu.irq >= 0) { /* Request my own irq handler. */ err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq, - IRQF_PERCPU | IRQF_NOBALANCING, + IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD, "mips_perf_pmu", NULL); if (err) { pr_warning("Unable to request IRQ%d for MIPS " @@ -1275,7 +1275,7 @@ static int __hw_perf_event_init(struct perf_event *event) static void pause_local_counters(void) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); int ctr = mipspmu.num_counters; unsigned long flags; @@ -1291,7 +1291,7 @@ static void pause_local_counters(void) static void resume_local_counters(void) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); int ctr = mipspmu.num_counters; do { @@ -1302,7 +1302,7 @@ static void resume_local_counters(void) static int mipsxx_pmu_handle_shared_irq(void) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct perf_sample_data data; unsigned int counters = mipspmu.num_counters; u64 counter; @@ -1386,6 +1386,9 @@ static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev) /* proAptiv */ #define IS_BOTH_COUNTERS_PROAPTIV_EVENT(b) \ ((b) == 0 || (b) == 1) +/* P5600 */ +#define IS_BOTH_COUNTERS_P5600_EVENT(b) \ + ((b) == 0 || (b) == 1) /* 1004K */ #define IS_BOTH_COUNTERS_1004K_EVENT(b) \ @@ -1420,20 +1423,23 @@ static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev) /* - * User can use 0-255 raw events, where 0-127 for the events of even - * counters, and 128-255 for odd counters. Note that bit 7 is used to - * indicate the parity. So, for example, when user wants to take the - * Event Num of 15 for odd counters (by referring to the user manual), - * then 128 needs to be added to 15 as the input for the event config, - * i.e., 143 (0x8F) to be used. + * For most cores the user can use 0-255 raw events, where 0-127 for the events + * of even counters, and 128-255 for odd counters. Note that bit 7 is used to + * indicate the even/odd bank selector. So, for example, when user wants to take + * the Event Num of 15 for odd counters (by referring to the user manual), then + * 128 needs to be added to 15 as the input for the event config, i.e., 143 (0x8F) + * to be used. + * + * Some newer cores have even more events, in which case the user can use raw + * events 0-511, where 0-255 are for the events of even counters, and 256-511 + * are for odd counters, so bit 8 is used to indicate the even/odd bank selector. 
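[Annotation] The comment above describes the raw event encoding: the low 7 bits (8 on cores such as P5600 that have more events) are the event number, and the next bit up selects the odd counter bank. Decoded as a small helper (names hypothetical, masks as in the hunk):

	struct raw_decode {
		unsigned int event_id;
		unsigned int use_odd_bank;
	};

	static struct raw_decode decode_raw_event(u64 config, int has_8bit_events)
	{
		struct raw_decode d;
		unsigned int raw = config & (has_8bit_events ? 0x1ff : 0xff);
		unsigned int bank_bit = has_8bit_events ? 0x100 : 0x80;

		d.event_id = raw & (bank_bit - 1);	/* low 7 or 8 bits */
		d.use_odd_bank = !!(raw & bank_bit);	/* top bit picks the odd bank */
		return d;
	}

	/* e.g. a config of 0x8F on a 7-bit core means event 15 on the odd counters */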
*/ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config) { + /* currently most cores have 7-bit event numbers */ unsigned int raw_id = config & 0xff; unsigned int base_id = raw_id & 0x7f; - raw_event.event_id = base_id; - switch (current_cpu_type()) { case CPU_24K: if (IS_BOTH_COUNTERS_24K_EVENT(base_id)) @@ -1485,6 +1491,19 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config) raw_event.range = P; #endif break; + case CPU_P5600: + /* 8-bit event numbers */ + raw_id = config & 0x1ff; + base_id = raw_id & 0xff; + if (IS_BOTH_COUNTERS_P5600_EVENT(base_id)) + raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; + else + raw_event.cntr_mask = + raw_id > 255 ? CNTR_ODD : CNTR_EVEN; +#ifdef CONFIG_MIPS_MT_SMP + raw_event.range = P; +#endif + break; case CPU_1004K: if (IS_BOTH_COUNTERS_1004K_EVENT(base_id)) raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; @@ -1523,6 +1542,8 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config) raw_id > 127 ? CNTR_ODD : CNTR_EVEN; } + raw_event.event_id = base_id; + return &raw_event; } @@ -1633,6 +1654,11 @@ init_hw_perf_events(void) mipspmu.general_event_map = &mipsxxcore_event_map2; mipspmu.cache_event_map = &mipsxxcore_cache_map2; break; + case CPU_P5600: + mipspmu.name = "mips/P5600"; + mipspmu.general_event_map = &mipsxxcore_event_map2; + mipspmu.cache_event_map = &mipsxxcore_cache_map2; + break; case CPU_1004K: mipspmu.name = "mips/1004K"; mipspmu.general_event_map = &mipsxxcore_event_map; diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c index c4c2069d3a20..06147179a175 100644 --- a/arch/mips/kernel/pm-cps.c +++ b/arch/mips/kernel/pm-cps.c @@ -149,8 +149,12 @@ int cps_pm_enter_state(enum cps_pm_state state) /* Setup the VPE to run mips_cps_pm_restore when started again */ if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) { + /* Power gating relies upon CPS SMP */ + if (!mips_cps_smp_in_use()) + return -EINVAL; + core_cfg = &mips_cps_core_bootcfg[core]; - vpe_cfg = &core_cfg->vpe_config[current_cpu_data.vpe_id]; + vpe_cfg = &core_cfg->vpe_config[cpu_vpe_id(¤t_cpu_data)]; vpe_cfg->pc = (unsigned long)mips_cps_pm_restore; vpe_cfg->gp = (unsigned long)current_thread_info(); vpe_cfg->sp = 0; @@ -376,6 +380,10 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) memset(relocs, 0, sizeof(relocs)); if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) { + /* Power gating relies upon CPS SMP */ + if (!mips_cps_smp_in_use()) + goto out_err; + /* * Save CPU state. 
Note the non-standard calling convention * with the return address placed in v0 to avoid clobbering diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 037a44d962f3..097fc8d14e42 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -113,6 +113,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (cpu_has_vz) seq_printf(m, "%s", " vz"); if (cpu_has_msa) seq_printf(m, "%s", " msa"); if (cpu_has_eva) seq_printf(m, "%s", " eva"); + if (cpu_has_htw) seq_printf(m, "%s", " htw"); seq_printf(m, "\n"); if (cpu_has_mmips) { @@ -123,6 +124,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) cpu_data[n].srsets); seq_printf(m, "kscratch registers\t: %d\n", hweight8(cpu_data[n].kscratch_mask)); + seq_printf(m, "package\t\t\t: %d\n", cpu_data[n].package); seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core); sprintf(fmt, "VCE%%c exceptions\t\t: %s\n", diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 0a1ec0f3beff..636b0745d7c7 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -21,7 +21,6 @@ #include <linux/mman.h> #include <linux/personality.h> #include <linux/sys.h> -#include <linux/user.h> #include <linux/init.h> #include <linux/completion.h> #include <linux/kallsyms.h> @@ -36,6 +35,7 @@ #include <asm/pgtable.h> #include <asm/mipsregs.h> #include <asm/processor.h> +#include <asm/reg.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/elf.h> @@ -66,6 +66,7 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp) clear_used_math(); clear_fpu_owner(); init_dsp(); + clear_thread_flag(TIF_USEDMSA); clear_thread_flag(TIF_MSA_CTX_LIVE); disable_msa(); regs->cp0_epc = pc; @@ -141,6 +142,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); clear_tsk_thread_flag(p, TIF_USEDFPU); + clear_tsk_thread_flag(p, TIF_USEDMSA); + clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE); #ifdef CONFIG_MIPS_MT_FPAFF clear_tsk_thread_flag(p, TIF_FPUBOUND); @@ -152,61 +155,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, return 0; } -/* Fill in the fpu structure for a core dump.. 
*/ -int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r) -{ - int i; - - for (i = 0; i < NUM_FPU_REGS; i++) - memcpy(&r[i], ¤t->thread.fpu.fpr[i], sizeof(*r)); - - memcpy(&r[NUM_FPU_REGS], ¤t->thread.fpu.fcr31, - sizeof(current->thread.fpu.fcr31)); - - return 1; -} - -void elf_dump_regs(elf_greg_t *gp, struct pt_regs *regs) -{ - int i; - - for (i = 0; i < EF_R0; i++) - gp[i] = 0; - gp[EF_R0] = 0; - for (i = 1; i <= 31; i++) - gp[EF_R0 + i] = regs->regs[i]; - gp[EF_R26] = 0; - gp[EF_R27] = 0; - gp[EF_LO] = regs->lo; - gp[EF_HI] = regs->hi; - gp[EF_CP0_EPC] = regs->cp0_epc; - gp[EF_CP0_BADVADDR] = regs->cp0_badvaddr; - gp[EF_CP0_STATUS] = regs->cp0_status; - gp[EF_CP0_CAUSE] = regs->cp0_cause; -#ifdef EF_UNUSED0 - gp[EF_UNUSED0] = 0; -#endif -} - -int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs) -{ - elf_dump_regs(*regs, task_pt_regs(tsk)); - return 1; -} - -int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr) -{ - int i; - - for (i = 0; i < NUM_FPU_REGS; i++) - memcpy(&fpr[i], &t->thread.fpu.fpr[i], sizeof(*fpr)); - - memcpy(&fpr[NUM_FPU_REGS], &t->thread.fpu.fcr31, - sizeof(t->thread.fpu.fcr31)); - - return 1; -} - #ifdef CONFIG_CC_STACKPROTECTOR #include <linux/stackprotector.h> unsigned long __stack_chk_guard __read_mostly; diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index d8a76f97a053..9d1487d83293 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -24,7 +24,6 @@ #include <linux/ptrace.h> #include <linux/regset.h> #include <linux/smp.h> -#include <linux/user.h> #include <linux/security.h> #include <linux/tracehook.h> #include <linux/audit.h> @@ -63,7 +62,7 @@ void ptrace_disable(struct task_struct *child) * for 32-bit kernels and for 32-bit processes on a 64-bit kernel. * Registers are sign extended to fill the available space. */ -int ptrace_getregs(struct task_struct *child, __s64 __user *data) +int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data) { struct pt_regs *regs; int i; @@ -74,13 +73,13 @@ int ptrace_getregs(struct task_struct *child, __s64 __user *data) regs = task_pt_regs(child); for (i = 0; i < 32; i++) - __put_user((long)regs->regs[i], data + i); - __put_user((long)regs->lo, data + EF_LO - EF_R0); - __put_user((long)regs->hi, data + EF_HI - EF_R0); - __put_user((long)regs->cp0_epc, data + EF_CP0_EPC - EF_R0); - __put_user((long)regs->cp0_badvaddr, data + EF_CP0_BADVADDR - EF_R0); - __put_user((long)regs->cp0_status, data + EF_CP0_STATUS - EF_R0); - __put_user((long)regs->cp0_cause, data + EF_CP0_CAUSE - EF_R0); + __put_user((long)regs->regs[i], (__s64 __user *)&data->regs[i]); + __put_user((long)regs->lo, (__s64 __user *)&data->lo); + __put_user((long)regs->hi, (__s64 __user *)&data->hi); + __put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc); + __put_user((long)regs->cp0_badvaddr, (__s64 __user *)&data->cp0_badvaddr); + __put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status); + __put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause); return 0; } @@ -90,7 +89,7 @@ int ptrace_getregs(struct task_struct *child, __s64 __user *data) * the 64-bit format. On a 32-bit kernel only the lower order half * (according to endianness) will be used. 
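[Annotation] The ptrace_setregs and 32-bit regset paths that follow rely on sign extension when a 32-bit register image lands in a 64-bit kernel's register slots, which is why the values are cast through a signed type before being stored. In isolation (the helper is illustrative; the kernel code uses a plain (s32) cast):

	#include <stdint.h>

	static inline uint64_t store_32bit_reg(uint32_t user_val)
	{
		/* sign-extend, don't zero-pad: 0x80000000 -> 0xffffffff80000000 */
		return (uint64_t)(int64_t)(int32_t)user_val;
	}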
*/ -int ptrace_setregs(struct task_struct *child, __s64 __user *data) +int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data) { struct pt_regs *regs; int i; @@ -101,10 +100,10 @@ int ptrace_setregs(struct task_struct *child, __s64 __user *data) regs = task_pt_regs(child); for (i = 0; i < 32; i++) - __get_user(regs->regs[i], data + i); - __get_user(regs->lo, data + EF_LO - EF_R0); - __get_user(regs->hi, data + EF_HI - EF_R0); - __get_user(regs->cp0_epc, data + EF_CP0_EPC - EF_R0); + __get_user(regs->regs[i], (__s64 __user *)&data->regs[i]); + __get_user(regs->lo, (__s64 __user *)&data->lo); + __get_user(regs->hi, (__s64 __user *)&data->hi); + __get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc); /* badvaddr, status, and cause may not be written. */ @@ -129,7 +128,7 @@ int ptrace_getfpregs(struct task_struct *child, __u32 __user *data) } __put_user(child->thread.fpu.fcr31, data + 64); - __put_user(current_cpu_data.fpu_id, data + 65); + __put_user(boot_cpu_data.fpu_id, data + 65); return 0; } @@ -151,6 +150,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data) } __get_user(child->thread.fpu.fcr31, data + 64); + child->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; /* FIR may not be written. */ @@ -246,36 +246,160 @@ int ptrace_set_watch_regs(struct task_struct *child, /* regset get/set implementations */ -static int gpr_get(struct task_struct *target, - const struct user_regset *regset, - unsigned int pos, unsigned int count, - void *kbuf, void __user *ubuf) +#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32) + +static int gpr32_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) { struct pt_regs *regs = task_pt_regs(target); + u32 uregs[ELF_NGREG] = {}; + unsigned i; + + for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) { + /* k0/k1 are copied as zero. */ + if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27) + continue; + + uregs[i] = regs->regs[i - MIPS32_EF_R0]; + } - return user_regset_copyout(&pos, &count, &kbuf, &ubuf, - regs, 0, sizeof(*regs)); + uregs[MIPS32_EF_LO] = regs->lo; + uregs[MIPS32_EF_HI] = regs->hi; + uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc; + uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr; + uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status; + uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause; + + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, + sizeof(uregs)); } -static int gpr_set(struct task_struct *target, - const struct user_regset *regset, - unsigned int pos, unsigned int count, - const void *kbuf, const void __user *ubuf) +static int gpr32_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) { - struct pt_regs newregs; - int ret; + struct pt_regs *regs = task_pt_regs(target); + u32 uregs[ELF_NGREG]; + unsigned start, num_regs, i; + int err; + + start = pos / sizeof(u32); + num_regs = count / sizeof(u32); + + if (start + num_regs > ELF_NGREG) + return -EIO; + + err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0, + sizeof(uregs)); + if (err) + return err; + + for (i = start; i < num_regs; i++) { + /* + * Cast all values to signed here so that if this is a 64-bit + * kernel, the supplied 32-bit values will be sign extended. + */ + switch (i) { + case MIPS32_EF_R1 ... MIPS32_EF_R25: + /* k0/k1 are ignored. */ + case MIPS32_EF_R28 ... 
MIPS32_EF_R31: + regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i]; + break; + case MIPS32_EF_LO: + regs->lo = (s32)uregs[i]; + break; + case MIPS32_EF_HI: + regs->hi = (s32)uregs[i]; + break; + case MIPS32_EF_CP0_EPC: + regs->cp0_epc = (s32)uregs[i]; + break; + } + } + + return 0; +} + +#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */ + +#ifdef CONFIG_64BIT + +static int gpr64_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + struct pt_regs *regs = task_pt_regs(target); + u64 uregs[ELF_NGREG] = {}; + unsigned i; + + for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) { + /* k0/k1 are copied as zero. */ + if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27) + continue; + + uregs[i] = regs->regs[i - MIPS64_EF_R0]; + } + + uregs[MIPS64_EF_LO] = regs->lo; + uregs[MIPS64_EF_HI] = regs->hi; + uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc; + uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr; + uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status; + uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause; + + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, + sizeof(uregs)); +} - ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, - &newregs, - 0, sizeof(newregs)); - if (ret) - return ret; +static int gpr64_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + struct pt_regs *regs = task_pt_regs(target); + u64 uregs[ELF_NGREG]; + unsigned start, num_regs, i; + int err; + + start = pos / sizeof(u64); + num_regs = count / sizeof(u64); - *task_pt_regs(target) = newregs; + if (start + num_regs > ELF_NGREG) + return -EIO; + + err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0, + sizeof(uregs)); + if (err) + return err; + + for (i = start; i < num_regs; i++) { + switch (i) { + case MIPS64_EF_R1 ... MIPS64_EF_R25: + /* k0/k1 are ignored. */ + case MIPS64_EF_R28 ... 
MIPS64_EF_R31: + regs->regs[i - MIPS64_EF_R0] = uregs[i]; + break; + case MIPS64_EF_LO: + regs->lo = uregs[i]; + break; + case MIPS64_EF_HI: + regs->hi = uregs[i]; + break; + case MIPS64_EF_CP0_EPC: + regs->cp0_epc = uregs[i]; + break; + } + } return 0; } +#endif /* CONFIG_64BIT */ + static int fpr_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, @@ -337,14 +461,16 @@ enum mips_regset { REGSET_FPR, }; +#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32) + static const struct user_regset mips_regsets[] = { [REGSET_GPR] = { .core_note_type = NT_PRSTATUS, .n = ELF_NGREG, .size = sizeof(unsigned int), .align = sizeof(unsigned int), - .get = gpr_get, - .set = gpr_set, + .get = gpr32_get, + .set = gpr32_set, }, [REGSET_FPR] = { .core_note_type = NT_PRFPREG, @@ -364,14 +490,18 @@ static const struct user_regset_view user_mips_view = { .n = ARRAY_SIZE(mips_regsets), }; +#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */ + +#ifdef CONFIG_64BIT + static const struct user_regset mips64_regsets[] = { [REGSET_GPR] = { .core_note_type = NT_PRSTATUS, .n = ELF_NGREG, .size = sizeof(unsigned long), .align = sizeof(unsigned long), - .get = gpr_get, - .set = gpr_set, + .get = gpr64_get, + .set = gpr64_set, }, [REGSET_FPR] = { .core_note_type = NT_PRFPREG, @@ -384,25 +514,26 @@ static const struct user_regset mips64_regsets[] = { }; static const struct user_regset_view user_mips64_view = { - .name = "mips", + .name = "mips64", .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI, .regsets = mips64_regsets, - .n = ARRAY_SIZE(mips_regsets), + .n = ARRAY_SIZE(mips64_regsets), }; +#endif /* CONFIG_64BIT */ + const struct user_regset_view *task_user_regset_view(struct task_struct *task) { #ifdef CONFIG_32BIT return &user_mips_view; -#endif - +#else #ifdef CONFIG_MIPS32_O32 - if (test_thread_flag(TIF_32BIT_REGS)) - return &user_mips_view; + if (test_tsk_thread_flag(task, TIF_32BIT_REGS)) + return &user_mips_view; #endif - return &user_mips64_view; +#endif } long arch_ptrace(struct task_struct *child, long request, @@ -480,7 +611,7 @@ long arch_ptrace(struct task_struct *child, long request, break; case FPC_EIR: /* implementation / version register */ - tmp = current_cpu_data.fpu_id; + tmp = boot_cpu_data.fpu_id; break; case DSP_BASE ... DSP_BASE + 5: { dspreg_t *dregs; @@ -565,7 +696,7 @@ long arch_ptrace(struct task_struct *child, long request, break; #endif case FPC_CSR: - child->thread.fpu.fcr31 = data; + child->thread.fpu.fcr31 = data & ~FPU_CSR_ALL_X; break; case DSP_BASE ... 
DSP_BASE + 5: { dspreg_t *dregs; @@ -639,7 +770,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall) long ret = 0; user_exit(); - if (secure_computing(syscall) == -1) + if (secure_computing() == -1) return -1; if (test_thread_flag(TIF_SYSCALL_TRACE) && diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c index b40c3ca60ee5..283b5a1967d1 100644 --- a/arch/mips/kernel/ptrace32.c +++ b/arch/mips/kernel/ptrace32.c @@ -22,7 +22,6 @@ #include <linux/errno.h> #include <linux/ptrace.h> #include <linux/smp.h> -#include <linux/user.h> #include <linux/security.h> #include <asm/cpu.h> @@ -32,6 +31,7 @@ #include <asm/mipsmtregs.h> #include <asm/pgtable.h> #include <asm/page.h> +#include <asm/reg.h> #include <asm/uaccess.h> #include <asm/bootinfo.h> @@ -129,7 +129,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, break; case FPC_EIR: /* implementation / version register */ - tmp = current_cpu_data.fpu_id; + tmp = boot_cpu_data.fpu_id; break; case DSP_BASE ... DSP_BASE + 5: { dspreg_t *dregs; @@ -256,11 +256,13 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, } case PTRACE_GETREGS: - ret = ptrace_getregs(child, (__s64 __user *) (__u64) data); + ret = ptrace_getregs(child, + (struct user_pt_regs __user *) (__u64) data); break; case PTRACE_SETREGS: - ret = ptrace_setregs(child, (__s64 __user *) (__u64) data); + ret = ptrace_setregs(child, + (struct user_pt_regs __user *) (__u64) data); break; case PTRACE_GETFPREGS: diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S index 81ca3f70fe29..4c4ec1812420 100644 --- a/arch/mips/kernel/r4k_switch.S +++ b/arch/mips/kernel/r4k_switch.S @@ -64,8 +64,10 @@ /* Check whether we're saving scalar or vector context. */ bgtz a3, 1f - /* Save 128b MSA vector context. */ + /* Save 128b MSA vector context + scalar FP control & status. */ + cfc1 t1, fcr31 msa_save_all a0 + sw t1, THREAD_FCR31(a0) b 2f 1: /* Save 32b/64b scalar FP context. */ @@ -142,6 +144,11 @@ LEAF(_restore_msa) jr ra END(_restore_msa) +LEAF(_init_msa_upper) + msa_init_all_upper + jr ra + END(_init_msa_upper) + #endif /* diff --git a/arch/mips/kernel/rtlx-cmp.c b/arch/mips/kernel/rtlx-cmp.c index 758fb3cd2326..d26dcc4b46e7 100644 --- a/arch/mips/kernel/rtlx-cmp.c +++ b/arch/mips/kernel/rtlx-cmp.c @@ -77,6 +77,9 @@ int __init rtlx_module_init(void) dev = device_create(mt_class, NULL, MKDEV(major, i), NULL, "%s%d", RTLX_MODULE_NAME, i); if (IS_ERR(dev)) { + while (i--) + device_destroy(mt_class, MKDEV(major, i)); + err = PTR_ERR(dev); goto out_chrdev; } diff --git a/arch/mips/kernel/rtlx-mt.c b/arch/mips/kernel/rtlx-mt.c index 5a66b975989e..cb95470e2e69 100644 --- a/arch/mips/kernel/rtlx-mt.c +++ b/arch/mips/kernel/rtlx-mt.c @@ -103,6 +103,9 @@ int __init rtlx_module_init(void) dev = device_create(mt_class, NULL, MKDEV(major, i), NULL, "%s%d", RTLX_MODULE_NAME, i); if (IS_ERR(dev)) { + while (i--) + device_destroy(mt_class, MKDEV(major, i)); + err = PTR_ERR(dev); goto out_chrdev; } diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 3245474f19d5..744cd10ba599 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -67,8 +67,6 @@ NESTED(handle_sys, PT_SIZE, sp) /* * Ok, copy the args from the luser stack to the kernel stack. - * t3 is the precomputed number of instruction bytes needed to - * load or store arguments 6-8. 
*/ .set push @@ -495,8 +493,8 @@ EXPORT(sys_call_table) PTR sys_tgkill PTR sys_utimes PTR sys_mbind - PTR sys_ni_syscall /* sys_get_mempolicy */ - PTR sys_ni_syscall /* 4270 sys_set_mempolicy */ + PTR sys_get_mempolicy + PTR sys_set_mempolicy /* 4270 */ PTR sys_mq_open PTR sys_mq_unlink PTR sys_mq_timedsend @@ -578,3 +576,6 @@ EXPORT(sys_call_table) PTR sys_sched_setattr PTR sys_sched_getattr /* 4350 */ PTR sys_renameat2 + PTR sys_seccomp + PTR sys_getrandom + PTR sys_memfd_create diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index be2fedd4ae33..002b1bc09c38 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -347,8 +347,8 @@ EXPORT(sys_call_table) PTR sys_tgkill /* 5225 */ PTR sys_utimes PTR sys_mbind - PTR sys_ni_syscall /* sys_get_mempolicy */ - PTR sys_ni_syscall /* sys_set_mempolicy */ + PTR sys_get_mempolicy + PTR sys_set_mempolicy PTR sys_mq_open /* 5230 */ PTR sys_mq_unlink PTR sys_mq_timedsend @@ -431,4 +431,7 @@ EXPORT(sys_call_table) PTR sys_sched_setattr PTR sys_sched_getattr /* 5310 */ PTR sys_renameat2 + PTR sys_seccomp + PTR sys_getrandom + PTR sys_memfd_create .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index c1dbcda4b816..ca6cbbe9805b 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -162,7 +162,7 @@ EXPORT(sysn32_call_table) PTR sys_getpeername PTR sys_socketpair PTR compat_sys_setsockopt - PTR sys_getsockopt + PTR compat_sys_getsockopt PTR __sys_clone /* 6055 */ PTR __sys_fork PTR compat_sys_execve @@ -339,9 +339,9 @@ EXPORT(sysn32_call_table) PTR compat_sys_clock_nanosleep PTR sys_tgkill PTR compat_sys_utimes /* 6230 */ - PTR sys_ni_syscall /* sys_mbind */ - PTR sys_ni_syscall /* sys_get_mempolicy */ - PTR sys_ni_syscall /* sys_set_mempolicy */ + PTR compat_sys_mbind + PTR compat_sys_get_mempolicy + PTR compat_sys_set_mempolicy PTR compat_sys_mq_open PTR sys_mq_unlink /* 6235 */ PTR compat_sys_mq_timedsend @@ -358,7 +358,7 @@ EXPORT(sysn32_call_table) PTR sys_inotify_init PTR sys_inotify_add_watch PTR sys_inotify_rm_watch - PTR sys_migrate_pages /* 6250 */ + PTR compat_sys_migrate_pages /* 6250 */ PTR sys_openat PTR sys_mkdirat PTR sys_mknodat @@ -379,7 +379,7 @@ EXPORT(sysn32_call_table) PTR sys_sync_file_range PTR sys_tee PTR compat_sys_vmsplice /* 6270 */ - PTR sys_move_pages + PTR compat_sys_move_pages PTR compat_sys_set_robust_list PTR compat_sys_get_robust_list PTR compat_sys_kexec_load @@ -424,4 +424,7 @@ EXPORT(sysn32_call_table) PTR sys_sched_setattr PTR sys_sched_getattr PTR sys_renameat2 /* 6315 */ + PTR sys_seccomp + PTR sys_getrandom + PTR sys_memfd_create .size sysn32_call_table,.-sysn32_call_table diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index f1343ccd7ed7..9e10d11fbb84 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -113,15 +113,19 @@ trace_a_syscall: move s0, t2 # Save syscall pointer move a0, sp /* - * syscall number is in v0 unless we called syscall(__NR_###) + * absolute syscall number is in v0 unless we called syscall(__NR_###) * where the real syscall number is in a0 * note: NR_syscall is the first O32 syscall but the macro is * only defined when compiling with -mabi=32 (CONFIG_32BIT) * therefore __NR_O32_Linux is used (4000) */ - addiu a1, v0, __NR_O32_Linux - bnez v0, 1f /* __NR_syscall at offset 0 */ - lw a1, PT_R4(sp) + .set push + .set reorder + subu t1, v0, __NR_O32_Linux + move a1, v0 + bnez t1, 1f /* 
__NR_syscall at offset 0 */ + lw a1, PT_R4(sp) /* Arg1 for __NR_syscall case */ + .set pop 1: jal syscall_trace_enter @@ -473,9 +477,9 @@ EXPORT(sys32_call_table) PTR compat_sys_clock_nanosleep /* 4265 */ PTR sys_tgkill PTR compat_sys_utimes - PTR sys_ni_syscall /* sys_mbind */ - PTR sys_ni_syscall /* sys_get_mempolicy */ - PTR sys_ni_syscall /* 4270 sys_set_mempolicy */ + PTR compat_sys_mbind + PTR compat_sys_get_mempolicy + PTR compat_sys_set_mempolicy /* 4270 */ PTR compat_sys_mq_open PTR sys_mq_unlink PTR compat_sys_mq_timedsend @@ -492,7 +496,7 @@ EXPORT(sys32_call_table) PTR sys_inotify_init PTR sys_inotify_add_watch /* 4285 */ PTR sys_inotify_rm_watch - PTR sys_migrate_pages + PTR compat_sys_migrate_pages PTR compat_sys_openat PTR sys_mkdirat PTR sys_mknodat /* 4290 */ @@ -557,4 +561,7 @@ EXPORT(sys32_call_table) PTR sys_sched_setattr PTR sys_sched_getattr /* 4350 */ PTR sys_renameat2 + PTR sys_seccomp + PTR sys_getrandom + PTR sys_memfd_create .size sys32_call_table,.-sys32_call_table diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index a842154d57dc..b3b8f0d9d4a7 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -24,6 +24,8 @@ #include <linux/debugfs.h> #include <linux/kexec.h> #include <linux/sizes.h> +#include <linux/device.h> +#include <linux/dma-contiguous.h> #include <asm/addrspace.h> #include <asm/bootinfo.h> @@ -282,7 +284,7 @@ static unsigned long __init init_initrd(void) * Initialize the bootmem allocator. It also setup initrd related data * if needed. */ -#ifdef CONFIG_SGI_IP27 +#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_NUMA)) static void __init bootmem_init(void) { @@ -476,6 +478,7 @@ static void __init bootmem_init(void) * o bootmem_init() * o sparse_init() * o paging_init() + * o dma_continguous_reserve() * * At this stage the bootmem allocator is ready to use. * @@ -609,6 +612,7 @@ static void __init request_crashkernel(struct resource *res) static void __init arch_mem_init(char **cmdline_p) { + struct memblock_region *reg; extern void plat_mem_setup(void); /* call board setup routine */ @@ -675,6 +679,11 @@ static void __init arch_mem_init(char **cmdline_p) sparse_init(); plat_swiotlb_setup(); paging_init(); + + dma_contiguous_reserve(PFN_PHYS(max_low_pfn)); + /* Tell bootmem about cma reserved memblock section */ + for_each_memblock(reserved, reg) + reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); } static void __init resource_init(void) @@ -729,6 +738,25 @@ static void __init resource_init(void) } } +#ifdef CONFIG_SMP +static void __init prefill_possible_map(void) +{ + int i, possible = num_possible_cpus(); + + if (possible > nr_cpu_ids) + possible = nr_cpu_ids; + + for (i = 0; i < possible; i++) + set_cpu_possible(i, true); + for (; i < NR_CPUS; i++) + set_cpu_possible(i, false); + + nr_cpu_ids = possible; +} +#else +static inline void prefill_possible_map(void) {} +#endif + void __init setup_arch(char **cmdline_p) { cpu_probe(); @@ -752,6 +780,7 @@ void __init setup_arch(char **cmdline_p) resource_init(); plat_smp_setup(); + prefill_possible_map(); cpu_cache_init(); } diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h index 9c60d09e62a7..06805e09bcd3 100644 --- a/arch/mips/kernel/signal-common.h +++ b/arch/mips/kernel/signal-common.h @@ -22,7 +22,7 @@ /* * Determine which stack to use.. 
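/*
 * [Editorial sketch, not part of the patch] In the get_sigframe() hunk just
 * below, the open-coded X/Open alternate-stack switch is replaced by the
 * generic sigsp() helper. Based on the code being removed there, its effect
 * is roughly the following (sketch only, for a downward-growing stack):
 */
#include <linux/sched.h>
#include <linux/signal.h>

static unsigned long sigsp_sketch(unsigned long sp, struct ksignal *ksig)
{
	/*
	 * Use the alternate signal stack only when the handler asked for it
	 * with SA_ONSTACK and we are not already running on it.
	 */
	if ((ksig->ka.sa.sa_flags & SA_ONSTACK) && sas_ss_flags(sp) == 0)
		return current->sas_ss_sp + current->sas_ss_size;
	return sp;
}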
*/ -extern void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, +extern void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, size_t frame_size); /* Check and clear pending FPU exceptions in saved CSR */ extern int fpcsr_pending(unsigned int __user *fpcsr); diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 9e60d117e41e..1d57605e4615 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -280,7 +280,7 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) return err; } -void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, +void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, size_t frame_size) { unsigned long sp; @@ -295,9 +295,7 @@ void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, */ sp -= 32; - /* This is the X/Open sanctioned signal stack switching. */ - if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags (sp) == 0)) - sp = current->sas_ss_sp + current->sas_ss_size; + sp = sigsp(sp, ksig); return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? ~(cpu_icache_line_size()-1) : ALMASK)); } @@ -428,20 +426,20 @@ badframe: } #ifdef CONFIG_TRAD_SIGNALS -static int setup_frame(void *sig_return, struct k_sigaction *ka, - struct pt_regs *regs, int signr, sigset_t *set) +static int setup_frame(void *sig_return, struct ksignal *ksig, + struct pt_regs *regs, sigset_t *set) { struct sigframe __user *frame; int err = 0; - frame = get_sigframe(ka, regs, sizeof(*frame)); + frame = get_sigframe(ksig, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) - goto give_sigsegv; + return -EFAULT; err |= setup_sigcontext(regs, &frame->sf_sc); err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set)); if (err) - goto give_sigsegv; + return -EFAULT; /* * Arguments to signal handler: @@ -453,37 +451,32 @@ static int setup_frame(void *sig_return, struct k_sigaction *ka, * $25 and c0_epc point to the signal handler, $29 points to the * struct sigframe. */ - regs->regs[ 4] = signr; + regs->regs[ 4] = ksig->sig; regs->regs[ 5] = 0; regs->regs[ 6] = (unsigned long) &frame->sf_sc; regs->regs[29] = (unsigned long) frame; regs->regs[31] = (unsigned long) sig_return; - regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; + regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler; DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", current->comm, current->pid, frame, regs->cp0_epc, regs->regs[31]); return 0; - -give_sigsegv: - force_sigsegv(signr, current); - return -EFAULT; } #endif -static int setup_rt_frame(void *sig_return, struct k_sigaction *ka, - struct pt_regs *regs, int signr, sigset_t *set, - siginfo_t *info) +static int setup_rt_frame(void *sig_return, struct ksignal *ksig, + struct pt_regs *regs, sigset_t *set) { struct rt_sigframe __user *frame; int err = 0; - frame = get_sigframe(ka, regs, sizeof(*frame)); + frame = get_sigframe(ksig, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) - goto give_sigsegv; + return -EFAULT; /* Create siginfo. */ - err |= copy_siginfo_to_user(&frame->rs_info, info); + err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info); /* Create the ucontext. 
*/ err |= __put_user(0, &frame->rs_uc.uc_flags); @@ -493,7 +486,7 @@ static int setup_rt_frame(void *sig_return, struct k_sigaction *ka, err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set)); if (err) - goto give_sigsegv; + return -EFAULT; /* * Arguments to signal handler: @@ -505,22 +498,18 @@ static int setup_rt_frame(void *sig_return, struct k_sigaction *ka, * $25 and c0_epc point to the signal handler, $29 points to * the struct rt_sigframe. */ - regs->regs[ 4] = signr; + regs->regs[ 4] = ksig->sig; regs->regs[ 5] = (unsigned long) &frame->rs_info; regs->regs[ 6] = (unsigned long) &frame->rs_uc; regs->regs[29] = (unsigned long) frame; regs->regs[31] = (unsigned long) sig_return; - regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; + regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler; DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", current->comm, current->pid, frame, regs->cp0_epc, regs->regs[31]); return 0; - -give_sigsegv: - force_sigsegv(signr, current); - return -EFAULT; } struct mips_abi mips_abi = { @@ -534,8 +523,7 @@ struct mips_abi mips_abi = { .restart = __NR_restart_syscall }; -static void handle_signal(unsigned long sig, siginfo_t *info, - struct k_sigaction *ka, struct pt_regs *regs) +static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) { sigset_t *oldset = sigmask_to_save(); int ret; @@ -557,7 +545,7 @@ static void handle_signal(unsigned long sig, siginfo_t *info, regs->regs[2] = EINTR; break; case ERESTARTSYS: - if (!(ka->sa.sa_flags & SA_RESTART)) { + if (!(ksig->ka.sa.sa_flags & SA_RESTART)) { regs->regs[2] = EINTR; break; } @@ -571,29 +559,23 @@ static void handle_signal(unsigned long sig, siginfo_t *info, regs->regs[0] = 0; /* Don't deal with this again. */ } - if (sig_uses_siginfo(ka)) + if (sig_uses_siginfo(&ksig->ka)) ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset, - ka, regs, sig, oldset, info); + ksig, regs, oldset); else - ret = abi->setup_frame(vdso + abi->signal_return_offset, - ka, regs, sig, oldset); - - if (ret) - return; + ret = abi->setup_frame(vdso + abi->signal_return_offset, ksig, + regs, oldset); - signal_delivered(sig, info, ka, regs, 0); + signal_setup_done(ret, ksig, 0); } static void do_signal(struct pt_regs *regs) { - struct k_sigaction ka; - siginfo_t info; - int signr; + struct ksignal ksig; - signr = get_signal_to_deliver(&info, &ka, regs, NULL); - if (signr > 0) { + if (get_signal(&ksig)) { /* Whee! Actually deliver the signal. 
*/ - handle_signal(signr, &info, &ka, regs); + handle_signal(&ksig, regs); return; } diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index bae2e6ee2109..d69179c0d49d 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -490,21 +490,21 @@ badframe: force_sig(SIGSEGV, current); } -static int setup_frame_32(void *sig_return, struct k_sigaction *ka, - struct pt_regs *regs, int signr, sigset_t *set) +static int setup_frame_32(void *sig_return, struct ksignal *ksig, + struct pt_regs *regs, sigset_t *set) { struct sigframe32 __user *frame; int err = 0; - frame = get_sigframe(ka, regs, sizeof(*frame)); + frame = get_sigframe(ksig, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) - goto give_sigsegv; + return -EFAULT; err |= setup_sigcontext32(regs, &frame->sf_sc); err |= __copy_conv_sigset_to_user(&frame->sf_mask, set); if (err) - goto give_sigsegv; + return -EFAULT; /* * Arguments to signal handler: @@ -516,37 +516,32 @@ static int setup_frame_32(void *sig_return, struct k_sigaction *ka, * $25 and c0_epc point to the signal handler, $29 points to the * struct sigframe. */ - regs->regs[ 4] = signr; + regs->regs[ 4] = ksig->sig; regs->regs[ 5] = 0; regs->regs[ 6] = (unsigned long) &frame->sf_sc; regs->regs[29] = (unsigned long) frame; regs->regs[31] = (unsigned long) sig_return; - regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; + regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler; DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", current->comm, current->pid, frame, regs->cp0_epc, regs->regs[31]); return 0; - -give_sigsegv: - force_sigsegv(signr, current); - return -EFAULT; } -static int setup_rt_frame_32(void *sig_return, struct k_sigaction *ka, - struct pt_regs *regs, int signr, sigset_t *set, - siginfo_t *info) +static int setup_rt_frame_32(void *sig_return, struct ksignal *ksig, + struct pt_regs *regs, sigset_t *set) { struct rt_sigframe32 __user *frame; int err = 0; - frame = get_sigframe(ka, regs, sizeof(*frame)); + frame = get_sigframe(ksig, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) - goto give_sigsegv; + return -EFAULT; /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */ - err |= copy_siginfo_to_user32(&frame->rs_info, info); + err |= copy_siginfo_to_user32(&frame->rs_info, &ksig->info); /* Create the ucontext. */ err |= __put_user(0, &frame->rs_uc.uc_flags); @@ -556,7 +551,7 @@ static int setup_rt_frame_32(void *sig_return, struct k_sigaction *ka, err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set); if (err) - goto give_sigsegv; + return -EFAULT; /* * Arguments to signal handler: @@ -568,22 +563,18 @@ static int setup_rt_frame_32(void *sig_return, struct k_sigaction *ka, * $25 and c0_epc point to the signal handler, $29 points to * the struct rt_sigframe32. 
*/ - regs->regs[ 4] = signr; + regs->regs[ 4] = ksig->sig; regs->regs[ 5] = (unsigned long) &frame->rs_info; regs->regs[ 6] = (unsigned long) &frame->rs_uc; regs->regs[29] = (unsigned long) frame; regs->regs[31] = (unsigned long) sig_return; - regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; + regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler; DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", current->comm, current->pid, frame, regs->cp0_epc, regs->regs[31]); return 0; - -give_sigsegv: - force_sigsegv(signr, current); - return -EFAULT; } /* diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c index b2241bb9cac1..f1d4751eead0 100644 --- a/arch/mips/kernel/signal_n32.c +++ b/arch/mips/kernel/signal_n32.c @@ -102,18 +102,18 @@ badframe: force_sig(SIGSEGV, current); } -static int setup_rt_frame_n32(void *sig_return, struct k_sigaction *ka, - struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info) +static int setup_rt_frame_n32(void *sig_return, struct ksignal *ksig, + struct pt_regs *regs, sigset_t *set) { struct rt_sigframe_n32 __user *frame; int err = 0; - frame = get_sigframe(ka, regs, sizeof(*frame)); + frame = get_sigframe(ksig, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) - goto give_sigsegv; + return -EFAULT; /* Create siginfo. */ - err |= copy_siginfo_to_user32(&frame->rs_info, info); + err |= copy_siginfo_to_user32(&frame->rs_info, &ksig->info); /* Create the ucontext. */ err |= __put_user(0, &frame->rs_uc.uc_flags); @@ -123,7 +123,7 @@ static int setup_rt_frame_n32(void *sig_return, struct k_sigaction *ka, err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set); if (err) - goto give_sigsegv; + return -EFAULT; /* * Arguments to signal handler: @@ -135,22 +135,18 @@ static int setup_rt_frame_n32(void *sig_return, struct k_sigaction *ka, * $25 and c0_epc point to the signal handler, $29 points to * the struct rt_sigframe. */ - regs->regs[ 4] = signr; + regs->regs[ 4] = ksig->sig; regs->regs[ 5] = (unsigned long) &frame->rs_info; regs->regs[ 6] = (unsigned long) &frame->rs_uc; regs->regs[29] = (unsigned long) frame; regs->regs[31] = (unsigned long) sig_return; - regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; + regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler; DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", current->comm, current->pid, frame, regs->cp0_epc, regs->regs[31]); return 0; - -give_sigsegv: - force_sigsegv(signr, current); - return -EFAULT; } struct mips_abi mips_abi_n32 = { diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index df9e2bd9b2c2..06bb5ed6d80a 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -346,7 +346,7 @@ static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id) int action, cpu = irq - IPI0_IRQ; spin_lock_irqsave(&ipi_lock, flags); - action = __get_cpu_var(ipi_action_mask); + action = __this_cpu_read(ipi_action_mask); per_cpu(ipi_action_mask, cpu) = 0; clear_c0_cause(cpu ? 
C_SW1 : C_SW0); spin_unlock_irqrestore(&ipi_lock, flags); diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index 949f2c6827a0..e6e16a1d4add 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -14,13 +14,14 @@ #include <linux/smp.h> #include <linux/types.h> -#include <asm/cacheflush.h> +#include <asm/bcache.h> #include <asm/gic.h> #include <asm/mips-cm.h> #include <asm/mips-cpc.h> #include <asm/mips_mt.h> #include <asm/mipsregs.h> #include <asm/pm-cps.h> +#include <asm/r4kcache.h> #include <asm/smp-cps.h> #include <asm/time.h> #include <asm/uasm.h> @@ -132,8 +133,11 @@ static void __init cps_prepare_cpus(unsigned int max_cpus) entry_code = (u32 *)&mips_cps_core_entry; UASM_i_LA(&entry_code, 3, (long)mips_cm_base); uasm_i_addiu(&entry_code, 16, 0, cca); - dma_cache_wback_inv((unsigned long)&mips_cps_core_entry, - (void *)entry_code - (void *)&mips_cps_core_entry); + blast_dcache_range((unsigned long)&mips_cps_core_entry, + (unsigned long)entry_code); + bc_wback_inv((unsigned long)&mips_cps_core_entry, + (void *)entry_code - (void *)&mips_cps_core_entry); + __sync(); /* Allocate core boot configuration structs */ mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg), @@ -360,7 +364,7 @@ void play_dead(void) static void wait_for_sibling_halt(void *ptr_cpu) { unsigned cpu = (unsigned)ptr_cpu; - unsigned vpe_id = cpu_data[cpu].vpe_id; + unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]); unsigned halted; unsigned long flags; diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c index 3babf6e4f894..21f23add04f4 100644 --- a/arch/mips/kernel/smp-mt.c +++ b/arch/mips/kernel/smp-mt.c @@ -288,6 +288,7 @@ struct plat_smp_ops vsmp_smp_ops = { .prepare_cpus = vsmp_prepare_cpus, }; +#ifdef CONFIG_PROC_FS static int proc_cpuinfo_chain_call(struct notifier_block *nfb, unsigned long action_unused, void *data) { @@ -309,3 +310,4 @@ static int __init proc_cpuinfo_notifier_init(void) } subsys_initcall(proc_cpuinfo_notifier_init); +#endif diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 9bad52ede903..c94c4e92e17d 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -59,9 +59,16 @@ EXPORT_SYMBOL(smp_num_siblings); cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly; EXPORT_SYMBOL(cpu_sibling_map); +/* representing the core map of multi-core chips of each logical CPU */ +cpumask_t cpu_core_map[NR_CPUS] __read_mostly; +EXPORT_SYMBOL(cpu_core_map); + /* representing cpus for which sibling maps can be computed */ static cpumask_t cpu_sibling_setup_map; +/* representing cpus for which core maps can be computed */ +static cpumask_t cpu_core_setup_map; + cpumask_t cpu_coherent_mask; static inline void set_cpu_sibling_map(int cpu) @@ -72,7 +79,8 @@ static inline void set_cpu_sibling_map(int cpu) if (smp_num_siblings > 1) { for_each_cpu_mask(i, cpu_sibling_setup_map) { - if (cpu_data[cpu].core == cpu_data[i].core) { + if (cpu_data[cpu].package == cpu_data[i].package && + cpu_data[cpu].core == cpu_data[i].core) { cpu_set(i, cpu_sibling_map[cpu]); cpu_set(cpu, cpu_sibling_map[i]); } @@ -81,6 +89,20 @@ static inline void set_cpu_sibling_map(int cpu) cpu_set(cpu, cpu_sibling_map[cpu]); } +static inline void set_cpu_core_map(int cpu) +{ + int i; + + cpu_set(cpu, cpu_core_setup_map); + + for_each_cpu_mask(i, cpu_core_setup_map) { + if (cpu_data[cpu].package == cpu_data[i].package) { + cpu_set(i, cpu_core_map[cpu]); + cpu_set(cpu, cpu_core_map[i]); + } + } +} + struct plat_smp_ops *mp_ops; EXPORT_SYMBOL(mp_ops); @@ -122,6 +144,7 @@ 
asmlinkage void start_secondary(void) set_cpu_online(cpu, true); set_cpu_sibling_map(cpu); + set_cpu_core_map(cpu); cpu_set(cpu, cpu_callin_map); @@ -175,6 +198,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) current_thread_info()->cpu = 0; mp_ops->prepare_cpus(max_cpus); set_cpu_sibling_map(0); + set_cpu_core_map(0); #ifndef CONFIG_HOTPLUG_CPU init_cpu_present(cpu_possible_mask); #endif diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 51706d6dd5b0..22b19c275044 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -90,6 +90,7 @@ extern asmlinkage void handle_mt(void); extern asmlinkage void handle_dsp(void); extern asmlinkage void handle_mcheck(void); extern asmlinkage void handle_reserved(void); +extern void tlb_do_page_fault_0(void); void (*board_be_init)(void); int (*board_be_handler)(struct pt_regs *regs, int is_fixup); @@ -1088,13 +1089,19 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action, static int enable_restore_fp_context(int msa) { - int err, was_fpu_owner; + int err, was_fpu_owner, prior_msa; if (!used_math()) { /* First time FP context user. */ + preempt_disable(); err = init_fpu(); - if (msa && !err) + if (msa && !err) { enable_msa(); + _init_msa_upper(); + set_thread_flag(TIF_USEDMSA); + set_thread_flag(TIF_MSA_CTX_LIVE); + } + preempt_enable(); if (!err) set_used_math(); return err; @@ -1134,10 +1141,11 @@ static int enable_restore_fp_context(int msa) * This task is using or has previously used MSA. Thus we require * that Status.FR == 1. */ + preempt_disable(); was_fpu_owner = is_fpu_owner(); - err = own_fpu(0); + err = own_fpu_inatomic(0); if (err) - return err; + goto out; enable_msa(); write_msa_csr(current->thread.fpu.msacsr); @@ -1146,13 +1154,42 @@ static int enable_restore_fp_context(int msa) /* * If this is the first time that the task is using MSA and it has * previously used scalar FP in this time slice then we already nave - * FP context which we shouldn't clobber. + * FP context which we shouldn't clobber. We do however need to clear + * the upper 64b of each vector register so that this task has no + * opportunity to see data left behind by another. */ - if (!test_and_set_thread_flag(TIF_MSA_CTX_LIVE) && was_fpu_owner) - return 0; + prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE); + if (!prior_msa && was_fpu_owner) { + _init_msa_upper(); + + goto out; + } + + if (!prior_msa) { + /* + * Restore the least significant 64b of each vector register + * from the existing scalar FP context. + */ + _restore_fp(current); + + /* + * The task has not formerly used MSA, so clear the upper 64b + * of each vector register such that it cannot see data left + * behind by another task. + */ + _init_msa_upper(); + } else { + /* We need to restore the vector context. */ + restore_msa(current); + + /* Restore the scalar FP control & status register */ + if (!was_fpu_owner) + asm volatile("ctc1 %0, $31" : : "r"(current->thread.fpu.fcr31)); + } + +out: + preempt_enable(); - /* We need to restore the vector context. 
*/ - restore_msa(current); return 0; } @@ -2114,6 +2151,12 @@ void __init trap_init(void) set_except_vector(15, handle_fpe); set_except_vector(16, handle_ftlb); + + if (cpu_has_rixiex) { + set_except_vector(19, tlb_do_page_fault_0); + set_except_vector(20, tlb_do_page_fault_0); + } + set_except_vector(21, handle_msa); set_except_vector(22, handle_mdmx); diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 2b3517214d6d..e11906dff885 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -690,7 +690,6 @@ static void emulate_load_store_insn(struct pt_regs *regs, case sdc1_op: die_if_kernel("Unaligned FP access in kernel code", regs); BUG_ON(!used_math()); - BUG_ON(!is_fpu_owner()); lose_fpu(1); /* Save FPU state for the emulator. */ res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1, diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile index 78d87bbc99db..401fe027c261 100644 --- a/arch/mips/kvm/Makefile +++ b/arch/mips/kvm/Makefile @@ -5,9 +5,9 @@ common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm -kvm-objs := $(common-objs) kvm_mips.o kvm_mips_emul.o kvm_locore.o \ - kvm_mips_int.o kvm_mips_stats.o kvm_mips_commpage.o \ - kvm_mips_dyntrans.o kvm_trap_emul.o +kvm-objs := $(common-objs) mips.o emulate.o locore.o \ + interrupt.o stats.o commpage.o \ + dyntrans.o trap_emul.o obj-$(CONFIG_KVM) += kvm.o -obj-y += kvm_cb.o kvm_tlb.o +obj-y += callback.o tlb.o diff --git a/arch/mips/kvm/kvm_cb.c b/arch/mips/kvm/callback.c index 313c2e37b978..313c2e37b978 100644 --- a/arch/mips/kvm/kvm_cb.c +++ b/arch/mips/kvm/callback.c diff --git a/arch/mips/kvm/commpage.c b/arch/mips/kvm/commpage.c new file mode 100644 index 000000000000..2d6e976d1add --- /dev/null +++ b/arch/mips/kvm/commpage.c @@ -0,0 +1,33 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * commpage, currently used for Virtual COP0 registers. + * Mapped into the guest kernel @ 0x0. + * + * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. + * Authors: Sanjay Lal <sanjayl@kymasys.com> + */ + +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <linux/fs.h> +#include <linux/bootmem.h> +#include <asm/page.h> +#include <asm/cacheflush.h> +#include <asm/mmu_context.h> + +#include <linux/kvm_host.h> + +#include "commpage.h" + +void kvm_mips_commpage_init(struct kvm_vcpu *vcpu) +{ + struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage; + + /* Specific init values for fields */ + vcpu->arch.cop0 = &page->cop0; +} diff --git a/arch/mips/kvm/commpage.h b/arch/mips/kvm/commpage.h new file mode 100644 index 000000000000..08c5fa2bbc0f --- /dev/null +++ b/arch/mips/kvm/commpage.h @@ -0,0 +1,24 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * KVM/MIPS: commpage: mapped into get kernel space + * + * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 
+ * Authors: Sanjay Lal <sanjayl@kymasys.com> + */ + +#ifndef __KVM_MIPS_COMMPAGE_H__ +#define __KVM_MIPS_COMMPAGE_H__ + +struct kvm_mips_commpage { + /* COP0 state is mapped into Guest kernel via commpage */ + struct mips_coproc cop0; +}; + +#define KVM_MIPS_COMM_EIDI_OFFSET 0x0 + +extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu); + +#endif /* __KVM_MIPS_COMMPAGE_H__ */ diff --git a/arch/mips/kvm/kvm_mips_dyntrans.c b/arch/mips/kvm/dyntrans.c index b80e41d858fd..521121bdebff 100644 --- a/arch/mips/kvm/kvm_mips_dyntrans.c +++ b/arch/mips/kvm/dyntrans.c @@ -1,13 +1,13 @@ /* -* This file is subject to the terms and conditions of the GNU General Public -* License. See the file "COPYING" in the main directory of this archive -* for more details. -* -* KVM/MIPS: Binary Patching for privileged instructions, reduces traps. -* -* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. -* Authors: Sanjay Lal <sanjayl@kymasys.com> -*/ + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * KVM/MIPS: Binary Patching for privileged instructions, reduces traps. + * + * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. + * Authors: Sanjay Lal <sanjayl@kymasys.com> + */ #include <linux/errno.h> #include <linux/err.h> @@ -18,7 +18,7 @@ #include <linux/bootmem.h> #include <asm/cacheflush.h> -#include "kvm_mips_comm.h" +#include "commpage.h" #define SYNCI_TEMPLATE 0x041f0000 #define SYNCI_BASE(x) (((x) >> 21) & 0x1f) @@ -28,9 +28,8 @@ #define CLEAR_TEMPLATE 0x00000020 #define SW_TEMPLATE 0xac000000 -int -kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc, - struct kvm_vcpu *vcpu) +int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc, + struct kvm_vcpu *vcpu) { int result = 0; unsigned long kseg0_opc; @@ -47,12 +46,11 @@ kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc, } /* - * Address based CACHE instructions are transformed into synci(s). A little heavy - * for just D-cache invalidates, but avoids an expensive trap + * Address based CACHE instructions are transformed into synci(s). A little + * heavy for just D-cache invalidates, but avoids an expensive trap */ -int -kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc, - struct kvm_vcpu *vcpu) +int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc, + struct kvm_vcpu *vcpu) { int result = 0; unsigned long kseg0_opc; @@ -72,8 +70,7 @@ kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc, return result; } -int -kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu) +int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu) { int32_t rt, rd, sel; uint32_t mfc0_inst; @@ -115,8 +112,7 @@ kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu) return 0; } -int -kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu) +int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu) { int32_t rt, rd, sel; uint32_t mtc0_inst = SW_TEMPLATE; diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/emulate.c index 8d4840090082..fb3e8dfd1ff6 100644 --- a/arch/mips/kvm/kvm_mips_emul.c +++ b/arch/mips/kvm/emulate.c @@ -1,13 +1,13 @@ /* -* This file is subject to the terms and conditions of the GNU General Public -* License. See the file "COPYING" in the main directory of this archive -* for more details. -* -* KVM/MIPS: Instruction/Exception emulation -* -* Copyright (C) 2012 MIPS Technologies, Inc. 
All rights reserved. -* Authors: Sanjay Lal <sanjayl@kymasys.com> -*/ + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * KVM/MIPS: Instruction/Exception emulation + * + * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. + * Authors: Sanjay Lal <sanjayl@kymasys.com> + */ #include <linux/errno.h> #include <linux/err.h> @@ -29,9 +29,9 @@ #include <asm/r4kcache.h> #define CONFIG_MIPS_MT -#include "kvm_mips_opcode.h" -#include "kvm_mips_int.h" -#include "kvm_mips_comm.h" +#include "opcode.h" +#include "interrupt.h" +#include "commpage.h" #include "trace.h" @@ -51,18 +51,14 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu, if (epc & 3) goto unaligned; - /* - * Read the instruction - */ + /* Read the instruction */ insn.word = kvm_get_inst((uint32_t *) epc, vcpu); if (insn.word == KVM_INVALID_INST) return KVM_INVALID_INST; switch (insn.i_format.opcode) { - /* - * jr and jalr are in r_format format. - */ + /* jr and jalr are in r_format format. */ case spec_op: switch (insn.r_format.func) { case jalr_op: @@ -124,18 +120,16 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu, dspcontrol = rddsp(0x01); - if (dspcontrol >= 32) { + if (dspcontrol >= 32) epc = epc + 4 + (insn.i_format.simmediate << 2); - } else + else epc += 8; nextpc = epc; break; } break; - /* - * These are unconditional and in j_format. - */ + /* These are unconditional and in j_format. */ case jal_op: arch->gprs[31] = instpc + 8; case j_op: @@ -146,9 +140,7 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu, nextpc = epc; break; - /* - * These are conditional and in i_format. - */ + /* These are conditional and in i_format. */ case beq_op: case beql_op: if (arch->gprs[insn.i_format.rs] == @@ -189,22 +181,20 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu, nextpc = epc; break; - /* - * And now the FPA/cp1 branch instructions. - */ + /* And now the FPA/cp1 branch instructions. */ case cop1_op: - printk("%s: unsupported cop1_op\n", __func__); + kvm_err("%s: unsupported cop1_op\n", __func__); break; } return nextpc; unaligned: - printk("%s: unaligned epc\n", __func__); + kvm_err("%s: unaligned epc\n", __func__); return nextpc; sigill: - printk("%s: DSP branch but not DSP ASE\n", __func__); + kvm_err("%s: DSP branch but not DSP ASE\n", __func__); return nextpc; } @@ -219,7 +209,8 @@ enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause) er = EMULATE_FAIL; } else { vcpu->arch.pc = branch_pc; - kvm_debug("BD update_pc(): New PC: %#lx\n", vcpu->arch.pc); + kvm_debug("BD update_pc(): New PC: %#lx\n", + vcpu->arch.pc); } } else vcpu->arch.pc += 4; @@ -240,6 +231,7 @@ enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause) static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; + return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) || (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC); } @@ -392,7 +384,6 @@ static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, return now; } - /** * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry. * @vcpu: Virtual CPU. 
@@ -760,8 +751,8 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu) kvm_clear_c0_guest_status(cop0, ST0_ERL); vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0); } else { - printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n", - vcpu->arch.pc); + kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n", + vcpu->arch.pc); er = EMULATE_FAIL; } @@ -770,8 +761,6 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu) enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) { - enum emulation_result er = EMULATE_DONE; - kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc, vcpu->arch.pending_exceptions); @@ -781,8 +770,9 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) vcpu->arch.wait = 1; kvm_vcpu_block(vcpu); - /* We we are runnable, then definitely go off to user space to check if any - * I/O interrupts are pending. + /* + * We we are runnable, then definitely go off to user space to + * check if any I/O interrupts are pending. */ if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) { clear_bit(KVM_REQ_UNHALT, &vcpu->requests); @@ -790,20 +780,20 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) } } - return er; + return EMULATE_DONE; } -/* XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that we can catch - * this, if things ever change +/* + * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that + * we can catch this, if things ever change */ enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; - enum emulation_result er = EMULATE_FAIL; uint32_t pc = vcpu->arch.pc; - printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0)); - return er; + kvm_err("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0)); + return EMULATE_FAIL; } /* Write Guest TLB Entry @ Index */ @@ -811,88 +801,76 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; int index = kvm_read_c0_guest_index(cop0); - enum emulation_result er = EMULATE_DONE; struct kvm_mips_tlb *tlb = NULL; uint32_t pc = vcpu->arch.pc; if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) { - printk("%s: illegal index: %d\n", __func__, index); - printk - ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", - pc, index, kvm_read_c0_guest_entryhi(cop0), - kvm_read_c0_guest_entrylo0(cop0), - kvm_read_c0_guest_entrylo1(cop0), - kvm_read_c0_guest_pagemask(cop0)); + kvm_debug("%s: illegal index: %d\n", __func__, index); + kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", + pc, index, kvm_read_c0_guest_entryhi(cop0), + kvm_read_c0_guest_entrylo0(cop0), + kvm_read_c0_guest_entrylo1(cop0), + kvm_read_c0_guest_pagemask(cop0)); index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE; } tlb = &vcpu->arch.guest_tlb[index]; -#if 1 - /* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */ + /* + * Probe the shadow host TLB for the entry being overwritten, if one + * matches, invalidate it + */ kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); -#endif tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0); tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0); - kvm_debug - ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", - pc, index, kvm_read_c0_guest_entryhi(cop0), - 
kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0), - kvm_read_c0_guest_pagemask(cop0)); + kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", + pc, index, kvm_read_c0_guest_entryhi(cop0), + kvm_read_c0_guest_entrylo0(cop0), + kvm_read_c0_guest_entrylo1(cop0), + kvm_read_c0_guest_pagemask(cop0)); - return er; + return EMULATE_DONE; } /* Write Guest TLB Entry @ Random Index */ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; - enum emulation_result er = EMULATE_DONE; struct kvm_mips_tlb *tlb = NULL; uint32_t pc = vcpu->arch.pc; int index; -#if 1 get_random_bytes(&index, sizeof(index)); index &= (KVM_MIPS_GUEST_TLB_SIZE - 1); -#else - index = jiffies % KVM_MIPS_GUEST_TLB_SIZE; -#endif - - if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) { - printk("%s: illegal index: %d\n", __func__, index); - return EMULATE_FAIL; - } tlb = &vcpu->arch.guest_tlb[index]; -#if 1 - /* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */ + /* + * Probe the shadow host TLB for the entry being overwritten, if one + * matches, invalidate it + */ kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); -#endif tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0); tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0); - kvm_debug - ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n", - pc, index, kvm_read_c0_guest_entryhi(cop0), - kvm_read_c0_guest_entrylo0(cop0), - kvm_read_c0_guest_entrylo1(cop0)); + kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n", + pc, index, kvm_read_c0_guest_entryhi(cop0), + kvm_read_c0_guest_entrylo0(cop0), + kvm_read_c0_guest_entrylo1(cop0)); - return er; + return EMULATE_DONE; } enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; long entryhi = kvm_read_c0_guest_entryhi(cop0); - enum emulation_result er = EMULATE_DONE; uint32_t pc = vcpu->arch.pc; int index = -1; @@ -903,12 +881,12 @@ enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu) kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi, index); - return er; + return EMULATE_DONE; } -enum emulation_result -kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, - struct kvm_run *run, struct kvm_vcpu *vcpu) +enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, + uint32_t cause, struct kvm_run *run, + struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; enum emulation_result er = EMULATE_DONE; @@ -922,9 +900,8 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, */ curr_pc = vcpu->arch.pc; er = update_pc(vcpu, cause); - if (er == EMULATE_FAIL) { + if (er == EMULATE_FAIL) return er; - } copz = (inst >> 21) & 0x1f; rt = (inst >> 16) & 0x1f; @@ -949,7 +926,7 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, er = kvm_mips_emul_tlbp(vcpu); break; case rfe_op: - printk("!!!COP0_RFE!!!\n"); + kvm_err("!!!COP0_RFE!!!\n"); break; case eret_op: er = kvm_mips_emul_eret(vcpu); @@ -973,8 +950,7 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, #ifdef CONFIG_KVM_MIPS_DYN_TRANS kvm_mips_trans_mfc0(inst, opc, vcpu); #endif - } - else { + } else { vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; #ifdef CONFIG_KVM_MIPS_DYN_TRANS @@ -999,8 +975,8 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t 
*opc, uint32_t cause, if ((rd == MIPS_CP0_TLB_INDEX) && (vcpu->arch.gprs[rt] >= KVM_MIPS_GUEST_TLB_SIZE)) { - printk("Invalid TLB Index: %ld", - vcpu->arch.gprs[rt]); + kvm_err("Invalid TLB Index: %ld", + vcpu->arch.gprs[rt]); er = EMULATE_FAIL; break; } @@ -1010,21 +986,19 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, kvm_change_c0_guest_ebase(cop0, ~(C0_EBASE_CORE_MASK), vcpu->arch.gprs[rt]); - printk("MTCz, cop0->reg[EBASE]: %#lx\n", - kvm_read_c0_guest_ebase(cop0)); + kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n", + kvm_read_c0_guest_ebase(cop0)); } else if (rd == MIPS_CP0_TLB_HI && sel == 0) { uint32_t nasid = - vcpu->arch.gprs[rt] & ASID_MASK; - if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) - && + vcpu->arch.gprs[rt] & ASID_MASK; + if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) && ((kvm_read_c0_guest_entryhi(cop0) & ASID_MASK) != nasid)) { - - kvm_debug - ("MTCz, change ASID from %#lx to %#lx\n", - kvm_read_c0_guest_entryhi(cop0) & - ASID_MASK, - vcpu->arch.gprs[rt] & ASID_MASK); + kvm_debug("MTCz, change ASID from %#lx to %#lx\n", + kvm_read_c0_guest_entryhi(cop0) + & ASID_MASK, + vcpu->arch.gprs[rt] + & ASID_MASK); /* Blow away the shadow host TLBs */ kvm_mips_flush_host_tlb(1); @@ -1049,7 +1023,10 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { kvm_write_c0_guest_status(cop0, vcpu->arch.gprs[rt]); - /* Make sure that CU1 and NMI bits are never set */ + /* + * Make sure that CU1 and NMI bits are + * never set + */ kvm_clear_c0_guest_status(cop0, (ST0_CU1 | ST0_NMI)); @@ -1058,6 +1035,7 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, #endif } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) { uint32_t old_cause, new_cause; + old_cause = kvm_read_c0_guest_cause(cop0); new_cause = vcpu->arch.gprs[rt]; /* Update R/W bits */ @@ -1082,9 +1060,8 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, break; case dmtc_op: - printk - ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n", - vcpu->arch.pc, rt, rd, sel); + kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n", + vcpu->arch.pc, rt, rd, sel); er = EMULATE_FAIL; break; @@ -1115,7 +1092,10 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, cop0->reg[MIPS_CP0_STATUS][2] & 0xf; uint32_t pss = (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf; - /* We don't support any shadow register sets, so SRSCtl[PSS] == SRSCtl[CSS] = 0 */ + /* + * We don't support any shadow register sets, so + * SRSCtl[PSS] == SRSCtl[CSS] = 0 + */ if (css || pss) { er = EMULATE_FAIL; break; @@ -1126,21 +1106,17 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, } break; default: - printk - ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n", - vcpu->arch.pc, copz); + kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n", + vcpu->arch.pc, copz); er = EMULATE_FAIL; break; } } done: - /* - * Rollback PC only if emulation was unsuccessful - */ - if (er == EMULATE_FAIL) { + /* Rollback PC only if emulation was unsuccessful */ + if (er == EMULATE_FAIL) vcpu->arch.pc = curr_pc; - } dont_update_pc: /* @@ -1152,9 +1128,9 @@ dont_update_pc: return er; } -enum emulation_result -kvm_mips_emulate_store(uint32_t inst, uint32_t cause, - struct kvm_run *run, struct kvm_vcpu *vcpu) +enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause, + struct kvm_run *run, + struct kvm_vcpu *vcpu) { enum emulation_result er = EMULATE_DO_MMIO; int32_t op, base, rt, offset; @@ -1252,24 +1228,21 @@ 
kvm_mips_emulate_store(uint32_t inst, uint32_t cause, break; default: - printk("Store not yet supported"); + kvm_err("Store not yet supported"); er = EMULATE_FAIL; break; } - /* - * Rollback PC if emulation was unsuccessful - */ - if (er == EMULATE_FAIL) { + /* Rollback PC if emulation was unsuccessful */ + if (er == EMULATE_FAIL) vcpu->arch.pc = curr_pc; - } return er; } -enum emulation_result -kvm_mips_emulate_load(uint32_t inst, uint32_t cause, - struct kvm_run *run, struct kvm_vcpu *vcpu) +enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause, + struct kvm_run *run, + struct kvm_vcpu *vcpu) { enum emulation_result er = EMULATE_DO_MMIO; int32_t op, base, rt, offset; @@ -1364,7 +1337,7 @@ kvm_mips_emulate_load(uint32_t inst, uint32_t cause, break; default: - printk("Load not yet supported"); + kvm_err("Load not yet supported"); er = EMULATE_FAIL; break; } @@ -1383,7 +1356,7 @@ int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu) gfn = va >> PAGE_SHIFT; if (gfn >= kvm->arch.guest_pmap_npages) { - printk("%s: Invalid gfn: %#llx\n", __func__, gfn); + kvm_err("%s: Invalid gfn: %#llx\n", __func__, gfn); kvm_mips_dump_host_tlbs(); kvm_arch_vcpu_dump_regs(vcpu); return -1; @@ -1391,7 +1364,8 @@ int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu) pfn = kvm->arch.guest_pmap[gfn]; pa = (pfn << PAGE_SHIFT) | offset; - printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa)); + kvm_debug("%s: va: %#lx, unmapped: %#x\n", __func__, va, + CKSEG0ADDR(pa)); local_flush_icache_range(CKSEG0ADDR(pa), 32); return 0; @@ -1410,13 +1384,12 @@ int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu) #define MIPS_CACHE_DCACHE 0x1 #define MIPS_CACHE_SEC 0x3 -enum emulation_result -kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause, - struct kvm_run *run, struct kvm_vcpu *vcpu) +enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, + uint32_t cause, + struct kvm_run *run, + struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; - extern void (*r4k_blast_dcache) (void); - extern void (*r4k_blast_icache) (void); enum emulation_result er = EMULATE_DONE; int32_t offset, cache, op_inst, op, base; struct kvm_vcpu_arch *arch = &vcpu->arch; @@ -1443,22 +1416,23 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause, kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", cache, op, base, arch->gprs[base], offset); - /* Treat INDEX_INV as a nop, basically issued by Linux on startup to invalidate - * the caches entirely by stepping through all the ways/indexes + /* + * Treat INDEX_INV as a nop, basically issued by Linux on startup to + * invalidate the caches entirely by stepping through all the + * ways/indexes */ if (op == MIPS_CACHE_OP_INDEX_INV) { - kvm_debug - ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", - vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base, - arch->gprs[base], offset); + kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", + vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base, + arch->gprs[base], offset); if (cache == MIPS_CACHE_DCACHE) r4k_blast_dcache(); else if (cache == MIPS_CACHE_ICACHE) r4k_blast_icache(); else { - printk("%s: unsupported CACHE INDEX operation\n", - __func__); + kvm_err("%s: unsupported CACHE INDEX operation\n", + __func__); return EMULATE_FAIL; } @@ -1470,21 +1444,19 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause, preempt_disable(); if 
(KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) { - - if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) { + if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) kvm_mips_handle_kseg0_tlb_fault(va, vcpu); - } } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) || KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) { int index; /* If an entry already exists then skip */ - if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) { + if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) goto skip_fault; - } - /* If address not in the guest TLB, then give the guest a fault, the - * resulting handler will do the right thing + /* + * If address not in the guest TLB, then give the guest a fault, + * the resulting handler will do the right thing */ index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) | (kvm_read_c0_guest_entryhi @@ -1499,23 +1471,28 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause, goto dont_update_pc; } else { struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; - /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */ + /* + * Check if the entry is valid, if not then setup a TLB + * invalid exception to the guest + */ if (!TLB_IS_VALID(*tlb, va)) { er = kvm_mips_emulate_tlbinv_ld(cause, NULL, run, vcpu); preempt_enable(); goto dont_update_pc; } else { - /* We fault an entry from the guest tlb to the shadow host TLB */ + /* + * We fault an entry from the guest tlb to the + * shadow host TLB + */ kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL, NULL); } } } else { - printk - ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", - cache, op, base, arch->gprs[base], offset); + kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", + cache, op, base, arch->gprs[base], offset); er = EMULATE_FAIL; preempt_enable(); goto dont_update_pc; @@ -1530,7 +1507,10 @@ skip_fault: flush_dcache_line(va); #ifdef CONFIG_KVM_MIPS_DYN_TRANS - /* Replace the CACHE instruction, with a SYNCI, not the same, but avoids a trap */ + /* + * Replace the CACHE instruction, with a SYNCI, not the same, + * but avoids a trap + */ kvm_mips_trans_cache_va(inst, opc, vcpu); #endif } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) { @@ -1542,9 +1522,8 @@ skip_fault: kvm_mips_trans_cache_va(inst, opc, vcpu); #endif } else { - printk - ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", - cache, op, base, arch->gprs[base], offset); + kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", + cache, op, base, arch->gprs[base], offset); er = EMULATE_FAIL; preempt_enable(); goto dont_update_pc; @@ -1552,28 +1531,23 @@ skip_fault: preempt_enable(); - dont_update_pc: - /* - * Rollback PC - */ +dont_update_pc: + /* Rollback PC */ vcpu->arch.pc = curr_pc; - done: +done: return er; } -enum emulation_result -kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc, - struct kvm_run *run, struct kvm_vcpu *vcpu) +enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu) { enum emulation_result er = EMULATE_DONE; uint32_t inst; - /* - * Fetch the instruction. - */ - if (cause & CAUSEF_BD) { + /* Fetch the instruction. 
*/ + if (cause & CAUSEF_BD) opc += 1; - } inst = kvm_get_inst(opc, vcpu); @@ -1601,8 +1575,8 @@ kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc, break; default: - printk("Instruction emulation not supported (%p/%#x)\n", opc, - inst); + kvm_err("Instruction emulation not supported (%p/%#x)\n", opc, + inst); kvm_arch_vcpu_dump_regs(vcpu); er = EMULATE_FAIL; break; @@ -1611,9 +1585,10 @@ kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc, return er; } -enum emulation_result -kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc, - struct kvm_run *run, struct kvm_vcpu *vcpu) +enum emulation_result kvm_mips_emulate_syscall(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; struct kvm_vcpu_arch *arch = &vcpu->arch; @@ -1638,20 +1613,20 @@ kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc, arch->pc = KVM_GUEST_KSEG0 + 0x180; } else { - printk("Trying to deliver SYSCALL when EXL is already set\n"); + kvm_err("Trying to deliver SYSCALL when EXL is already set\n"); er = EMULATE_FAIL; } return er; } -enum emulation_result -kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc, - struct kvm_run *run, struct kvm_vcpu *vcpu) +enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; struct kvm_vcpu_arch *arch = &vcpu->arch; - enum emulation_result er = EMULATE_DONE; unsigned long entryhi = (vcpu->arch. host_cp0_badvaddr & VPN2_MASK) | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); @@ -1688,16 +1663,16 @@ kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc, /* Blow away the shadow host TLBs */ kvm_mips_flush_host_tlb(1); - return er; + return EMULATE_DONE; } -enum emulation_result -kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc, - struct kvm_run *run, struct kvm_vcpu *vcpu) +enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; struct kvm_vcpu_arch *arch = &vcpu->arch; - enum emulation_result er = EMULATE_DONE; unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); @@ -1734,16 +1709,16 @@ kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc, /* Blow away the shadow host TLBs */ kvm_mips_flush_host_tlb(1); - return er; + return EMULATE_DONE; } -enum emulation_result -kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc, - struct kvm_run *run, struct kvm_vcpu *vcpu) +enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; struct kvm_vcpu_arch *arch = &vcpu->arch; - enum emulation_result er = EMULATE_DONE; unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); @@ -1778,16 +1753,16 @@ kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc, /* Blow away the shadow host TLBs */ kvm_mips_flush_host_tlb(1); - return er; + return EMULATE_DONE; } -enum emulation_result -kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc, - struct kvm_run *run, struct kvm_vcpu *vcpu) +enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = 
vcpu->arch.cop0; struct kvm_vcpu_arch *arch = &vcpu->arch; - enum emulation_result er = EMULATE_DONE; unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); @@ -1822,13 +1797,13 @@ kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc, /* Blow away the shadow host TLBs */ kvm_mips_flush_host_tlb(1); - return er; + return EMULATE_DONE; } /* TLBMOD: store into address matching TLB with Dirty bit off */ -enum emulation_result -kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc, - struct kvm_run *run, struct kvm_vcpu *vcpu) +enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu) { enum emulation_result er = EMULATE_DONE; #ifdef DEBUG @@ -1837,9 +1812,7 @@ kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc, (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); int index; - /* - * If address not in the guest TLB, then we are in trouble - */ + /* If address not in the guest TLB, then we are in trouble */ index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); if (index < 0) { /* XXXKYMA Invalidate and retry */ @@ -1856,15 +1829,15 @@ kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc, return er; } -enum emulation_result -kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc, - struct kvm_run *run, struct kvm_vcpu *vcpu) +enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); struct kvm_vcpu_arch *arch = &vcpu->arch; - enum emulation_result er = EMULATE_DONE; if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { /* save old pc */ @@ -1895,16 +1868,16 @@ kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc, /* Blow away the shadow host TLBs */ kvm_mips_flush_host_tlb(1); - return er; + return EMULATE_DONE; } -enum emulation_result -kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc, - struct kvm_run *run, struct kvm_vcpu *vcpu) +enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; struct kvm_vcpu_arch *arch = &vcpu->arch; - enum emulation_result er = EMULATE_DONE; if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { /* save old pc */ @@ -1924,12 +1897,13 @@ kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc, (T_COP_UNUSABLE << CAUSEB_EXCCODE)); kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE)); - return er; + return EMULATE_DONE; } -enum emulation_result -kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc, - struct kvm_run *run, struct kvm_vcpu *vcpu) +enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; struct kvm_vcpu_arch *arch = &vcpu->arch; @@ -1961,9 +1935,10 @@ kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc, return er; } -enum emulation_result -kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc, - struct kvm_run *run, struct kvm_vcpu *vcpu) +enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; struct kvm_vcpu_arch *arch = &vcpu->arch; @@ -1988,16 +1963,14 @@ 
kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc, arch->pc = KVM_GUEST_KSEG0 + 0x180; } else { - printk("Trying to deliver BP when EXL is already set\n"); + kvm_err("Trying to deliver BP when EXL is already set\n"); er = EMULATE_FAIL; } return er; } -/* - * ll/sc, rdhwr, sync emulation - */ +/* ll/sc, rdhwr, sync emulation */ #define OPCODE 0xfc000000 #define BASE 0x03e00000 @@ -2012,9 +1985,9 @@ kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc, #define SYNC 0x0000000f #define RDHWR 0x0000003b -enum emulation_result -kvm_mips_handle_ri(unsigned long cause, uint32_t *opc, - struct kvm_run *run, struct kvm_vcpu *vcpu) +enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; struct kvm_vcpu_arch *arch = &vcpu->arch; @@ -2031,16 +2004,14 @@ kvm_mips_handle_ri(unsigned long cause, uint32_t *opc, if (er == EMULATE_FAIL) return er; - /* - * Fetch the instruction. - */ + /* Fetch the instruction. */ if (cause & CAUSEF_BD) opc += 1; inst = kvm_get_inst(opc, vcpu); if (inst == KVM_INVALID_INST) { - printk("%s: Cannot get inst @ %p\n", __func__, opc); + kvm_err("%s: Cannot get inst @ %p\n", __func__, opc); return EMULATE_FAIL; } @@ -2099,15 +2070,15 @@ emulate_ri: return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu); } -enum emulation_result -kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run) +enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, + struct kvm_run *run) { unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; enum emulation_result er = EMULATE_DONE; unsigned long curr_pc; if (run->mmio.len > sizeof(*gpr)) { - printk("Bad MMIO length: %d", run->mmio.len); + kvm_err("Bad MMIO length: %d", run->mmio.len); er = EMULATE_FAIL; goto done; } @@ -2142,18 +2113,18 @@ kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run) } if (vcpu->arch.pending_load_cause & CAUSEF_BD) - kvm_debug - ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n", - vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr, - vcpu->mmio_needed); + kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n", + vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr, + vcpu->mmio_needed); done: return er; } -static enum emulation_result -kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc, - struct kvm_run *run, struct kvm_vcpu *vcpu) +static enum emulation_result kvm_mips_emulate_exc(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu) { uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; struct mips_coproc *cop0 = vcpu->arch.cop0; @@ -2181,16 +2152,17 @@ kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc, exccode, kvm_read_c0_guest_epc(cop0), kvm_read_c0_guest_badvaddr(cop0)); } else { - printk("Trying to deliver EXC when EXL is already set\n"); + kvm_err("Trying to deliver EXC when EXL is already set\n"); er = EMULATE_FAIL; } return er; } -enum emulation_result -kvm_mips_check_privilege(unsigned long cause, uint32_t *opc, - struct kvm_run *run, struct kvm_vcpu *vcpu) +enum emulation_result kvm_mips_check_privilege(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu) { enum emulation_result er = EMULATE_DONE; uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; @@ -2215,10 +2187,13 @@ kvm_mips_check_privilege(unsigned long cause, uint32_t *opc, break; case T_TLB_LD_MISS: - /* We we are accessing Guest kernel space, then send an 
address error exception to the guest */ + /* + * If we are accessing Guest kernel space, then send an + * address error exception to the guest + */ if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { - printk("%s: LD MISS @ %#lx\n", __func__, - badvaddr); + kvm_debug("%s: LD MISS @ %#lx\n", __func__, + badvaddr); cause &= ~0xff; cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE); er = EMULATE_PRIV_FAIL; @@ -2226,10 +2201,13 @@ kvm_mips_check_privilege(unsigned long cause, uint32_t *opc, break; case T_TLB_ST_MISS: - /* We we are accessing Guest kernel space, then send an address error exception to the guest */ + /* + * If we are accessing Guest kernel space, then send an + * address error exception to the guest + */ if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { - printk("%s: ST MISS @ %#lx\n", __func__, - badvaddr); + kvm_debug("%s: ST MISS @ %#lx\n", __func__, + badvaddr); cause &= ~0xff; cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE); er = EMULATE_PRIV_FAIL; @@ -2237,8 +2215,8 @@ kvm_mips_check_privilege(unsigned long cause, uint32_t *opc, break; case T_ADDR_ERR_ST: - printk("%s: address error ST @ %#lx\n", __func__, - badvaddr); + kvm_debug("%s: address error ST @ %#lx\n", __func__, + badvaddr); if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { cause &= ~0xff; cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE); @@ -2246,8 +2224,8 @@ kvm_mips_check_privilege(unsigned long cause, uint32_t *opc, er = EMULATE_PRIV_FAIL; break; case T_ADDR_ERR_LD: - printk("%s: address error LD @ %#lx\n", __func__, - badvaddr); + kvm_debug("%s: address error LD @ %#lx\n", __func__, + badvaddr); if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { cause &= ~0xff; cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE); @@ -2260,21 +2238,23 @@ kvm_mips_check_privilege(unsigned long cause, uint32_t *opc, } } - if (er == EMULATE_PRIV_FAIL) { + if (er == EMULATE_PRIV_FAIL) kvm_mips_emulate_exc(cause, opc, run, vcpu); - } + return er; } -/* User Address (UA) fault, this could happen if +/* + * User Address (UA) fault, this could happen if * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this * case we pass on the fault to the guest kernel and let it handle it. * (2) TLB entry is present in the Guest TLB but not in the shadow, in this * case we inject the TLB from the Guest TLB into the shadow host TLB */ -enum emulation_result -kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc, - struct kvm_run *run, struct kvm_vcpu *vcpu) +enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu) { enum emulation_result er = EMULATE_DONE; uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; @@ -2284,10 +2264,11 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc, kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n", vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi); - /* KVM would not have got the exception if this entry was valid in the shadow host TLB - * Check the Guest TLB, if the entry is not there then send the guest an - * exception. The guest exc handler should then inject an entry into the - * guest TLB + /* + * KVM would not have got the exception if this entry was valid in the + * shadow host TLB. Check the Guest TLB, if the entry is not there then + * send the guest an exception. The guest exc handler should then inject + * an entry into the guest TLB. 
*/ index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) | @@ -2299,13 +2280,17 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc, } else if (exccode == T_TLB_ST_MISS) { er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu); } else { - printk("%s: invalid exc code: %d\n", __func__, exccode); + kvm_err("%s: invalid exc code: %d\n", __func__, + exccode); er = EMULATE_FAIL; } } else { struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; - /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */ + /* + * Check if the entry is valid, if not then setup a TLB invalid + * exception to the guest + */ if (!TLB_IS_VALID(*tlb, va)) { if (exccode == T_TLB_LD_MISS) { er = kvm_mips_emulate_tlbinv_ld(cause, opc, run, @@ -2314,15 +2299,17 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc, er = kvm_mips_emulate_tlbinv_st(cause, opc, run, vcpu); } else { - printk("%s: invalid exc code: %d\n", __func__, - exccode); + kvm_err("%s: invalid exc code: %d\n", __func__, + exccode); er = EMULATE_FAIL; } } else { - kvm_debug - ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n", - tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1); - /* OK we have a Guest TLB entry, now inject it into the shadow host TLB */ + kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n", + tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1); + /* + * OK we have a Guest TLB entry, now inject it into the + * shadow host TLB + */ kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL, NULL); } diff --git a/arch/mips/kvm/kvm_mips_int.c b/arch/mips/kvm/interrupt.c index 1e5de16afe29..9b4445940c2b 100644 --- a/arch/mips/kvm/kvm_mips_int.c +++ b/arch/mips/kvm/interrupt.c @@ -1,13 +1,13 @@ /* -* This file is subject to the terms and conditions of the GNU General Public -* License. See the file "COPYING" in the main directory of this archive -* for more details. -* -* KVM/MIPS: Interrupt delivery -* -* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. -* Authors: Sanjay Lal <sanjayl@kymasys.com> -*/ + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * KVM/MIPS: Interrupt delivery + * + * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 
+ * Authors: Sanjay Lal <sanjayl@kymasys.com> + */ #include <linux/errno.h> #include <linux/err.h> @@ -20,7 +20,7 @@ #include <linux/kvm_host.h> -#include "kvm_mips_int.h" +#include "interrupt.h" void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority) { @@ -34,7 +34,8 @@ void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority) void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu) { - /* Cause bits to reflect the pending timer interrupt, + /* + * Cause bits to reflect the pending timer interrupt, * the EXC code will be set when we are actually * delivering the interrupt: */ @@ -51,12 +52,13 @@ void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu) kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER); } -void -kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq) +void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, + struct kvm_mips_interrupt *irq) { int intr = (int)irq->irq; - /* Cause bits to reflect the pending IO interrupt, + /* + * Cause bits to reflect the pending IO interrupt, * the EXC code will be set when we are actually * delivering the interrupt: */ @@ -83,11 +85,11 @@ kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq) } -void -kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu, - struct kvm_mips_interrupt *irq) +void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu, + struct kvm_mips_interrupt *irq) { int intr = (int)irq->irq; + switch (intr) { case -2: kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0)); @@ -111,9 +113,8 @@ kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu, } /* Deliver the interrupt of the corresponding priority, if possible. */ -int -kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, - uint32_t cause) +int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, + uint32_t cause) { int allowed = 0; uint32_t exccode; @@ -164,7 +165,6 @@ kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, /* Are we allowed to deliver the interrupt ??? */ if (allowed) { - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { /* save old pc */ kvm_write_c0_guest_epc(cop0, arch->pc); @@ -195,9 +195,8 @@ kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, return allowed; } -int -kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority, - uint32_t cause) +int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority, + uint32_t cause) { return 1; } diff --git a/arch/mips/kvm/kvm_mips_int.h b/arch/mips/kvm/interrupt.h index 20da7d29eede..4ab4bdfad703 100644 --- a/arch/mips/kvm/kvm_mips_int.h +++ b/arch/mips/kvm/interrupt.h @@ -1,14 +1,15 @@ /* -* This file is subject to the terms and conditions of the GNU General Public -* License. See the file "COPYING" in the main directory of this archive -* for more details. -* -* KVM/MIPS: Interrupts -* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. -* Authors: Sanjay Lal <sanjayl@kymasys.com> -*/ + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * KVM/MIPS: Interrupts + * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 
+ * Authors: Sanjay Lal <sanjayl@kymasys.com> + */ -/* MIPS Exception Priorities, exceptions (including interrupts) are queued up +/* + * MIPS Exception Priorities, exceptions (including interrupts) are queued up * for the guest in the order specified by their priorities */ @@ -27,6 +28,9 @@ #define MIPS_EXC_MAX 12 /* XXXSL More to follow */ +extern char mips32_exception[], mips32_exceptionEnd[]; +extern char mips32_GuestException[], mips32_GuestExceptionEnd[]; + #define C_TI (_ULCAST_(1) << 30) #define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0) diff --git a/arch/mips/kvm/kvm_mips_comm.h b/arch/mips/kvm/kvm_mips_comm.h deleted file mode 100644 index a4a8c85cc8f7..000000000000 --- a/arch/mips/kvm/kvm_mips_comm.h +++ /dev/null @@ -1,23 +0,0 @@ -/* -* This file is subject to the terms and conditions of the GNU General Public -* License. See the file "COPYING" in the main directory of this archive -* for more details. -* -* KVM/MIPS: commpage: mapped into get kernel space -* -* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. -* Authors: Sanjay Lal <sanjayl@kymasys.com> -*/ - -#ifndef __KVM_MIPS_COMMPAGE_H__ -#define __KVM_MIPS_COMMPAGE_H__ - -struct kvm_mips_commpage { - struct mips_coproc cop0; /* COP0 state is mapped into Guest kernel via commpage */ -}; - -#define KVM_MIPS_COMM_EIDI_OFFSET 0x0 - -extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu); - -#endif /* __KVM_MIPS_COMMPAGE_H__ */ diff --git a/arch/mips/kvm/kvm_mips_commpage.c b/arch/mips/kvm/kvm_mips_commpage.c deleted file mode 100644 index 3873b1ecc40f..000000000000 --- a/arch/mips/kvm/kvm_mips_commpage.c +++ /dev/null @@ -1,37 +0,0 @@ -/* -* This file is subject to the terms and conditions of the GNU General Public -* License. See the file "COPYING" in the main directory of this archive -* for more details. -* -* commpage, currently used for Virtual COP0 registers. -* Mapped into the guest kernel @ 0x0. -* -* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. -* Authors: Sanjay Lal <sanjayl@kymasys.com> -*/ - -#include <linux/errno.h> -#include <linux/err.h> -#include <linux/module.h> -#include <linux/vmalloc.h> -#include <linux/fs.h> -#include <linux/bootmem.h> -#include <asm/page.h> -#include <asm/cacheflush.h> -#include <asm/mmu_context.h> - -#include <linux/kvm_host.h> - -#include "kvm_mips_comm.h" - -void kvm_mips_commpage_init(struct kvm_vcpu *vcpu) -{ - struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage; - memset(page, 0, sizeof(struct kvm_mips_commpage)); - - /* Specific init values for fields */ - vcpu->arch.cop0 = &page->cop0; - memset(vcpu->arch.cop0, 0, sizeof(struct mips_coproc)); - - return; -} diff --git a/arch/mips/kvm/kvm_mips_opcode.h b/arch/mips/kvm/kvm_mips_opcode.h deleted file mode 100644 index 86d3b4cc348b..000000000000 --- a/arch/mips/kvm/kvm_mips_opcode.h +++ /dev/null @@ -1,24 +0,0 @@ -/* -* This file is subject to the terms and conditions of the GNU General Public -* License. See the file "COPYING" in the main directory of this archive -* for more details. -* -* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 
-* Authors: Sanjay Lal <sanjayl@kymasys.com> -*/ - -/* - * Define opcode values not defined in <asm/isnt.h> - */ - -#ifndef __KVM_MIPS_OPCODE_H__ -#define __KVM_MIPS_OPCODE_H__ - -/* COP0 Ops */ -#define mfmcz_op 0x0b /* 01011 */ -#define wrpgpr_op 0x0e /* 01110 */ - -/* COP0 opcodes (only if COP0 and CO=1): */ -#define wait_op 0x20 /* 100000 */ - -#endif /* __KVM_MIPS_OPCODE_H__ */ diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/locore.S index 033ac343e72c..d7279c03c517 100644 --- a/arch/mips/kvm/kvm_locore.S +++ b/arch/mips/kvm/locore.S @@ -16,7 +16,6 @@ #include <asm/stackframe.h> #include <asm/asm-offsets.h> - #define _C_LABEL(x) x #define MIPSX(name) mips32_ ## name #define CALLFRAME_SIZ 32 @@ -91,7 +90,10 @@ FEXPORT(__kvm_mips_vcpu_run) LONG_S $24, PT_R24(k1) LONG_S $25, PT_R25(k1) - /* XXXKYMA k0/k1 not saved, not being used if we got here through an ioctl() */ + /* + * XXXKYMA k0/k1 not saved, not being used if we got here through + * an ioctl() + */ LONG_S $28, PT_R28(k1) LONG_S $29, PT_R29(k1) @@ -132,7 +134,10 @@ FEXPORT(__kvm_mips_vcpu_run) /* Save the kernel gp as well */ LONG_S gp, VCPU_HOST_GP(k1) - /* Setup status register for running the guest in UM, interrupts are disabled */ + /* + * Setup status register for running the guest in UM, interrupts + * are disabled + */ li k0, (ST0_EXL | KSU_USER | ST0_BEV) mtc0 k0, CP0_STATUS ehb @@ -152,7 +157,6 @@ FEXPORT(__kvm_mips_vcpu_run) mtc0 k0, CP0_STATUS ehb - /* Set Guest EPC */ LONG_L t0, VCPU_PC(k1) mtc0 t0, CP0_EPC @@ -165,7 +169,7 @@ FEXPORT(__kvm_mips_load_asid) INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ 1: - /* t1: contains the base of the ASID array, need to get the cpu id */ + /* t1: contains the base of the ASID array, need to get the cpu id */ LONG_L t2, TI_CPU($28) /* smp_processor_id */ INT_SLL t2, t2, 2 /* x4 */ REG_ADDU t3, t1, t2 @@ -229,9 +233,7 @@ FEXPORT(__kvm_mips_load_k0k1) eret VECTOR(MIPSX(exception), unknown) -/* - * Find out what mode we came from and jump to the proper handler. - */ +/* Find out what mode we came from and jump to the proper handler. */ mtc0 k0, CP0_ERROREPC #01: Save guest k0 ehb #02: @@ -239,7 +241,8 @@ VECTOR(MIPSX(exception), unknown) INT_SRL k0, k0, 10 #03: Get rid of CPUNum INT_SLL k0, k0, 10 #04 LONG_S k1, 0x3000(k0) #05: Save k1 @ offset 0x3000 - INT_ADDIU k0, k0, 0x2000 #06: Exception handler is installed @ offset 0x2000 + INT_ADDIU k0, k0, 0x2000 #06: Exception handler is + # installed @ offset 0x2000 j k0 #07: jump to the function nop #08: branch delay slot VECTOR_END(MIPSX(exceptionEnd)) @@ -248,7 +251,6 @@ VECTOR_END(MIPSX(exceptionEnd)) /* * Generic Guest exception handler. We end up here when the guest * does something that causes a trap to kernel mode. 
- * */ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) /* Get the VCPU pointer from DDTATA_LO */ @@ -290,9 +292,7 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) LONG_S $30, VCPU_R30(k1) LONG_S $31, VCPU_R31(k1) - /* We need to save hi/lo and restore them on - * the way out - */ + /* We need to save hi/lo and restore them on the way out */ mfhi t0 LONG_S t0, VCPU_HI(k1) @@ -321,8 +321,10 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) /* Save pointer to run in s0, will be saved by the compiler */ move s0, a0 - /* Save Host level EPC, BadVaddr and Cause to VCPU, useful to - * process the exception */ + /* + * Save Host level EPC, BadVaddr and Cause to VCPU, useful to + * process the exception + */ mfc0 k0,CP0_EPC LONG_S k0, VCPU_PC(k1) @@ -351,7 +353,6 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) LONG_L k0, VCPU_HOST_EBASE(k1) mtc0 k0,CP0_EBASE - /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */ .set at and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE) @@ -369,7 +370,8 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) /* Saved host state */ INT_ADDIU sp, sp, -PT_SIZE - /* XXXKYMA do we need to load the host ASID, maybe not because the + /* + * XXXKYMA do we need to load the host ASID, maybe not because the * kernel entries are marked GLOBAL, need to verify */ @@ -383,9 +385,11 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) /* Jump to handler */ FEXPORT(__kvm_mips_jump_to_handler) - /* XXXKYMA: not sure if this is safe, how large is the stack?? + /* + * XXXKYMA: not sure if this is safe, how large is the stack?? * Now jump to the kvm_mips_handle_exit() to see if we can deal - * with this in the kernel */ + * with this in the kernel + */ PTR_LA t9, kvm_mips_handle_exit jalr.hb t9 INT_ADDIU sp, sp, -CALLFRAME_SIZ /* BD Slot */ @@ -394,7 +398,8 @@ FEXPORT(__kvm_mips_jump_to_handler) di ehb - /* XXXKYMA: k0/k1 could have been blown away if we processed + /* + * XXXKYMA: k0/k1 could have been blown away if we processed * an exception while we were handling the exception from the * guest, reload k1 */ @@ -402,7 +407,8 @@ FEXPORT(__kvm_mips_jump_to_handler) move k1, s1 INT_ADDIU k1, k1, VCPU_HOST_ARCH - /* Check return value, should tell us if we are returning to the + /* + * Check return value, should tell us if we are returning to the * host (handle I/O etc)or resuming the guest */ andi t0, v0, RESUME_HOST @@ -521,8 +527,10 @@ __kvm_mips_return_to_host: LONG_L $0, PT_R0(k1) LONG_L $1, PT_R1(k1) - /* r2/v0 is the return code, shift it down by 2 (arithmetic) - * to recover the err code */ + /* + * r2/v0 is the return code, shift it down by 2 (arithmetic) + * to recover the err code + */ INT_SRA k0, v0, 2 move $2, k0 @@ -566,7 +574,6 @@ __kvm_mips_return_to_host: PTR_LI k0, 0x2000000F mtc0 k0, CP0_HWRENA - /* Restore RA, which is the address we will return to */ LONG_L ra, PT_R31(k1) j ra diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/mips.c index f3c56a182fd8..e3b21e51ff7e 100644 --- a/arch/mips/kvm/kvm_mips.c +++ b/arch/mips/kvm/mips.c @@ -7,7 +7,7 @@ * * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 
* Authors: Sanjay Lal <sanjayl@kymasys.com> -*/ + */ #include <linux/errno.h> #include <linux/err.h> @@ -21,8 +21,8 @@ #include <linux/kvm_host.h> -#include "kvm_mips_int.h" -#include "kvm_mips_comm.h" +#include "interrupt.h" +#include "commpage.h" #define CREATE_TRACE_POINTS #include "trace.h" @@ -31,38 +31,41 @@ #define VECTORSPACING 0x100 /* for EI/VI mode */ #endif -#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU +#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x) struct kvm_stats_debugfs_item debugfs_entries[] = { - { "wait", VCPU_STAT(wait_exits) }, - { "cache", VCPU_STAT(cache_exits) }, - { "signal", VCPU_STAT(signal_exits) }, - { "interrupt", VCPU_STAT(int_exits) }, - { "cop_unsuable", VCPU_STAT(cop_unusable_exits) }, - { "tlbmod", VCPU_STAT(tlbmod_exits) }, - { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) }, - { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) }, - { "addrerr_st", VCPU_STAT(addrerr_st_exits) }, - { "addrerr_ld", VCPU_STAT(addrerr_ld_exits) }, - { "syscall", VCPU_STAT(syscall_exits) }, - { "resvd_inst", VCPU_STAT(resvd_inst_exits) }, - { "break_inst", VCPU_STAT(break_inst_exits) }, - { "flush_dcache", VCPU_STAT(flush_dcache_exits) }, - { "halt_wakeup", VCPU_STAT(halt_wakeup) }, + { "wait", VCPU_STAT(wait_exits), KVM_STAT_VCPU }, + { "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU }, + { "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU }, + { "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU }, + { "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU }, + { "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU }, + { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU }, + { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU }, + { "addrerr_st", VCPU_STAT(addrerr_st_exits), KVM_STAT_VCPU }, + { "addrerr_ld", VCPU_STAT(addrerr_ld_exits), KVM_STAT_VCPU }, + { "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU }, + { "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU }, + { "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU }, + { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU }, + { "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU }, {NULL} }; static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu) { int i; + for_each_possible_cpu(i) { vcpu->arch.guest_kernel_asid[i] = 0; vcpu->arch.guest_user_asid[i] = 0; } + return 0; } -/* XXXKYMA: We are simulatoring a processor that has the WII bit set in Config7, so we - * are "runnable" if interrupts are pending +/* + * XXXKYMA: We are simulating a processor that has the WII bit set in + * Config7, so we are "runnable" if interrupts are pending */ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) { @@ -74,36 +77,29 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) return 1; } -int kvm_arch_hardware_enable(void *garbage) +int kvm_arch_hardware_enable(void) { return 0; } -void kvm_arch_hardware_disable(void *garbage) -{ -} - int kvm_arch_hardware_setup(void) { return 0; } -void kvm_arch_hardware_unsetup(void) -{ -} - void kvm_arch_check_processor_compat(void *rtn) { - int *r = (int *)rtn; - *r = 0; - return; + *(int *)rtn = 0; } static void kvm_mips_init_tlbs(struct kvm *kvm) { unsigned long wired; - /* Add a wired entry to the TLB, it is used to map the commpage to the Guest kernel */ + /* + * Add a wired entry to the TLB, it is used to map the commpage to + * the Guest kernel + */ wired = read_c0_wired(); write_c0_wired(wired + 1); mtc0_tlbw_hazard(); @@ -130,7 +126,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) on_each_cpu(kvm_mips_init_vm_percpu, 
kvm, 1); } - return 0; } @@ -160,10 +155,6 @@ void kvm_mips_free_vcpus(struct kvm *kvm) mutex_unlock(&kvm->lock); } -void kvm_arch_sync_events(struct kvm *kvm) -{ -} - static void kvm_mips_uninit_tlbs(void *arg) { /* Restore wired count */ @@ -185,42 +176,33 @@ void kvm_arch_destroy_vm(struct kvm *kvm) } } -long -kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) +long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, + unsigned long arg) { return -ENOIOCTLCMD; } -void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, - struct kvm_memory_slot *dont) -{ -} - int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, unsigned long npages) { return 0; } -void kvm_arch_memslots_updated(struct kvm *kvm) -{ -} - int kvm_arch_prepare_memory_region(struct kvm *kvm, - struct kvm_memory_slot *memslot, - struct kvm_userspace_memory_region *mem, - enum kvm_mr_change change) + struct kvm_memory_slot *memslot, + struct kvm_userspace_memory_region *mem, + enum kvm_mr_change change) { return 0; } void kvm_arch_commit_memory_region(struct kvm *kvm, - struct kvm_userspace_memory_region *mem, - const struct kvm_memory_slot *old, - enum kvm_mr_change change) + struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + enum kvm_mr_change change) { unsigned long npages = 0; - int i, err = 0; + int i; kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n", __func__, kvm, mem->slot, mem->guest_phys_addr, @@ -238,40 +220,21 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, if (!kvm->arch.guest_pmap) { kvm_err("Failed to allocate guest PMAP"); - err = -ENOMEM; - goto out; + return; } kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n", npages, kvm->arch.guest_pmap); /* Now setup the page table */ - for (i = 0; i < npages; i++) { + for (i = 0; i < npages; i++) kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE; - } } } -out: - return; -} - -void kvm_arch_flush_shadow_all(struct kvm *kvm) -{ -} - -void kvm_arch_flush_shadow_memslot(struct kvm *kvm, - struct kvm_memory_slot *slot) -{ -} - -void kvm_arch_flush_shadow(struct kvm *kvm) -{ } struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) { - extern char mips32_exception[], mips32_exceptionEnd[]; - extern char mips32_GuestException[], mips32_GuestExceptionEnd[]; int err, size, offset; void *gebase; int i; @@ -290,14 +253,14 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu); - /* Allocate space for host mode exception handlers that handle + /* + * Allocate space for host mode exception handlers that handle * guest mode exits */ - if (cpu_has_veic || cpu_has_vint) { + if (cpu_has_veic || cpu_has_vint) size = 0x200 + VECTORSPACING * 64; - } else { + else size = 0x4000; - } /* Save Linux EBASE */ vcpu->arch.host_ebase = (void *)read_c0_ebase(); @@ -345,7 +308,10 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) local_flush_icache_range((unsigned long)gebase, (unsigned long)gebase + ALIGN(size, PAGE_SIZE)); - /* Allocate comm page for guest kernel, a TLB will be reserved for mapping GVA @ 0xFFFF8000 to this page */ + /* + * Allocate comm page for guest kernel, a TLB will be reserved for + * mapping GVA @ 0xFFFF8000 to this page + */ vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL); if (!vcpu->arch.kseg0_commpage) { @@ -392,9 +358,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) kvm_arch_vcpu_free(vcpu); } -int 
-kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, - struct kvm_guest_debug *dbg) +int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, + struct kvm_guest_debug *dbg) { return -ENOIOCTLCMD; } @@ -431,8 +396,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) return r; } -int -kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq) +int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, + struct kvm_mips_interrupt *irq) { int intr = (int)irq->irq; struct kvm_vcpu *dvcpu = NULL; @@ -459,23 +424,20 @@ kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq) dvcpu->arch.wait = 0; - if (waitqueue_active(&dvcpu->wq)) { + if (waitqueue_active(&dvcpu->wq)) wake_up_interruptible(&dvcpu->wq); - } return 0; } -int -kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, - struct kvm_mp_state *mp_state) +int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state) { return -ENOIOCTLCMD; } -int -kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, - struct kvm_mp_state *mp_state) +int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state) { return -ENOIOCTLCMD; } @@ -632,10 +594,12 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, } if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; + return put_user(v, uaddr64); } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; u32 v32 = (u32)v; + return put_user(v32, uaddr32); } else { return -EINVAL; @@ -728,8 +692,8 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, return 0; } -long -kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) +long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, + unsigned long arg) { struct kvm_vcpu *vcpu = filp->private_data; void __user *argp = (void __user *)arg; @@ -739,6 +703,7 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + if (copy_from_user(®, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -773,6 +738,7 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) case KVM_INTERRUPT: { struct kvm_mips_interrupt irq; + r = -EFAULT; if (copy_from_user(&irq, argp, sizeof(irq))) goto out; @@ -791,9 +757,7 @@ out: return r; } -/* - * Get (and clear) the dirty memory log for a memory slot. - */ +/* Get (and clear) the dirty memory log for a memory slot. 
*/ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) { struct kvm_memory_slot *memslot; @@ -815,8 +779,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) ga = memslot->base_gfn << PAGE_SHIFT; ga_end = ga + (memslot->npages << PAGE_SHIFT); - printk("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga, - ga_end); + kvm_info("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga, + ga_end); n = kvm_dirty_bitmap_bytes(memslot); memset(memslot->dirty_bitmap, 0, n); @@ -843,16 +807,12 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) int kvm_arch_init(void *opaque) { - int ret; - if (kvm_mips_callbacks) { kvm_err("kvm: module already exists\n"); return -EEXIST; } - ret = kvm_mips_emulation_init(&kvm_mips_callbacks); - - return ret; + return kvm_mips_emulation_init(&kvm_mips_callbacks); } void kvm_arch_exit(void) @@ -860,14 +820,14 @@ void kvm_arch_exit(void) kvm_mips_callbacks = NULL; } -int -kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) +int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, + struct kvm_sregs *sregs) { return -ENOIOCTLCMD; } -int -kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) +int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, + struct kvm_sregs *sregs) { return -ENOIOCTLCMD; } @@ -892,7 +852,7 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) return VM_FAULT_SIGBUS; } -int kvm_dev_ioctl_check_extension(long ext) +int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) { int r; @@ -923,24 +883,25 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu) if (!vcpu) return -1; - printk("VCPU Register Dump:\n"); - printk("\tpc = 0x%08lx\n", vcpu->arch.pc); - printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions); + kvm_debug("VCPU Register Dump:\n"); + kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc); + kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions); for (i = 0; i < 32; i += 4) { - printk("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i, + kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i, vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1], vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); } - printk("\thi: 0x%08lx\n", vcpu->arch.hi); - printk("\tlo: 0x%08lx\n", vcpu->arch.lo); + kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi); + kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo); cop0 = vcpu->arch.cop0; - printk("\tStatus: 0x%08lx, Cause: 0x%08lx\n", - kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_cause(cop0)); + kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n", + kvm_read_c0_guest_status(cop0), + kvm_read_c0_guest_cause(cop0)); - printk("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0)); + kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0)); return 0; } @@ -980,14 +941,11 @@ static void kvm_mips_comparecount_func(unsigned long data) kvm_mips_callbacks->queue_timer_int(vcpu); vcpu->arch.wait = 0; - if (waitqueue_active(&vcpu->wq)) { + if (waitqueue_active(&vcpu->wq)) wake_up_interruptible(&vcpu->wq); - } } -/* - * low level hrtimer wake routine. 
- */ +/* low level hrtimer wake routine */ static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer) { struct kvm_vcpu *vcpu; @@ -1006,13 +964,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) return 0; } -void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) -{ - return; -} - -int -kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr) +int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, + struct kvm_translation *tr) { return 0; } @@ -1023,8 +976,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) return kvm_mips_callbacks->vcpu_setup(vcpu); } -static -void kvm_mips_set_c0_status(void) +static void kvm_mips_set_c0_status(void) { uint32_t status = read_c0_status(); @@ -1054,7 +1006,10 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) run->exit_reason = KVM_EXIT_UNKNOWN; run->ready_for_interrupt_injection = 1; - /* Set the appropriate status bits based on host CPU features, before we hit the scheduler */ + /* + * Set the appropriate status bits based on host CPU features, + * before we hit the scheduler + */ kvm_mips_set_c0_status(); local_irq_enable(); @@ -1062,7 +1017,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n", cause, opc, run, vcpu); - /* Do a privilege check, if in UM most of these exit conditions end up + /* + * Do a privilege check, if in UM most of these exit conditions end up * causing an exception to be delivered to the Guest Kernel */ er = kvm_mips_check_privilege(cause, opc, run, vcpu); @@ -1081,9 +1037,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) ++vcpu->stat.int_exits; trace_kvm_exit(vcpu, INT_EXITS); - if (need_resched()) { + if (need_resched()) cond_resched(); - } ret = RESUME_GUEST; break; @@ -1095,9 +1050,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS); ret = kvm_mips_callbacks->handle_cop_unusable(vcpu); /* XXXKYMA: Might need to return to user space */ - if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) { + if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) ret = RESUME_HOST; - } break; case T_TLB_MOD: @@ -1107,10 +1061,9 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) break; case T_TLB_ST_MISS: - kvm_debug - ("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n", - cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc, - badvaddr); + kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n", + cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc, + badvaddr); ++vcpu->stat.tlbmiss_st_exits; trace_kvm_exit(vcpu, TLBMISS_ST_EXITS); @@ -1157,10 +1110,9 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) break; default: - kvm_err - ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n", - exccode, opc, kvm_get_inst(opc, vcpu), badvaddr, - kvm_read_c0_guest_status(vcpu->arch.cop0)); + kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n", + exccode, opc, kvm_get_inst(opc, vcpu), badvaddr, + kvm_read_c0_guest_status(vcpu->arch.cop0)); kvm_arch_vcpu_dump_regs(vcpu); run->exit_reason = KVM_EXIT_INTERNAL_ERROR; ret = RESUME_HOST; @@ -1175,7 +1127,7 @@ skip_emul: kvm_mips_deliver_interrupts(vcpu, cause); if (!(ret & RESUME_HOST)) { - /* Only check for signals if not already exiting to userspace */ + /* Only check for signals if not already exiting to 
userspace */ if (signal_pending(current)) { run->exit_reason = KVM_EXIT_INTR; ret = (-EINTR << 2) | RESUME_HOST; @@ -1196,11 +1148,13 @@ int __init kvm_mips_init(void) if (ret) return ret; - /* On MIPS, kernel modules are executed from "mapped space", which requires TLBs. - * The TLB handling code is statically linked with the rest of the kernel (kvm_tlb.c) - * to avoid the possibility of double faulting. The issue is that the TLB code - * references routines that are part of the the KVM module, - * which are only available once the module is loaded. + /* + * On MIPS, kernel modules are executed from "mapped space", which + * requires TLBs. The TLB handling code is statically linked with + * the rest of the kernel (tlb.c) to avoid the possibility of + * double faulting. The issue is that the TLB code references + * routines that are part of the KVM module, which are only + * available once the module is loaded. */ kvm_mips_gfn_to_pfn = gfn_to_pfn; kvm_mips_release_pfn_clean = kvm_release_pfn_clean; diff --git a/arch/mips/kvm/opcode.h b/arch/mips/kvm/opcode.h new file mode 100644 index 000000000000..03a6ae84c7df --- /dev/null +++ b/arch/mips/kvm/opcode.h @@ -0,0 +1,22 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. + * Authors: Sanjay Lal <sanjayl@kymasys.com> + */ + +/* Define opcode values not defined in <asm/inst.h> */ + +#ifndef __KVM_MIPS_OPCODE_H__ +#define __KVM_MIPS_OPCODE_H__ + +/* COP0 Ops */ +#define mfmcz_op 0x0b /* 01011 */ +#define wrpgpr_op 0x0e /* 01110 */ + +/* COP0 opcodes (only if COP0 and CO=1): */ +#define wait_op 0x20 /* 100000 */ + +#endif /* __KVM_MIPS_OPCODE_H__ */ diff --git a/arch/mips/kvm/kvm_mips_stats.c b/arch/mips/kvm/stats.c index 075904bcac1b..a74d6024c5ad 100644 --- a/arch/mips/kvm/kvm_mips_stats.c +++ b/arch/mips/kvm/stats.c @@ -1,13 +1,13 @@ /* -* This file is subject to the terms and conditions of the GNU General Public -* License. See the file "COPYING" in the main directory of this archive -* for more details. -* -* KVM/MIPS: COP0 access histogram -* -* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. -* Authors: Sanjay Lal <sanjayl@kymasys.com> -*/ + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * KVM/MIPS: COP0 access histogram + * + * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 
+ * Authors: Sanjay Lal <sanjayl@kymasys.com> + */ #include <linux/kvm_host.h> @@ -63,20 +63,18 @@ char *kvm_cop0_str[N_MIPS_COPROC_REGS] = { "DESAVE" }; -int kvm_mips_dump_stats(struct kvm_vcpu *vcpu) +void kvm_mips_dump_stats(struct kvm_vcpu *vcpu) { #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS int i, j; - printk("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id); + kvm_info("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id); for (i = 0; i < N_MIPS_COPROC_REGS; i++) { for (j = 0; j < N_MIPS_COPROC_SEL; j++) { if (vcpu->arch.cop0->stat[i][j]) - printk("%s[%d]: %lu\n", kvm_cop0_str[i], j, - vcpu->arch.cop0->stat[i][j]); + kvm_info("%s[%d]: %lu\n", kvm_cop0_str[i], j, + vcpu->arch.cop0->stat[i][j]); } } #endif - - return 0; } diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/tlb.c index 8a5a700ad8de..bbcd82242059 100644 --- a/arch/mips/kvm/kvm_tlb.c +++ b/arch/mips/kvm/tlb.c @@ -1,14 +1,14 @@ /* -* This file is subject to the terms and conditions of the GNU General Public -* License. See the file "COPYING" in the main directory of this archive -* for more details. -* -* KVM/MIPS TLB handling, this file is part of the Linux host kernel so that -* TLB handlers run from KSEG0 -* -* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. -* Authors: Sanjay Lal <sanjayl@kymasys.com> -*/ + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that + * TLB handlers run from KSEG0 + * + * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. + * Authors: Sanjay Lal <sanjayl@kymasys.com> + */ #include <linux/sched.h> #include <linux/smp.h> @@ -18,7 +18,6 @@ #include <linux/kvm_host.h> #include <linux/srcu.h> - #include <asm/cpu.h> #include <asm/bootinfo.h> #include <asm/mmu_context.h> @@ -39,13 +38,13 @@ atomic_t kvm_mips_instance; EXPORT_SYMBOL(kvm_mips_instance); /* These function pointers are initialized once the KVM module is loaded */ -pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn); +pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn); EXPORT_SYMBOL(kvm_mips_gfn_to_pfn); -void (*kvm_mips_release_pfn_clean) (pfn_t pfn); +void (*kvm_mips_release_pfn_clean)(pfn_t pfn); EXPORT_SYMBOL(kvm_mips_release_pfn_clean); -bool(*kvm_mips_is_error_pfn) (pfn_t pfn); +bool (*kvm_mips_is_error_pfn)(pfn_t pfn); EXPORT_SYMBOL(kvm_mips_is_error_pfn); uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) @@ -53,21 +52,17 @@ uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK; } - uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) { return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK; } -inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu) +inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu) { return vcpu->kvm->arch.commpage_tlb; } - -/* - * Structure defining an tlb entry data set. - */ +/* Structure defining an tlb entry data set. 
*/ void kvm_mips_dump_host_tlbs(void) { @@ -82,8 +77,8 @@ void kvm_mips_dump_host_tlbs(void) old_entryhi = read_c0_entryhi(); old_pagemask = read_c0_pagemask(); - printk("HOST TLBs:\n"); - printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK); + kvm_info("HOST TLBs:\n"); + kvm_info("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK); for (i = 0; i < current_cpu_data.tlbsize; i++) { write_c0_index(i); @@ -97,25 +92,26 @@ void kvm_mips_dump_host_tlbs(void) tlb.tlb_lo1 = read_c0_entrylo1(); tlb.tlb_mask = read_c0_pagemask(); - printk("TLB%c%3d Hi 0x%08lx ", - (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*', - i, tlb.tlb_hi); - printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ", - (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0), - (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ', - (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ', - (tlb.tlb_lo0 >> 3) & 7); - printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n", - (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1), - (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ', - (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ', - (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask); + kvm_info("TLB%c%3d Hi 0x%08lx ", + (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*', + i, tlb.tlb_hi); + kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ", + (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0), + (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ', + (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ', + (tlb.tlb_lo0 >> 3) & 7); + kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n", + (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1), + (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ', + (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ', + (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask); } write_c0_entryhi(old_entryhi); write_c0_pagemask(old_pagemask); mtc0_tlbw_hazard(); local_irq_restore(flags); } +EXPORT_SYMBOL(kvm_mips_dump_host_tlbs); void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu) { @@ -123,26 +119,27 @@ void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu) struct kvm_mips_tlb tlb; int i; - printk("Guest TLBs:\n"); - printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0)); + kvm_info("Guest TLBs:\n"); + kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0)); for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { tlb = vcpu->arch.guest_tlb[i]; - printk("TLB%c%3d Hi 0x%08lx ", - (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*', - i, tlb.tlb_hi); - printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ", - (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0), - (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ', - (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ', - (tlb.tlb_lo0 >> 3) & 7); - printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n", - (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1), - (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ', - (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ', - (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask); + kvm_info("TLB%c%3d Hi 0x%08lx ", + (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*', + i, tlb.tlb_hi); + kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ", + (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0), + (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ', + (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ', + (tlb.tlb_lo0 >> 3) & 7); + kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n", + (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1), + (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ', + (tlb.tlb_lo1 & MIPS3_PG_G) ? 
'G' : ' ', + (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask); } } +EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs); static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn) { @@ -152,7 +149,7 @@ static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn) if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE) return 0; - srcu_idx = srcu_read_lock(&kvm->srcu); + srcu_idx = srcu_read_lock(&kvm->srcu); pfn = kvm_mips_gfn_to_pfn(kvm, gfn); if (kvm_mips_is_error_pfn(pfn)) { @@ -169,7 +166,7 @@ out: /* Translate guest KSEG0 addresses to Host PA */ unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu, - unsigned long gva) + unsigned long gva) { gfn_t gfn; uint32_t offset = gva & ~PAGE_MASK; @@ -194,20 +191,20 @@ unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu, return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset; } +EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa); /* XXXKYMA: Must be called with interrupts disabled */ /* set flush_dcache_mask == 0 if no dcache flush required */ -int -kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi, - unsigned long entrylo0, unsigned long entrylo1, int flush_dcache_mask) +int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi, + unsigned long entrylo0, unsigned long entrylo1, + int flush_dcache_mask) { unsigned long flags; unsigned long old_entryhi; - volatile int idx; + int idx; local_irq_save(flags); - old_entryhi = read_c0_entryhi(); write_c0_entryhi(entryhi); mtc0_tlbw_hazard(); @@ -240,12 +237,14 @@ kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi, if (flush_dcache_mask) { if (entrylo0 & MIPS3_PG_V) { ++vcpu->stat.flush_dcache_exits; - flush_data_cache_page((entryhi & VPN2_MASK) & ~flush_dcache_mask); + flush_data_cache_page((entryhi & VPN2_MASK) & + ~flush_dcache_mask); } if (entrylo1 & MIPS3_PG_V) { ++vcpu->stat.flush_dcache_exits; - flush_data_cache_page(((entryhi & VPN2_MASK) & ~flush_dcache_mask) | - (0x1 << PAGE_SHIFT)); + flush_data_cache_page(((entryhi & VPN2_MASK) & + ~flush_dcache_mask) | + (0x1 << PAGE_SHIFT)); } } @@ -257,10 +256,9 @@ kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi, return 0; } - /* XXXKYMA: Must be called with interrupts disabled */ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, - struct kvm_vcpu *vcpu) + struct kvm_vcpu *vcpu) { gfn_t gfn; pfn_t pfn0, pfn1; @@ -270,7 +268,6 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, struct kvm *kvm = vcpu->kvm; const int flush_dcache_mask = 0; - if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) { kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr); kvm_mips_dump_host_tlbs(); @@ -302,14 +299,15 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, } entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu)); - entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) | - (0x1 << 1); - entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) | - (0x1 << 1); + entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | + (1 << 2) | (0x1 << 1); + entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | + (1 << 2) | (0x1 << 1); return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1, flush_dcache_mask); } +EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault); int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, struct kvm_vcpu *vcpu) @@ -318,11 +316,10 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, unsigned long flags, old_entryhi = 0, vaddr = 0; unsigned long 
entrylo0 = 0, entrylo1 = 0; - pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT; pfn1 = 0; - entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) | - (0x1 << 1); + entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | + (1 << 2) | (0x1 << 1); entrylo1 = 0; local_irq_save(flags); @@ -341,9 +338,9 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, mtc0_tlbw_hazard(); tlbw_use_hazard(); - kvm_debug ("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n", - vcpu->arch.pc, read_c0_index(), read_c0_entryhi(), - read_c0_entrylo0(), read_c0_entrylo1()); + kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n", + vcpu->arch.pc, read_c0_index(), read_c0_entryhi(), + read_c0_entrylo0(), read_c0_entrylo1()); /* Restore old ASID */ write_c0_entryhi(old_entryhi); @@ -353,28 +350,33 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, return 0; } +EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault); -int -kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, - struct kvm_mips_tlb *tlb, unsigned long *hpa0, unsigned long *hpa1) +int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, + struct kvm_mips_tlb *tlb, + unsigned long *hpa0, + unsigned long *hpa1) { unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0; struct kvm *kvm = vcpu->kvm; pfn_t pfn0, pfn1; - if ((tlb->tlb_hi & VPN2_MASK) == 0) { pfn0 = 0; pfn1 = 0; } else { - if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT) < 0) + if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) + >> PAGE_SHIFT) < 0) return -1; - if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT) < 0) + if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) + >> PAGE_SHIFT) < 0) return -1; - pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT]; - pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT]; + pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) + >> PAGE_SHIFT]; + pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) + >> PAGE_SHIFT]; } if (hpa0) @@ -385,11 +387,12 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, /* Get attributes from the Guest TLB */ entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ? 
- kvm_mips_get_kernel_asid(vcpu) : kvm_mips_get_user_asid(vcpu)); + kvm_mips_get_kernel_asid(vcpu) : + kvm_mips_get_user_asid(vcpu)); entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | - (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V); + (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V); entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | - (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V); + (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V); kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc, tlb->tlb_lo0, tlb->tlb_lo1); @@ -397,6 +400,7 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1, tlb->tlb_mask); } +EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault); int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi) { @@ -404,10 +408,9 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi) int index = -1; struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb; - for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { - if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) && - (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) { + if (TLB_HI_VPN2_HIT(tlb[i], entryhi) && + TLB_HI_ASID_HIT(tlb[i], entryhi)) { index = i; break; } @@ -418,21 +421,23 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi) return index; } +EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup); int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr) { unsigned long old_entryhi, flags; - volatile int idx; - + int idx; local_irq_save(flags); old_entryhi = read_c0_entryhi(); if (KVM_GUEST_KERNEL_MODE(vcpu)) - write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_kernel_asid(vcpu)); + write_c0_entryhi((vaddr & VPN2_MASK) | + kvm_mips_get_kernel_asid(vcpu)); else { - write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_user_asid(vcpu)); + write_c0_entryhi((vaddr & VPN2_MASK) | + kvm_mips_get_user_asid(vcpu)); } mtc0_tlbw_hazard(); @@ -452,6 +457,7 @@ int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr) return idx; } +EXPORT_SYMBOL(kvm_mips_host_tlb_lookup); int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va) { @@ -460,7 +466,6 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va) local_irq_save(flags); - old_entryhi = read_c0_entryhi(); write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu)); @@ -499,8 +504,9 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va) return 0; } +EXPORT_SYMBOL(kvm_mips_host_tlb_inv); -/* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID*/ +/* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID */ int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index) { unsigned long flags, old_entryhi; @@ -510,7 +516,6 @@ int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index) local_irq_save(flags); - old_entryhi = read_c0_entryhi(); write_c0_entryhi(UNIQUE_ENTRYHI(index)); @@ -546,7 +551,6 @@ void kvm_mips_flush_host_tlb(int skip_kseg0) int entry = 0; int maxentry = current_cpu_data.tlbsize; - local_irq_save(flags); old_entryhi = read_c0_entryhi(); @@ -554,7 +558,6 @@ void kvm_mips_flush_host_tlb(int skip_kseg0) /* Blast 'em all away. 
*/ for (entry = 0; entry < maxentry; entry++) { - write_c0_index(entry); mtc0_tlbw_hazard(); @@ -565,9 +568,8 @@ void kvm_mips_flush_host_tlb(int skip_kseg0) entryhi = read_c0_entryhi(); /* Don't blow away guest kernel entries */ - if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0) { + if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0) continue; - } } /* Make sure all entries differ. */ @@ -591,17 +593,17 @@ void kvm_mips_flush_host_tlb(int skip_kseg0) local_irq_restore(flags); } +EXPORT_SYMBOL(kvm_mips_flush_host_tlb); -void -kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, - struct kvm_vcpu *vcpu) +void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, + struct kvm_vcpu *vcpu) { unsigned long asid = asid_cache(cpu); - if (!((asid += ASID_INC) & ASID_MASK)) { - if (cpu_has_vtag_icache) { + asid += ASID_INC; + if (!(asid & ASID_MASK)) { + if (cpu_has_vtag_icache) flush_icache_all(); - } kvm_local_flush_tlb_all(); /* start new asid cycle */ @@ -639,6 +641,7 @@ void kvm_local_flush_tlb_all(void) local_irq_restore(flags); } +EXPORT_SYMBOL(kvm_local_flush_tlb_all); /** * kvm_mips_migrate_count() - Migrate timer. @@ -699,7 +702,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) } if (!newasid) { - /* If we preempted while the guest was executing, then reload the pre-empted ASID */ + /* + * If we preempted while the guest was executing, then reload + * the pre-empted ASID + */ if (current->flags & PF_VCPU) { write_c0_entryhi(vcpu->arch. preempt_entryhi & ASID_MASK); @@ -708,9 +714,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) } else { /* New ASIDs were allocated for the VM */ - /* Were we in guest context? If so then the pre-empted ASID is no longer - * valid, we need to set it to what it should be based on the mode of - * the Guest (Kernel/User) + /* + * Were we in guest context? 
If so then the pre-empted ASID is + * no longer valid, we need to set it to what it should be based + * on the mode of the Guest (Kernel/User) */ if (current->flags & PF_VCPU) { if (KVM_GUEST_KERNEL_MODE(vcpu)) @@ -728,6 +735,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) local_irq_restore(flags); } +EXPORT_SYMBOL(kvm_arch_vcpu_load); /* ASID can change if another task is scheduled during preemption */ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) @@ -739,7 +747,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) cpu = smp_processor_id(); - vcpu->arch.preempt_entryhi = read_c0_entryhi(); vcpu->arch.last_sched_cpu = cpu; @@ -754,11 +761,12 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) local_irq_restore(flags); } +EXPORT_SYMBOL(kvm_arch_vcpu_put); uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; - unsigned long paddr, flags; + unsigned long paddr, flags, vpn2, asid; uint32_t inst; int index; @@ -769,16 +777,12 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu) if (index >= 0) { inst = *(opc); } else { - index = - kvm_mips_guest_tlb_lookup(vcpu, - ((unsigned long) opc & VPN2_MASK) - | - (kvm_read_c0_guest_entryhi - (cop0) & ASID_MASK)); + vpn2 = (unsigned long) opc & VPN2_MASK; + asid = kvm_read_c0_guest_entryhi(cop0) & ASID_MASK; + index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid); if (index < 0) { - kvm_err - ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n", - __func__, opc, vcpu, read_c0_entryhi()); + kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n", + __func__, opc, vcpu, read_c0_entryhi()); kvm_mips_dump_host_tlbs(); local_irq_restore(flags); return KVM_INVALID_INST; @@ -793,7 +797,7 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu) } else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) { paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu, - (unsigned long) opc); + (unsigned long) opc); inst = *(uint32_t *) CKSEG0ADDR(paddr); } else { kvm_err("%s: illegal address: %p\n", __func__, opc); @@ -802,18 +806,4 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu) return inst; } - -EXPORT_SYMBOL(kvm_local_flush_tlb_all); -EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault); -EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault); -EXPORT_SYMBOL(kvm_mips_dump_host_tlbs); -EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault); -EXPORT_SYMBOL(kvm_mips_host_tlb_lookup); -EXPORT_SYMBOL(kvm_mips_flush_host_tlb); -EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup); -EXPORT_SYMBOL(kvm_mips_host_tlb_inv); -EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa); -EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs); EXPORT_SYMBOL(kvm_get_inst); -EXPORT_SYMBOL(kvm_arch_vcpu_load); -EXPORT_SYMBOL(kvm_arch_vcpu_put); diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h index bc9e0f406c08..c1388d40663b 100644 --- a/arch/mips/kvm/trace.h +++ b/arch/mips/kvm/trace.h @@ -1,11 +1,11 @@ /* -* This file is subject to the terms and conditions of the GNU General Public -* License. See the file "COPYING" in the main directory of this archive -* for more details. -* -* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. -* Authors: Sanjay Lal <sanjayl@kymasys.com> -*/ + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 
+ * Authors: Sanjay Lal <sanjayl@kymasys.com> + */ #if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_KVM_H @@ -17,9 +17,7 @@ #define TRACE_INCLUDE_PATH . #define TRACE_INCLUDE_FILE trace -/* - * Tracepoints for VM eists - */ +/* Tracepoints for VM eists */ extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES]; TRACE_EVENT(kvm_exit, diff --git a/arch/mips/kvm/kvm_trap_emul.c b/arch/mips/kvm/trap_emul.c index 693f952b2fbb..fd7257b70e65 100644 --- a/arch/mips/kvm/kvm_trap_emul.c +++ b/arch/mips/kvm/trap_emul.c @@ -1,13 +1,13 @@ /* -* This file is subject to the terms and conditions of the GNU General Public -* License. See the file "COPYING" in the main directory of this archive -* for more details. -* -* KVM/MIPS: Deliver/Emulate exceptions to the guest kernel -* -* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. -* Authors: Sanjay Lal <sanjayl@kymasys.com> -*/ + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel + * + * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. + * Authors: Sanjay Lal <sanjayl@kymasys.com> + */ #include <linux/errno.h> #include <linux/err.h> @@ -16,8 +16,8 @@ #include <linux/kvm_host.h> -#include "kvm_mips_opcode.h" -#include "kvm_mips_int.h" +#include "opcode.h" +#include "interrupt.h" static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva) { @@ -27,7 +27,7 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva) if ((kseg == CKSEG0) || (kseg == CKSEG1)) gpa = CPHYSADDR(gva); else { - printk("%s: cannot find GPA for GVA: %#lx\n", __func__, gva); + kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva); kvm_mips_dump_host_tlbs(); gpa = KVM_INVALID_ADDR; } @@ -37,7 +37,6 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva) return gpa; } - static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; @@ -46,9 +45,9 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) enum emulation_result er = EMULATE_DONE; int ret = RESUME_GUEST; - if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) { + if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu); - } else + else er = kvm_mips_emulate_inst(cause, opc, run, vcpu); switch (er) { @@ -83,9 +82,8 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu) if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { - kvm_debug - ("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n", - cause, opc, badvaddr); + kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n", + cause, opc, badvaddr); er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu); if (er == EMULATE_DONE) @@ -95,20 +93,20 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu) ret = RESUME_HOST; } } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { - /* XXXKYMA: The guest kernel does not expect to get this fault when we are not - * using HIGHMEM. Need to address this in a HIGHMEM kernel + /* + * XXXKYMA: The guest kernel does not expect to get this fault + * when we are not using HIGHMEM. 
Need to address this in a + * HIGHMEM kernel */ - printk - ("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n", - cause, opc, badvaddr); + kvm_err("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n", + cause, opc, badvaddr); kvm_mips_dump_host_tlbs(); kvm_arch_vcpu_dump_regs(vcpu); run->exit_reason = KVM_EXIT_INTERNAL_ERROR; ret = RESUME_HOST; } else { - printk - ("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n", - cause, opc, badvaddr); + kvm_err("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n", + cause, opc, badvaddr); kvm_mips_dump_host_tlbs(); kvm_arch_vcpu_dump_regs(vcpu); run->exit_reason = KVM_EXIT_INTERNAL_ERROR; @@ -134,9 +132,8 @@ static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu) } } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { - kvm_debug - ("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n", - cause, opc, badvaddr); + kvm_debug("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n", + cause, opc, badvaddr); er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu); if (er == EMULATE_DONE) ret = RESUME_GUEST; @@ -145,8 +142,9 @@ static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu) ret = RESUME_HOST; } } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { - /* All KSEG0 faults are handled by KVM, as the guest kernel does not - * expect to ever get them + /* + * All KSEG0 faults are handled by KVM, as the guest kernel does + * not expect to ever get them */ if (kvm_mips_handle_kseg0_tlb_fault (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) { @@ -154,9 +152,8 @@ static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu) ret = RESUME_HOST; } } else { - kvm_err - ("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n", - cause, opc, badvaddr); + kvm_err("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n", + cause, opc, badvaddr); kvm_mips_dump_host_tlbs(); kvm_arch_vcpu_dump_regs(vcpu); run->exit_reason = KVM_EXIT_INTERNAL_ERROR; @@ -185,11 +182,14 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu) kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n", vcpu->arch.pc, badvaddr); - /* User Address (UA) fault, this could happen if - * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this - * case we pass on the fault to the guest kernel and let it handle it. - * (2) TLB entry is present in the Guest TLB but not in the shadow, in this - * case we inject the TLB from the Guest TLB into the shadow host TLB + /* + * User Address (UA) fault, this could happen if + * (1) TLB entry not present/valid in both Guest and shadow host + * TLBs, in this case we pass on the fault to the guest + * kernel and let it handle it. 
+ * (2) TLB entry is present in the Guest TLB but not in the + * shadow, in this case we inject the TLB from the Guest TLB + * into the shadow host TLB */ er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu); @@ -206,9 +206,8 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu) ret = RESUME_HOST; } } else { - printk - ("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n", - cause, opc, badvaddr); + kvm_err("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n", + cause, opc, badvaddr); kvm_mips_dump_host_tlbs(); kvm_arch_vcpu_dump_regs(vcpu); run->exit_reason = KVM_EXIT_INTERNAL_ERROR; @@ -231,7 +230,7 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu) kvm_debug("Emulate Store to MMIO space\n"); er = kvm_mips_emulate_inst(cause, opc, run, vcpu); if (er == EMULATE_FAIL) { - printk("Emulate Store to MMIO space failed\n"); + kvm_err("Emulate Store to MMIO space failed\n"); run->exit_reason = KVM_EXIT_INTERNAL_ERROR; ret = RESUME_HOST; } else { @@ -239,9 +238,8 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu) ret = RESUME_HOST; } } else { - printk - ("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n", - cause, opc, badvaddr); + kvm_err("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n", + cause, opc, badvaddr); run->exit_reason = KVM_EXIT_INTERNAL_ERROR; ret = RESUME_HOST; } @@ -261,7 +259,7 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu) kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr); er = kvm_mips_emulate_inst(cause, opc, run, vcpu); if (er == EMULATE_FAIL) { - printk("Emulate Load from MMIO space failed\n"); + kvm_err("Emulate Load from MMIO space failed\n"); run->exit_reason = KVM_EXIT_INTERNAL_ERROR; ret = RESUME_HOST; } else { @@ -269,9 +267,8 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu) ret = RESUME_HOST; } } else { - printk - ("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n", - cause, opc, badvaddr); + kvm_err("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n", + cause, opc, badvaddr); run->exit_reason = KVM_EXIT_INTERNAL_ERROR; ret = RESUME_HOST; er = EMULATE_FAIL; @@ -349,9 +346,9 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu) uint32_t config1; int vcpu_id = vcpu->vcpu_id; - /* Arch specific stuff, set up config registers properly so that the - * guest will come up as expected, for now we simulate a - * MIPS 24kc + /* + * Arch specific stuff, set up config registers properly so that the + * guest will come up as expected, for now we simulate a MIPS 24kc */ kvm_write_c0_guest_prid(cop0, 0x00019300); kvm_write_c0_guest_config(cop0, @@ -373,14 +370,15 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu) kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2); /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */ - kvm_write_c0_guest_config3(cop0, - MIPS_CONFIG3 | (0 << CP0C3_VInt) | (1 << - CP0C3_ULRI)); + kvm_write_c0_guest_config3(cop0, MIPS_CONFIG3 | (0 << CP0C3_VInt) | + (1 << CP0C3_ULRI)); /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */ kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10)); - /* Setup IntCtl defaults, compatibilty mode for timer interrupts (HW5) */ + /* + * Setup IntCtl defaults, compatibilty mode for timer interrupts (HW5) + */ kvm_write_c0_guest_intctl(cop0, 0xFC000000); /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */ diff --git a/arch/mips/lantiq/Kconfig b/arch/mips/lantiq/Kconfig index c0021912131e..e10d33342b30 100644 --- 
a/arch/mips/lantiq/Kconfig +++ b/arch/mips/lantiq/Kconfig @@ -30,6 +30,7 @@ choice config DT_EASY50712 bool "Easy50712" depends on SOC_XWAY + select BUILTIN_DTB endchoice config PCI_LANTIQ diff --git a/arch/mips/lantiq/Makefile b/arch/mips/lantiq/Makefile index d6bdc579419f..690257ab86d6 100644 --- a/arch/mips/lantiq/Makefile +++ b/arch/mips/lantiq/Makefile @@ -6,8 +6,6 @@ obj-y := irq.o clk.o prom.o -obj-y += dts/ - obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_SOC_TYPE_XWAY) += xway/ diff --git a/arch/mips/lantiq/dts/Makefile b/arch/mips/lantiq/dts/Makefile deleted file mode 100644 index 6fa72dd641b2..000000000000 --- a/arch/mips/lantiq/dts/Makefile +++ /dev/null @@ -1 +0,0 @@ -obj-$(CONFIG_DT_EASY50712) := easy50712.dtb.o diff --git a/arch/mips/lantiq/falcon/sysctrl.c b/arch/mips/lantiq/falcon/sysctrl.c index 8f1866d8124d..468ffa043607 100644 --- a/arch/mips/lantiq/falcon/sysctrl.c +++ b/arch/mips/lantiq/falcon/sysctrl.c @@ -221,7 +221,7 @@ void __init ltq_soc_init(void) (request_mem_region(res_sys[2].start, resource_size(&res_sys[2]), res_sys[2].name) < 0)) - pr_err("Failed to request core reources"); + pr_err("Failed to request core resources"); status_membase = ioremap_nocache(res_status.start, resource_size(&res_status)); diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c index 51804b10a036..2b15491de494 100644 --- a/arch/mips/lantiq/xway/sysctrl.c +++ b/arch/mips/lantiq/xway/sysctrl.c @@ -318,7 +318,7 @@ void __init ltq_soc_init(void) res_cgu.name) < 0) || (request_mem_region(res_ebu.start, resource_size(&res_ebu), res_ebu.name) < 0)) - pr_err("Failed to request core reources"); + pr_err("Failed to request core resources"); pmu_membase = ioremap_nocache(res_pmu.start, resource_size(&res_pmu)); ltq_cgu_membase = ioremap_nocache(res_cgu.start, diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S index 9901237563c5..4c721e247ac9 100644 --- a/arch/mips/lib/csum_partial.S +++ b/arch/mips/lib/csum_partial.S @@ -277,9 +277,12 @@ LEAF(csum_partial) #endif /* odd buffer alignment? */ -#ifdef CONFIG_CPU_MIPSR2 +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_LOONGSON3) + .set push + .set arch=mips32r2 wsbh v1, sum movn sum, v1, t7 + .set pop #else beqz t7, 1f /* odd buffer alignment? */ lui v1, 0x00ff @@ -726,9 +729,12 @@ LEAF(csum_partial) addu sum, v1 #endif -#ifdef CONFIG_CPU_MIPSR2 +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_LOONGSON3) + .set push + .set arch=mips32r2 wsbh v1, sum movn sum, v1, odd + .set pop #else beqz odd, 1f /* odd buffer alignment? */ lui v1, 0x00ff diff --git a/arch/mips/loongson/Kconfig b/arch/mips/loongson/Kconfig index e6a86ccc4421..1b91fc6a921b 100644 --- a/arch/mips/loongson/Kconfig +++ b/arch/mips/loongson/Kconfig @@ -60,8 +60,8 @@ config LEMOTE_MACH2F These family machines include fuloong2f mini PC, yeeloong2f notebook, LingLoong allinone PC and so forth. 
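Aside on the Loongson hunks that follow: the former Lemote-only LEMOTE_MACH3A option becomes the generic LOONGSON_MACH3X and gains SYS_SUPPORTS_NUMA. The firmware-reported CPU count is rounded up to whole nodes, and the new loongson-3/numa.c assigns node distances in three tiers: 0 for a node to itself, 40 between the two nodes of one Loongson-3B package, and 100 across packages. The short C sketch below mirrors compute_node_distance() and the nr_nodes calculation from these hunks; the constants are the Loongson-3B values that the real code reads from LEFI firmware data, and the standalone-program framing and helper names are illustrative only, not part of the patch.

#include <stdio.h>

/* Illustrative constants; the patch fills these from firmware (env.c). */
static const int cores_per_node = 4;     /* Loongson-3A and 3B */
static const int cores_per_package = 8;  /* a 3B package holds two nodes */

/* Mirrors compute_node_distance() from the new numa.c. */
static int node_distance(int a, int b)
{
        int pkg_a = a * cores_per_node / cores_per_package;
        int pkg_b = b * cores_per_node / cores_per_package;

        if (a == b)
                return 0;       /* same node */
        if (pkg_a == pkg_b)
                return 40;      /* sibling node within one package */
        return 100;             /* remote package */
}

int main(void)
{
        int nr_cpus = 16;       /* example: two fully populated 3B packages */
        /* Round up so a partially populated node still counts (env.c). */
        int nr_nodes = (nr_cpus + cores_per_node - 1) / cores_per_node;

        for (int i = 0; i < nr_nodes; i++) {
                for (int j = 0; j < nr_nodes; j++)
                        printf("%4d", node_distance(i, j));
                printf("\n");
        }
        return 0;
}

For 16 CPUs this prints a 4x4 matrix with 0 on the diagonal, 40 between node pairs sharing a package, and 100 elsewhere, which is the topology the SLIT-style __node_distances table in numa.c ends up describing.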
-config LEMOTE_MACH3A - bool "Lemote Loongson 3A family machines" +config LOONGSON_MACH3X + bool "Generic Loongson 3 family machines" select ARCH_SPARSEMEM_ENABLE select GENERIC_ISA_DMA_SUPPORT_BROKEN select BOOT_ELF32 @@ -79,6 +79,7 @@ config LEMOTE_MACH3A select SYS_HAS_EARLY_PRINTK select SYS_SUPPORTS_SMP select SYS_SUPPORTS_HOTPLUG_CPU + select SYS_SUPPORTS_NUMA select SYS_SUPPORTS_64BIT_KERNEL select SYS_SUPPORTS_HIGHMEM select SYS_SUPPORTS_LITTLE_ENDIAN @@ -86,8 +87,8 @@ config LEMOTE_MACH3A select ZONE_DMA32 select LEFI_FIRMWARE_INTERFACE help - Lemote Loongson 3A family machines utilize the 3A revision of - Loongson processor and RS780/SBX00 chipset. + Generic Loongson 3 family machines utilize the 3A/3B revision + of Loongson processor and RS780/SBX00 chipset. endchoice config CS5536 diff --git a/arch/mips/loongson/Platform b/arch/mips/loongson/Platform index 6205372b6c2d..0ac20eb84ecc 100644 --- a/arch/mips/loongson/Platform +++ b/arch/mips/loongson/Platform @@ -30,4 +30,4 @@ platform-$(CONFIG_MACH_LOONGSON) += loongson/ cflags-$(CONFIG_MACH_LOONGSON) += -I$(srctree)/arch/mips/include/asm/mach-loongson -mno-branch-likely load-$(CONFIG_LEMOTE_FULOONG2E) += 0xffffffff80100000 load-$(CONFIG_LEMOTE_MACH2F) += 0xffffffff80200000 -load-$(CONFIG_CPU_LOONGSON3) += 0xffffffff80200000 +load-$(CONFIG_LOONGSON_MACH3X) += 0xffffffff80200000 diff --git a/arch/mips/loongson/common/env.c b/arch/mips/loongson/common/env.c index 0c543eae49bf..f15228550a22 100644 --- a/arch/mips/loongson/common/env.c +++ b/arch/mips/loongson/common/env.c @@ -27,6 +27,12 @@ EXPORT_SYMBOL(cpu_clock_freq); struct efi_memory_map_loongson *loongson_memmap; struct loongson_system_configuration loongson_sysconf; +u64 loongson_chipcfg[MAX_PACKAGES] = {0xffffffffbfc00180}; +u64 loongson_freqctrl[MAX_PACKAGES]; + +unsigned long long smp_group[4]; +int cpuhotplug_workaround = 0; + #define parse_even_earlier(res, option, p) \ do { \ unsigned int tmp __maybe_unused; \ @@ -77,9 +83,47 @@ void __init prom_init_env(void) cpu_clock_freq = ecpu->cpu_clock_freq; loongson_sysconf.cputype = ecpu->cputype; + if (ecpu->cputype == Loongson_3A) { + loongson_sysconf.cores_per_node = 4; + loongson_sysconf.cores_per_package = 4; + smp_group[0] = 0x900000003ff01000; + smp_group[1] = 0x900010003ff01000; + smp_group[2] = 0x900020003ff01000; + smp_group[3] = 0x900030003ff01000; + loongson_chipcfg[0] = 0x900000001fe00180; + loongson_chipcfg[1] = 0x900010001fe00180; + loongson_chipcfg[2] = 0x900020001fe00180; + loongson_chipcfg[3] = 0x900030001fe00180; + loongson_sysconf.ht_control_base = 0x90000EFDFB000000; + } else if (ecpu->cputype == Loongson_3B) { + loongson_sysconf.cores_per_node = 4; /* One chip has 2 nodes */ + loongson_sysconf.cores_per_package = 8; + smp_group[0] = 0x900000003ff01000; + smp_group[1] = 0x900010003ff05000; + smp_group[2] = 0x900020003ff09000; + smp_group[3] = 0x900030003ff0d000; + loongson_chipcfg[0] = 0x900000001fe00180; + loongson_chipcfg[1] = 0x900020001fe00180; + loongson_chipcfg[2] = 0x900040001fe00180; + loongson_chipcfg[3] = 0x900060001fe00180; + loongson_freqctrl[0] = 0x900000001fe001d0; + loongson_freqctrl[1] = 0x900020001fe001d0; + loongson_freqctrl[2] = 0x900040001fe001d0; + loongson_freqctrl[3] = 0x900060001fe001d0; + loongson_sysconf.ht_control_base = 0x90001EFDFB000000; + cpuhotplug_workaround = 1; + } else { + loongson_sysconf.cores_per_node = 1; + loongson_sysconf.cores_per_package = 1; + loongson_chipcfg[0] = 0x900000001fe00180; + } + loongson_sysconf.nr_cpus = ecpu->nr_cpus; if (ecpu->nr_cpus > NR_CPUS || 
ecpu->nr_cpus == 0) loongson_sysconf.nr_cpus = NR_CPUS; + loongson_sysconf.nr_nodes = (loongson_sysconf.nr_cpus + + loongson_sysconf.cores_per_node - 1) / + loongson_sysconf.cores_per_node; loongson_sysconf.pci_mem_start_addr = eirq_source->pci_mem_start_addr; loongson_sysconf.pci_mem_end_addr = eirq_source->pci_mem_end_addr; @@ -93,7 +137,6 @@ void __init prom_init_env(void) loongson_sysconf.poweroff_addr = boot_p->reset_system.Shutdown; loongson_sysconf.suspend_addr = boot_p->reset_system.DoSuspend; - loongson_sysconf.ht_control_base = 0x90000EFDFB000000; loongson_sysconf.vgabios_addr = boot_p->efi.smbios.vga_bios; pr_debug("Shutdown Addr: %llx, Restart Addr: %llx, VBIOS Addr: %llx\n", loongson_sysconf.poweroff_addr, loongson_sysconf.restart_addr, @@ -111,6 +154,10 @@ void __init prom_init_env(void) case PRID_REV_LOONGSON3A: cpu_clock_freq = 900000000; break; + case PRID_REV_LOONGSON3B_R1: + case PRID_REV_LOONGSON3B_R2: + cpu_clock_freq = 1000000000; + break; default: cpu_clock_freq = 100000000; break; diff --git a/arch/mips/loongson/common/init.c b/arch/mips/loongson/common/init.c index f37fe5413b73..f6af3aba4c86 100644 --- a/arch/mips/loongson/common/init.c +++ b/arch/mips/loongson/common/init.c @@ -30,7 +30,11 @@ void __init prom_init(void) set_io_port_base((unsigned long) ioremap(LOONGSON_PCIIO_BASE, LOONGSON_PCIIO_SIZE)); +#ifdef CONFIG_NUMA + prom_init_numa_memory(); +#else prom_init_memory(); +#endif /*init the uart base address */ prom_init_uart_base(); diff --git a/arch/mips/loongson/common/pm.c b/arch/mips/loongson/common/pm.c index f55e07aee071..a6b67ccfc811 100644 --- a/arch/mips/loongson/common/pm.c +++ b/arch/mips/loongson/common/pm.c @@ -79,7 +79,7 @@ int __weak wakeup_loongson(void) static void wait_for_wakeup_events(void) { while (!wakeup_loongson()) - LOONGSON_CHIPCFG0 &= ~0x7; + LOONGSON_CHIPCFG(0) &= ~0x7; } /* @@ -102,15 +102,15 @@ static void loongson_suspend_enter(void) stop_perf_counters(); - cached_cpu_freq = LOONGSON_CHIPCFG0; + cached_cpu_freq = LOONGSON_CHIPCFG(0); /* Put CPU into wait mode */ - LOONGSON_CHIPCFG0 &= ~0x7; + LOONGSON_CHIPCFG(0) &= ~0x7; /* wait for the given events to wakeup cpu from wait mode */ wait_for_wakeup_events(); - LOONGSON_CHIPCFG0 = cached_cpu_freq; + LOONGSON_CHIPCFG(0) = cached_cpu_freq; mmiowb(); } diff --git a/arch/mips/loongson/lemote-2f/clock.c b/arch/mips/loongson/lemote-2f/clock.c index 1eed38e28b1e..a217061beee3 100644 --- a/arch/mips/loongson/lemote-2f/clock.c +++ b/arch/mips/loongson/lemote-2f/clock.c @@ -114,9 +114,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate) clk->rate = rate; - regval = LOONGSON_CHIPCFG0; + regval = LOONGSON_CHIPCFG(0); regval = (regval & ~0x7) | (pos->driver_data - 1); - LOONGSON_CHIPCFG0 = regval; + LOONGSON_CHIPCFG(0) = regval; return ret; } diff --git a/arch/mips/loongson/lemote-2f/reset.c b/arch/mips/loongson/lemote-2f/reset.c index 90962a3a1731..79ac694fe744 100644 --- a/arch/mips/loongson/lemote-2f/reset.c +++ b/arch/mips/loongson/lemote-2f/reset.c @@ -28,7 +28,7 @@ static void reset_cpu(void) * reset cpu to full speed, this is needed when enabling cpu frequency * scalling */ - LOONGSON_CHIPCFG0 |= 0x7; + LOONGSON_CHIPCFG(0) |= 0x7; } /* reset support for fuloong2f */ diff --git a/arch/mips/loongson/loongson-3/Makefile b/arch/mips/loongson/loongson-3/Makefile index 70152b252ddc..b4df775b9f30 100644 --- a/arch/mips/loongson/loongson-3/Makefile +++ b/arch/mips/loongson/loongson-3/Makefile @@ -1,6 +1,8 @@ # # Makefile for Loongson-3 family machines # -obj-y += irq.o +obj-y += irq.o 
cop2-ex.o obj-$(CONFIG_SMP) += smp.o + +obj-$(CONFIG_NUMA) += numa.o diff --git a/arch/mips/loongson/loongson-3/cop2-ex.c b/arch/mips/loongson/loongson-3/cop2-ex.c new file mode 100644 index 000000000000..b03e37d2071a --- /dev/null +++ b/arch/mips/loongson/loongson-3/cop2-ex.c @@ -0,0 +1,63 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2014 Lemote Corporation. + * written by Huacai Chen <chenhc@lemote.com> + * + * based on arch/mips/cavium-octeon/cpu.c + * Copyright (C) 2009 Wind River Systems, + * written by Ralf Baechle <ralf@linux-mips.org> + */ +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/notifier.h> + +#include <asm/fpu.h> +#include <asm/cop2.h> +#include <asm/current.h> +#include <asm/mipsregs.h> + +static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action, + void *data) +{ + int fpu_owned; + int fr = !test_thread_flag(TIF_32BIT_FPREGS); + + switch (action) { + case CU2_EXCEPTION: + preempt_disable(); + fpu_owned = __is_fpu_owner(); + if (!fr) + set_c0_status(ST0_CU1 | ST0_CU2); + else + set_c0_status(ST0_CU1 | ST0_CU2 | ST0_FR); + enable_fpu_hazard(); + KSTK_STATUS(current) |= (ST0_CU1 | ST0_CU2); + if (fr) + KSTK_STATUS(current) |= ST0_FR; + else + KSTK_STATUS(current) &= ~ST0_FR; + /* If FPU is owned, we needn't init or restore fp */ + if (!fpu_owned) { + set_thread_flag(TIF_USEDFPU); + if (!used_math()) { + _init_fpu(); + set_used_math(); + } else + _restore_fp(current); + } + preempt_enable(); + + return NOTIFY_STOP; /* Don't call default notifier */ + } + + return NOTIFY_OK; /* Let default notifier send signals */ +} + +static int __init loongson_cu2_setup(void) +{ + return cu2_notifier(loongson_cu2_call, 0); +} +early_initcall(loongson_cu2_setup); diff --git a/arch/mips/loongson/loongson-3/irq.c b/arch/mips/loongson/loongson-3/irq.c index f240828181ff..ca1c62af5188 100644 --- a/arch/mips/loongson/loongson-3/irq.c +++ b/arch/mips/loongson/loongson-3/irq.c @@ -7,6 +7,8 @@ #include <asm/i8259.h> #include <asm/mipsregs.h> +#include "smp.h" + unsigned int ht_irq[] = {1, 3, 4, 5, 6, 7, 8, 12, 14, 15}; static void ht_irqdispatch(void) @@ -53,9 +55,15 @@ static inline void mask_loongson_irq(struct irq_data *d) /* Workaround: UART IRQ may deliver to any core */ if (d->irq == LOONGSON_UART_IRQ) { int cpu = smp_processor_id(); - - LOONGSON_INT_ROUTER_INTENCLR = 1 << 10; - LOONGSON_INT_ROUTER_LPC = 0x10 + (1<<cpu); + int node_id = cpu / loongson_sysconf.cores_per_node; + int core_id = cpu % loongson_sysconf.cores_per_node; + u64 intenclr_addr = smp_group[node_id] | + (u64)(&LOONGSON_INT_ROUTER_INTENCLR); + u64 introuter_lpc_addr = smp_group[node_id] | + (u64)(&LOONGSON_INT_ROUTER_LPC); + + *(volatile u32 *)intenclr_addr = 1 << 10; + *(volatile u8 *)introuter_lpc_addr = 0x10 + (1<<core_id); } } @@ -64,9 +72,15 @@ static inline void unmask_loongson_irq(struct irq_data *d) /* Workaround: UART IRQ may deliver to any core */ if (d->irq == LOONGSON_UART_IRQ) { int cpu = smp_processor_id(); - - LOONGSON_INT_ROUTER_INTENSET = 1 << 10; - LOONGSON_INT_ROUTER_LPC = 0x10 + (1<<cpu); + int node_id = cpu / loongson_sysconf.cores_per_node; + int core_id = cpu % loongson_sysconf.cores_per_node; + u64 intenset_addr = smp_group[node_id] | + (u64)(&LOONGSON_INT_ROUTER_INTENSET); + u64 introuter_lpc_addr = smp_group[node_id] | + (u64)(&LOONGSON_INT_ROUTER_LPC); + + *(volatile u32 *)intenset_addr = 1 << 10; + 
*(volatile u8 *)introuter_lpc_addr = 0x10 + (1<<core_id); } set_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE)); diff --git a/arch/mips/loongson/loongson-3/numa.c b/arch/mips/loongson/loongson-3/numa.c new file mode 100644 index 000000000000..37ed184398c6 --- /dev/null +++ b/arch/mips/loongson/loongson-3/numa.c @@ -0,0 +1,289 @@ +/* + * Copyright (C) 2010 Loongson Inc. & Lemote Inc. & + * Insititute of Computing Technology + * Author: Xiang Gao, gaoxiang@ict.ac.cn + * Huacai Chen, chenhc@lemote.com + * Xiaofu Meng, Shuangshuang Zhang + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/mmzone.h> +#include <linux/module.h> +#include <linux/nodemask.h> +#include <linux/swap.h> +#include <linux/memblock.h> +#include <linux/bootmem.h> +#include <linux/pfn.h> +#include <linux/highmem.h> +#include <asm/page.h> +#include <asm/pgalloc.h> +#include <asm/sections.h> +#include <linux/irq.h> +#include <asm/bootinfo.h> +#include <asm/mc146818-time.h> +#include <asm/time.h> +#include <asm/wbflush.h> +#include <boot_param.h> + +static struct node_data prealloc__node_data[MAX_NUMNODES]; +unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES]; +struct node_data *__node_data[MAX_NUMNODES]; +EXPORT_SYMBOL(__node_data); + +static void enable_lpa(void) +{ + unsigned long value; + + value = __read_32bit_c0_register($16, 3); + value |= 0x00000080; + __write_32bit_c0_register($16, 3, value); + value = __read_32bit_c0_register($16, 3); + pr_info("CP0_Config3: CP0 16.3 (0x%lx)\n", value); + + value = __read_32bit_c0_register($5, 1); + value |= 0x20000000; + __write_32bit_c0_register($5, 1, value); + value = __read_32bit_c0_register($5, 1); + pr_info("CP0_PageGrain: CP0 5.1 (0x%lx)\n", value); +} + +static void cpu_node_probe(void) +{ + int i; + + nodes_clear(node_possible_map); + nodes_clear(node_online_map); + for (i = 0; i < loongson_sysconf.nr_nodes; i++) { + node_set_state(num_online_nodes(), N_POSSIBLE); + node_set_online(num_online_nodes()); + } + + pr_info("NUMA: Discovered %d cpus on %d nodes\n", + loongson_sysconf.nr_cpus, num_online_nodes()); +} + +static int __init compute_node_distance(int row, int col) +{ + int package_row = row * loongson_sysconf.cores_per_node / + loongson_sysconf.cores_per_package; + int package_col = col * loongson_sysconf.cores_per_node / + loongson_sysconf.cores_per_package; + + if (col == row) + return 0; + else if (package_row == package_col) + return 40; + else + return 100; +} + +static void __init init_topology_matrix(void) +{ + int row, col; + + for (row = 0; row < MAX_NUMNODES; row++) + for (col = 0; col < MAX_NUMNODES; col++) + __node_distances[row][col] = -1; + + for_each_online_node(row) { + for_each_online_node(col) { + __node_distances[row][col] = + compute_node_distance(row, col); + } + } +} + +static unsigned long nid_to_addroffset(unsigned int nid) +{ + unsigned long result; + switch (nid) { + case 0: + default: + result = NODE0_ADDRSPACE_OFFSET; + break; + case 1: + result = NODE1_ADDRSPACE_OFFSET; + break; + case 2: + result = NODE2_ADDRSPACE_OFFSET; + break; + case 3: + result = NODE3_ADDRSPACE_OFFSET; + break; + } + return result; +} + +static void __init szmem(unsigned int node) +{ + u32 i, mem_type; + static unsigned long num_physpages = 0; + u64 node_id, 
node_psize, start_pfn, end_pfn, mem_start, mem_size; + + /* Parse memory information and activate */ + for (i = 0; i < loongson_memmap->nr_map; i++) { + node_id = loongson_memmap->map[i].node_id; + if (node_id != node) + continue; + + mem_type = loongson_memmap->map[i].mem_type; + mem_size = loongson_memmap->map[i].mem_size; + mem_start = loongson_memmap->map[i].mem_start; + + switch (mem_type) { + case SYSTEM_RAM_LOW: + start_pfn = ((node_id << 44) + mem_start) >> PAGE_SHIFT; + node_psize = (mem_size << 20) >> PAGE_SHIFT; + end_pfn = start_pfn + node_psize; + num_physpages += node_psize; + pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n", + (u32)node_id, mem_type, mem_start, mem_size); + pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n", + start_pfn, end_pfn, num_physpages); + add_memory_region((node_id << 44) + mem_start, + (u64)mem_size << 20, BOOT_MEM_RAM); + memblock_add_node(PFN_PHYS(start_pfn), + PFN_PHYS(end_pfn - start_pfn), node); + break; + case SYSTEM_RAM_HIGH: + start_pfn = ((node_id << 44) + mem_start) >> PAGE_SHIFT; + node_psize = (mem_size << 20) >> PAGE_SHIFT; + end_pfn = start_pfn + node_psize; + num_physpages += node_psize; + pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n", + (u32)node_id, mem_type, mem_start, mem_size); + pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n", + start_pfn, end_pfn, num_physpages); + add_memory_region((node_id << 44) + mem_start, + (u64)mem_size << 20, BOOT_MEM_RAM); + memblock_add_node(PFN_PHYS(start_pfn), + PFN_PHYS(end_pfn - start_pfn), node); + break; + case MEM_RESERVED: + pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n", + (u32)node_id, mem_type, mem_start, mem_size); + add_memory_region((node_id << 44) + mem_start, + (u64)mem_size << 20, BOOT_MEM_RESERVED); + memblock_reserve(((node_id << 44) + mem_start), + mem_size << 20); + break; + } + } +} + +static void __init node_mem_init(unsigned int node) +{ + unsigned long bootmap_size; + unsigned long node_addrspace_offset; + unsigned long start_pfn, end_pfn, freepfn; + + node_addrspace_offset = nid_to_addroffset(node); + pr_info("Node%d's addrspace_offset is 0x%lx\n", + node, node_addrspace_offset); + + get_pfn_range_for_nid(node, &start_pfn, &end_pfn); + freepfn = start_pfn; + if (node == 0) + freepfn = PFN_UP(__pa_symbol(&_end)); /* kernel end address */ + pr_info("Node%d: start_pfn=0x%lx, end_pfn=0x%lx, freepfn=0x%lx\n", + node, start_pfn, end_pfn, freepfn); + + __node_data[node] = prealloc__node_data + node; + + NODE_DATA(node)->bdata = &bootmem_node_data[node]; + NODE_DATA(node)->node_start_pfn = start_pfn; + NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn; + + bootmap_size = init_bootmem_node(NODE_DATA(node), freepfn, + start_pfn, end_pfn); + free_bootmem_with_active_regions(node, end_pfn); + if (node == 0) /* used by finalize_initrd() */ + max_low_pfn = end_pfn; + + /* This is reserved for the kernel and bdata->node_bootmem_map */ + reserve_bootmem_node(NODE_DATA(node), start_pfn << PAGE_SHIFT, + ((freepfn - start_pfn) << PAGE_SHIFT) + bootmap_size, + BOOTMEM_DEFAULT); + + if (node == 0 && node_end_pfn(0) >= (0xffffffff >> PAGE_SHIFT)) { + /* Reserve 0xff800000~0xffffffff for RS780E integrated GPU */ + reserve_bootmem_node(NODE_DATA(node), + (node_addrspace_offset | 0xff800000), + 8 << 20, BOOTMEM_DEFAULT); + } + + sparse_memory_present_with_active_regions(node); +} + +static __init void prom_meminit(void) +{ + unsigned int node, cpu; + + cpu_node_probe(); + 
init_topology_matrix(); + + for (node = 0; node < loongson_sysconf.nr_nodes; node++) { + if (node_online(node)) { + szmem(node); + node_mem_init(node); + cpus_clear(__node_data[(node)]->cpumask); + } + } + for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) { + node = cpu / loongson_sysconf.cores_per_node; + if (node >= num_online_nodes()) + node = 0; + pr_info("NUMA: set cpumask cpu %d on node %d\n", cpu, node); + cpu_set(cpu, __node_data[(node)]->cpumask); + } +} + +void __init paging_init(void) +{ + unsigned node; + unsigned long zones_size[MAX_NR_ZONES] = {0, }; + + pagetable_init(); + + for_each_online_node(node) { + unsigned long start_pfn, end_pfn; + + get_pfn_range_for_nid(node, &start_pfn, &end_pfn); + + if (end_pfn > max_low_pfn) + max_low_pfn = end_pfn; + } +#ifdef CONFIG_ZONE_DMA32 + zones_size[ZONE_DMA32] = MAX_DMA32_PFN; +#endif + zones_size[ZONE_NORMAL] = max_low_pfn; + free_area_init_nodes(zones_size); +} + +void __init mem_init(void) +{ + high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT); + free_all_bootmem(); + setup_zero_pages(); /* This comes from node 0 */ + mem_init_print_info(NULL); +} + +/* All PCI device belongs to logical Node-0 */ +int pcibus_to_node(struct pci_bus *bus) +{ + return 0; +} +EXPORT_SYMBOL(pcibus_to_node); + +void __init prom_init_numa_memory(void) +{ + enable_lpa(); + prom_meminit(); +} +EXPORT_SYMBOL(prom_init_numa_memory); diff --git a/arch/mips/loongson/loongson-3/smp.c b/arch/mips/loongson/loongson-3/smp.c index 1e8894020ea5..d8c63af6c7cc 100644 --- a/arch/mips/loongson/loongson-3/smp.c +++ b/arch/mips/loongson/loongson-3/smp.c @@ -31,6 +31,12 @@ DEFINE_PER_CPU(int, cpu_state); DEFINE_PER_CPU(uint32_t, core0_c0count); +static void *ipi_set0_regs[16]; +static void *ipi_clear0_regs[16]; +static void *ipi_status0_regs[16]; +static void *ipi_en0_regs[16]; +static void *ipi_mailbox_buf[16]; + /* read a 32bit value from ipi register */ #define loongson3_ipi_read32(addr) readl(addr) /* read a 64bit value from ipi register */ @@ -48,100 +54,185 @@ DEFINE_PER_CPU(uint32_t, core0_c0count); __wbflush(); \ } while (0) -static void *ipi_set0_regs[] = { - (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + SET0), - (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + SET0), - (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + SET0), - (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + SET0), - (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + SET0), - (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + SET0), - (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + SET0), - (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + SET0), - (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + SET0), - (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + SET0), - (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + SET0), - (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + SET0), - (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + SET0), - (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + SET0), - (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + SET0), - (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + SET0), -}; +static void ipi_set0_regs_init(void) +{ + ipi_set0_regs[0] = (void *) + (SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + SET0); + ipi_set0_regs[1] = (void *) + (SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + SET0); + ipi_set0_regs[2] = (void *) + (SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + SET0); + ipi_set0_regs[3] = (void *) + (SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + SET0); + ipi_set0_regs[4] = (void *) + (SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + SET0); + 
ipi_set0_regs[5] = (void *) + (SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + SET0); + ipi_set0_regs[6] = (void *) + (SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + SET0); + ipi_set0_regs[7] = (void *) + (SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + SET0); + ipi_set0_regs[8] = (void *) + (SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + SET0); + ipi_set0_regs[9] = (void *) + (SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + SET0); + ipi_set0_regs[10] = (void *) + (SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + SET0); + ipi_set0_regs[11] = (void *) + (SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + SET0); + ipi_set0_regs[12] = (void *) + (SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + SET0); + ipi_set0_regs[13] = (void *) + (SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + SET0); + ipi_set0_regs[14] = (void *) + (SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + SET0); + ipi_set0_regs[15] = (void *) + (SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + SET0); +} -static void *ipi_clear0_regs[] = { - (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + CLEAR0), - (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + CLEAR0), - (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + CLEAR0), - (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + CLEAR0), - (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + CLEAR0), - (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + CLEAR0), - (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + CLEAR0), - (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + CLEAR0), - (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + CLEAR0), - (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + CLEAR0), - (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + CLEAR0), - (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + CLEAR0), - (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + CLEAR0), - (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + CLEAR0), - (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + CLEAR0), - (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + CLEAR0), -}; +static void ipi_clear0_regs_init(void) +{ + ipi_clear0_regs[0] = (void *) + (SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + CLEAR0); + ipi_clear0_regs[1] = (void *) + (SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + CLEAR0); + ipi_clear0_regs[2] = (void *) + (SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + CLEAR0); + ipi_clear0_regs[3] = (void *) + (SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + CLEAR0); + ipi_clear0_regs[4] = (void *) + (SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + CLEAR0); + ipi_clear0_regs[5] = (void *) + (SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + CLEAR0); + ipi_clear0_regs[6] = (void *) + (SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + CLEAR0); + ipi_clear0_regs[7] = (void *) + (SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + CLEAR0); + ipi_clear0_regs[8] = (void *) + (SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + CLEAR0); + ipi_clear0_regs[9] = (void *) + (SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + CLEAR0); + ipi_clear0_regs[10] = (void *) + (SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + CLEAR0); + ipi_clear0_regs[11] = (void *) + (SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + CLEAR0); + ipi_clear0_regs[12] = (void *) + (SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + CLEAR0); + ipi_clear0_regs[13] = (void *) + (SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + CLEAR0); + ipi_clear0_regs[14] = (void *) + (SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + CLEAR0); + ipi_clear0_regs[15] = (void *) + (SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + CLEAR0); +} -static void *ipi_status0_regs[] = { - (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + STATUS0), - (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + STATUS0), - (void 
*)(SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + STATUS0), - (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + STATUS0), - (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + STATUS0), - (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + STATUS0), - (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + STATUS0), - (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + STATUS0), - (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + STATUS0), - (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + STATUS0), - (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + STATUS0), - (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + STATUS0), - (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + STATUS0), - (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + STATUS0), - (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + STATUS0), - (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + STATUS0), -}; +static void ipi_status0_regs_init(void) +{ + ipi_status0_regs[0] = (void *) + (SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + STATUS0); + ipi_status0_regs[1] = (void *) + (SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + STATUS0); + ipi_status0_regs[2] = (void *) + (SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + STATUS0); + ipi_status0_regs[3] = (void *) + (SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + STATUS0); + ipi_status0_regs[4] = (void *) + (SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + STATUS0); + ipi_status0_regs[5] = (void *) + (SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + STATUS0); + ipi_status0_regs[6] = (void *) + (SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + STATUS0); + ipi_status0_regs[7] = (void *) + (SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + STATUS0); + ipi_status0_regs[8] = (void *) + (SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + STATUS0); + ipi_status0_regs[9] = (void *) + (SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + STATUS0); + ipi_status0_regs[10] = (void *) + (SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + STATUS0); + ipi_status0_regs[11] = (void *) + (SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + STATUS0); + ipi_status0_regs[12] = (void *) + (SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + STATUS0); + ipi_status0_regs[13] = (void *) + (SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + STATUS0); + ipi_status0_regs[14] = (void *) + (SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + STATUS0); + ipi_status0_regs[15] = (void *) + (SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + STATUS0); +} -static void *ipi_en0_regs[] = { - (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + EN0), - (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + EN0), - (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + EN0), - (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + EN0), - (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + EN0), - (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + EN0), - (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + EN0), - (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + EN0), - (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + EN0), - (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + EN0), - (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + EN0), - (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + EN0), - (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + EN0), - (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + EN0), - (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + EN0), - (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + EN0), -}; +static void ipi_en0_regs_init(void) +{ + ipi_en0_regs[0] = (void *) + (SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + EN0); + ipi_en0_regs[1] = (void *) + (SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + EN0); + ipi_en0_regs[2] = (void *) + 
(SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + EN0); + ipi_en0_regs[3] = (void *) + (SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + EN0); + ipi_en0_regs[4] = (void *) + (SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + EN0); + ipi_en0_regs[5] = (void *) + (SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + EN0); + ipi_en0_regs[6] = (void *) + (SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + EN0); + ipi_en0_regs[7] = (void *) + (SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + EN0); + ipi_en0_regs[8] = (void *) + (SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + EN0); + ipi_en0_regs[9] = (void *) + (SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + EN0); + ipi_en0_regs[10] = (void *) + (SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + EN0); + ipi_en0_regs[11] = (void *) + (SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + EN0); + ipi_en0_regs[12] = (void *) + (SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + EN0); + ipi_en0_regs[13] = (void *) + (SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + EN0); + ipi_en0_regs[14] = (void *) + (SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + EN0); + ipi_en0_regs[15] = (void *) + (SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + EN0); +} -static void *ipi_mailbox_buf[] = { - (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + BUF), - (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + BUF), - (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + BUF), - (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + BUF), - (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + BUF), - (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + BUF), - (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + BUF), - (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + BUF), - (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + BUF), - (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + BUF), - (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + BUF), - (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + BUF), - (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + BUF), - (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + BUF), - (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + BUF), - (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + BUF), -}; +static void ipi_mailbox_buf_init(void) +{ + ipi_mailbox_buf[0] = (void *) + (SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + BUF); + ipi_mailbox_buf[1] = (void *) + (SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + BUF); + ipi_mailbox_buf[2] = (void *) + (SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + BUF); + ipi_mailbox_buf[3] = (void *) + (SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + BUF); + ipi_mailbox_buf[4] = (void *) + (SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + BUF); + ipi_mailbox_buf[5] = (void *) + (SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + BUF); + ipi_mailbox_buf[6] = (void *) + (SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + BUF); + ipi_mailbox_buf[7] = (void *) + (SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + BUF); + ipi_mailbox_buf[8] = (void *) + (SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + BUF); + ipi_mailbox_buf[9] = (void *) + (SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + BUF); + ipi_mailbox_buf[10] = (void *) + (SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + BUF); + ipi_mailbox_buf[11] = (void *) + (SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + BUF); + ipi_mailbox_buf[12] = (void *) + (SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + BUF); + ipi_mailbox_buf[13] = (void *) + (SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + BUF); + ipi_mailbox_buf[14] = (void *) + (SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + BUF); + ipi_mailbox_buf[15] = (void *) + (SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + BUF); +} /* * Simple enough, just poke the appropriate ipi register @@ -203,19 +294,21 @@ static void 
loongson3_init_secondary(void) for (i = 0; i < loongson_sysconf.nr_cpus; i++) loongson3_ipi_write32(0xffffffff, ipi_en0_regs[i]); + cpu_data[cpu].package = cpu / loongson_sysconf.cores_per_package; + cpu_data[cpu].core = cpu % loongson_sysconf.cores_per_package; per_cpu(cpu_state, cpu) = CPU_ONLINE; i = 0; - __get_cpu_var(core0_c0count) = 0; + __this_cpu_write(core0_c0count, 0); loongson3_send_ipi_single(0, SMP_ASK_C0COUNT); - while (!__get_cpu_var(core0_c0count)) { + while (!__this_cpu_read(core0_c0count)) { i++; cpu_relax(); } if (i > MAX_LOOPS) i = MAX_LOOPS; - initcount = __get_cpu_var(core0_c0count) + i; + initcount = __this_cpu_read(core0_c0count) + i; write_c0_count(initcount); } @@ -246,6 +339,11 @@ static void __init loongson3_smp_setup(void) __cpu_number_map[i] = ++num; __cpu_logical_map[num] = i; } + ipi_set0_regs_init(); + ipi_clear0_regs_init(); + ipi_status0_regs_init(); + ipi_en0_regs_init(); + ipi_mailbox_buf_init(); pr_info("Detected %i available secondary CPU(s)\n", num); } @@ -313,7 +411,7 @@ static void loongson3_cpu_die(unsigned int cpu) * flush all L1 entries at first. Then, another core (usually Core 0) can * safely disable the clock of the target core. loongson3_play_dead() is * called via CKSEG1 (uncached and unmmaped) */ -static void loongson3_play_dead(int *state_addr) +static void loongson3a_play_dead(int *state_addr) { register int val; register long cpuid, core, node, count; @@ -375,6 +473,70 @@ static void loongson3_play_dead(int *state_addr) : "a1"); } +static void loongson3b_play_dead(int *state_addr) +{ + register int val; + register long cpuid, core, node, count; + register void *addr, *base, *initfunc; + + __asm__ __volatile__( + " .set push \n" + " .set noreorder \n" + " li %[addr], 0x80000000 \n" /* KSEG0 */ + "1: cache 0, 0(%[addr]) \n" /* flush L1 ICache */ + " cache 0, 1(%[addr]) \n" + " cache 0, 2(%[addr]) \n" + " cache 0, 3(%[addr]) \n" + " cache 1, 0(%[addr]) \n" /* flush L1 DCache */ + " cache 1, 1(%[addr]) \n" + " cache 1, 2(%[addr]) \n" + " cache 1, 3(%[addr]) \n" + " addiu %[sets], %[sets], -1 \n" + " bnez %[sets], 1b \n" + " addiu %[addr], %[addr], 0x20 \n" + " li %[val], 0x7 \n" /* *state_addr = CPU_DEAD; */ + " sw %[val], (%[state_addr]) \n" + " sync \n" + " cache 21, (%[state_addr]) \n" /* flush entry of *state_addr */ + " .set pop \n" + : [addr] "=&r" (addr), [val] "=&r" (val) + : [state_addr] "r" (state_addr), + [sets] "r" (cpu_data[smp_processor_id()].dcache.sets)); + + __asm__ __volatile__( + " .set push \n" + " .set noreorder \n" + " .set mips64 \n" + " mfc0 %[cpuid], $15, 1 \n" + " andi %[cpuid], 0x3ff \n" + " dli %[base], 0x900000003ff01000 \n" + " andi %[core], %[cpuid], 0x3 \n" + " sll %[core], 8 \n" /* get core id */ + " or %[base], %[base], %[core] \n" + " andi %[node], %[cpuid], 0xc \n" + " dsll %[node], 42 \n" /* get node id */ + " or %[base], %[base], %[node] \n" + " dsrl %[node], 30 \n" /* 15:14 */ + " or %[base], %[base], %[node] \n" + "1: li %[count], 0x100 \n" /* wait for init loop */ + "2: bnez %[count], 2b \n" /* limit mailbox access */ + " addiu %[count], -1 \n" + " ld %[initfunc], 0x20(%[base]) \n" /* get PC via mailbox */ + " beqz %[initfunc], 1b \n" + " nop \n" + " ld $sp, 0x28(%[base]) \n" /* get SP via mailbox */ + " ld $gp, 0x30(%[base]) \n" /* get GP via mailbox */ + " ld $a1, 0x38(%[base]) \n" + " jr %[initfunc] \n" /* jump to initial PC */ + " nop \n" + " .set pop \n" + : [core] "=&r" (core), [node] "=&r" (node), + [base] "=&r" (base), [cpuid] "=&r" (cpuid), + [count] "=&r" (count), [initfunc] "=&r" 
(initfunc) + : /* No Input */ + : "a1"); +} + void play_dead(void) { int *state_addr; @@ -382,13 +544,48 @@ void play_dead(void) void (*play_dead_at_ckseg1)(int *); idle_task_exit(); - play_dead_at_ckseg1 = - (void *)CKSEG1ADDR((unsigned long)loongson3_play_dead); + switch (loongson_sysconf.cputype) { + case Loongson_3A: + default: + play_dead_at_ckseg1 = + (void *)CKSEG1ADDR((unsigned long)loongson3a_play_dead); + break; + case Loongson_3B: + play_dead_at_ckseg1 = + (void *)CKSEG1ADDR((unsigned long)loongson3b_play_dead); + break; + } state_addr = &per_cpu(cpu_state, cpu); mb(); play_dead_at_ckseg1(state_addr); } +void loongson3_disable_clock(int cpu) +{ + uint64_t core_id = cpu_data[cpu].core; + uint64_t package_id = cpu_data[cpu].package; + + if (loongson_sysconf.cputype == Loongson_3A) { + LOONGSON_CHIPCFG(package_id) &= ~(1 << (12 + core_id)); + } else if (loongson_sysconf.cputype == Loongson_3B) { + if (!cpuhotplug_workaround) + LOONGSON_FREQCTRL(package_id) &= ~(1 << (core_id * 4 + 3)); + } +} + +void loongson3_enable_clock(int cpu) +{ + uint64_t core_id = cpu_data[cpu].core; + uint64_t package_id = cpu_data[cpu].package; + + if (loongson_sysconf.cputype == Loongson_3A) { + LOONGSON_CHIPCFG(package_id) |= 1 << (12 + core_id); + } else if (loongson_sysconf.cputype == Loongson_3B) { + if (!cpuhotplug_workaround) + LOONGSON_FREQCTRL(package_id) |= 1 << (core_id * 4 + 3); + } +} + #define CPU_POST_DEAD_FROZEN (CPU_POST_DEAD | CPU_TASKS_FROZEN) static int loongson3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) @@ -399,12 +596,12 @@ static int loongson3_cpu_callback(struct notifier_block *nfb, case CPU_POST_DEAD: case CPU_POST_DEAD_FROZEN: pr_info("Disable clock for CPU#%d\n", cpu); - LOONGSON_CHIPCFG0 &= ~(1 << (12 + cpu)); + loongson3_disable_clock(cpu); break; case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: pr_info("Enable clock for CPU#%d\n", cpu); - LOONGSON_CHIPCFG0 |= 1 << (12 + cpu); + loongson3_enable_clock(cpu); break; } diff --git a/arch/mips/loongson/loongson-3/smp.h b/arch/mips/loongson/loongson-3/smp.h index 3453e8c4f2f0..d98ff654b7d7 100644 --- a/arch/mips/loongson/loongson-3/smp.h +++ b/arch/mips/loongson/loongson-3/smp.h @@ -1,29 +1,30 @@ #ifndef __LOONGSON_SMP_H_ #define __LOONGSON_SMP_H_ -/* for Loongson-3A smp support */ +/* for Loongson-3 smp support */ +extern unsigned long long smp_group[4]; /* 4 groups(nodes) in maximum in numa case */ -#define SMP_CORE_GROUP0_BASE 0x900000003ff01000 -#define SMP_CORE_GROUP1_BASE 0x900010003ff01000 -#define SMP_CORE_GROUP2_BASE 0x900020003ff01000 -#define SMP_CORE_GROUP3_BASE 0x900030003ff01000 +#define SMP_CORE_GROUP0_BASE (smp_group[0]) +#define SMP_CORE_GROUP1_BASE (smp_group[1]) +#define SMP_CORE_GROUP2_BASE (smp_group[2]) +#define SMP_CORE_GROUP3_BASE (smp_group[3]) /* 4 cores in each group(node) */ -#define SMP_CORE0_OFFSET 0x000 -#define SMP_CORE1_OFFSET 0x100 -#define SMP_CORE2_OFFSET 0x200 -#define SMP_CORE3_OFFSET 0x300 +#define SMP_CORE0_OFFSET 0x000 +#define SMP_CORE1_OFFSET 0x100 +#define SMP_CORE2_OFFSET 0x200 +#define SMP_CORE3_OFFSET 0x300 /* ipi registers offsets */ -#define STATUS0 0x00 -#define EN0 0x04 -#define SET0 0x08 -#define CLEAR0 0x0c -#define STATUS1 0x10 -#define MASK1 0x14 -#define SET1 0x18 -#define CLEAR1 0x1c -#define BUF 0x20 +#define STATUS0 0x00 +#define EN0 0x04 +#define SET0 0x08 +#define CLEAR0 0x0c +#define STATUS1 0x10 +#define MASK1 0x14 +#define SET1 0x18 +#define CLEAR1 0x1c +#define BUF 0x20 #endif diff --git a/arch/mips/math-emu/cp1emu.c 
b/arch/mips/math-emu/cp1emu.c index 736c17a226e9..7a4727795a70 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -650,9 +650,9 @@ static inline int cop1_64bit(struct pt_regs *xcp) #define SIFROMREG(si, x) \ do { \ if (cop1_64bit(xcp)) \ - (si) = get_fpr32(&ctx->fpr[x], 0); \ + (si) = (int)get_fpr32(&ctx->fpr[x], 0); \ else \ - (si) = get_fpr32(&ctx->fpr[(x) & ~1], (x) & 1); \ + (si) = (int)get_fpr32(&ctx->fpr[(x) & ~1], (x) & 1); \ } while (0) #define SITOREG(si, x) \ @@ -667,7 +667,7 @@ do { \ } \ } while (0) -#define SIFROMHREG(si, x) ((si) = get_fpr32(&ctx->fpr[x], 1)) +#define SIFROMHREG(si, x) ((si) = (int)get_fpr32(&ctx->fpr[x], 1)) #define SITOHREG(si, x) \ do { \ @@ -1827,7 +1827,7 @@ dcopuop: case -1: if (cpu_has_mips_4_5_r) - cbit = fpucondbit[MIPSInst_RT(ir) >> 2]; + cbit = fpucondbit[MIPSInst_FD(ir) >> 2]; else cbit = FPU_CSR_COND; if (rv.w) diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index f2e8302fa70f..fbcd8674ff1d 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -1230,19 +1230,19 @@ static void probe_pcache(void) case CPU_R14000: break; + case CPU_74K: + case CPU_1074K: + alias_74k_erratum(c); + /* Fall through. */ case CPU_M14KC: case CPU_M14KEC: case CPU_24K: case CPU_34K: - case CPU_74K: case CPU_1004K: - case CPU_1074K: case CPU_INTERAPTIV: case CPU_P5600: case CPU_PROAPTIV: case CPU_M5150: - if ((c->cputype == CPU_74K) || (c->cputype == CPU_1074K)) - alias_74k_erratum(c); if (!(read_c0_config7() & MIPS_CONF7_IAR) && (c->icache.waysize > PAGE_SIZE)) c->icache.flags |= MIPS_CACHE_ALIASES; diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index f7b91d3a371d..7e3ea7766822 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -119,25 +119,36 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr) EXPORT_SYMBOL(__flush_anon_page); -void __update_cache(struct vm_area_struct *vma, unsigned long address, - pte_t pte) +static void mips_flush_dcache_from_pte(pte_t pteval, unsigned long address) { struct page *page; - unsigned long pfn, addr; - int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc; + unsigned long pfn = pte_pfn(pteval); - pfn = pte_pfn(pte); if (unlikely(!pfn_valid(pfn))) return; + page = pfn_to_page(pfn); if (page_mapping(page) && Page_dcache_dirty(page)) { - addr = (unsigned long) page_address(page); - if (exec || pages_do_alias(addr, address & PAGE_MASK)) - flush_data_cache_page(addr); + unsigned long page_addr = (unsigned long) page_address(page); + + if (!cpu_has_ic_fills_f_dc || + pages_do_alias(page_addr, address & PAGE_MASK)) + flush_data_cache_page(page_addr); ClearPageDcacheDirty(page); } } +void set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval) +{ + if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc) { + if (pte_present(pteval)) + mips_flush_dcache_from_pte(pteval, addr); + } + + set_pte(ptep, pteval); +} + unsigned long _page_cachable_default; EXPORT_SYMBOL(_page_cachable_default); diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index 44b6dff5aba2..33ba3c558fe4 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -16,6 +16,7 @@ #include <linux/string.h> #include <linux/gfp.h> #include <linux/highmem.h> +#include <linux/dma-contiguous.h> #include <asm/cache.h> #include <asm/cpu-type.h> @@ -128,23 +129,30 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs) { void *ret; + struct page *page = NULL; + unsigned int 
count = PAGE_ALIGN(size) >> PAGE_SHIFT; if (dma_alloc_from_coherent(dev, size, dma_handle, &ret)) return ret; gfp = massage_gfp_flags(dev, gfp); - ret = (void *) __get_free_pages(gfp, get_order(size)); - - if (ret) { - memset(ret, 0, size); - *dma_handle = plat_map_dma_mem(dev, ret, size); - - if (!plat_device_is_coherent(dev)) { - dma_cache_wback_inv((unsigned long) ret, size); - if (!hw_coherentio) - ret = UNCAC_ADDR(ret); - } + if (IS_ENABLED(CONFIG_DMA_CMA) && !(gfp & GFP_ATOMIC)) + page = dma_alloc_from_contiguous(dev, + count, get_order(size)); + if (!page) + page = alloc_pages(gfp, get_order(size)); + + if (!page) + return NULL; + + ret = page_address(page); + memset(ret, 0, size); + *dma_handle = plat_map_dma_mem(dev, ret, size); + if (!plat_device_is_coherent(dev)) { + dma_cache_wback_inv((unsigned long) ret, size); + if (!hw_coherentio) + ret = UNCAC_ADDR(ret); } return ret; @@ -164,6 +172,8 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr, { unsigned long addr = (unsigned long) vaddr; int order = get_order(size); + unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; + struct page *page = NULL; if (dma_release_from_coherent(dev, order, vaddr)) return; @@ -173,7 +183,10 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr, if (!plat_device_is_coherent(dev) && !hw_coherentio) addr = CAC_ADDR(addr); - free_pages(addr, get_order(size)); + page = virt_to_page((void *) addr); + + if (!dma_release_from_contiguous(dev, page, count)) + __free_pages(page, get_order(size)); } static inline void __dma_sync_virtual(void *addr, size_t size, diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 6e4413330e36..f42e35e42790 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -53,6 +53,7 @@ */ unsigned long empty_zero_page, zero_page_mask; EXPORT_SYMBOL_GPL(empty_zero_page); +EXPORT_SYMBOL(zero_page_mask); /* * Not static inline because used by IP27 special magic initialization code @@ -325,6 +326,38 @@ static inline void mem_init_free_highmem(void) #endif } +unsigned __weak platform_maar_init(unsigned num_maars) +{ + return 0; +} + +static void maar_init(void) +{ + unsigned num_maars, used, i; + + if (!cpu_has_maar) + return; + + /* Detect the number of MAARs */ + write_c0_maari(~0); + back_to_back_c0_hazard(); + num_maars = read_c0_maari() + 1; + + /* MAARs should be in pairs */ + WARN_ON(num_maars % 2); + + /* Configure the required MAARs */ + used = platform_maar_init(num_maars / 2); + + /* Disable any further MAARs */ + for (i = (used * 2); i < num_maars; i++) { + write_c0_maari(i); + back_to_back_c0_hazard(); + write_c0_maar(0); + back_to_back_c0_hazard(); + } +} + void __init mem_init(void) { #ifdef CONFIG_HIGHMEM @@ -337,6 +370,7 @@ void __init mem_init(void) #endif high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); + maar_init(); free_all_bootmem(); setup_zero_pages(); /* Setup zeroed pages. 
*/ mem_init_free_highmem(); diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c index d657493ef561..4094bbd42adf 100644 --- a/arch/mips/mm/tlb-r3k.c +++ b/arch/mips/mm/tlb-r3k.c @@ -158,7 +158,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { int cpu = smp_processor_id(); - if (!vma || cpu_context(cpu, vma->vm_mm) != 0) { + if (cpu_context(cpu, vma->vm_mm) != 0) { unsigned long flags; int oldpid, newpid, idx; diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index 3914e27456f2..fa6ebd4bc9e9 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -57,6 +57,7 @@ void local_flush_tlb_all(void) local_irq_save(flags); /* Save old context and create impossible VPN2 value */ old_ctx = read_c0_entryhi(); + htw_stop(); write_c0_entrylo0(0); write_c0_entrylo1(0); @@ -90,6 +91,7 @@ void local_flush_tlb_all(void) } tlbw_use_hazard(); write_c0_entryhi(old_ctx); + htw_start(); flush_itlb(); local_irq_restore(flags); } @@ -131,6 +133,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, int oldpid = read_c0_entryhi(); int newpid = cpu_asid(cpu, mm); + htw_stop(); while (start < end) { int idx; @@ -151,6 +154,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, } tlbw_use_hazard(); write_c0_entryhi(oldpid); + htw_start(); } else { drop_mmu_context(mm, cpu); } @@ -174,6 +178,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) start &= (PAGE_MASK << 1); end += ((PAGE_SIZE << 1) - 1); end &= (PAGE_MASK << 1); + htw_stop(); while (start < end) { int idx; @@ -195,6 +200,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) } tlbw_use_hazard(); write_c0_entryhi(pid); + htw_start(); } else { local_flush_tlb_all(); } @@ -214,6 +220,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) page &= (PAGE_MASK << 1); local_irq_save(flags); oldpid = read_c0_entryhi(); + htw_stop(); write_c0_entryhi(page | newpid); mtc0_tlbw_hazard(); tlb_probe(); @@ -231,6 +238,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) finish: write_c0_entryhi(oldpid); + htw_start(); flush_itlb_vm(vma); local_irq_restore(flags); } @@ -247,6 +255,7 @@ void local_flush_tlb_one(unsigned long page) local_irq_save(flags); oldpid = read_c0_entryhi(); + htw_stop(); page &= (PAGE_MASK << 1); write_c0_entryhi(page); mtc0_tlbw_hazard(); @@ -263,6 +272,7 @@ void local_flush_tlb_one(unsigned long page) tlbw_use_hazard(); } write_c0_entryhi(oldpid); + htw_start(); flush_itlb(); local_irq_restore(flags); } @@ -351,6 +361,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, local_irq_save(flags); /* Save old context and create impossible VPN2 value */ old_ctx = read_c0_entryhi(); + htw_stop(); old_pagemask = read_c0_pagemask(); wired = read_c0_wired(); write_c0_wired(wired + 1); @@ -366,6 +377,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, write_c0_entryhi(old_ctx); tlbw_use_hazard(); /* What is the hazard here? 
*/ + htw_start(); write_c0_pagemask(old_pagemask); local_flush_tlb_all(); local_irq_restore(flags); @@ -391,6 +403,51 @@ int __init has_transparent_hugepage(void) #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +/* + * Used for loading TLB entries before trap_init() has started, when we + * don't actually want to add a wired entry which remains throughout the + * lifetime of the system + */ + +int temp_tlb_entry __cpuinitdata; + +__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, + unsigned long entryhi, unsigned long pagemask) +{ + int ret = 0; + unsigned long flags; + unsigned long wired; + unsigned long old_pagemask; + unsigned long old_ctx; + + local_irq_save(flags); + /* Save old context and create impossible VPN2 value */ + old_ctx = read_c0_entryhi(); + old_pagemask = read_c0_pagemask(); + wired = read_c0_wired(); + if (--temp_tlb_entry < wired) { + printk(KERN_WARNING + "No TLB space left for add_temporary_entry\n"); + ret = -ENOSPC; + goto out; + } + + write_c0_index(temp_tlb_entry); + write_c0_pagemask(pagemask); + write_c0_entryhi(entryhi); + write_c0_entrylo0(entrylo0); + write_c0_entrylo1(entrylo1); + mtc0_tlbw_hazard(); + tlb_write_indexed(); + tlbw_use_hazard(); + + write_c0_entryhi(old_ctx); + write_c0_pagemask(old_pagemask); +out: + local_irq_restore(flags); + return ret; +} + static int ntlb; static int __init set_ntlb(char *str) { @@ -431,6 +488,8 @@ static void r4k_tlb_configure(void) write_c0_pagegrain(pg); } + temp_tlb_entry = current_cpu_data.tlbsize - 1; + /* From this point on the ARC firmware is dead. */ local_flush_tlb_all(); diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index e80e10bafc83..a08dd53a1cc5 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -429,6 +429,7 @@ static void build_r3000_tlb_refill_handler(void) (unsigned int)(p - tlb_handler)); memcpy((void *)ebase, tlb_handler, 0x80); + local_flush_icache_range(ebase, ebase + 0x80); dump_handler("r3000_tlb_refill", (u32 *)ebase, 32); } @@ -1299,6 +1300,7 @@ static void build_r4000_tlb_refill_handler(void) } #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT uasm_l_tlb_huge_update(&l, p); + UASM_i_LW(&p, K0, 0, K1); build_huge_update_entries(&p, htlb_info.huge_pte, K1); build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random, htlb_info.restore_scratch); @@ -1415,6 +1417,7 @@ static void build_r4000_tlb_refill_handler(void) final_len); memcpy((void *)ebase, final_handler, 0x100); + local_flush_icache_range(ebase, ebase + 0x100); dump_handler("r4000_tlb_refill", (u32 *)ebase, 64); } @@ -1919,7 +1922,7 @@ static void build_r4000_tlb_load_handler(void) if (m4kc_tlbp_war()) build_tlb_probe_entry(&p); - if (cpu_has_rixi) { + if (cpu_has_rixi && !cpu_has_rixiex) { /* * If the page is not _PAGE_VALID, RI or XI could not * have triggered it. Skip the expensive test.. @@ -1986,7 +1989,7 @@ static void build_r4000_tlb_load_handler(void) build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl); build_tlb_probe_entry(&p); - if (cpu_has_rixi) { + if (cpu_has_rixi && !cpu_has_rixiex) { /* * If the page is not _PAGE_VALID, RI or XI could not * have triggered it. Skip the expensive test.. 
@@ -2194,6 +2197,94 @@ static void flush_tlb_handlers(void) (unsigned long)tlbmiss_handler_setup_pgd_end); } +static void print_htw_config(void) +{ + unsigned long config; + unsigned int pwctl; + const int field = 2 * sizeof(unsigned long); + + config = read_c0_pwfield(); + pr_debug("PWField (0x%0*lx): GDI: 0x%02lx UDI: 0x%02lx MDI: 0x%02lx PTI: 0x%02lx PTEI: 0x%02lx\n", + field, config, + (config & MIPS_PWFIELD_GDI_MASK) >> MIPS_PWFIELD_GDI_SHIFT, + (config & MIPS_PWFIELD_UDI_MASK) >> MIPS_PWFIELD_UDI_SHIFT, + (config & MIPS_PWFIELD_MDI_MASK) >> MIPS_PWFIELD_MDI_SHIFT, + (config & MIPS_PWFIELD_PTI_MASK) >> MIPS_PWFIELD_PTI_SHIFT, + (config & MIPS_PWFIELD_PTEI_MASK) >> MIPS_PWFIELD_PTEI_SHIFT); + + config = read_c0_pwsize(); + pr_debug("PWSize (0x%0*lx): GDW: 0x%02lx UDW: 0x%02lx MDW: 0x%02lx PTW: 0x%02lx PTEW: 0x%02lx\n", + field, config, + (config & MIPS_PWSIZE_GDW_MASK) >> MIPS_PWSIZE_GDW_SHIFT, + (config & MIPS_PWSIZE_UDW_MASK) >> MIPS_PWSIZE_UDW_SHIFT, + (config & MIPS_PWSIZE_MDW_MASK) >> MIPS_PWSIZE_MDW_SHIFT, + (config & MIPS_PWSIZE_PTW_MASK) >> MIPS_PWSIZE_PTW_SHIFT, + (config & MIPS_PWSIZE_PTEW_MASK) >> MIPS_PWSIZE_PTEW_SHIFT); + + pwctl = read_c0_pwctl(); + pr_debug("PWCtl (0x%x): PWEn: 0x%x DPH: 0x%x HugePg: 0x%x Psn: 0x%x\n", + pwctl, + (pwctl & MIPS_PWCTL_PWEN_MASK) >> MIPS_PWCTL_PWEN_SHIFT, + (pwctl & MIPS_PWCTL_DPH_MASK) >> MIPS_PWCTL_DPH_SHIFT, + (pwctl & MIPS_PWCTL_HUGEPG_MASK) >> MIPS_PWCTL_HUGEPG_SHIFT, + (pwctl & MIPS_PWCTL_PSN_MASK) >> MIPS_PWCTL_PSN_SHIFT); +} + +static void config_htw_params(void) +{ + unsigned long pwfield, pwsize, ptei; + unsigned int config; + + /* + * We are using 2-level page tables, so we only need to + * setup GDW and PTW appropriately. UDW and MDW will remain 0. + * The default value of GDI/UDI/MDI/PTI is 0xc. It is illegal to + * write values less than 0xc in these fields because the entire + * write will be dropped. As a result of which, we must preserve + * the original reset values and overwrite only what we really want. + */ + + pwfield = read_c0_pwfield(); + /* re-initialize the GDI field */ + pwfield &= ~MIPS_PWFIELD_GDI_MASK; + pwfield |= PGDIR_SHIFT << MIPS_PWFIELD_GDI_SHIFT; + /* re-initialize the PTI field including the even/odd bit */ + pwfield &= ~MIPS_PWFIELD_PTI_MASK; + pwfield |= PAGE_SHIFT << MIPS_PWFIELD_PTI_SHIFT; + /* Set the PTEI right shift */ + ptei = _PAGE_GLOBAL_SHIFT << MIPS_PWFIELD_PTEI_SHIFT; + pwfield |= ptei; + write_c0_pwfield(pwfield); + /* Check whether the PTEI value is supported */ + back_to_back_c0_hazard(); + pwfield = read_c0_pwfield(); + if (((pwfield & MIPS_PWFIELD_PTEI_MASK) << MIPS_PWFIELD_PTEI_SHIFT) + != ptei) { + pr_warn("Unsupported PTEI field value: 0x%lx. 
HTW will not be enabled", + ptei); + /* + * Drop option to avoid HTW being enabled via another path + * (eg htw_reset()) + */ + current_cpu_data.options &= ~MIPS_CPU_HTW; + return; + } + + pwsize = ilog2(PTRS_PER_PGD) << MIPS_PWSIZE_GDW_SHIFT; + pwsize |= ilog2(PTRS_PER_PTE) << MIPS_PWSIZE_PTW_SHIFT; + write_c0_pwsize(pwsize); + + /* Make sure everything is set before we enable the HTW */ + back_to_back_c0_hazard(); + + /* Enable HTW and disable the rest of the pwctl fields */ + config = 1 << MIPS_PWCTL_PWEN_SHIFT; + write_c0_pwctl(config); + pr_info("Hardware Page Table Walker enabled\n"); + + print_htw_config(); +} + void build_tlb_refill_handler(void) { /* @@ -2258,5 +2349,8 @@ void build_tlb_refill_handler(void) } if (cpu_has_local_ebase) build_r4000_tlb_refill_handler(); + if (cpu_has_htw) + config_htw_params(); + } } diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c index ecc2785f7858..e4f43baa8f67 100644 --- a/arch/mips/mti-malta/malta-int.c +++ b/arch/mips/mti-malta/malta-int.c @@ -42,6 +42,10 @@ static unsigned int ipi_map[NR_CPUS]; static DEFINE_RAW_SPINLOCK(mips_irq_lock); +#ifdef CONFIG_MIPS_GIC_IPI +DECLARE_BITMAP(ipi_ints, GIC_NUM_INTRS); +#endif + static inline int mips_pcibios_iack(void) { int irq; @@ -125,16 +129,22 @@ static void malta_hw0_irqdispatch(void) static void malta_ipi_irqdispatch(void) { - int irq; +#ifdef CONFIG_MIPS_GIC_IPI + unsigned long irq; + DECLARE_BITMAP(pending, GIC_NUM_INTRS); - if (gic_compare_int()) - do_IRQ(MIPS_GIC_IRQ_BASE); + gic_get_int_mask(pending, ipi_ints); + + irq = find_first_bit(pending, GIC_NUM_INTRS); - irq = gic_get_int(); - if (irq < 0) - return; /* interrupt has already been cleared */ + while (irq < GIC_NUM_INTRS) { + do_IRQ(MIPS_GIC_IRQ_BASE + irq); - do_IRQ(MIPS_GIC_IRQ_BASE + irq); + irq = find_next_bit(pending, GIC_NUM_INTRS, irq + 1); + } +#endif + if (gic_compare_int()) + do_IRQ(MIPS_GIC_IRQ_BASE); } static void corehi_irqdispatch(void) @@ -427,8 +437,9 @@ static void __init fill_ipi_map1(int baseintr, int cpu, int cpupin) gic_intr_map[intr].pin = cpupin; gic_intr_map[intr].polarity = GIC_POL_POS; gic_intr_map[intr].trigtype = GIC_TRIG_EDGE; - gic_intr_map[intr].flags = GIC_FLAG_IPI; + gic_intr_map[intr].flags = 0; ipi_map[cpu] |= (1 << (cpupin + 2)); + bitmap_set(ipi_ints, intr, 1); } static void __init fill_ipi_map(void) diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c index 6d9773096750..8fddd2cdbff7 100644 --- a/arch/mips/mti-malta/malta-memory.c +++ b/arch/mips/mti-malta/malta-memory.c @@ -16,6 +16,7 @@ #include <linux/string.h> #include <asm/bootinfo.h> +#include <asm/maar.h> #include <asm/sections.h> #include <asm/fw/fw.h> @@ -34,13 +35,19 @@ fw_memblock_t * __init fw_getmdesc(int eva) /* otherwise look in the environment */ memsize_str = fw_getenv("memsize"); - if (memsize_str) - tmp = kstrtol(memsize_str, 0, &memsize); + if (memsize_str) { + tmp = kstrtoul(memsize_str, 0, &memsize); + if (tmp) + pr_warn("Failed to read the 'memsize' env variable.\n"); + } if (eva) { /* Look for ememsize for EVA */ ememsize_str = fw_getenv("ememsize"); - if (ememsize_str) - tmp = kstrtol(ememsize_str, 0, &ememsize); + if (ememsize_str) { + tmp = kstrtoul(ememsize_str, 0, &ememsize); + if (tmp) + pr_warn("Failed to read the 'ememsize' env variable.\n"); + } } if (!memsize && !ememsize) { pr_warn("memsize not set in YAMON, set to default (32Mb)\n"); @@ -164,3 +171,28 @@ void __init prom_free_prom_memory(void) addr, addr + boot_mem_map.map[i].size); } } + +unsigned 
platform_maar_init(unsigned num_pairs) +{ + phys_addr_t mem_end = (physical_memsize & ~0xffffull) - 1; + struct maar_config cfg[] = { + /* DRAM preceding I/O */ + { 0x00000000, 0x0fffffff, MIPS_MAAR_S }, + + /* DRAM following I/O */ + { 0x20000000, mem_end, MIPS_MAAR_S }, + + /* DRAM alias in upper half of physical */ + { 0x80000000, 0x80000000 + mem_end, MIPS_MAAR_S }, + }; + unsigned i, num_cfg = ARRAY_SIZE(cfg); + + /* If DRAM fits before I/O, drop the region following it */ + if (physical_memsize <= 0x10000000) { + num_cfg--; + for (i = 1; i < num_cfg; i++) + cfg[i] = cfg[i + 1]; + } + + return maar_config(cfg, num_cfg, num_pairs); +} diff --git a/arch/mips/mti-sead3/Makefile b/arch/mips/mti-sead3/Makefile index 071786fa234b..febf4334545e 100644 --- a/arch/mips/mti-sead3/Makefile +++ b/arch/mips/mti-sead3/Makefile @@ -19,9 +19,5 @@ obj-y += sead3-i2c-dev.o sead3-i2c.o \ obj-$(CONFIG_EARLY_PRINTK) += sead3-console.o obj-$(CONFIG_USB_EHCI_HCD) += sead3-ehci.o -obj-$(CONFIG_OF) += sead3.dtb.o CFLAGS_sead3-setup.o = -I$(src)/../../../scripts/dtc/libfdt - -$(obj)/%.dtb: $(obj)/%.dts - $(call if_changed,dtc) diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index b87390a56a2f..9b55143d19db 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c @@ -131,7 +131,7 @@ * @target: Memory location for the compiled filter */ struct jit_ctx { - const struct sk_filter *skf; + const struct bpf_prog *skf; unsigned int prologue_bytes; u32 idx; u32 flags; @@ -163,6 +163,19 @@ do { \ (ctx)->idx++; \ } while (0) +/* + * Similar to emit_instr but it must be used when we need to emit + * 32-bit or 64-bit instructions + */ +#define emit_long_instr(ctx, func, ...) \ +do { \ + if ((ctx)->target != NULL) { \ + u32 *p = &(ctx)->target[ctx->idx]; \ + UASM_i_##func(&p, ##__VA_ARGS__); \ + } \ + (ctx)->idx++; \ +} while (0) + /* Determine if immediate is within the 16-bit signed range */ static inline bool is_range16(s32 imm) { @@ -218,13 +231,6 @@ static inline void emit_ori(unsigned int dst, unsigned src, u32 imm, } } - -static inline void emit_daddu(unsigned int dst, unsigned int src1, - unsigned int src2, struct jit_ctx *ctx) -{ - emit_instr(ctx, daddu, dst, src1, src2); -} - static inline void emit_daddiu(unsigned int dst, unsigned int src, int imm, struct jit_ctx *ctx) { @@ -283,11 +289,7 @@ static inline void emit_xori(ptr dst, ptr src, u32 imm, struct jit_ctx *ctx) static inline void emit_stack_offset(int offset, struct jit_ctx *ctx) { - if (config_enabled(CONFIG_64BIT)) - emit_instr(ctx, daddiu, r_sp, r_sp, offset); - else - emit_instr(ctx, addiu, r_sp, r_sp, offset); - + emit_long_instr(ctx, ADDIU, r_sp, r_sp, offset); } static inline void emit_subu(unsigned int dst, unsigned int src1, @@ -365,10 +367,7 @@ static inline void emit_store_stack_reg(ptr reg, ptr base, unsigned int offset, struct jit_ctx *ctx) { - if (config_enabled(CONFIG_64BIT)) - emit_instr(ctx, sd, reg, offset, base); - else - emit_instr(ctx, sw, reg, offset, base); + emit_long_instr(ctx, SW, reg, offset, base); } static inline void emit_store(ptr reg, ptr base, unsigned int offset, @@ -381,10 +380,7 @@ static inline void emit_load_stack_reg(ptr reg, ptr base, unsigned int offset, struct jit_ctx *ctx) { - if (config_enabled(CONFIG_64BIT)) - emit_instr(ctx, ld, reg, offset, base); - else - emit_instr(ctx, lw, reg, offset, base); + emit_long_instr(ctx, LW, reg, offset, base); } static inline void emit_load(unsigned int reg, unsigned int base, @@ -458,10 +454,7 @@ static inline void emit_load_ptr(unsigned int dst, 
unsigned int src, int imm, struct jit_ctx *ctx) { /* src contains the base addr of the 32/64-pointer */ - if (config_enabled(CONFIG_64BIT)) - emit_instr(ctx, ld, dst, imm, src); - else - emit_instr(ctx, lw, dst, imm, src); + emit_long_instr(ctx, LW, dst, imm, src); } /* load a function pointer to register */ @@ -483,10 +476,7 @@ static inline void emit_load_func(unsigned int reg, ptr imm, /* Move to real MIPS register */ static inline void emit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx) { - if (config_enabled(CONFIG_64BIT)) - emit_daddu(dst, src, r_zero, ctx); - else - emit_addu(dst, src, r_zero, ctx); + emit_long_instr(ctx, ADDU, dst, src, r_zero); } /* Move to JIT (32-bit) register */ @@ -623,10 +613,7 @@ static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset) if (ctx->flags & SEEN_MEM) { if (real_off % (RSIZE * 2)) real_off += RSIZE; - if (config_enabled(CONFIG_64BIT)) - emit_daddiu(r_M, r_sp, real_off, ctx); - else - emit_addiu(r_M, r_sp, real_off, ctx); + emit_long_instr(ctx, ADDIU, r_M, r_sp, real_off); } } @@ -765,31 +752,10 @@ static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset) return (u64)err << 32 | ntohl(ret); } -#ifdef __BIG_ENDIAN_BITFIELD -#define PKT_TYPE_MAX (7 << 5) -#else -#define PKT_TYPE_MAX 7 -#endif -static int pkt_type_offset(void) -{ - struct sk_buff skb_probe = { - .pkt_type = ~0, - }; - u8 *ct = (u8 *)&skb_probe; - unsigned int off; - - for (off = 0; off < sizeof(struct sk_buff); off++) { - if (ct[off] == PKT_TYPE_MAX) - return off; - } - pr_err_once("Please fix pkt_type_offset(), as pkt_type couldn't be found\n"); - return -1; -} - static int build_body(struct jit_ctx *ctx) { void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w}; - const struct sk_filter *prog = ctx->skf; + const struct bpf_prog *prog = ctx->skf; const struct sock_filter *inst; unsigned int i, off, load_order, condt; u32 k, b_off __maybe_unused; @@ -1262,7 +1228,7 @@ jmp_cmp: emit_half_load(r_A, r_skb, off, ctx); #ifdef CONFIG_CPU_LITTLE_ENDIAN /* This needs little endian fixup */ - if (cpu_has_mips_r2) { + if (cpu_has_wsbh) { /* R2 and later have the wsbh instruction */ emit_wsbh(r_A, r_A, ctx); } else { @@ -1332,11 +1298,7 @@ jmp_cmp: case BPF_ANC | SKF_AD_PKTTYPE: ctx->flags |= SEEN_SKB; - off = pkt_type_offset(); - - if (off < 0) - return -1; - emit_load_byte(r_tmp, r_skb, off, ctx); + emit_load_byte(r_tmp, r_skb, PKT_TYPE_OFFSET(), ctx); /* Keep only the last 3 bits */ emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx); #ifdef __BIG_ENDIAN_BITFIELD @@ -1369,7 +1331,7 @@ jmp_cmp: int bpf_jit_enable __read_mostly; -void bpf_jit_compile(struct sk_filter *fp) +void bpf_jit_compile(struct bpf_prog *fp) { struct jit_ctx ctx; unsigned int alloc_size, tmp_idx; @@ -1417,15 +1379,16 @@ void bpf_jit_compile(struct sk_filter *fp) bpf_jit_dump(fp->len, alloc_size, 2, ctx.target); fp->bpf_func = (void *)ctx.target; - fp->jited = 1; + fp->jited = true; out: kfree(ctx.offsets); } -void bpf_jit_free(struct sk_filter *fp) +void bpf_jit_free(struct bpf_prog *fp) { if (fp->jited) module_free(NULL, fp->bpf_func); - kfree(fp); + + bpf_prog_unlock_free(fp); } diff --git a/arch/mips/netlogic/Kconfig b/arch/mips/netlogic/Kconfig index 4eb683aef7d7..0823321c10e0 100644 --- a/arch/mips/netlogic/Kconfig +++ b/arch/mips/netlogic/Kconfig @@ -4,6 +4,7 @@ if NLM_XLP_BOARD config DT_XLP_EVP bool "Built-in device tree for XLP EVP boards" default y + select BUILTIN_DTB help Add an FDT blob for XLP EVP boards into the kernel. 
This DTB will be used if the firmware does not pass in a DTB @@ -13,6 +14,7 @@ config DT_XLP_EVP config DT_XLP_SVP bool "Built-in device tree for XLP SVP boards" default y + select BUILTIN_DTB help Add an FDT blob for XLP VP boards into the kernel. This DTB will be used if the firmware does not pass in a DTB @@ -22,6 +24,7 @@ config DT_XLP_SVP config DT_XLP_FVP bool "Built-in device tree for XLP FVP boards" default y + select BUILTIN_DTB help Add an FDT blob for XLP FVP board into the kernel. This DTB will be used if the firmware does not pass in a DTB @@ -31,6 +34,7 @@ config DT_XLP_FVP config DT_XLP_GVP bool "Built-in device tree for XLP GVP boards" default y + select BUILTIN_DTB help Add an FDT blob for XLP GVP board into the kernel. This DTB will be used if the firmware does not pass in a DTB diff --git a/arch/mips/netlogic/Makefile b/arch/mips/netlogic/Makefile index 7602d1386614..36d169b2ca6d 100644 --- a/arch/mips/netlogic/Makefile +++ b/arch/mips/netlogic/Makefile @@ -1,4 +1,3 @@ obj-$(CONFIG_NLM_COMMON) += common/ obj-$(CONFIG_CPU_XLR) += xlr/ obj-$(CONFIG_CPU_XLP) += xlp/ -obj-$(CONFIG_CPU_XLP) += dts/ diff --git a/arch/mips/netlogic/dts/Makefile b/arch/mips/netlogic/dts/Makefile deleted file mode 100644 index 25c8e873ee25..000000000000 --- a/arch/mips/netlogic/dts/Makefile +++ /dev/null @@ -1,4 +0,0 @@ -obj-$(CONFIG_DT_XLP_EVP) := xlp_evp.dtb.o -obj-$(CONFIG_DT_XLP_SVP) += xlp_svp.dtb.o -obj-$(CONFIG_DT_XLP_FVP) += xlp_fvp.dtb.o -obj-$(CONFIG_DT_XLP_GVP) += xlp_gvp.dtb.o diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile index ff8a5539b363..6523d558ff5a 100644 --- a/arch/mips/pci/Makefile +++ b/arch/mips/pci/Makefile @@ -29,7 +29,7 @@ obj-$(CONFIG_LASAT) += pci-lasat.o obj-$(CONFIG_MIPS_COBALT) += fixup-cobalt.o obj-$(CONFIG_LEMOTE_FULOONG2E) += fixup-fuloong2e.o ops-loongson2.o obj-$(CONFIG_LEMOTE_MACH2F) += fixup-lemote2f.o ops-loongson2.o -obj-$(CONFIG_LEMOTE_MACH3A) += fixup-loongson3.o ops-loongson3.o +obj-$(CONFIG_LOONGSON_MACH3X) += fixup-loongson3.o ops-loongson3.o obj-$(CONFIG_MIPS_MALTA) += fixup-malta.o pci-malta.o obj-$(CONFIG_PMC_MSP7120_GW) += fixup-pmcmsp.o ops-pmcmsp.o obj-$(CONFIG_PMC_MSP7120_EVAL) += fixup-pmcmsp.o ops-pmcmsp.o diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c index ab0c5d14c6f7..63bbe07a1ccd 100644 --- a/arch/mips/pci/msi-octeon.c +++ b/arch/mips/pci/msi-octeon.c @@ -73,8 +73,7 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) * wants. Most devices only want 1, which will give * configured_private_bits and request_private_bits equal 0. 
*/ - pci_read_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS, - &control); + pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); /* * If the number of private bits has been configured then use @@ -176,8 +175,7 @@ msi_irq_allocated: /* Update the number of IRQs the device has available to it */ control &= ~PCI_MSI_FLAGS_QSIZE; control |= request_private_bits << 4; - pci_write_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS, - control); + pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); irq_set_msi_desc(irq, desc); write_msi_msg(irq, &msg); diff --git a/arch/mips/pci/ops-tx4927.c b/arch/mips/pci/ops-tx4927.c index 0e046d82e4e3..d54ea93651ac 100644 --- a/arch/mips/pci/ops-tx4927.c +++ b/arch/mips/pci/ops-tx4927.c @@ -199,8 +199,6 @@ static struct { char *tx4927_pcibios_setup(char *str) { - unsigned long val; - if (!strncmp(str, "trdyto=", 7)) { u8 val = 0; if (kstrtou8(str + 7, 0, &val) == 0) diff --git a/arch/mips/pci/pci-alchemy.c b/arch/mips/pci/pci-alchemy.c index 563d1f61d6ee..c19600a03460 100644 --- a/arch/mips/pci/pci-alchemy.c +++ b/arch/mips/pci/pci-alchemy.c @@ -7,6 +7,7 @@ * Support for all devices (greater than 16) added by David Gathright. */ +#include <linux/clk.h> #include <linux/export.h> #include <linux/types.h> #include <linux/pci.h> @@ -364,6 +365,7 @@ static int alchemy_pci_probe(struct platform_device *pdev) void __iomem *virt_io; unsigned long val; struct resource *r; + struct clk *c; int ret; /* need at least PCI IRQ mapping table */ @@ -393,11 +395,24 @@ static int alchemy_pci_probe(struct platform_device *pdev) goto out1; } + c = clk_get(&pdev->dev, "pci_clko"); + if (IS_ERR(c)) { + dev_err(&pdev->dev, "unable to find PCI clock\n"); + ret = PTR_ERR(c); + goto out2; + } + + ret = clk_prepare_enable(c); + if (ret) { + dev_err(&pdev->dev, "cannot enable PCI clock\n"); + goto out6; + } + ctx->regs = ioremap_nocache(r->start, resource_size(r)); if (!ctx->regs) { dev_err(&pdev->dev, "cannot map pci regs\n"); ret = -ENODEV; - goto out2; + goto out5; } /* map parts of the PCI IO area */ @@ -465,12 +480,19 @@ static int alchemy_pci_probe(struct platform_device *pdev) register_syscore_ops(&alchemy_pci_pmops); register_pci_controller(&ctx->alchemy_pci_ctrl); + dev_info(&pdev->dev, "PCI controller at %ld MHz\n", + clk_get_rate(c) / 1000000); + return 0; out4: iounmap(virt_io); out3: iounmap(ctx->regs); +out5: + clk_disable_unprepare(c); +out6: + clk_put(c); out2: release_mem_region(r->start, resource_size(r)); out1: diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c index cb1ef9984069..37fe8e7887e2 100644 --- a/arch/mips/pci/pci-lantiq.c +++ b/arch/mips/pci/pci-lantiq.c @@ -218,7 +218,7 @@ static int ltq_pci_probe(struct platform_device *pdev) res_cfg = platform_get_resource(pdev, IORESOURCE_MEM, 0); res_bridge = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!res_cfg || !res_bridge) { - dev_err(&pdev->dev, "missing memory reources\n"); + dev_err(&pdev->dev, "missing memory resources\n"); return -EINVAL; } diff --git a/arch/mips/pmcs-msp71xx/msp_irq.c b/arch/mips/pmcs-msp71xx/msp_irq.c index 941744aabb51..f914c753de21 100644 --- a/arch/mips/pmcs-msp71xx/msp_irq.c +++ b/arch/mips/pmcs-msp71xx/msp_irq.c @@ -51,7 +51,7 @@ static inline void sec_int_dispatch(void) { do_IRQ(MSP_INT_SEC); } * the range 40-71. 
*/ -asmlinkage void plat_irq_dispatch(struct pt_regs *regs) +asmlinkage void plat_irq_dispatch(void) { u32 pending; diff --git a/arch/mips/power/cpu.c b/arch/mips/power/cpu.c index 521e5963df05..2129e67723ff 100644 --- a/arch/mips/power/cpu.c +++ b/arch/mips/power/cpu.c @@ -7,7 +7,7 @@ * Author: Hu Hongbing <huhb@lemote.com> * Wu Zhangjin <wuzhangjin@gmail.com> */ -#include <asm/suspend.h> +#include <asm/sections.h> #include <asm/fpu.h> #include <asm/dsp.h> diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig index 4a296655f446..77e8a9620e18 100644 --- a/arch/mips/ralink/Kconfig +++ b/arch/mips/ralink/Kconfig @@ -42,18 +42,22 @@ choice config DTB_RT2880_EVAL bool "RT2880 eval kit" depends on SOC_RT288X + select BUILTIN_DTB config DTB_RT305X_EVAL bool "RT305x eval kit" depends on SOC_RT305X + select BUILTIN_DTB config DTB_RT3883_EVAL bool "RT3883 eval kit" depends on SOC_RT3883 + select BUILTIN_DTB config DTB_MT7620A_EVAL bool "MT7620A eval kit" depends on SOC_MT7620 + select BUILTIN_DTB endchoice diff --git a/arch/mips/ralink/Makefile b/arch/mips/ralink/Makefile index 98ae349827be..2c09c8aa0ae2 100644 --- a/arch/mips/ralink/Makefile +++ b/arch/mips/ralink/Makefile @@ -16,5 +16,3 @@ obj-$(CONFIG_SOC_RT3883) += rt3883.o obj-$(CONFIG_SOC_MT7620) += mt7620.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o - -obj-y += dts/ diff --git a/arch/mips/ralink/dts/Makefile b/arch/mips/ralink/dts/Makefile deleted file mode 100644 index 18194fa93e80..000000000000 --- a/arch/mips/ralink/dts/Makefile +++ /dev/null @@ -1,4 +0,0 @@ -obj-$(CONFIG_DTB_RT2880_EVAL) := rt2880_eval.dtb.o -obj-$(CONFIG_DTB_RT305X_EVAL) := rt3052_eval.dtb.o -obj-$(CONFIG_DTB_RT3883_EVAL) := rt3883_eval.dtb.o -obj-$(CONFIG_DTB_MT7620A_EVAL) := mt7620a_eval.dtb.o diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c index 251395210e23..7c4598cb6de8 100644 --- a/arch/mips/ralink/of.c +++ b/arch/mips/ralink/of.c @@ -81,7 +81,7 @@ static int __init plat_of_setup(void) panic("device tree not present"); strlcpy(of_ids[0].compatible, soc_info.compatible, len); - strncpy(of_ids[1].compatible, "palmbus", len); + strlcpy(of_ids[1].compatible, "palmbus", len); if (of_platform_populate(NULL, of_ids, NULL, NULL)) panic("failed to populate DT"); diff --git a/arch/mips/ralink/timer.c b/arch/mips/ralink/timer.c index e38692a44e69..5bb29b3790ff 100644 --- a/arch/mips/ralink/timer.c +++ b/arch/mips/ralink/timer.c @@ -58,7 +58,7 @@ static irqreturn_t rt_timer_irq(int irq, void *_rt) static int rt_timer_request(struct rt_timer *rt) { - int err = request_irq(rt->irq, rt_timer_irq, IRQF_DISABLED, + int err = request_irq(rt->irq, rt_timer_irq, 0, dev_name(rt->dev), rt); if (err) { dev_err(rt->dev, "failed to request irq\n"); diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c index 3af00b2a26ee..e31e8cdcb296 100644 --- a/arch/mips/rb532/devices.c +++ b/arch/mips/rb532/devices.c @@ -223,6 +223,7 @@ static struct platform_device rb532_wdt = { static struct plat_serial8250_port rb532_uart_res[] = { { + .type = PORT_16550A, .membase = (char *)KSEG1ADDR(REGBASE + UART0BASE), .irq = UART0_IRQ, .regshift = 2, @@ -250,28 +251,6 @@ static struct platform_device *rb532_devs[] = { &rb532_wdt }; -static void __init parse_mac_addr(char *macstr) -{ - int i, h, l; - - for (i = 0; i < 6; i++) { - if (i != 5 && *(macstr + 2) != ':') - return; - - h = hex_to_bin(*macstr++); - if (h == -1) - return; - - l = hex_to_bin(*macstr++); - if (l == -1) - return; - - macstr++; - korina_dev0_data.mac[i] = (h << 4) + l; - } -} - - /* NAND definitions */ 
#define NAND_CHIP_DELAY 25 @@ -333,7 +312,10 @@ static int __init plat_setup_devices(void) static int __init setup_kmac(char *s) { printk(KERN_INFO "korina mac = %s\n", s); - parse_mac_addr(s); + if (!mac_pton(s, korina_dev0_data.mac)) { + printk(KERN_ERR "Invalid mac\n"); + return -EINVAL; + } return 0; } diff --git a/arch/mips/sgi-ip22/ip22-gio.c b/arch/mips/sgi-ip22/ip22-gio.c index 8e52446286ca..8f1b86d4da84 100644 --- a/arch/mips/sgi-ip22/ip22-gio.c +++ b/arch/mips/sgi-ip22/ip22-gio.c @@ -27,8 +27,14 @@ static struct { { .name = "SGI GR2/GR3", .id = 0x7f }, }; +static void gio_bus_release(struct device *dev) +{ + kfree(dev); +} + static struct device gio_bus = { .init_name = "gio", + .release = &gio_bus_release, }; /** @@ -413,8 +419,10 @@ int __init ip22_gio_init(void) int ret; ret = device_register(&gio_bus); - if (ret) + if (ret) { + put_device(&gio_bus); return ret; + } ret = bus_register(&gio_bus_type); if (!ret) { diff --git a/arch/mips/txx9/generic/7segled.c b/arch/mips/txx9/generic/7segled.c index 4642f56e70e5..566c58bd44d0 100644 --- a/arch/mips/txx9/generic/7segled.c +++ b/arch/mips/txx9/generic/7segled.c @@ -83,6 +83,11 @@ static struct bus_type tx_7segled_subsys = { .dev_name = "7segled", }; +static void tx_7segled_release(struct device *dev) +{ + kfree(dev); +} + static int __init tx_7segled_init_sysfs(void) { int error, i; @@ -103,11 +108,14 @@ static int __init tx_7segled_init_sysfs(void) } dev->id = i; dev->bus = &tx_7segled_subsys; + dev->release = &tx_7segled_release; error = device_register(dev); - if (!error) { - device_create_file(dev, &dev_attr_ascii); - device_create_file(dev, &dev_attr_raw); + if (error) { + put_device(dev); + return error; } + device_create_file(dev, &dev_attr_ascii); + device_create_file(dev, &dev_attr_raw); } return error; } diff --git a/arch/mips/txx9/generic/pci.c b/arch/mips/txx9/generic/pci.c index 28713274e0cc..a77698ff2b6f 100644 --- a/arch/mips/txx9/generic/pci.c +++ b/arch/mips/txx9/generic/pci.c @@ -268,7 +268,7 @@ static int txx9_i8259_irq_setup(int irq) return err; } -static void quirk_slc90e66_bridge(struct pci_dev *dev) +static void __init_refok quirk_slc90e66_bridge(struct pci_dev *dev) { int irq; /* PCI/ISA Bridge interrupt */ u8 reg_64; @@ -331,7 +331,7 @@ static void quirk_slc90e66_ide(struct pci_dev *dev) * !!! DO NOT REMOVE THIS COMMENT IT IS REQUIRED BY SMSC !!! 
*/ dat |= 0x01; - pci_write_config_byte(dev, regs[i], dat); + pci_write_config_byte(dev, 0x5c, dat); pci_read_config_byte(dev, 0x5c, &dat); printk(KERN_CONT " REG5C %02x", dat); printk(KERN_CONT "\n"); diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c index dd2cf25b5ae5..2791b8641df6 100644 --- a/arch/mips/txx9/generic/setup.c +++ b/arch/mips/txx9/generic/setup.c @@ -789,11 +789,11 @@ void __init txx9_iocled_init(unsigned long baseaddr, if (platform_device_add(pdev)) goto out_pdev; return; + out_pdev: platform_device_put(pdev); out_gpio: - if (gpiochip_remove(&iocled->chip)) - return; + gpiochip_remove(&iocled->chip); out_unmap: iounmap(iocled->mmioaddr); out_free: @@ -937,6 +937,14 @@ static ssize_t txx9_sram_write(struct file *filp, struct kobject *kobj, return size; } +static void txx9_device_release(struct device *dev) +{ + struct txx9_sramc_dev *tdev; + + tdev = container_of(dev, struct txx9_sramc_dev, dev); + kfree(tdev); +} + void __init txx9_sramc_init(struct resource *r) { struct txx9_sramc_dev *dev; @@ -951,8 +959,11 @@ void __init txx9_sramc_init(struct resource *r) return; size = resource_size(r); dev->base = ioremap(r->start, size); - if (!dev->base) - goto exit; + if (!dev->base) { + kfree(dev); + return; + } + dev->dev.release = &txx9_device_release; dev->dev.bus = &txx9_sramc_subsys; sysfs_bin_attr_init(&dev->bindata_attr); dev->bindata_attr.attr.name = "bindata"; @@ -963,17 +974,15 @@ void __init txx9_sramc_init(struct resource *r) dev->bindata_attr.private = dev; err = device_register(&dev->dev); if (err) - goto exit; + goto exit_put; err = sysfs_create_bin_file(&dev->dev.kobj, &dev->bindata_attr); if (err) { device_unregister(&dev->dev); - goto exit; - } - return; -exit: - if (dev) { - if (dev->base) - iounmap(dev->base); + iounmap(dev->base); kfree(dev); } + return; +exit_put: + put_device(&dev->dev); + return; } |
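
A minimal standalone sketch (plain userspace C, not part of the patch above) of the per-core clock-gating bit layout that the new loongson3_enable_clock()/loongson3_disable_clock() helpers rely on: Loongson-3A gates a core's clock through CHIPCFG bit (12 + core), while Loongson-3B uses FREQCTRL bit (core * 4 + 3). The helper names and the printing driver below are illustrative only; they just show the masks those expressions produce for the 4-cores-per-package layout used by the SMP code.

#include <stdio.h>
#include <stdint.h>

/* Loongson-3A: the core clock enable bit sits at CHIPCFG bit (12 + core). */
static uint64_t loongson3a_clock_bit(unsigned int core)
{
	return 1ULL << (12 + core);
}

/* Loongson-3B: the core clock enable bit sits at FREQCTRL bit (core * 4 + 3). */
static uint64_t loongson3b_clock_bit(unsigned int core)
{
	return 1ULL << (core * 4 + 3);
}

int main(void)
{
	unsigned int core;

	/* 4 cores per package (node), as in the SMP group/offset tables above. */
	for (core = 0; core < 4; core++)
		printf("core %u: 3A mask 0x%llx, 3B mask 0x%llx\n", core,
		       (unsigned long long)loongson3a_clock_bit(core),
		       (unsigned long long)loongson3b_clock_bit(core));
	return 0;
}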