author    | Linus Torvalds | 2021-04-29 11:28:08 -0700
committer | Linus Torvalds | 2021-04-29 11:28:08 -0700
commit    | 77d51337d650086643e1e96b8a7e1e6cbf0b09ff (patch)
tree      | 9aae3449eaa5fd3a280b0e95eaab7ad172536b2c
parent    | 3644286f6cbcea86f6fa4d308e7ac06bf2a3715a (diff)
parent    | 7e9be673cb1b0be0f4279a960c2ecb28a147c327 (diff)
Merge tag 'mips_5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux
Pull MIPS updates from Thomas Bogendoerfer:
- removed get_fs/set_fs
- removed broken/unmaintained MIPS KVM trap and emulate support
- added support for Loongson-2K1000
- fixes and cleanups
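
The get_fs()/set_fs() removal in the first bullet retires the old trick of widening the user-access limit so kernel buffers could be fed to uaccess-based helpers. A minimal sketch of that legacy idiom, for orientation only (not taken from this diff; the function name legacy_kernel_read() and the choice of vfs_read() as the callee are illustrative):

	static ssize_t legacy_kernel_read(struct file *file, void *kbuf,
					  size_t len, loff_t *pos)
	{
		mm_segment_t old_fs = get_fs();	/* save current address-space limit */
		ssize_t ret;

		set_fs(KERNEL_DS);		/* let uaccess routines accept kernel pointers */
		ret = vfs_read(file, (char __user *)kbuf, len, pos);
		set_fs(old_fs);			/* always restore the saved limit */
		return ret;
	}

With set_fs() gone from MIPS, callers like this are expected to use dedicated in-kernel interfaces (e.g. kernel_read()) instead.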
* tag 'mips_5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux: (107 commits)
MIPS: BCM63XX: Use BUG_ON instead of condition followed by BUG.
MIPS: select ARCH_KEEP_MEMBLOCK unconditionally
mips: Do not include hi and lo in clobber list for R6
MIPS:DTS:Correct the license for Loongson-2K
MIPS:DTS:Fix label name and interrupt number of ohci for Loongson-2K
MIPS: Avoid handcoded DIVU in `__div64_32' altogether
lib/math/test_div64: Correct the spelling of "dividend"
lib/math/test_div64: Fix error message formatting
mips/bootinfo:correct some comments of fw_arg
MIPS: Avoid DIVU in `__div64_32' is result would be zero
MIPS: Reinstate platform `__div64_32' handler
div64: Correct inline documentation for `do_div'
lib/math: Add a `do_div' test module
MIPS: Makefile: Replace -pg with CC_FLAGS_FTRACE
MIPS: pci-legacy: revert "use generic pci_enable_resources"
MIPS: Loongson64: Add kexec/kdump support
MIPS: pci-legacy: use generic pci_enable_resources
MIPS: pci-legacy: remove busn_resource field
MIPS: pci-legacy: remove redundant info messages
MIPS: pci-legacy: stop using of_pci_range_to_resource
...
152 files changed, 2682 insertions, 6022 deletions
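Several of the cleanups listed above are mechanical. For instance, the BCM63XX GPIO change visible in the diff below folds an open-coded check into BUG_ON(), which behaves identically but keeps the condition and the assertion together:

	/* before */
	if (gpio >= chip->ngpio)
		BUG();

	/* after */
	BUG_ON(gpio >= chip->ngpio);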
diff --git a/Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.yaml b/Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.yaml index 6960036975fa..c45c92a3d41f 100644 --- a/Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.yaml +++ b/Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.yaml @@ -47,7 +47,6 @@ examples: spi-max-frequency = <3125000>; spi-3wire; - spi-cs-high; reset-gpios = <&gpe 2 GPIO_ACTIVE_LOW>; diff --git a/Documentation/devicetree/bindings/interrupt-controller/loongson,liointc.yaml b/Documentation/devicetree/bindings/interrupt-controller/loongson,liointc.yaml index f38e0113f360..067165c4b836 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/loongson,liointc.yaml +++ b/Documentation/devicetree/bindings/interrupt-controller/loongson,liointc.yaml @@ -10,9 +10,9 @@ maintainers: - Jiaxun Yang <jiaxun.yang@flygoat.com> description: | - This interrupt controller is found in the Loongson-3 family of chips as the primary - package interrupt controller which can route local I/O interrupt to interrupt lines - of cores. + This interrupt controller is found in the Loongson-3 family of chips and + Loongson-2K1000 chip, as the primary package interrupt controller which + can route local I/O interrupt to interrupt lines of cores. allOf: - $ref: /schemas/interrupt-controller.yaml# @@ -22,9 +22,17 @@ properties: oneOf: - const: loongson,liointc-1.0 - const: loongson,liointc-1.0a + - const: loongson,liointc-2.0 reg: - maxItems: 1 + minItems: 1 + maxItems: 3 + + reg-names: + items: + - const: main + - const: isr0 + - const: isr1 interrupt-controller: true @@ -69,6 +77,26 @@ required: unevaluatedProperties: false +if: + properties: + compatible: + contains: + enum: + - loongson,liointc-2.0 + +then: + properties: + reg: + minItems: 3 + + required: + - reg-names + +else: + properties: + reg: + maxItems: 1 + examples: - | iointc: interrupt-controller@3ff01400 { diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index d89efba3d8a4..702648f60e41 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -4,13 +4,15 @@ config MIPS default y select ARCH_32BIT_OFF_T if !64BIT select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT + select ARCH_HAS_DEBUG_VIRTUAL if !64BIT select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_KCOV + select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE if !EVA select ARCH_HAS_PTE_SPECIAL if !(32BIT && CPU_HAS_RIXI) select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UBSAN_SANITIZE_ALL select ARCH_HAS_GCOV_PROFILE_ALL - select ARCH_KEEP_MEMBLOCK if DEBUG_KERNEL + select ARCH_KEEP_MEMBLOCK select ARCH_SUPPORTS_UPROBES select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF if 64BIT @@ -26,6 +28,7 @@ config MIPS select GENERIC_ATOMIC64 if !64BIT select GENERIC_CMOS_UPDATE select GENERIC_CPU_AUTOPROBE + select GENERIC_FIND_FIRST_BIT select GENERIC_GETTIMEOFDAY select GENERIC_IOMAP select GENERIC_IRQ_PROBE @@ -91,7 +94,6 @@ config MIPS select PERF_USE_VMALLOC select PCI_MSI_ARCH_FALLBACKS if PCI_MSI select RTC_LIB - select SET_FS select SYSCTL_EXCEPTION_TRACE select VIRT_TO_BUS select ARCH_HAS_ELFCORE_COMPAT @@ -712,6 +714,7 @@ config SGI_IP27 select ARC_CMDLINE_ONLY select BOOT_ELF64 select DEFAULT_SGI_PARTITION + select FORCE_PCI select SYS_HAS_EARLY_PRINTK select HAVE_PCI select IRQ_MIPS_CPU @@ -774,6 +777,7 @@ config SGI_IP30 select BOOT_ELF64 select CEVT_R4K select CSRC_R4K + select FORCE_PCI select SYNC_R4K if SMP select ZONE_DMA32 select HAVE_PCI @@ 
-998,6 +1002,7 @@ config CAVIUM_OCTEON_SOC select NR_CPUS_DEFAULT_64 select MIPS_NR_CPU_NR_MAP_1024 select BUILTIN_DTB + select MTD select MTD_COMPLEX_MAPPINGS select SWIOTLB select SYS_SUPPORTS_RELOCATABLE @@ -2118,7 +2123,7 @@ config CPU_MIPS32 config CPU_MIPS64 bool default y if CPU_MIPS64_R1 || CPU_MIPS64_R2 || CPU_MIPS64_R5 || \ - CPU_MIPS64_R6 + CPU_MIPS64_R6 || CPU_LOONGSON64 || CPU_CAVIUM_OCTEON # # These indicate the revision of the architecture @@ -2185,7 +2190,8 @@ config CPU_SUPPORTS_HUGEPAGES depends on !(32BIT && (ARCH_PHYS_ADDR_T_64BIT || EVA)) config MIPS_PGD_C0_CONTEXT bool - default y if 64BIT && (CPU_MIPSR2 || CPU_MIPSR6) && !CPU_XLP + depends on 64BIT + default y if (CPU_MIPSR2 || CPU_MIPSR6) && !CPU_XLP # # Set to y for ptrace access to watch registers. @@ -2219,23 +2225,6 @@ config 64BIT endchoice -config KVM_GUEST - bool "KVM Guest Kernel" - depends on CPU_MIPS32_R2 - depends on !64BIT && BROKEN_ON_SMP - help - Select this option if building a guest kernel for KVM (Trap & Emulate) - mode. - -config KVM_GUEST_TIMER_FREQ - int "Count/Compare Timer Frequency (MHz)" - depends on KVM_GUEST - default 100 - help - Set this to non-zero if building a guest kernel for KVM to skip RTC - emulation when determining guest CPU Frequency. Instead, the guest's - timer frequency is specified directly. - config MIPS_VA_BITS_48 bool "48 bits virtual memory" depends on 64BIT diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug index 7a8d94cdd493..f5832a49a881 100644 --- a/arch/mips/Kconfig.debug +++ b/arch/mips/Kconfig.debug @@ -77,6 +77,7 @@ config CMDLINE_OVERRIDE config SB1XXX_CORELIS bool "Corelis Debugger" depends on SIBYTE_SB1xxx_SOC + select DEBUG_KERNEL if !COMPILE_TEST select DEBUG_INFO if !COMPILE_TEST help Select compile flags that produce code that can be processed by the diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c index f0c830337104..c01be8c45271 100644 --- a/arch/mips/alchemy/common/clock.c +++ b/arch/mips/alchemy/common/clock.c @@ -111,7 +111,7 @@ static struct clk_aliastable { /* access locks to SYS_FREQCTRL0/1 and SYS_CLKSRC registers */ static spinlock_t alchemy_clk_fg0_lock; static spinlock_t alchemy_clk_fg1_lock; -static spinlock_t alchemy_clk_csrc_lock; +static DEFINE_SPINLOCK(alchemy_clk_csrc_lock); /* CPU Core clock *****************************************************/ @@ -996,7 +996,6 @@ static int __init alchemy_clk_setup_imux(int ctype) if (!a) return -ENOMEM; - spin_lock_init(&alchemy_clk_csrc_lock); ret = 0; for (i = 0; i < 6; i++) { diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c index 164115944a7f..5a3e325275d0 100644 --- a/arch/mips/bcm63xx/clk.c +++ b/arch/mips/bcm63xx/clk.c @@ -76,7 +76,7 @@ static struct clk clk_enet_misc = { }; /* - * Ethernet MAC clocks: only revelant on 6358, silently enable misc + * Ethernet MAC clocks: only relevant on 6358, silently enable misc * clocks */ static void enetx_set(struct clk *clk, int enable) diff --git a/arch/mips/bcm63xx/gpio.c b/arch/mips/bcm63xx/gpio.c index 16f353ac3441..5c4a233db55f 100644 --- a/arch/mips/bcm63xx/gpio.c +++ b/arch/mips/bcm63xx/gpio.c @@ -43,8 +43,7 @@ static void bcm63xx_gpio_set(struct gpio_chip *chip, u32 *v; unsigned long flags; - if (gpio >= chip->ngpio) - BUG(); + BUG_ON(gpio >= chip->ngpio); if (gpio < 32) { reg = gpio_out_low_reg; @@ -70,8 +69,7 @@ static int bcm63xx_gpio_get(struct gpio_chip *chip, unsigned gpio) u32 reg; u32 mask; - if (gpio >= chip->ngpio) - BUG(); + BUG_ON(gpio >= chip->ngpio); if (gpio < 32) { reg = 
gpio_out_low_reg; @@ -92,8 +90,7 @@ static int bcm63xx_gpio_set_direction(struct gpio_chip *chip, u32 tmp; unsigned long flags; - if (gpio >= chip->ngpio) - BUG(); + BUG_ON(gpio >= chip->ngpio); if (gpio < 32) { reg = GPIO_CTL_LO_REG; diff --git a/arch/mips/bmips/dma.c b/arch/mips/bmips/dma.c index 49061b870680..915ce4b189c1 100644 --- a/arch/mips/bmips/dma.c +++ b/arch/mips/bmips/dma.c @@ -10,7 +10,7 @@ #include <linux/device.h> #include <linux/dma-direction.h> -#include <linux/dma-mapping.h> +#include <linux/dma-direct.h> #include <linux/init.h> #include <linux/io.h> #include <linux/of.h> diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile index f93f72bcba97..e4b7839293e1 100644 --- a/arch/mips/boot/compressed/Makefile +++ b/arch/mips/boot/compressed/Makefile @@ -18,7 +18,7 @@ include $(srctree)/arch/mips/Kbuild.platforms BOOT_HEAP_SIZE := 0x400000 # Disable Function Tracer -KBUILD_CFLAGS := $(filter-out -pg, $(KBUILD_CFLAGS)) +KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE), $(KBUILD_CFLAGS)) KBUILD_CFLAGS := $(filter-out -fstack-protector, $(KBUILD_CFLAGS)) diff --git a/arch/mips/boot/dts/brcm/bcm3368-netgear-cvg834g.dts b/arch/mips/boot/dts/brcm/bcm3368-netgear-cvg834g.dts index ed6023a91763..d702a843c74a 100644 --- a/arch/mips/boot/dts/brcm/bcm3368-netgear-cvg834g.dts +++ b/arch/mips/boot/dts/brcm/bcm3368-netgear-cvg834g.dts @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm3368.dtsi" +#include "bcm3368.dtsi" / { compatible = "netgear,cvg834g", "brcm,bcm3368"; diff --git a/arch/mips/boot/dts/brcm/bcm3368.dtsi b/arch/mips/boot/dts/brcm/bcm3368.dtsi index 69cbef472377..883ca8bed8e7 100644 --- a/arch/mips/boot/dts/brcm/bcm3368.dtsi +++ b/arch/mips/boot/dts/brcm/bcm3368.dtsi @@ -1,4 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 + +#include "dt-bindings/clock/bcm3368-clock.h" + / { #address-cells = <1>; #size-cells = <1>; @@ -59,7 +62,7 @@ periph_cntl: syscon@fff8c008 { compatible = "syscon"; - reg = <0xfff8c000 0x4>; + reg = <0xfff8c008 0x4>; native-endian; }; diff --git a/arch/mips/boot/dts/brcm/bcm63268-comtrend-vr-3032u.dts b/arch/mips/boot/dts/brcm/bcm63268-comtrend-vr-3032u.dts index 8d010b919de2..b511bc7125d5 100644 --- a/arch/mips/boot/dts/brcm/bcm63268-comtrend-vr-3032u.dts +++ b/arch/mips/boot/dts/brcm/bcm63268-comtrend-vr-3032u.dts @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm63268.dtsi" +#include "bcm63268.dtsi" / { compatible = "comtrend,vr-3032u", "brcm,bcm63268"; diff --git a/arch/mips/boot/dts/brcm/bcm63268.dtsi b/arch/mips/boot/dts/brcm/bcm63268.dtsi index e0021ff9f144..c3ce49ec675f 100644 --- a/arch/mips/boot/dts/brcm/bcm63268.dtsi +++ b/arch/mips/boot/dts/brcm/bcm63268.dtsi @@ -1,4 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 + +#include "dt-bindings/clock/bcm63268-clock.h" +#include "dt-bindings/reset/bcm63268-reset.h" +#include "dt-bindings/soc/bcm63268-pm.h" + / { #address-cells = <1>; #size-cells = <1>; @@ -24,16 +29,29 @@ }; clocks { - periph_clk: periph-clk { + periph_osc: periph-osc { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <50000000>; + clock-output-names = "periph"; + }; + + hsspi_osc: hsspi-osc { + compatible = "fixed-clock"; + + #clock-cells = <0>; + + clock-frequency = <400000000>; + clock-output-names = "hsspi_osc"; }; }; aliases { + nflash = &nflash; serial0 = &uart0; serial1 = &uart1; + spi0 = &lsspi; + spi1 = &hsspi; }; cpu_intc: interrupt-controller { @@ -51,23 +69,22 @@ compatible = "simple-bus"; ranges; - clkctl: 
clock-controller@10000004 { + periph_clk: clock-controller@10000004 { compatible = "brcm,bcm63268-clocks"; reg = <0x10000004 0x4>; #clock-cells = <1>; }; - periph_cntl: syscon@10000008 { + pll_cntl: syscon@10000008 { compatible = "syscon"; - reg = <0x10000000 0xc>; + reg = <0x10000008 0x4>; native-endian; - }; - reboot: syscon-reboot@10000008 { - compatible = "syscon-reboot"; - regmap = <&periph_cntl>; - offset = <0x0>; - mask = <0x1>; + reboot { + compatible = "syscon-reboot"; + offset = <0x0>; + mask = <0x1>; + }; }; periph_rst: reset-controller@10000010 { @@ -88,6 +105,16 @@ interrupts = <2>, <3>; }; + wdt: watchdog@1000009c { + compatible = "brcm,bcm7038-wdt"; + reg = <0x1000009c 0xc>; + + clocks = <&periph_osc>; + clock-names = "refclk"; + + timeout-sec = <30>; + }; + uart0: serial@10000180 { compatible = "brcm,bcm6345-uart"; reg = <0x10000180 0x18>; @@ -95,12 +122,34 @@ interrupt-parent = <&periph_intc>; interrupts = <5>; - clocks = <&periph_clk>; + clocks = <&periph_osc>; clock-names = "refclk"; status = "disabled"; }; + nflash: nand@10000200 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "brcm,nand-bcm6368", + "brcm,brcmnand-v4.0", + "brcm,brcmnand"; + reg = <0x10000200 0x180>, + <0x10000600 0x200>, + <0x100000b0 0x10>; + reg-names = "nand", + "nand-cache", + "nand-int-base"; + + interrupt-parent = <&periph_intc>; + interrupts = <50>; + + clocks = <&periph_clk BCM63268_CLK_NAND>; + clock-names = "nand"; + + status = "disabled"; + }; + uart1: serial@100001a0 { compatible = "brcm,bcm6345-uart"; reg = <0x100001a0 0x18>; @@ -108,17 +157,44 @@ interrupt-parent = <&periph_intc>; interrupts = <34>; - clocks = <&periph_clk>; + clocks = <&periph_osc>; clock-names = "refclk"; status = "disabled"; }; - leds0: led-controller@10001900 { + lsspi: spi@10000800 { #address-cells = <1>; #size-cells = <0>; - compatible = "brcm,bcm6328-leds"; - reg = <0x10001900 0x24>; + compatible = "brcm,bcm6358-spi"; + reg = <0x10000800 0x70c>; + + interrupt-parent = <&periph_intc>; + interrupts = <80>; + + clocks = <&periph_clk BCM63268_CLK_SPI>; + clock-names = "spi"; + + resets = <&periph_rst BCM63268_RST_SPI>; + + status = "disabled"; + }; + + hsspi: spi@10001000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "brcm,bcm6328-hsspi"; + reg = <0x10001000 0x600>; + + interrupt-parent = <&periph_intc>; + interrupts = <6>; + + clocks = <&periph_clk BCM63268_CLK_HSSPI>, + <&hsspi_osc>; + clock-names = "hsspi", + "pll"; + + resets = <&periph_rst BCM63268_RST_SPI>; status = "disabled"; }; @@ -129,6 +205,15 @@ #power-domain-cells = <1>; }; + leds0: led-controller@10001900 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "brcm,bcm6328-leds"; + reg = <0x10001900 0x24>; + + status = "disabled"; + }; + ehci: usb@10002500 { compatible = "brcm,bcm63268-ehci", "generic-ehci"; reg = <0x10002500 0x100>; @@ -137,6 +222,9 @@ interrupt-parent = <&periph_intc>; interrupts = <10>; + phys = <&usbh 0>; + phy-names = "usb"; + status = "disabled"; }; @@ -149,6 +237,25 @@ interrupt-parent = <&periph_intc>; interrupts = <9>; + phys = <&usbh 0>; + phy-names = "usb"; + + status = "disabled"; + }; + + usbh: usb-phy@10002700 { + compatible = "brcm,bcm63268-usbh-phy"; + reg = <0x10002700 0x38>; + #phy-cells = <1>; + + clocks = <&periph_clk BCM63268_CLK_USBH>; + clock-names = "usbh"; + + power-domains = <&periph_pwr BCM63268_POWER_DOMAIN_USBH>; + + resets = <&periph_rst BCM63268_RST_USBH>; + reset-names = "usbh"; + status = "disabled"; }; }; diff --git a/arch/mips/boot/dts/brcm/bcm6328.dtsi 
b/arch/mips/boot/dts/brcm/bcm6328.dtsi index 9dc558763c46..634618d4377e 100644 --- a/arch/mips/boot/dts/brcm/bcm6328.dtsi +++ b/arch/mips/boot/dts/brcm/bcm6328.dtsi @@ -1,4 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 + +#include "dt-bindings/clock/bcm6328-clock.h" +#include "dt-bindings/reset/bcm6328-reset.h" +#include "dt-bindings/soc/bcm6328-pm.h" + / { #address-cells = <1>; #size-cells = <1>; @@ -24,16 +29,26 @@ }; clocks { - periph_clk: periph-clk { + periph_osc: periph-osc { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <50000000>; + clock-output-names = "periph"; + }; + + hsspi_osc: hsspi-osc { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <133333333>; + clock-output-names = "hsspi_osc"; }; }; aliases { + nflash = &nflash; serial0 = &uart0; serial1 = &uart1; + spi1 = &hsspi; }; cpu_intc: interrupt-controller { @@ -51,7 +66,7 @@ compatible = "simple-bus"; ranges; - clkctl: clock-controller@10000004 { + periph_clk: clock-controller@10000004 { compatible = "brcm,bcm6328-clocks"; reg = <0x10000004 0x4>; #clock-cells = <1>; @@ -75,37 +90,71 @@ interrupts = <2>, <3>; }; + wdt: watchdog@1000005c { + compatible = "brcm,bcm7038-wdt"; + reg = <0x1000005c 0xc>; + + clocks = <&periph_osc>; + clock-names = "refclk"; + + timeout-sec = <30>; + }; + + soft_reset: syscon@10000068 { + compatible = "syscon"; + reg = <0x10000068 0x4>; + native-endian; + + reboot { + compatible = "syscon-reboot"; + offset = <0x0>; + mask = <0x1>; + }; + }; + uart0: serial@10000100 { compatible = "brcm,bcm6345-uart"; reg = <0x10000100 0x18>; + interrupt-parent = <&periph_intc>; interrupts = <28>; - clocks = <&periph_clk>; + + clocks = <&periph_osc>; clock-names = "refclk"; + status = "disabled"; }; uart1: serial@10000120 { compatible = "brcm,bcm6345-uart"; reg = <0x10000120 0x18>; + interrupt-parent = <&periph_intc>; interrupts = <39>; - clocks = <&periph_clk>; + + clocks = <&periph_osc>; clock-names = "refclk"; + status = "disabled"; }; - timer: syscon@10000040 { - compatible = "syscon"; - reg = <0x10000040 0x2c>; - native-endian; - }; + nflash: nand@10000200 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "brcm,nand-bcm6368", + "brcm,brcmnand-v2.2", + "brcm,brcmnand"; + reg = <0x10000200 0x180>, + <0x10000400 0x200>, + <0x10000070 0x10>; + reg-names = "nand", + "nand-cache", + "nand-int-base"; - reboot: syscon-reboot@10000068 { - compatible = "syscon-reboot"; - regmap = <&timer>; - offset = <0x28>; - mask = <0x1>; + interrupt-parent = <&periph_intc>; + interrupts = <0>; + + status = "disabled"; }; leds0: led-controller@10000800 { @@ -113,6 +162,27 @@ #size-cells = <0>; compatible = "brcm,bcm6328-leds"; reg = <0x10000800 0x24>; + + status = "disabled"; + }; + + hsspi: spi@10001000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "brcm,bcm6328-hsspi"; + reg = <0x10001000 0x600>; + + interrupt-parent = <&periph_intc>; + interrupts = <29>; + + clocks = <&periph_clk BCM6328_CLK_HSSPI>, + <&hsspi_osc>; + clock-names = "hsspi", + "pll"; + + resets = <&periph_rst BCM6328_RST_SPI>; + reset-names = "hsspi"; + status = "disabled"; }; @@ -126,8 +196,13 @@ compatible = "brcm,bcm6328-ehci", "generic-ehci"; reg = <0x10002500 0x100>; big-endian; + interrupt-parent = <&periph_intc>; interrupts = <42>; + + phys = <&usbh 0>; + phy-names = "usb"; + status = "disabled"; }; @@ -136,8 +211,29 @@ reg = <0x10002600 0x100>; big-endian; no-big-frame-no; + interrupt-parent = <&periph_intc>; interrupts = <41>; + + phys = <&usbh 0>; + phy-names = "usb"; + + status = "disabled"; 
+ }; + + usbh: usb-phy@10002700 { + compatible = "brcm,bcm6328-usbh-phy"; + reg = <0x10002700 0x38>; + #phy-cells = <1>; + + clocks = <&periph_clk BCM6328_CLK_USBH>; + clock-names = "usbh"; + + power-domains = <&periph_pwr BCM6328_POWER_DOMAIN_USBH>; + + resets = <&periph_rst BCM6328_RST_USBH>; + reset-names = "usbh"; + status = "disabled"; }; }; diff --git a/arch/mips/boot/dts/brcm/bcm6358-neufbox4-sercomm.dts b/arch/mips/boot/dts/brcm/bcm6358-neufbox4-sercomm.dts index 53e57cc29291..c646690ee3df 100644 --- a/arch/mips/boot/dts/brcm/bcm6358-neufbox4-sercomm.dts +++ b/arch/mips/boot/dts/brcm/bcm6358-neufbox4-sercomm.dts @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm6358.dtsi" +#include "bcm6358.dtsi" / { compatible = "sfr,nb4-ser", "brcm,bcm6358"; diff --git a/arch/mips/boot/dts/brcm/bcm6358.dtsi b/arch/mips/boot/dts/brcm/bcm6358.dtsi index 9d93e7f5e6fc..777c4379ed03 100644 --- a/arch/mips/boot/dts/brcm/bcm6358.dtsi +++ b/arch/mips/boot/dts/brcm/bcm6358.dtsi @@ -1,4 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 + +#include "dt-bindings/clock/bcm6358-clock.h" +#include "dt-bindings/reset/bcm6358-reset.h" + / { #address-cells = <1>; #size-cells = <1>; @@ -24,16 +28,19 @@ }; clocks { - periph_clk: periph-clk { + periph_osc: periph-osc { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <50000000>; + clock-output-names = "periph"; }; }; aliases { + pflash = &pflash; serial0 = &uart0; serial1 = &uart1; + spi0 = &lsspi; }; cpu_intc: interrupt-controller { @@ -51,23 +58,22 @@ compatible = "simple-bus"; ranges; - clkctl: clock-controller@fffe0004 { + periph_clk: clock-controller@fffe0004 { compatible = "brcm,bcm6358-clocks"; reg = <0xfffe0004 0x4>; #clock-cells = <1>; }; - periph_cntl: syscon@fffe0008 { + pll_cntl: syscon@fffe0008 { compatible = "syscon"; - reg = <0xfffe0000 0x4>; + reg = <0xfffe0008 0x4>; native-endian; - }; - reboot: syscon-reboot@fffe0008 { - compatible = "syscon-reboot"; - regmap = <&periph_cntl>; - offset = <0x0>; - mask = <0x1>; + reboot { + compatible = "syscon-reboot"; + offset = <0x0>; + mask = <0x1>; + }; }; periph_intc: interrupt-controller@fffe000c { @@ -88,6 +94,16 @@ #reset-cells = <1>; }; + wdt: watchdog@fffe005c { + compatible = "brcm,bcm7038-wdt"; + reg = <0xfffe005c 0xc>; + + clocks = <&periph_osc>; + clock-names = "refclk"; + + timeout-sec = <30>; + }; + leds0: led-controller@fffe00d0 { #address-cells = <1>; #size-cells = <0>; @@ -104,7 +120,7 @@ interrupt-parent = <&periph_intc>; interrupts = <2>; - clocks = <&periph_clk>; + clocks = <&periph_osc>; clock-names = "refclk"; status = "disabled"; @@ -117,18 +133,41 @@ interrupt-parent = <&periph_intc>; interrupts = <3>; - clocks = <&periph_clk>; + clocks = <&periph_osc>; clock-names = "refclk"; status = "disabled"; }; + lsspi: spi@fffe0800 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "brcm,bcm6358-spi"; + reg = <0xfffe0800 0x70c>; + + interrupt-parent = <&periph_intc>; + interrupts = <1>; + + clocks = <&periph_clk BCM6358_CLK_SPI>; + clock-names = "spi"; + + resets = <&periph_rst BCM6358_RST_SPI>; + reset-names = "spi"; + + status = "disabled"; + }; + ehci: usb@fffe1300 { compatible = "brcm,bcm6358-ehci", "generic-ehci"; reg = <0xfffe1300 0x100>; big-endian; + interrupt-parent = <&periph_intc>; interrupts = <10>; + + phys = <&usbh 0>; + phy-names = "usb"; + status = "disabled"; }; @@ -137,9 +176,35 @@ reg = <0xfffe1400 0x100>; big-endian; no-big-frame-no; + interrupt-parent = <&periph_intc>; interrupts = <5>; + + phys = <&usbh 0>; + phy-names = "usb"; + 
status = "disabled"; }; + + usbh: usb-phy@fffe1500 { + compatible = "brcm,bcm6358-usbh-phy"; + reg = <0xfffe1500 0x38>; + #phy-cells = <1>; + + resets = <&periph_rst BCM6358_RST_USBH>; + reset-names = "usbh"; + + status = "disabled"; + }; + }; + + pflash: nor@1e000000 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "cfi-flash"; + reg = <0x1e000000 0x2000000>; + bank-width = <2>; + + status = "disabled"; }; }; diff --git a/arch/mips/boot/dts/brcm/bcm6362-neufbox6-sercomm.dts b/arch/mips/boot/dts/brcm/bcm6362-neufbox6-sercomm.dts index 3e83bee5b91e..f83d95ca0514 100644 --- a/arch/mips/boot/dts/brcm/bcm6362-neufbox6-sercomm.dts +++ b/arch/mips/boot/dts/brcm/bcm6362-neufbox6-sercomm.dts @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm6362.dtsi" +#include "bcm6362.dtsi" / { compatible = "sfr,nb6-ser", "brcm,bcm6362"; diff --git a/arch/mips/boot/dts/brcm/bcm6362.dtsi b/arch/mips/boot/dts/brcm/bcm6362.dtsi index eb10341b75ba..d74021925c53 100644 --- a/arch/mips/boot/dts/brcm/bcm6362.dtsi +++ b/arch/mips/boot/dts/brcm/bcm6362.dtsi @@ -1,4 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 + +#include "dt-bindings/clock/bcm6362-clock.h" +#include "dt-bindings/reset/bcm6362-reset.h" +#include "dt-bindings/soc/bcm6362-pm.h" + / { #address-cells = <1>; #size-cells = <1>; @@ -24,16 +29,29 @@ }; clocks { - periph_clk: periph-clk { + periph_osc: periph-osc { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <50000000>; + clock-output-names = "periph"; + }; + + hsspi_osc: hsspi-osc { + compatible = "fixed-clock"; + + #clock-cells = <0>; + + clock-frequency = <400000000>; + clock-output-names = "hsspi_osc"; }; }; aliases { + nflash = &nflash; serial0 = &uart0; serial1 = &uart1; + spi0 = &lsspi; + spi1 = &hsspi; }; cpu_intc: interrupt-controller { @@ -51,23 +69,22 @@ compatible = "simple-bus"; ranges; - clkctl: clock-controller@10000004 { + periph_clk: clock-controller@10000004 { compatible = "brcm,bcm6362-clocks"; reg = <0x10000004 0x4>; #clock-cells = <1>; }; - periph_cntl: syscon@10000008 { + pll_cntl: syscon@10000008 { compatible = "syscon"; - reg = <0x10000000 0xc>; + reg = <0x10000008 0x4>; native-endian; - }; - reboot: syscon-reboot@10000008 { - compatible = "syscon-reboot"; - regmap = <&periph_cntl>; - offset = <0x0>; - mask = <0x1>; + reboot { + compatible = "syscon-reboot"; + offset = <0x0>; + mask = <0x1>; + }; }; periph_rst: reset-controller@10000010 { @@ -88,6 +105,16 @@ interrupts = <2>, <3>; }; + wdt: watchdog@1000005c { + compatible = "brcm,bcm7038-wdt"; + reg = <0x1000005c 0xc>; + + clocks = <&periph_osc>; + clock-names = "refclk"; + + timeout-sec = <30>; + }; + uart0: serial@10000100 { compatible = "brcm,bcm6345-uart"; reg = <0x10000100 0x18>; @@ -95,7 +122,7 @@ interrupt-parent = <&periph_intc>; interrupts = <3>; - clocks = <&periph_clk>; + clocks = <&periph_osc>; clock-names = "refclk"; status = "disabled"; @@ -108,12 +135,72 @@ interrupt-parent = <&periph_intc>; interrupts = <4>; - clocks = <&periph_clk>; + clocks = <&periph_osc>; clock-names = "refclk"; status = "disabled"; }; + nflash: nand@10000200 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "brcm,nand-bcm6368", + "brcm,brcmnand-v2.2", + "brcm,brcmnand"; + reg = <0x10000200 0x180>, + <0x10000600 0x200>, + <0x10000070 0x10>; + reg-names = "nand", + "nand-cache", + "nand-int-base"; + + interrupt-parent = <&periph_intc>; + interrupts = <12>; + + clocks = <&periph_clk BCM6362_CLK_NAND>; + clock-names = "nand"; + + status = "disabled"; + }; + + lsspi: spi@10000800 { + 
#address-cells = <1>; + #size-cells = <0>; + compatible = "brcm,bcm6358-spi"; + reg = <0x10000800 0x70c>; + + interrupt-parent = <&periph_intc>; + interrupts = <2>; + + clocks = <&periph_clk BCM6362_CLK_SPI>; + clock-names = "spi"; + + resets = <&periph_rst BCM6362_RST_SPI>; + reset-names = "spi"; + + status = "disabled"; + }; + + hsspi: spi@10001000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "brcm,bcm6328-hsspi"; + reg = <0x10001000 0x600>; + + interrupt-parent = <&periph_intc>; + interrupts = <5>; + + clocks = <&periph_clk BCM6362_CLK_HSSPI>, + <&hsspi_osc>; + clock-names = "hsspi", + "pll"; + + resets = <&periph_rst BCM6362_RST_SPI>; + reset-names = "hsspi"; + + status = "disabled"; + }; + periph_pwr: power-controller@10001848 { compatible = "brcm,bcm6362-power-controller"; reg = <0x10001848 0x4>; @@ -137,6 +224,9 @@ interrupt-parent = <&periph_intc>; interrupts = <10>; + phys = <&usbh 0>; + phy-names = "usb"; + status = "disabled"; }; @@ -149,6 +239,26 @@ interrupt-parent = <&periph_intc>; interrupts = <9>; + phys = <&usbh 0>; + phy-names = "usb"; + + status = "disabled"; + }; + + usbh: usb-phy@10002700 { + compatible = "brcm,bcm6362-usbh-phy"; + reg = <0x10002700 0x38>; + + #phy-cells = <1>; + + clocks = <&periph_clk BCM6362_CLK_USBH>; + clock-names = "usbh"; + + power-domains = <&periph_pwr BCM6362_POWER_DOMAIN_USBH>; + + resets = <&periph_rst BCM6362_RST_USBH>; + reset-names = "usbh"; + status = "disabled"; }; }; diff --git a/arch/mips/boot/dts/brcm/bcm6368.dtsi b/arch/mips/boot/dts/brcm/bcm6368.dtsi index 52c19f40b9cc..fc15e200877d 100644 --- a/arch/mips/boot/dts/brcm/bcm6368.dtsi +++ b/arch/mips/boot/dts/brcm/bcm6368.dtsi @@ -1,4 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 + +#include "dt-bindings/clock/bcm6368-clock.h" +#include "dt-bindings/reset/bcm6368-reset.h" + / { #address-cells = <1>; #size-cells = <1>; @@ -24,16 +28,20 @@ }; clocks { - periph_clk: periph-clk { + periph_osc: periph-osc { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <50000000>; + clock-output-names = "periph"; }; }; aliases { + nflash = &nflash; + pflash = &pflash; serial0 = &uart0; serial1 = &uart1; + spi0 = &lsspi; }; cpu_intc: interrupt-controller { @@ -51,23 +59,22 @@ compatible = "simple-bus"; ranges; - clkctl: clock-controller@10000004 { + periph_clk: clock-controller@10000004 { compatible = "brcm,bcm6368-clocks"; reg = <0x10000004 0x4>; #clock-cells = <1>; }; - periph_cntl: syscon@100000008 { + pll_cntl: syscon@100000008 { compatible = "syscon"; - reg = <0x10000000 0xc>; + reg = <0x10000008 0x4>; native-endian; - }; - reboot: syscon-reboot@10000008 { - compatible = "syscon-reboot"; - regmap = <&periph_cntl>; - offset = <0x0>; - mask = <0x1>; + reboot { + compatible = "syscon-reboot"; + offset = <0x0>; + mask = <0x1>; + }; }; periph_rst: reset-controller@10000010 { @@ -88,31 +95,88 @@ interrupts = <2>, <3>; }; + wdt: watchdog@1000005c { + compatible = "brcm,bcm7038-wdt"; + reg = <0x1000005c 0xc>; + + clocks = <&periph_osc>; + clock-names = "refclk"; + + timeout-sec = <30>; + }; + leds0: led-controller@100000d0 { #address-cells = <1>; #size-cells = <0>; compatible = "brcm,bcm6358-leds"; reg = <0x100000d0 0x8>; + status = "disabled"; }; uart0: serial@10000100 { compatible = "brcm,bcm6345-uart"; reg = <0x10000100 0x18>; + interrupt-parent = <&periph_intc>; interrupts = <2>; - clocks = <&periph_clk>; + + clocks = <&periph_osc>; clock-names = "refclk"; + status = "disabled"; }; uart1: serial@10000120 { compatible = "brcm,bcm6345-uart"; reg = <0x10000120 0x18>; + 
interrupt-parent = <&periph_intc>; interrupts = <3>; - clocks = <&periph_clk>; + + clocks = <&periph_osc>; clock-names = "refclk"; + + status = "disabled"; + }; + + nflash: nand@10000200 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "brcm,nand-bcm6368", + "brcm,brcmnand-v2.1", + "brcm,brcmnand"; + reg = <0x10000200 0x180>, + <0x10000600 0x200>, + <0x10000070 0x10>; + reg-names = "nand", + "nand-cache", + "nand-int-base"; + + interrupt-parent = <&periph_intc>; + interrupts = <10>; + + clocks = <&periph_clk BCM6368_CLK_NAND>; + clock-names = "nand"; + + status = "disabled"; + }; + + lsspi: spi@10000800 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "brcm,bcm6358-spi"; + reg = <0x10000800 0x70c>; + + interrupt-parent = <&periph_intc>; + interrupts = <1>; + + clocks = <&periph_clk BCM6368_CLK_SPI>; + clock-names = "spi"; + + resets = <&periph_rst BCM6368_RST_SPI>; + reset-names = "spi"; + status = "disabled"; }; @@ -120,8 +184,13 @@ compatible = "brcm,bcm6368-ehci", "generic-ehci"; reg = <0x10001500 0x100>; big-endian; + interrupt-parent = <&periph_intc>; interrupts = <7>; + + phys = <&usbh 0>; + phy-names = "usb"; + status = "disabled"; }; @@ -130,9 +199,49 @@ reg = <0x10001600 0x100>; big-endian; no-big-frame-no; + interrupt-parent = <&periph_intc>; interrupts = <5>; + + phys = <&usbh 0>; + phy-names = "usb"; + status = "disabled"; }; + + usbh: usb-phy@10001700 { + compatible = "brcm,bcm6368-usbh-phy"; + reg = <0x10001700 0x38>; + #phy-cells = <1>; + + clocks = <&periph_clk BCM6368_CLK_USBH>; + clock-names = "usbh"; + + resets = <&periph_rst BCM6368_RST_USBH>; + reset-names = "usbh"; + + status = "disabled"; + }; + + random: rng@10004180 { + compatible = "brcm,bcm6368-rng"; + reg = <0x10004180 0x14>; + + clocks = <&periph_clk BCM6368_CLK_IPSEC>; + clock-names = "ipsec"; + + resets = <&periph_rst BCM6368_RST_IPSEC>; + reset-names = "ipsec"; + }; + }; + + pflash: nor@18000000 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "cfi-flash"; + reg = <0x18000000 0x2000000>; + bank-width = <2>; + + status = "disabled"; }; }; diff --git a/arch/mips/boot/dts/brcm/bcm93384wvg.dts b/arch/mips/boot/dts/brcm/bcm93384wvg.dts index 601e4d9293ab..7d3f181b8980 100644 --- a/arch/mips/boot/dts/brcm/bcm93384wvg.dts +++ b/arch/mips/boot/dts/brcm/bcm93384wvg.dts @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm3384_zephyr.dtsi" +#include "bcm3384_zephyr.dtsi" / { compatible = "brcm,bcm93384wvg", "brcm,bcm3384"; diff --git a/arch/mips/boot/dts/brcm/bcm93384wvg_viper.dts b/arch/mips/boot/dts/brcm/bcm93384wvg_viper.dts index 938a8e66128c..f845faa0d682 100644 --- a/arch/mips/boot/dts/brcm/bcm93384wvg_viper.dts +++ b/arch/mips/boot/dts/brcm/bcm93384wvg_viper.dts @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm3384_viper.dtsi" +#include "bcm3384_viper.dtsi" / { compatible = "brcm,bcm93384wvg-viper", "brcm,bcm3384-viper"; diff --git a/arch/mips/boot/dts/brcm/bcm96368mvwg.dts b/arch/mips/boot/dts/brcm/bcm96368mvwg.dts index 6d772c394e41..f5e955085308 100644 --- a/arch/mips/boot/dts/brcm/bcm96368mvwg.dts +++ b/arch/mips/boot/dts/brcm/bcm96368mvwg.dts @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm6368.dtsi" +#include "bcm6368.dtsi" / { compatible = "brcm,bcm96368mvwg", "brcm,bcm6368"; diff --git a/arch/mips/boot/dts/brcm/bcm97125cbmb.dts b/arch/mips/boot/dts/brcm/bcm97125cbmb.dts index 79e9769f7e00..bda5f796251a 100644 --- a/arch/mips/boot/dts/brcm/bcm97125cbmb.dts +++ 
b/arch/mips/boot/dts/brcm/bcm97125cbmb.dts @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm7125.dtsi" +#include "bcm7125.dtsi" / { compatible = "brcm,bcm97125cbmb", "brcm,bcm7125"; diff --git a/arch/mips/boot/dts/brcm/bcm97346dbsmb.dts b/arch/mips/boot/dts/brcm/bcm97346dbsmb.dts index 28370ff77eeb..9f73735e815c 100644 --- a/arch/mips/boot/dts/brcm/bcm97346dbsmb.dts +++ b/arch/mips/boot/dts/brcm/bcm97346dbsmb.dts @@ -1,8 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm7346.dtsi" -/include/ "bcm97xxx-nand-cs1-bch24.dtsi" +#include "bcm7346.dtsi" +#include "bcm97xxx-nand-cs1-bch24.dtsi" / { compatible = "brcm,bcm97346dbsmb", "brcm,bcm7346"; diff --git a/arch/mips/boot/dts/brcm/bcm97358svmb.dts b/arch/mips/boot/dts/brcm/bcm97358svmb.dts index 41c1b510c230..522f2c40d6e6 100644 --- a/arch/mips/boot/dts/brcm/bcm97358svmb.dts +++ b/arch/mips/boot/dts/brcm/bcm97358svmb.dts @@ -1,8 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm7358.dtsi" -/include/ "bcm97xxx-nand-cs1-bch4.dtsi" +#include "bcm7358.dtsi" +#include "bcm97xxx-nand-cs1-bch4.dtsi" / { compatible = "brcm,bcm97358svmb", "brcm,bcm7358"; diff --git a/arch/mips/boot/dts/brcm/bcm97360svmb.dts b/arch/mips/boot/dts/brcm/bcm97360svmb.dts index 9f6c6c9b7ea7..01f215b08dba 100644 --- a/arch/mips/boot/dts/brcm/bcm97360svmb.dts +++ b/arch/mips/boot/dts/brcm/bcm97360svmb.dts @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm7360.dtsi" +#include "bcm7360.dtsi" / { compatible = "brcm,bcm97360svmb", "brcm,bcm7360"; diff --git a/arch/mips/boot/dts/brcm/bcm97362svmb.dts b/arch/mips/boot/dts/brcm/bcm97362svmb.dts index df8b755c390f..97aeb51b6831 100644 --- a/arch/mips/boot/dts/brcm/bcm97362svmb.dts +++ b/arch/mips/boot/dts/brcm/bcm97362svmb.dts @@ -1,8 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm7362.dtsi" -/include/ "bcm97xxx-nand-cs1-bch4.dtsi" +#include "bcm7362.dtsi" +#include "bcm97xxx-nand-cs1-bch4.dtsi" / { compatible = "brcm,bcm97362svmb", "brcm,bcm7362"; diff --git a/arch/mips/boot/dts/brcm/bcm97420c.dts b/arch/mips/boot/dts/brcm/bcm97420c.dts index 086faeaa384a..cc70c2dd4d85 100644 --- a/arch/mips/boot/dts/brcm/bcm97420c.dts +++ b/arch/mips/boot/dts/brcm/bcm97420c.dts @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm7420.dtsi" +#include "bcm7420.dtsi" / { compatible = "brcm,bcm97420c", "brcm,bcm7420"; diff --git a/arch/mips/boot/dts/brcm/bcm97425svmb.dts b/arch/mips/boot/dts/brcm/bcm97425svmb.dts index 0ed22217bf3a..9efecfe1e05c 100644 --- a/arch/mips/boot/dts/brcm/bcm97425svmb.dts +++ b/arch/mips/boot/dts/brcm/bcm97425svmb.dts @@ -1,8 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm7425.dtsi" -/include/ "bcm97xxx-nand-cs1-bch24.dtsi" +#include "bcm7425.dtsi" +#include "bcm97xxx-nand-cs1-bch24.dtsi" / { compatible = "brcm,bcm97425svmb", "brcm,bcm7425"; diff --git a/arch/mips/boot/dts/brcm/bcm97435svmb.dts b/arch/mips/boot/dts/brcm/bcm97435svmb.dts index 2c145a883aef..b653c6ff74b5 100644 --- a/arch/mips/boot/dts/brcm/bcm97435svmb.dts +++ b/arch/mips/boot/dts/brcm/bcm97435svmb.dts @@ -1,8 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm7435.dtsi" -/include/ "bcm97xxx-nand-cs1-bch24.dtsi" +#include "bcm7435.dtsi" +#include "bcm97xxx-nand-cs1-bch24.dtsi" / { compatible = "brcm,bcm97435svmb", "brcm,bcm7435"; diff --git a/arch/mips/boot/dts/brcm/bcm9ejtagprb.dts b/arch/mips/boot/dts/brcm/bcm9ejtagprb.dts index 8d58c1971b30..615d2b97770e 100644 --- 
a/arch/mips/boot/dts/brcm/bcm9ejtagprb.dts +++ b/arch/mips/boot/dts/brcm/bcm9ejtagprb.dts @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "bcm6328.dtsi" +#include "bcm6328.dtsi" / { compatible = "brcm,bcm9ejtagprb", "brcm,bcm6328"; diff --git a/arch/mips/boot/dts/ingenic/gcw0.dts b/arch/mips/boot/dts/ingenic/gcw0.dts index bc72304a2440..f4c04f2263ea 100644 --- a/arch/mips/boot/dts/ingenic/gcw0.dts +++ b/arch/mips/boot/dts/ingenic/gcw0.dts @@ -345,7 +345,6 @@ spi-max-frequency = <3125000>; spi-3wire; - spi-cs-high; reset-gpios = <&gpe 2 GPIO_ACTIVE_LOW>; diff --git a/arch/mips/boot/dts/loongson/Makefile b/arch/mips/boot/dts/loongson/Makefile index 8fd0efb37423..72267bfda9b4 100644 --- a/arch/mips/boot/dts/loongson/Makefile +++ b/arch/mips/boot/dts/loongson/Makefile @@ -1,4 +1,5 @@ # SPDX_License_Identifier: GPL_2.0 +dtb-$(CONFIG_MACH_LOONGSON64) += loongson64_2core_2k1000.dtb dtb-$(CONFIG_MACH_LOONGSON64) += loongson64c_4core_ls7a.dtb dtb-$(CONFIG_MACH_LOONGSON64) += loongson64c_4core_rs780e.dtb dtb-$(CONFIG_MACH_LOONGSON64) += loongson64c_8core_rs780e.dtb diff --git a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi new file mode 100644 index 000000000000..569e814def83 --- /dev/null +++ b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi @@ -0,0 +1,243 @@ +// SPDX-License-Identifier: GPL-2.0 + +/dts-v1/; + +#include <dt-bindings/interrupt-controller/irq.h> + +/ { + compatible = "loongson,loongson2k1000"; + + #address-cells = <2>; + #size-cells = <2>; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu0: cpu@0 { + device_type = "cpu"; + compatible = "loongson,gs264"; + reg = <0x0>; + #clock-cells = <1>; + clocks = <&cpu_clk>; + }; + }; + + memory { + compatible = "memory"; + device_type = "memory"; + reg = <0x00000000 0x00200000 0x00000000 0x0ee00000>, /* 238 MB at 2 MB */ + <0x00000000 0x20000000 0x00000000 0x1f000000>, /* 496 MB at 512 MB */ + <0x00000001 0x10000000 0x00000001 0xb0000000>; /* 6912 MB at 4352MB */ + }; + + cpu_clk: cpu_clk { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <800000000>; + }; + + cpuintc: interrupt-controller { + #address-cells = <0>; + #interrupt-cells = <1>; + interrupt-controller; + compatible = "mti,cpu-interrupt-controller"; + }; + + package0: bus@10000000 { + compatible = "simple-bus"; + #address-cells = <2>; + #size-cells = <2>; + ranges = <0 0x10000000 0 0x10000000 0 0x10000000 /* ioports */ + 0 0x40000000 0 0x40000000 0 0x40000000 + 0xfe 0x00000000 0xfe 0x00000000 0 0x40000000>; + + liointc0: interrupt-controller@1fe11400 { + compatible = "loongson,liointc-2.0"; + reg = <0 0x1fe11400 0 0x40>, + <0 0x1fe11040 0 0x8>, + <0 0x1fe11140 0 0x8>; + reg-names = "main", "isr0", "isr1"; + + interrupt-controller; + #interrupt-cells = <2>; + + interrupt-parent = <&cpuintc>; + interrupts = <2>; + interrupt-names = "int0"; + + loongson,parent_int_map = <0xffffffff>, /* int0 */ + <0x00000000>, /* int1 */ + <0x00000000>, /* int2 */ + <0x00000000>; /* int3 */ + }; + + liointc1: interrupt-controller@1fe11440 { + compatible = "loongson,liointc-2.0"; + reg = <0 0x1fe11440 0 0x40>, + <0 0x1fe11048 0 0x8>, + <0 0x1fe11148 0 0x8>; + reg-names = "main", "isr0", "isr1"; + + interrupt-controller; + #interrupt-cells = <2>; + + interrupt-parent = <&cpuintc>; + interrupts = <3>; + interrupt-names = "int1"; + + loongson,parent_int_map = <0x00000000>, /* int0 */ + <0xffffffff>, /* int1 */ + <0x00000000>, /* int2 */ + <0x00000000>; /* int3 */ + }; + + uart0: 
serial@1fe00000 { + compatible = "ns16550a"; + reg = <0 0x1fe00000 0 0x8>; + clock-frequency = <125000000>; + interrupt-parent = <&liointc0>; + interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + no-loopback-test; + }; + + pci@1a000000 { + compatible = "loongson,ls2k-pci"; + device_type = "pci"; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <2>; + + reg = <0 0x1a000000 0 0x02000000>, + <0xfe 0x00000000 0 0x20000000>; + + ranges = <0x01000000 0x0 0x00000000 0x0 0x18000000 0x0 0x00010000>, + <0x02000000 0x0 0x40000000 0x0 0x40000000 0x0 0x40000000>; + + ehci@4,1 { + compatible = "pci0014,7a14.0", + "pci0014,7a14", + "pciclass0c0320", + "pciclass0c03"; + + reg = <0x2100 0x0 0x0 0x0 0x0>; + interrupts = <18 IRQ_TYPE_LEVEL_LOW>; + interrupt-parent = <&liointc1>; + }; + + ohci@4,2 { + compatible = "pci0014,7a24.0", + "pci0014,7a24", + "pciclass0c0310", + "pciclass0c03"; + + reg = <0x2200 0x0 0x0 0x0 0x0>; + interrupts = <19 IRQ_TYPE_LEVEL_LOW>; + interrupt-parent = <&liointc1>; + }; + + sata@8,0 { + compatible = "pci0014,7a08.0", + "pci0014,7a08", + "pciclass010601", + "pciclass0106"; + + reg = <0x4000 0x0 0x0 0x0 0x0>; + interrupts = <19 IRQ_TYPE_LEVEL_LOW>; + interrupt-parent = <&liointc0>; + }; + + pci_bridge@9,0 { + compatible = "pci0014,7a19.0", + "pci0014,7a19", + "pciclass060400", + "pciclass0604"; + + reg = <0x4800 0x0 0x0 0x0 0x0>; + #interrupt-cells = <1>; + interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + interrupt-parent = <&liointc1>; + interrupt-map-mask = <0 0 0 0>; + interrupt-map = <0 0 0 0 &liointc1 0 IRQ_TYPE_LEVEL_LOW>; + external-facing; + }; + + pci_bridge@a,0 { + compatible = "pci0014,7a19.0", + "pci0014,7a19", + "pciclass060400", + "pciclass0604"; + + reg = <0x5000 0x0 0x0 0x0 0x0>; + #interrupt-cells = <1>; + interrupts = <1 IRQ_TYPE_LEVEL_LOW>; + interrupt-parent = <&liointc1>; + interrupt-map-mask = <0 0 0 0>; + interrupt-map = <0 0 0 0 &liointc1 1 IRQ_TYPE_LEVEL_LOW>; + external-facing; + }; + + pci_bridge@b,0 { + compatible = "pci0014,7a19.0", + "pci0014,7a19", + "pciclass060400", + "pciclass0604"; + + reg = <0x5800 0x0 0x0 0x0 0x0>; + #interrupt-cells = <1>; + interrupts = <2 IRQ_TYPE_LEVEL_LOW>; + interrupt-parent = <&liointc1>; + interrupt-map-mask = <0 0 0 0>; + interrupt-map = <0 0 0 0 &liointc1 2 IRQ_TYPE_LEVEL_LOW>; + external-facing; + }; + + pci_bridge@c,0 { + compatible = "pci0014,7a19.0", + "pci0014,7a19", + "pciclass060400", + "pciclass0604"; + + reg = <0x6000 0x0 0x0 0x0 0x0>; + #interrupt-cells = <1>; + interrupts = <3 IRQ_TYPE_LEVEL_LOW>; + interrupt-parent = <&liointc1>; + interrupt-map-mask = <0 0 0 0>; + interrupt-map = <0 0 0 0 &liointc1 3 IRQ_TYPE_LEVEL_LOW>; + external-facing; + }; + + pci_bridge@d,0 { + compatible = "pci0014,7a19.0", + "pci0014,7a19", + "pciclass060400", + "pciclass0604"; + + reg = <0x6800 0x0 0x0 0x0 0x0>; + #interrupt-cells = <1>; + interrupts = <4 IRQ_TYPE_LEVEL_LOW>; + interrupt-parent = <&liointc1>; + interrupt-map-mask = <0 0 0 0>; + interrupt-map = <0 0 0 0 &liointc1 4 IRQ_TYPE_LEVEL_LOW>; + external-facing; + }; + + pci_bridge@e,0 { + compatible = "pci0014,7a19.0", + "pci0014,7a19", + "pciclass060400", + "pciclass0604"; + + reg = <0x7000 0x0 0x0 0x0 0x0>; + #interrupt-cells = <1>; + interrupts = <5 IRQ_TYPE_LEVEL_LOW>; + interrupt-parent = <&liointc1>; + interrupt-map-mask = <0 0 0 0>; + interrupt-map = <0 0 0 0 &liointc1 5 IRQ_TYPE_LEVEL_LOW>; + external-facing; + }; + + }; + }; +}; + diff --git a/arch/mips/boot/dts/loongson/loongson64_2core_2k1000.dts b/arch/mips/boot/dts/loongson/loongson64_2core_2k1000.dts new file mode 
100644 index 000000000000..e31d2ee65cd5 --- /dev/null +++ b/arch/mips/boot/dts/loongson/loongson64_2core_2k1000.dts @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 + +/dts-v1/; + +#include "loongson64-2k1000.dtsi" + +/ { + compatible = "loongson,loongson64-2core-2k1000"; +}; + diff --git a/arch/mips/cavium-octeon/oct_ilm.c b/arch/mips/cavium-octeon/oct_ilm.c index 99e27155b399..6a4694538bb6 100644 --- a/arch/mips/cavium-octeon/oct_ilm.c +++ b/arch/mips/cavium-octeon/oct_ilm.c @@ -62,7 +62,7 @@ static int reset_statistics(void *data, u64 value) return 0; } -DEFINE_SIMPLE_ATTRIBUTE(reset_statistics_ops, NULL, reset_statistics, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(reset_statistics_ops, NULL, reset_statistics, "%llu\n"); static void init_debugfs(void) { diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S index 0a7c9834b81c..600d018cf354 100644 --- a/arch/mips/cavium-octeon/octeon-memcpy.S +++ b/arch/mips/cavium-octeon/octeon-memcpy.S @@ -150,8 +150,12 @@ LEAF(memcpy) /* a0=dst a1=src a2=len */ EXPORT_SYMBOL(memcpy) move v0, dst /* return value */ __memcpy: -FEXPORT(__copy_user) -EXPORT_SYMBOL(__copy_user) +FEXPORT(__raw_copy_from_user) +EXPORT_SYMBOL(__raw_copy_from_user) +FEXPORT(__raw_copy_to_user) +EXPORT_SYMBOL(__raw_copy_to_user) +FEXPORT(__raw_copy_in_user) +EXPORT_SYMBOL(__raw_copy_in_user) /* * Note: dst & src may be unaligned, len may be 0 * Temps diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig index eea9b613bb74..d83e7d600b0a 100644 --- a/arch/mips/configs/bigsur_defconfig +++ b/arch/mips/configs/bigsur_defconfig @@ -105,10 +105,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_NBD=m CONFIG_EEPROM_LEGACY=y CONFIG_EEPROM_MAX6875=y -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_BLK_DEV_IDETAPE=y -CONFIG_BLK_DEV_TC86C001=m CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=y diff --git a/arch/mips/configs/loongson2k_defconfig b/arch/mips/configs/loongson2k_defconfig new file mode 100644 index 000000000000..e948ca487e2d --- /dev/null +++ b/arch/mips/configs/loongson2k_defconfig @@ -0,0 +1,353 @@ +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_KERNEL_LZMA=y +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_MEMCG=y +CONFIG_BLK_CGROUP=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_SYSFS_DEPRECATED=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EMBEDDED=y +CONFIG_MACH_LOONGSON64=y +# CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION is not set +CONFIG_HZ_256=y +CONFIG_MIPS32_O32=y +CONFIG_MIPS32_N32=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_MQ_IOSCHED_DEADLINE=m +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +CONFIG_BINFMT_MISC=m +CONFIG_KSM=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m 
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_IP_VS=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_IP_SCTP=m +CONFIG_L2TP=m +CONFIG_BRIDGE=m +CONFIG_CFG80211=m +CONFIG_CFG80211_WEXT=y +CONFIG_MAC80211=m +CONFIG_RFKILL=m +CONFIG_RFKILL_INPUT=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEASPM_PERFORMANCE=y +CONFIG_HOTPLUG_PCI=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_MTD=m +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_RAID_ATTRS=m +CONFIG_BLK_DEV_SD=y +CONFIG_BLK_DEV_SR=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_ISCSI_TCP=m +CONFIG_MEGARAID_NEWGEN=y +CONFIG_MEGARAID_MM=y +CONFIG_MEGARAID_MAILBOX=y +CONFIG_MEGARAID_LEGACY=y +CONFIG_MEGARAID_SAS=y +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +CONFIG_PATA_ATIIXP=y +CONFIG_MD=y +CONFIG_BLK_DEV_MD=m +CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +CONFIG_MD_MULTIPATH=m +CONFIG_BLK_DEV_DM=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_MIRROR=m +CONFIG_DM_ZERO=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_ISCSI_TARGET=m +CONFIG_NETDEVICES=y +CONFIG_TUN=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_ATHEROS is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_CHELSIO is not set +# CONFIG_NET_VENDOR_CIRRUS is not set +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_E1000=y +CONFIG_E1000E=y +CONFIG_IGB=y +CONFIG_IXGB=y +CONFIG_IXGBE=y +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MELLANOX is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_MICROSEMI is not set +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETERION is not set +# CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +# CONFIG_NET_VENDOR_PACKET_ENGINES is not set +# CONFIG_NET_VENDOR_PENSANDO is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_8139CP=y +CONFIG_8139TOO=y +# CONFIG_8139TOO_PIO is not set +CONFIG_R8169=y +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SOLARFLARE 
is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SMSC is not set +CONFIG_STMMAC_ETH=y +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_TOSHIBA is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOE=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_ATH9K=m +CONFIG_HOSTAP=m +CONFIG_INPUT_LEDS=m +CONFIG_INPUT_SPARSEKMAP=y +CONFIG_INPUT_EVDEV=y +# CONFIG_KEYBOARD_ATKBD is not set +CONFIG_KEYBOARD_XTKBD=m +# CONFIG_MOUSE_PS2 is not set +CONFIG_INPUT_MISC=y +CONFIG_INPUT_UINPUT=m +# CONFIG_SERIO_I8042 is not set +CONFIG_SERIO_SERPORT=m +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIO_RAW=m +CONFIG_LEGACY_PTY_COUNT=16 +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_16550A_VARIANTS is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_NR_UARTS=16 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_SHARE_IRQ=y +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_SERIAL_NONSTANDARD=y +CONFIG_HW_RANDOM=y +CONFIG_RAW_DRIVER=m +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_PIIX4=y +CONFIG_GPIO_LOONGSON=y +CONFIG_SENSORS_LM75=m +CONFIG_SENSORS_LM93=m +CONFIG_SENSORS_W83627HF=m +# CONFIG_MEDIA_CEC_SUPPORT is not set +CONFIG_MEDIA_SUPPORT=m +# CONFIG_MEDIA_CONTROLLER is not set +CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_USB_VIDEO_CLASS=m +CONFIG_DRM=y +CONFIG_DRM_RADEON=y +CONFIG_FB_RADEON=y +CONFIG_LCD_CLASS_DEVICE=y +CONFIG_LCD_PLATFORM=m +# CONFIG_VGA_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +CONFIG_LOGO=y +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_VERBOSE_PRINTK=y +CONFIG_SND_SEQUENCER=y +CONFIG_SND_SEQ_DUMMY=m +# CONFIG_SND_ISA is not set +CONFIG_SND_HDA_INTEL=y +CONFIG_SND_HDA_HWDEP=y +CONFIG_SND_HDA_PATCH_LOADER=y +CONFIG_SND_HDA_CODEC_REALTEK=y +CONFIG_SND_HDA_CODEC_ANALOG=y +CONFIG_SND_HDA_CODEC_SIGMATEL=y +CONFIG_SND_HDA_CODEC_VIA=y +CONFIG_SND_HDA_CODEC_CONEXANT=y +# CONFIG_SND_USB is not set +CONFIG_SND_SOC=y +CONFIG_HID_A4TECH=m +CONFIG_HID_SUNPLUS=m +CONFIG_USB=y +CONFIG_USB_MON=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_UHCI_HCD=m +CONFIG_USB_STORAGE=y +CONFIG_USB_SERIAL=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_CMOS=y +CONFIG_DMADEVICES=y +# CONFIG_CPU_HWMON is not set +CONFIG_PM_DEVFREQ=y +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y +CONFIG_DEVFREQ_GOV_PERFORMANCE=y +CONFIG_DEVFREQ_GOV_POWERSAVE=y +CONFIG_DEVFREQ_GOV_USERSPACE=y +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_XFS_FS=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_QUOTA=y +# CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_AUTOFS4_FS=y +CONFIG_FUSE_FS=m +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=936 +CONFIG_FAT_DEFAULT_IOCHARSET="gb2312" +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_CONFIGFS_FS=y +CONFIG_CRAMFS=m +CONFIG_SQUASHFS=y +CONFIG_SQUASHFS_XATTR=y +CONFIG_NFS_FS=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +CONFIG_NFSD=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_CIFS=m +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_936=y 
+CONFIG_NLS_ASCII=y +CONFIG_NLS_UTF8=y +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_PATH=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_CRYPTO_SEQIV=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_DEFLATE=m +CONFIG_PRINTK_TIME=y +CONFIG_FRAME_WARN=1024 +CONFIG_STRIP_ASM_SYMS=y +CONFIG_MAGIC_SYSRQ=y +# CONFIG_SCHED_DEBUG is not set +# CONFIG_DEBUG_PREEMPT is not set +# CONFIG_FTRACE is not set diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig index 0e79f81217bc..a18609cf0e5e 100644 --- a/arch/mips/configs/loongson3_defconfig +++ b/arch/mips/configs/loongson3_defconfig @@ -26,6 +26,7 @@ CONFIG_SCHED_AUTOGROUP=y CONFIG_SYSFS_DEPRECATED=y CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y +CONFIG_BPF_SYSCALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y CONFIG_MACH_LOONGSON64=y @@ -39,7 +40,7 @@ CONFIG_MIPS32_O32=y CONFIG_MIPS32_N32=y CONFIG_VIRTUALIZATION=y CONFIG_KVM=m -CONFIG_KVM_MIPS_VZ=y +CONFIG_KPROBES=y CONFIG_MODULES=y CONFIG_MODULE_FORCE_LOAD=y CONFIG_MODULE_UNLOAD=y @@ -129,6 +130,7 @@ CONFIG_L2TP=m CONFIG_BRIDGE=m CONFIG_VSOCKETS=m CONFIG_VIRTIO_VSOCKETS=m +CONFIG_BPF_JIT=y CONFIG_CFG80211=m CONFIG_CFG80211_WEXT=y CONFIG_MAC80211=m @@ -318,6 +320,7 @@ CONFIG_USB_OHCI_HCD=y CONFIG_USB_UHCI_HCD=m CONFIG_USB_STORAGE=m CONFIG_USB_SERIAL=m +CONFIG_USB_SERIAL_PL2303=m CONFIG_USB_SERIAL_OPTION=m CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_CMOS=y @@ -405,8 +408,10 @@ CONFIG_CRYPTO_DEFLATE=m CONFIG_PRINTK_TIME=y CONFIG_STRIP_ASM_SYMS=y CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_FS=y # CONFIG_SCHED_DEBUG is not set # CONFIG_DEBUG_PREEMPT is not set -# CONFIG_FTRACE is not set +CONFIG_FUNCTION_TRACER=y +CONFIG_FTRACE_SYSCALLS=y CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="ieee754=relaxed" diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig index ab8d1df0f255..5924e48fd3ec 100644 --- a/arch/mips/configs/malta_kvm_defconfig +++ b/arch/mips/configs/malta_kvm_defconfig @@ -238,9 +238,6 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_BLK_DEV_TC86C001=m CONFIG_RAID_ATTRS=m CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig deleted file mode 100644 index 9185e0a0aa45..000000000000 --- a/arch/mips/configs/malta_kvm_guest_defconfig +++ /dev/null @@ -1,436 +0,0 @@ -CONFIG_SYSVIPC=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_LOG_BUF_SHIFT=15 -CONFIG_NAMESPACES=y -CONFIG_RELAY=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_EXPERT=y -# CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y -CONFIG_MIPS_MALTA=y -CONFIG_CPU_LITTLE_ENDIAN=y -CONFIG_CPU_MIPS32_R2=y -CONFIG_KVM_GUEST=y -CONFIG_PAGE_SIZE_16KB=y -# CONFIG_MIPS_MT_SMP is not set -CONFIG_HZ_100=y -CONFIG_PCI=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_XFRM_USER=m -CONFIG_NET_KEY=y -CONFIG_NET_KEY_MIGRATE=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_MULTIPATH=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -CONFIG_NET_IPIP=m 
-CONFIG_IP_MROUTE=y -CONFIG_IP_PIMSM_V1=y -CONFIG_IP_PIMSM_V2=y -CONFIG_SYN_COOKIES=y -CONFIG_INET_AH=m -CONFIG_INET_ESP=m -CONFIG_INET_IPCOMP=m -CONFIG_INET_XFRM_MODE_TRANSPORT=m -CONFIG_INET_XFRM_MODE_TUNNEL=m -CONFIG_TCP_MD5SIG=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y -CONFIG_IPV6_OPTIMISTIC_DAD=y -CONFIG_INET6_AH=m -CONFIG_INET6_ESP=m -CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_TUNNEL=m -CONFIG_IPV6_MROUTE=y -CONFIG_IPV6_PIMSM_V2=y -CONFIG_NETWORK_SECMARK=y -CONFIG_NETFILTER=y -CONFIG_NF_CONNTRACK=m -CONFIG_NF_CONNTRACK_SECMARK=y -CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CONNTRACK_AMANDA=m -CONFIG_NF_CONNTRACK_FTP=m -CONFIG_NF_CONNTRACK_H323=m -CONFIG_NF_CONNTRACK_IRC=m -CONFIG_NF_CONNTRACK_PPTP=m -CONFIG_NF_CONNTRACK_SANE=m -CONFIG_NF_CONNTRACK_SIP=m -CONFIG_NF_CONNTRACK_TFTP=m -CONFIG_NF_CT_NETLINK=m -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m -CONFIG_NETFILTER_XT_TARGET_CONNMARK=m -CONFIG_NETFILTER_XT_TARGET_MARK=m -CONFIG_NETFILTER_XT_TARGET_NFLOG=m -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -CONFIG_NETFILTER_XT_TARGET_TPROXY=m -CONFIG_NETFILTER_XT_TARGET_TRACE=m -CONFIG_NETFILTER_XT_TARGET_SECMARK=m -CONFIG_NETFILTER_XT_TARGET_TCPMSS=m -CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m -CONFIG_NETFILTER_XT_MATCH_COMMENT=m -CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m -CONFIG_NETFILTER_XT_MATCH_CONNMARK=m -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m -CONFIG_NETFILTER_XT_MATCH_DCCP=m -CONFIG_NETFILTER_XT_MATCH_ESP=m -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m -CONFIG_NETFILTER_XT_MATCH_HELPER=m -CONFIG_NETFILTER_XT_MATCH_IPRANGE=m -CONFIG_NETFILTER_XT_MATCH_LENGTH=m -CONFIG_NETFILTER_XT_MATCH_LIMIT=m -CONFIG_NETFILTER_XT_MATCH_MAC=m -CONFIG_NETFILTER_XT_MATCH_MARK=m -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_OWNER=m -CONFIG_NETFILTER_XT_MATCH_POLICY=m -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m -CONFIG_NETFILTER_XT_MATCH_QUOTA=m -CONFIG_NETFILTER_XT_MATCH_RATEEST=m -CONFIG_NETFILTER_XT_MATCH_REALM=m -CONFIG_NETFILTER_XT_MATCH_RECENT=m -CONFIG_NETFILTER_XT_MATCH_SOCKET=m -CONFIG_NETFILTER_XT_MATCH_STATE=m -CONFIG_NETFILTER_XT_MATCH_STATISTIC=m -CONFIG_NETFILTER_XT_MATCH_STRING=m -CONFIG_NETFILTER_XT_MATCH_TCPMSS=m -CONFIG_NETFILTER_XT_MATCH_TIME=m -CONFIG_NETFILTER_XT_MATCH_U32=m -CONFIG_IP_VS=m -CONFIG_IP_VS_IPV6=y -CONFIG_IP_VS_PROTO_TCP=y -CONFIG_IP_VS_PROTO_UDP=y -CONFIG_IP_VS_PROTO_ESP=y -CONFIG_IP_VS_PROTO_AH=y -CONFIG_IP_VS_RR=m -CONFIG_IP_VS_WRR=m -CONFIG_IP_VS_LC=m -CONFIG_IP_VS_WLC=m -CONFIG_IP_VS_LBLC=m -CONFIG_IP_VS_LBLCR=m -CONFIG_IP_VS_DH=m -CONFIG_IP_VS_SH=m -CONFIG_IP_VS_SED=m -CONFIG_IP_VS_NQ=m -CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_AH=m -CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_TTL=m -CONFIG_IP_NF_FILTER=m -CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_MANGLE=m -CONFIG_IP_NF_TARGET_CLUSTERIP=m -CONFIG_IP_NF_TARGET_ECN=m -CONFIG_IP_NF_TARGET_TTL=m -CONFIG_IP_NF_RAW=m -CONFIG_IP_NF_ARPTABLES=m -CONFIG_IP_NF_ARPFILTER=m -CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_IP6_NF_MATCH_AH=m -CONFIG_IP6_NF_MATCH_EUI64=m -CONFIG_IP6_NF_MATCH_FRAG=m -CONFIG_IP6_NF_MATCH_OPTS=m -CONFIG_IP6_NF_MATCH_HL=m -CONFIG_IP6_NF_MATCH_IPV6HEADER=m -CONFIG_IP6_NF_MATCH_MH=m -CONFIG_IP6_NF_MATCH_RT=m -CONFIG_IP6_NF_TARGET_HL=m -CONFIG_IP6_NF_FILTER=m -CONFIG_IP6_NF_TARGET_REJECT=m -CONFIG_IP6_NF_MANGLE=m -CONFIG_IP6_NF_RAW=m -CONFIG_BRIDGE_NF_EBTABLES=m -CONFIG_BRIDGE_EBT_BROUTE=m -CONFIG_BRIDGE_EBT_T_FILTER=m -CONFIG_BRIDGE_EBT_T_NAT=m -CONFIG_BRIDGE_EBT_802_3=m -CONFIG_BRIDGE_EBT_AMONG=m -CONFIG_BRIDGE_EBT_ARP=m -CONFIG_BRIDGE_EBT_IP=m -CONFIG_BRIDGE_EBT_IP6=m 
-CONFIG_BRIDGE_EBT_LIMIT=m -CONFIG_BRIDGE_EBT_MARK=m -CONFIG_BRIDGE_EBT_PKTTYPE=m -CONFIG_BRIDGE_EBT_STP=m -CONFIG_BRIDGE_EBT_VLAN=m -CONFIG_BRIDGE_EBT_ARPREPLY=m -CONFIG_BRIDGE_EBT_DNAT=m -CONFIG_BRIDGE_EBT_MARK_T=m -CONFIG_BRIDGE_EBT_REDIRECT=m -CONFIG_BRIDGE_EBT_SNAT=m -CONFIG_BRIDGE_EBT_LOG=m -CONFIG_BRIDGE_EBT_NFLOG=m -CONFIG_IP_SCTP=m -CONFIG_BRIDGE=m -CONFIG_VLAN_8021Q=m -CONFIG_VLAN_8021Q_GVRP=y -CONFIG_ATALK=m -CONFIG_DEV_APPLETALK=m -CONFIG_IPDDP=m -CONFIG_IPDDP_ENCAP=y -CONFIG_PHONET=m -CONFIG_NET_SCHED=y -CONFIG_NET_SCH_CBQ=m -CONFIG_NET_SCH_HTB=m -CONFIG_NET_SCH_HFSC=m -CONFIG_NET_SCH_PRIO=m -CONFIG_NET_SCH_RED=m -CONFIG_NET_SCH_SFQ=m -CONFIG_NET_SCH_TEQL=m -CONFIG_NET_SCH_TBF=m -CONFIG_NET_SCH_GRED=m -CONFIG_NET_SCH_DSMARK=m -CONFIG_NET_SCH_NETEM=m -CONFIG_NET_SCH_INGRESS=m -CONFIG_NET_CLS_BASIC=m -CONFIG_NET_CLS_TCINDEX=m -CONFIG_NET_CLS_ROUTE4=m -CONFIG_NET_CLS_FW=m -CONFIG_NET_CLS_U32=m -CONFIG_NET_CLS_RSVP=m -CONFIG_NET_CLS_RSVP6=m -CONFIG_NET_CLS_FLOW=m -CONFIG_NET_CLS_ACT=y -CONFIG_NET_ACT_POLICE=y -CONFIG_NET_ACT_GACT=m -CONFIG_GACT_PROB=y -CONFIG_NET_ACT_MIRRED=m -CONFIG_NET_ACT_IPT=m -CONFIG_NET_ACT_NAT=m -CONFIG_NET_ACT_PEDIT=m -CONFIG_NET_ACT_SIMP=m -CONFIG_NET_ACT_SKBEDIT=m -CONFIG_CFG80211=m -CONFIG_MAC80211=m -CONFIG_MAC80211_MESH=y -CONFIG_RFKILL=m -CONFIG_DEVTMPFS=y -CONFIG_CONNECTOR=m -CONFIG_MTD=y -CONFIG_MTD_BLOCK=y -CONFIG_MTD_OOPS=m -CONFIG_MTD_CFI=y -CONFIG_MTD_CFI_INTELEXT=y -CONFIG_MTD_CFI_AMDSTD=y -CONFIG_MTD_CFI_STAA=y -CONFIG_MTD_PHYSMAP_OF=y -CONFIG_MTD_UBI=m -CONFIG_MTD_UBI_GLUEBI=m -CONFIG_BLK_DEV_FD=m -CONFIG_BLK_DEV_UMEM=m -CONFIG_BLK_DEV_LOOP=m -CONFIG_BLK_DEV_CRYPTOLOOP=m -CONFIG_BLK_DEV_NBD=m -CONFIG_BLK_DEV_RAM=y -CONFIG_CDROM_PKTCDVD=m -CONFIG_ATA_OVER_ETH=m -CONFIG_VIRTIO_BLK=y -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_BLK_DEV_TC86C001=m -CONFIG_RAID_ATTRS=m -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_ST=m -CONFIG_CHR_DEV_OSST=m -CONFIG_BLK_DEV_SR=y -CONFIG_CHR_DEV_SG=m -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_LOGGING=y -CONFIG_SCSI_SCAN_ASYNC=y -CONFIG_SCSI_FC_ATTRS=m -CONFIG_ISCSI_TCP=m -CONFIG_BLK_DEV_3W_XXXX_RAID=m -CONFIG_SCSI_3W_9XXX=m -CONFIG_SCSI_ACARD=m -CONFIG_SCSI_AACRAID=m -CONFIG_SCSI_AIC7XXX=m -CONFIG_AIC7XXX_RESET_DELAY_MS=15000 -# CONFIG_AIC7XXX_DEBUG_ENABLE is not set -CONFIG_ATA=y -CONFIG_ATA_PIIX=y -CONFIG_PATA_IT8213=m -CONFIG_PATA_OLDPIIX=y -CONFIG_PATA_MPIIX=y -CONFIG_ATA_GENERIC=y -CONFIG_PATA_LEGACY=y -CONFIG_MD=y -CONFIG_BLK_DEV_MD=m -CONFIG_MD_LINEAR=m -CONFIG_MD_RAID0=m -CONFIG_MD_RAID1=m -CONFIG_MD_RAID10=m -CONFIG_MD_RAID456=m -CONFIG_MD_MULTIPATH=m -CONFIG_MD_FAULTY=m -CONFIG_BLK_DEV_DM=m -CONFIG_DM_CRYPT=m -CONFIG_DM_SNAPSHOT=m -CONFIG_DM_MIRROR=m -CONFIG_DM_ZERO=m -CONFIG_DM_MULTIPATH=m -CONFIG_NETDEVICES=y -CONFIG_BONDING=m -CONFIG_DUMMY=m -CONFIG_EQUALIZER=m -CONFIG_IFB=m -CONFIG_MACVLAN=m -CONFIG_TUN=m -CONFIG_VETH=m -CONFIG_VIRTIO_NET=y -CONFIG_PCNET32=y -CONFIG_CHELSIO_T3=m -CONFIG_AX88796=m -CONFIG_NETXEN_NIC=m -CONFIG_TC35815=m -CONFIG_BROADCOM_PHY=m -CONFIG_CICADA_PHY=m -CONFIG_DAVICOM_PHY=m -CONFIG_ICPLUS_PHY=m -CONFIG_LXT_PHY=m -CONFIG_MARVELL_PHY=m -CONFIG_QSEMI_PHY=m -CONFIG_REALTEK_PHY=m -CONFIG_SMSC_PHY=m -CONFIG_VITESSE_PHY=m -CONFIG_ATMEL=m -CONFIG_PCI_ATMEL=m -CONFIG_IPW2100=m -CONFIG_IPW2100_MONITOR=y -CONFIG_HOSTAP=m -CONFIG_HOSTAP_FIRMWARE=y -CONFIG_HOSTAP_FIRMWARE_NVRAM=y -CONFIG_HOSTAP_PLX=m -CONFIG_HOSTAP_PCI=m -CONFIG_PRISM54=m -CONFIG_LIBERTAS=m -CONFIG_INPUT_MOUSEDEV=y -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_POWER_RESET=y -CONFIG_POWER_RESET_PIIX4_POWEROFF=y 
-CONFIG_POWER_RESET_SYSCON=y -# CONFIG_HWMON is not set -CONFIG_FB=y -CONFIG_FB_CIRRUS=y -# CONFIG_VGA_CONSOLE is not set -CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_HID=m -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_CMOS=y -CONFIG_UIO=m -CONFIG_UIO_CIF=m -CONFIG_VIRTIO_PCI=y -CONFIG_VIRTIO_BALLOON=y -CONFIG_VIRTIO_MMIO=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -CONFIG_REISERFS_FS=m -CONFIG_REISERFS_PROC_INFO=y -CONFIG_REISERFS_FS_XATTR=y -CONFIG_REISERFS_FS_POSIX_ACL=y -CONFIG_REISERFS_FS_SECURITY=y -CONFIG_JFS_FS=m -CONFIG_JFS_POSIX_ACL=y -CONFIG_JFS_SECURITY=y -CONFIG_XFS_FS=m -CONFIG_XFS_QUOTA=y -CONFIG_XFS_POSIX_ACL=y -CONFIG_QUOTA=y -CONFIG_QFMT_V2=y -CONFIG_FUSE_FS=m -CONFIG_ISO9660_FS=m -CONFIG_JOLIET=y -CONFIG_ZISOFS=y -CONFIG_UDF_FS=m -CONFIG_MSDOS_FS=m -CONFIG_VFAT_FS=m -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_AFFS_FS=m -CONFIG_HFS_FS=m -CONFIG_HFSPLUS_FS=m -CONFIG_BEFS_FS=m -CONFIG_BFS_FS=m -CONFIG_EFS_FS=m -CONFIG_JFFS2_FS=m -CONFIG_JFFS2_FS_XATTR=y -CONFIG_JFFS2_COMPRESSION_OPTIONS=y -CONFIG_JFFS2_RUBIN=y -CONFIG_CRAMFS=m -CONFIG_VXFS_FS=m -CONFIG_MINIX_FS=m -CONFIG_ROMFS_FS=m -CONFIG_SYSV_FS=m -CONFIG_UFS_FS=m -CONFIG_NFS_FS=y -CONFIG_ROOT_NFS=y -CONFIG_NFSD=y -CONFIG_NFSD_V3=y -CONFIG_NLS_CODEPAGE_437=m -CONFIG_NLS_CODEPAGE_737=m -CONFIG_NLS_CODEPAGE_775=m -CONFIG_NLS_CODEPAGE_850=m -CONFIG_NLS_CODEPAGE_852=m -CONFIG_NLS_CODEPAGE_855=m -CONFIG_NLS_CODEPAGE_857=m -CONFIG_NLS_CODEPAGE_860=m -CONFIG_NLS_CODEPAGE_861=m -CONFIG_NLS_CODEPAGE_862=m -CONFIG_NLS_CODEPAGE_863=m -CONFIG_NLS_CODEPAGE_864=m -CONFIG_NLS_CODEPAGE_865=m -CONFIG_NLS_CODEPAGE_866=m -CONFIG_NLS_CODEPAGE_869=m -CONFIG_NLS_CODEPAGE_936=m -CONFIG_NLS_CODEPAGE_950=m -CONFIG_NLS_CODEPAGE_932=m -CONFIG_NLS_CODEPAGE_949=m -CONFIG_NLS_CODEPAGE_874=m -CONFIG_NLS_ISO8859_8=m -CONFIG_NLS_CODEPAGE_1250=m -CONFIG_NLS_CODEPAGE_1251=m -CONFIG_NLS_ASCII=m -CONFIG_NLS_ISO8859_1=m -CONFIG_NLS_ISO8859_2=m -CONFIG_NLS_ISO8859_3=m -CONFIG_NLS_ISO8859_4=m -CONFIG_NLS_ISO8859_5=m -CONFIG_NLS_ISO8859_6=m -CONFIG_NLS_ISO8859_7=m -CONFIG_NLS_ISO8859_9=m -CONFIG_NLS_ISO8859_13=m -CONFIG_NLS_ISO8859_14=m -CONFIG_NLS_ISO8859_15=m -CONFIG_NLS_KOI8_R=m -CONFIG_NLS_KOI8_U=m -CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_LRW=m -CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_MD4=m -CONFIG_CRYPTO_SHA512=m -CONFIG_CRYPTO_TGR192=m -CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_ANUBIS=m -CONFIG_CRYPTO_BLOWFISH=m -CONFIG_CRYPTO_CAMELLIA=m -CONFIG_CRYPTO_CAST5=m -CONFIG_CRYPTO_CAST6=m -CONFIG_CRYPTO_FCRYPT=m -CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_TEA=m -CONFIG_CRYPTO_TWOFISH=m diff --git a/arch/mips/configs/maltaup_xpa_defconfig b/arch/mips/configs/maltaup_xpa_defconfig index c93e5a39c215..c0d3156ef640 100644 --- a/arch/mips/configs/maltaup_xpa_defconfig +++ b/arch/mips/configs/maltaup_xpa_defconfig @@ -236,9 +236,6 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_BLK_DEV_TC86C001=m CONFIG_RAID_ATTRS=m CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m diff --git a/arch/mips/configs/rbtx49xx_defconfig b/arch/mips/configs/rbtx49xx_defconfig index 5e389db35fa7..69f2300107f9 100644 --- a/arch/mips/configs/rbtx49xx_defconfig +++ b/arch/mips/configs/rbtx49xx_defconfig @@ -44,9 +44,6 @@ CONFIG_MTD_NAND_TXX9NDFMC=m CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=8192 -CONFIG_IDE=y -CONFIG_BLK_DEV_IDE_TX4938=y -CONFIG_BLK_DEV_IDE_TX4939=y CONFIG_NETDEVICES=y CONFIG_NE2000=y CONFIG_SMC91X=y diff --git 
a/arch/mips/configs/sb1250_swarm_defconfig b/arch/mips/configs/sb1250_swarm_defconfig index bb0b1b22ebe1..de94bf756a93 100644 --- a/arch/mips/configs/sb1250_swarm_defconfig +++ b/arch/mips/configs/sb1250_swarm_defconfig @@ -17,7 +17,6 @@ CONFIG_64BIT=y CONFIG_SMP=y CONFIG_NR_CPUS=2 CONFIG_HZ_1000=y -CONFIG_PCI=y CONFIG_MIPS32_O32=y CONFIG_PM=y CONFIG_MODULES=y @@ -34,25 +33,28 @@ CONFIG_INET=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y -CONFIG_INET_XFRM_MODE_TRANSPORT=m -CONFIG_INET_XFRM_MODE_TUNNEL=m -CONFIG_INET_XFRM_MODE_BEET=m CONFIG_TCP_MD5SIG=y # CONFIG_IPV6 is not set CONFIG_NETWORK_SECMARK=y CONFIG_CFG80211=m CONFIG_MAC80211=m CONFIG_RFKILL=m +CONFIG_PCI=y CONFIG_FW_LOADER=m CONFIG_CONNECTOR=m CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=9220 CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_BLK_DEV_IDETAPE=y CONFIG_RAID_ATTRS=m +CONFIG_BLK_DEV_SD=y +CONFIG_BLK_DEV_SR=y +CONFIG_CHR_DEV_SG=y +CONFIG_SCSI_CONSTANTS=y +# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_ATA=y +# CONFIG_ATA_BMDMA is not set +CONFIG_PATA_PLATFORM=y CONFIG_NETDEVICES=y CONFIG_MACVLAN=m CONFIG_SB1250_MAC=y @@ -88,18 +90,14 @@ CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m CONFIG_CRYPTO_CAST5=m CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_DES=m CONFIG_CRYPTO_FCRYPT=m -CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SALSA20=m -CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_DEFLATE=m CONFIG_CRYPTO_LZO=m diff --git a/arch/mips/configs/workpad_defconfig b/arch/mips/configs/workpad_defconfig index 891a5f77305d..4798dc86c9ce 100644 --- a/arch/mips/configs/workpad_defconfig +++ b/arch/mips/configs/workpad_defconfig @@ -26,9 +26,12 @@ CONFIG_IP_MULTICAST=y # CONFIG_IPV6 is not set CONFIG_NETWORK_SECMARK=y CONFIG_BLK_DEV_RAM=m -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECS=m -CONFIG_IDE_GENERIC=y +# CONFIG_SCSI_PROC_FS is not set +# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_ATA=y +# CONFIG_ATA_VERBOSE_ERROR is not set +# CONFIG_ATA_FORCE is not set +# CONFIG_ATA_BMDMA is not set CONFIG_NETDEVICES=y CONFIG_PCMCIA_3C574=m CONFIG_PCMCIA_3C589=m diff --git a/arch/mips/crypto/.gitignore b/arch/mips/crypto/.gitignore new file mode 100644 index 000000000000..0d47d4f21c6d --- /dev/null +++ b/arch/mips/crypto/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +poly1305-core.S diff --git a/arch/mips/generic/board-boston.its.S b/arch/mips/generic/board-boston.its.S index a7f51f97b910..c45ad2759421 100644 --- a/arch/mips/generic/board-boston.its.S +++ b/arch/mips/generic/board-boston.its.S @@ -1,22 +1,22 @@ / { images { - fdt@boston { + fdt-boston { description = "img,boston Device Tree"; data = /incbin/("boot/dts/img/boston.dtb"); type = "flat_dt"; arch = "mips"; compression = "none"; - hash@0 { + hash { algo = "sha1"; }; }; }; configurations { - conf@boston { + conf-boston { description = "Boston Linux kernel"; - kernel = "kernel@0"; - fdt = "fdt@boston"; + kernel = "kernel"; + fdt = "fdt-boston"; }; }; }; diff --git a/arch/mips/generic/board-jaguar2.its.S b/arch/mips/generic/board-jaguar2.its.S index fb0e589eeff7..c2b8d479b26c 100644 --- a/arch/mips/generic/board-jaguar2.its.S +++ b/arch/mips/generic/board-jaguar2.its.S @@ -1,23 +1,23 @@ /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ / { images { - fdt@jaguar2_pcb110 { + fdt-jaguar2_pcb110 { description = "MSCC Jaguar2 PCB110 Device Tree"; data = 
/incbin/("boot/dts/mscc/jaguar2_pcb110.dtb"); type = "flat_dt"; arch = "mips"; compression = "none"; - hash@0 { + hash { algo = "sha1"; }; }; - fdt@jaguar2_pcb111 { + fdt-jaguar2_pcb111 { description = "MSCC Jaguar2 PCB111 Device Tree"; data = /incbin/("boot/dts/mscc/jaguar2_pcb111.dtb"); type = "flat_dt"; arch = "mips"; compression = "none"; - hash@0 { + hash { algo = "sha1"; }; }; @@ -26,14 +26,14 @@ configurations { pcb110 { description = "Jaguar2 Linux kernel"; - kernel = "kernel@0"; - fdt = "fdt@jaguar2_pcb110"; + kernel = "kernel"; + fdt = "fdt-jaguar2_pcb110"; ramdisk = "ramdisk"; }; pcb111 { description = "Jaguar2 Linux kernel"; - kernel = "kernel@0"; - fdt = "fdt@jaguar2_pcb111"; + kernel = "kernel"; + fdt = "fdt-jaguar2_pcb111"; ramdisk = "ramdisk"; }; }; diff --git a/arch/mips/generic/board-luton.its.S b/arch/mips/generic/board-luton.its.S index 39a543f62f25..bd9837c9af97 100644 --- a/arch/mips/generic/board-luton.its.S +++ b/arch/mips/generic/board-luton.its.S @@ -1,13 +1,13 @@ /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ / { images { - fdt@luton_pcb091 { + fdt-luton_pcb091 { description = "MSCC Luton PCB091 Device Tree"; data = /incbin/("boot/dts/mscc/luton_pcb091.dtb"); type = "flat_dt"; arch = "mips"; compression = "none"; - hash@0 { + hash { algo = "sha1"; }; }; @@ -16,8 +16,8 @@ configurations { pcb091 { description = "Luton Linux kernel"; - kernel = "kernel@0"; - fdt = "fdt@luton_pcb091"; + kernel = "kernel"; + fdt = "fdt-luton_pcb091"; }; }; }; diff --git a/arch/mips/generic/board-ni169445.its.S b/arch/mips/generic/board-ni169445.its.S index e4cb4f95a8cc..0a2e8f7a8526 100644 --- a/arch/mips/generic/board-ni169445.its.S +++ b/arch/mips/generic/board-ni169445.its.S @@ -1,22 +1,22 @@ / { images { - fdt@ni169445 { + fdt-ni169445 { description = "NI 169445 device tree"; data = /incbin/("boot/dts/ni/169445.dtb"); type = "flat_dt"; arch = "mips"; compression = "none"; - hash@0 { + hash { algo = "sha1"; }; }; }; configurations { - conf@ni169445 { + conf-ni169445 { description = "NI 169445 Linux Kernel"; - kernel = "kernel@0"; - fdt = "fdt@ni169445"; + kernel = "kernel"; + fdt = "fdt-ni169445"; }; }; }; diff --git a/arch/mips/generic/board-ocelot.its.S b/arch/mips/generic/board-ocelot.its.S index 3da23988149a..8c7e3a1b68d3 100644 --- a/arch/mips/generic/board-ocelot.its.S +++ b/arch/mips/generic/board-ocelot.its.S @@ -1,40 +1,40 @@ /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ / { images { - fdt@ocelot_pcb123 { + fdt-ocelot_pcb123 { description = "MSCC Ocelot PCB123 Device Tree"; data = /incbin/("boot/dts/mscc/ocelot_pcb123.dtb"); type = "flat_dt"; arch = "mips"; compression = "none"; - hash@0 { + hash { algo = "sha1"; }; }; - fdt@ocelot_pcb120 { + fdt-ocelot_pcb120 { description = "MSCC Ocelot PCB120 Device Tree"; data = /incbin/("boot/dts/mscc/ocelot_pcb120.dtb"); type = "flat_dt"; arch = "mips"; compression = "none"; - hash@0 { + hash { algo = "sha1"; }; }; }; configurations { - conf@ocelot_pcb123 { + conf-ocelot_pcb123 { description = "Ocelot Linux kernel"; - kernel = "kernel@0"; - fdt = "fdt@ocelot_pcb123"; + kernel = "kernel"; + fdt = "fdt-ocelot_pcb123"; }; - conf@ocelot_pcb120 { + conf-ocelot_pcb120 { description = "Ocelot Linux kernel"; - kernel = "kernel@0"; - fdt = "fdt@ocelot_pcb120"; + kernel = "kernel"; + fdt = "fdt-ocelot_pcb120"; }; }; }; diff --git a/arch/mips/generic/board-serval.its.S b/arch/mips/generic/board-serval.its.S index 4ea4fc9d757f..dde833efe980 100644 --- a/arch/mips/generic/board-serval.its.S +++ b/arch/mips/generic/board-serval.its.S @@ -1,13 
+1,13 @@ /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ / { images { - fdt@serval_pcb105 { + fdt-serval_pcb105 { description = "MSCC Serval PCB105 Device Tree"; data = /incbin/("boot/dts/mscc/serval_pcb105.dtb"); type = "flat_dt"; arch = "mips"; compression = "none"; - hash@0 { + hash { algo = "sha1"; }; }; @@ -16,8 +16,8 @@ configurations { pcb105 { description = "Serval Linux kernel"; - kernel = "kernel@0"; - fdt = "fdt@serval_pcb105"; + kernel = "kernel"; + fdt = "fdt-serval_pcb105"; ramdisk = "ramdisk"; }; }; diff --git a/arch/mips/generic/board-xilfpga.its.S b/arch/mips/generic/board-xilfpga.its.S index a2e773d3f14f..08c1e900eb4e 100644 --- a/arch/mips/generic/board-xilfpga.its.S +++ b/arch/mips/generic/board-xilfpga.its.S @@ -1,22 +1,22 @@ / { images { - fdt@xilfpga { + fdt-xilfpga { description = "MIPSfpga (xilfpga) Device Tree"; data = /incbin/("boot/dts/xilfpga/nexys4ddr.dtb"); type = "flat_dt"; arch = "mips"; compression = "none"; - hash@0 { + hash { algo = "sha1"; }; }; }; configurations { - conf@xilfpga { + conf-xilfpga { description = "MIPSfpga Linux kernel"; - kernel = "kernel@0"; - fdt = "fdt@xilfpga"; + kernel = "kernel"; + fdt = "fdt-xilfpga"; }; }; }; diff --git a/arch/mips/generic/vmlinux.its.S b/arch/mips/generic/vmlinux.its.S index 1a08438fd893..3e254676540f 100644 --- a/arch/mips/generic/vmlinux.its.S +++ b/arch/mips/generic/vmlinux.its.S @@ -6,7 +6,7 @@ #address-cells = <ADDR_CELLS>; images { - kernel@0 { + kernel { description = KERNEL_NAME; data = /incbin/(VMLINUX_BINARY); type = "kernel"; @@ -15,18 +15,18 @@ compression = VMLINUX_COMPRESSION; load = /bits/ ADDR_BITS <VMLINUX_LOAD_ADDRESS>; entry = /bits/ ADDR_BITS <VMLINUX_ENTRY_ADDRESS>; - hash@0 { + hash { algo = "sha1"; }; }; }; configurations { - default = "conf@default"; + default = "conf-default"; - conf@default { + conf-default { description = "Generic Linux kernel"; - kernel = "kernel@0"; + kernel = "kernel"; }; }; }; diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index 8f6fe69674b7..dee172716581 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ -1,9 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 # MIPS headers -generated-y += syscall_table_32_o32.h -generated-y += syscall_table_64_n32.h -generated-y += syscall_table_64_n64.h -generated-y += syscall_table_64_o32.h +generated-y += syscall_table_n32.h +generated-y += syscall_table_n64.h +generated-y += syscall_table_o32.h generated-y += unistd_nr_n32.h generated-y += unistd_nr_n64.h generated-y += unistd_nr_o32.h diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h index 86f2323ebe6b..ca83ada7015f 100644 --- a/arch/mips/include/asm/asmmacro.h +++ b/arch/mips/include/asm/asmmacro.h @@ -44,8 +44,7 @@ .endm #endif -#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \ - defined(CONFIG_CPU_MIPSR6) +#ifdef CONFIG_CPU_HAS_DIEI .macro local_irq_enable reg=t0 ei irq_enable_hazard diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h index 5be10ece3ef0..4c2e8173e6ec 100644 --- a/arch/mips/include/asm/bootinfo.h +++ b/arch/mips/include/asm/bootinfo.h @@ -107,7 +107,7 @@ extern void (*free_init_pages_eva)(void *begin, void *end); extern char arcs_cmdline[COMMAND_LINE_SIZE]; /* - * Registers a0, a1, a3 and a4 as passed to the kernel entry by firmware + * Registers a0, a1, a2 and a3 as passed to the kernel entry by firmware */ extern unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3; diff --git a/arch/mips/include/asm/div64.h 
b/arch/mips/include/asm/div64.h index dc5ea5736440..ceece76fc971 100644 --- a/arch/mips/include/asm/div64.h +++ b/arch/mips/include/asm/div64.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2000, 2004 Maciej W. Rozycki + * Copyright (C) 2000, 2004, 2021 Maciej W. Rozycki * Copyright (C) 2003, 07 Ralf Baechle (ralf@linux-mips.org) * * This file is subject to the terms and conditions of the GNU General Public @@ -9,25 +9,18 @@ #ifndef __ASM_DIV64_H #define __ASM_DIV64_H -#include <asm-generic/div64.h> - -#if BITS_PER_LONG == 64 +#include <asm/bitsperlong.h> -#include <linux/types.h> +#if BITS_PER_LONG == 32 /* * No traps on overflows for any of these... */ -#define __div64_32(n, base) \ -({ \ +#define do_div64_32(res, high, low, base) ({ \ unsigned long __cf, __tmp, __tmp2, __i; \ unsigned long __quot32, __mod32; \ - unsigned long __high, __low; \ - unsigned long long __n; \ \ - __high = *__n >> 32; \ - __low = __n; \ __asm__( \ " .set push \n" \ " .set noat \n" \ @@ -51,18 +44,48 @@ " subu %0, %0, %z6 \n" \ " addiu %2, %2, 1 \n" \ "3: \n" \ - " bnez %4, 0b\n\t" \ - " srl %5, %1, 0x1f\n\t" \ + " bnez %4, 0b \n" \ + " srl %5, %1, 0x1f \n" \ " .set pop" \ : "=&r" (__mod32), "=&r" (__tmp), \ "=&r" (__quot32), "=&r" (__cf), \ "=&r" (__i), "=&r" (__tmp2) \ - : "Jr" (base), "0" (__high), "1" (__low)); \ + : "Jr" (base), "0" (high), "1" (low)); \ \ - (__n) = __quot32; \ + (res) = __quot32; \ __mod32; \ }) -#endif /* BITS_PER_LONG == 64 */ +#define __div64_32(n, base) ({ \ + unsigned long __upper, __low, __high, __radix; \ + unsigned long long __quot; \ + unsigned long long __div; \ + unsigned long __mod; \ + \ + __div = (*n); \ + __radix = (base); \ + \ + __high = __div >> 32; \ + __low = __div; \ + \ + if (__high < __radix) { \ + __upper = __high; \ + __high = 0; \ + } else { \ + __upper = __high % __radix; \ + __high /= __radix; \ + } \ + \ + __mod = do_div64_32(__low, __upper, __low, __radix); \ + \ + __quot = __high; \ + __quot = __quot << 32 | __low; \ + (*n) = __quot; \ + __mod; \ +}) + +#endif /* BITS_PER_LONG == 32 */ + +#include <asm-generic/div64.h> #endif /* __ASM_DIV64_H */ diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index 78537aa23500..2c138450ad3b 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h @@ -100,11 +100,23 @@ static inline void set_io_port_base(unsigned long base) * almost all conceivable cases a device driver should not be using * this function */ -static inline unsigned long virt_to_phys(volatile const void *address) +static inline unsigned long __virt_to_phys_nodebug(volatile const void *address) { return __pa(address); } +#ifdef CONFIG_DEBUG_VIRTUAL +extern phys_addr_t __virt_to_phys(volatile const void *x); +#else +#define __virt_to_phys(x) __virt_to_phys_nodebug(x) +#endif + +#define virt_to_phys virt_to_phys +static inline phys_addr_t virt_to_phys(const volatile void *x) +{ + return __virt_to_phys(x); +} + /* * phys_to_virt - map physical address to virtual * @address: address to remap diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 3a5612e7304c..603ad562d101 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -88,44 +88,10 @@ #define KVM_HALT_POLL_NS_DEFAULT 500000 -#ifdef CONFIG_KVM_MIPS_VZ extern unsigned long GUESTID_MASK; extern unsigned long GUESTID_FIRST_VERSION; extern unsigned long GUESTID_VERSION_MASK; -#endif - - -/* - * Special address that contains the comm page, used for reducing # of traps - * This needs to be within 32Kb of 0x0 (so the zero 
register can be used), but - * preferably not at 0x0 so that most kernel NULL pointer dereferences can be - * caught. - */ -#define KVM_GUEST_COMMPAGE_ADDR ((PAGE_SIZE > 0x8000) ? 0 : \ - (0x8000 - PAGE_SIZE)) -#define KVM_GUEST_KERNEL_MODE(vcpu) ((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \ - ((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0)) - -#define KVM_GUEST_KUSEG 0x00000000UL -#define KVM_GUEST_KSEG0 0x40000000UL -#define KVM_GUEST_KSEG1 0x40000000UL -#define KVM_GUEST_KSEG23 0x60000000UL -#define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0xe0000000) -#define KVM_GUEST_CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff) - -#define KVM_GUEST_CKSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0) -#define KVM_GUEST_CKSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1) -#define KVM_GUEST_CKSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23) - -/* - * Map an address to a certain kernel segment - */ -#define KVM_GUEST_KSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0) -#define KVM_GUEST_KSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1) -#define KVM_GUEST_KSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23) - -#define KVM_INVALID_PAGE 0xdeadbeef #define KVM_INVALID_ADDR 0xdeadbeef /* @@ -165,7 +131,6 @@ struct kvm_vcpu_stat { u64 fpe_exits; u64 msa_disabled_exits; u64 flush_dcache_exits; -#ifdef CONFIG_KVM_MIPS_VZ u64 vz_gpsi_exits; u64 vz_gsfc_exits; u64 vz_hc_exits; @@ -177,7 +142,6 @@ struct kvm_vcpu_stat { #ifdef CONFIG_CPU_LOONGSON64 u64 vz_cpucfg_exits; #endif -#endif u64 halt_successful_poll; u64 halt_attempted_poll; u64 halt_poll_success_ns; @@ -303,14 +267,6 @@ enum emulation_result { EMULATE_HYPERCALL, /* HYPCALL instruction */ }; -#define mips3_paddr_to_tlbpfn(x) \ - (((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME) -#define mips3_tlbpfn_to_paddr(x) \ - ((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT) - -#define MIPS3_PG_SHIFT 6 -#define MIPS3_PG_FRAME 0x3fffffc0 - #if defined(CONFIG_64BIT) #define VPN2_MASK GENMASK(cpu_vmbits - 1, 13) #else @@ -337,7 +293,6 @@ struct kvm_mips_tlb { #define KVM_MIPS_AUX_FPU 0x1 #define KVM_MIPS_AUX_MSA 0x2 -#define KVM_MIPS_GUEST_TLB_SIZE 64 struct kvm_vcpu_arch { void *guest_ebase; int (*vcpu_run)(struct kvm_vcpu *vcpu); @@ -370,9 +325,6 @@ struct kvm_vcpu_arch { /* COP0 State */ struct mips_coproc *cop0; - /* Host KSEG0 address of the EI/DI offset */ - void *kseg0_commpage; - /* Resume PC after MMIO completion */ unsigned long io_pc; /* GPR used as IO source/target */ @@ -398,19 +350,9 @@ struct kvm_vcpu_arch { /* Bitmask of pending exceptions to be cleared */ unsigned long pending_exceptions_clr; - /* S/W Based TLB for guest */ - struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE]; - - /* Guest kernel/user [partial] mm */ - struct mm_struct guest_kernel_mm, guest_user_mm; - - /* Guest ASID of last user mode execution */ - unsigned int last_user_gasid; - /* Cache some mmu pages needed inside spinlock regions */ struct kvm_mmu_memory_cache mmu_page_cache; -#ifdef CONFIG_KVM_MIPS_VZ /* vcpu's vzguestid is different on each host cpu in an smp system */ u32 vzguestid[NR_CPUS]; @@ -421,7 +363,6 @@ struct kvm_vcpu_arch { /* emulated guest MAAR registers */ unsigned long maar[6]; -#endif /* Last CPU the VCPU state was loaded on */ int last_sched_cpu; @@ -651,20 +592,6 @@ static inline void kvm_change_##name1(struct mips_coproc *cop0, \ __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel) \ __BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type) -#ifndef CONFIG_KVM_MIPS_VZ - -/* - * 
T&E (trap & emulate software based virtualisation) - * We generate the common accessors operating exclusively on the saved context - * in RAM. - */ - -#define __BUILD_KVM_RW_HW __BUILD_KVM_RW_SW -#define __BUILD_KVM_SET_HW __BUILD_KVM_SET_SW -#define __BUILD_KVM_ATOMIC_HW __BUILD_KVM_ATOMIC_SW - -#else - /* * VZ (hardware assisted virtualisation) * These macros use the active guest state in VZ mode (hardware registers), @@ -697,8 +624,6 @@ static inline void kvm_change_##name1(struct mips_coproc *cop0, \ */ #define __BUILD_KVM_ATOMIC_HW __BUILD_KVM_SET_HW -#endif - /* * Define accessors for CP0 registers that are accessible to the guest. These * are primarily used by common emulation code, which may need to access the @@ -874,42 +799,9 @@ void kvm_drop_fpu(struct kvm_vcpu *vcpu); void kvm_lose_fpu(struct kvm_vcpu *vcpu); /* TLB handling */ -u32 kvm_get_kernel_asid(struct kvm_vcpu *vcpu); - -u32 kvm_get_user_asid(struct kvm_vcpu *vcpu); - -u32 kvm_get_commpage_asid (struct kvm_vcpu *vcpu); - -#ifdef CONFIG_KVM_MIPS_VZ int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr, struct kvm_vcpu *vcpu, bool write_fault); -#endif -extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badbaddr, - struct kvm_vcpu *vcpu, - bool write_fault); - -extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, - struct kvm_vcpu *vcpu); -extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, - struct kvm_mips_tlb *tlb, - unsigned long gva, - bool write_fault); - -extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu, - bool write_fault); - -extern void kvm_mips_dump_host_tlbs(void); -extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu); -extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi, - bool user, bool kernel); - -extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, - unsigned long entryhi); - -#ifdef CONFIG_KVM_MIPS_VZ int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi); int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva, unsigned long *gpa); @@ -923,48 +815,13 @@ void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index, void kvm_loongson_clear_guest_vtlb(void); void kvm_loongson_clear_guest_ftlb(void); #endif -#endif - -void kvm_mips_suspend_mm(int cpu); -void kvm_mips_resume_mm(int cpu); /* MMU handling */ -/** - * enum kvm_mips_flush - Types of MMU flushes. - * @KMF_USER: Flush guest user virtual memory mappings. - * Guest USeg only. - * @KMF_KERN: Flush guest kernel virtual memory mappings. - * Guest USeg and KSeg2/3. - * @KMF_GPA: Flush guest physical memory mappings. - * Also includes KSeg0 if KMF_KERN is set. 
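For reference, the __div64_32() rework in the div64.h hunk above splits the 64-bit dividend into its two 32-bit halves: the high word is divided first, and the assembler helper then divides the combined (remainder:low) value, so no intermediate quotient ever exceeds 32 bits. A portable C sketch of that split, assuming a 32-bit divisor; this is illustrative only, the kernel keeps the inner step in handwritten assembly:

#include <stdint.h>

/* Sketch of the high/low split performed by the patched __div64_32();
 * plain 64-bit arithmetic stands in for the MIPS assembler helper. */
static uint32_t div64_32_sketch(uint64_t *n, uint32_t base)
{
	uint32_t high = (uint32_t)(*n >> 32);
	uint32_t low = (uint32_t)*n;
	uint32_t upper;

	if (high < base) {		/* high word of the quotient is zero */
		upper = high;
		high = 0;
	} else {
		upper = high % base;
		high = high / base;
	}

	/* Divide (upper:low) by base; upper < base, so this quotient is
	 * guaranteed to fit in 32 bits. */
	uint64_t rest = ((uint64_t)upper << 32) | low;
	low = (uint32_t)(rest / base);

	*n = ((uint64_t)high << 32) | low;
	return (uint32_t)(rest % base);	/* remainder, as do_div() expects */
}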
- */ -enum kvm_mips_flush { - KMF_USER = 0x0, - KMF_KERN = 0x1, - KMF_GPA = 0x2, -}; -void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags); bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn); int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn); pgd_t *kvm_pgd_alloc(void); void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu); -void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr, - bool user); -void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu); -void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu); - -enum kvm_mips_fault_result { - KVM_MIPS_MAPPED = 0, - KVM_MIPS_GVA, - KVM_MIPS_GPA, - KVM_MIPS_TLB, - KVM_MIPS_TLBINV, - KVM_MIPS_TLBMOD, -}; -enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu, - unsigned long gva, - bool write); #define KVM_ARCH_WANT_MMU_NOTIFIER int kvm_unmap_hva_range(struct kvm *kvm, @@ -974,7 +831,6 @@ int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); /* Emulation */ -int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out); enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause); int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out); int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out); @@ -1006,68 +862,6 @@ static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *vcpu) return false; } -extern enum emulation_result kvm_mips_emulate_inst(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); - -long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu); - -extern enum emulation_result kvm_mips_emulate_syscall(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); - -extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); - -extern enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); - -extern enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); - -extern enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); - -extern enum emulation_result kvm_mips_emulate_tlbmod(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); - -extern enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); - -extern enum emulation_result kvm_mips_handle_ri(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); - -extern enum emulation_result kvm_mips_emulate_ri_exc(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); - -extern enum emulation_result kvm_mips_emulate_bp_exc(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); - -extern enum emulation_result kvm_mips_emulate_trap_exc(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); - -extern enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); - -extern enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); - -extern enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); - extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu); u32 kvm_mips_read_count(struct kvm_vcpu *vcpu); @@ -1087,26 +881,9 @@ ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count); int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before, u32 count, int min_drift); -#ifdef CONFIG_KVM_MIPS_VZ void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu); void kvm_vz_lose_htimer(struct 
kvm_vcpu *vcpu); -#else -static inline void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu) {} -static inline void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu) {} -#endif - -enum emulation_result kvm_mips_check_privilege(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu); -enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst, - u32 *opc, - u32 cause, - struct kvm_vcpu *vcpu); -enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst, - u32 *opc, - u32 cause, - struct kvm_vcpu *vcpu); enum emulation_result kvm_mips_emulate_store(union mips_instruction inst, u32 cause, struct kvm_vcpu *vcpu); @@ -1117,27 +894,12 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst, /* COP0 */ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu); -unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu); -unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu); -unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu); -unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu); - /* Hypercalls (hypcall.c) */ enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu, union mips_instruction inst); int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu); -/* Dynamic binary translation */ -extern int kvm_mips_trans_cache_index(union mips_instruction inst, - u32 *opc, struct kvm_vcpu *vcpu); -extern int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc, - struct kvm_vcpu *vcpu); -extern int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc, - struct kvm_vcpu *vcpu); -extern int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc, - struct kvm_vcpu *vcpu); - /* Misc */ extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu); extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm); diff --git a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h index c38b38ce5a3d..b071a7353ee1 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h @@ -157,4 +157,12 @@ octeon_main_processor: .macro smp_slave_setup .endm +#define USE_KEXEC_SMP_WAIT_FINAL + .macro kexec_smp_wait_final + .set push + .set noreorder + synci 0($0) + .set pop + .endm + #endif /* __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H */ diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h index c3ac06a6acd2..b247575c5e69 100644 --- a/arch/mips/include/asm/mach-generic/spaces.h +++ b/arch/mips/include/asm/mach-generic/spaces.h @@ -30,11 +30,7 @@ #endif /* __ASSEMBLY__ */ #ifdef CONFIG_32BIT -#ifdef CONFIG_KVM_GUEST -#define CAC_BASE _AC(0x40000000, UL) -#else #define CAC_BASE _AC(0x80000000, UL) -#endif #ifndef IO_BASE #define IO_BASE _AC(0xa0000000, UL) #endif @@ -43,12 +39,8 @@ #endif #ifndef MAP_BASE -#ifdef CONFIG_KVM_GUEST -#define MAP_BASE _AC(0x60000000, UL) -#else #define MAP_BASE _AC(0xc0000000, UL) #endif -#endif /* * Memory above this physical address will be considered highmem. 
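The CAC_BASE, IO_BASE and MAP_BASE values above are the classic MIPS32 segment bases: KSEG0 (0x80000000) is the cached unmapped window, KSEG1 (0xa0000000) the uncached unmapped window, and the 0xc0000000 region is TLB-mapped and used for vmalloc. Converting a low physical address to its unmapped virtual aliases is therefore a simple OR with the segment base; a minimal sketch (illustrative only, the kernel's own helpers are the CKSEG0ADDR()/CKSEG1ADDR() macros from asm/addrspace.h):

#include <stdint.h>

#define KSEG0_BASE	0x80000000UL	/* cached, unmapped (CAC_BASE)  */
#define KSEG1_BASE	0xa0000000UL	/* uncached, unmapped (IO_BASE) */

/* Physical -> cached virtual alias; only valid for the low 512 MiB. */
static inline uintptr_t phys_to_kseg0(uintptr_t pa)
{
	return KSEG0_BASE | (pa & 0x1fffffffUL);
}

/* Physical -> uncached virtual alias, e.g. for device registers. */
static inline uintptr_t phys_to_kseg1(uintptr_t pa)
{
	return KSEG1_BASE | (pa & 0x1fffffffUL);
}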
@@ -100,11 +92,7 @@ #endif #ifndef FIXADDR_TOP -#ifdef CONFIG_KVM_GUEST -#define FIXADDR_TOP ((unsigned long)(long)(int)0x7ffe0000) -#else #define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000) #endif -#endif #endif /* __ASM_MACH_GENERIC_SPACES_H */ diff --git a/arch/mips/include/asm/mach-loongson64/boot_param.h b/arch/mips/include/asm/mach-loongson64/boot_param.h index 4592841b6b0c..035b1a69e2d0 100644 --- a/arch/mips/include/asm/mach-loongson64/boot_param.h +++ b/arch/mips/include/asm/mach-loongson64/boot_param.h @@ -198,33 +198,6 @@ enum loongson_bridge_type { VIRTUAL = 3 }; -struct loongson_system_configuration { - u32 nr_cpus; - u32 nr_nodes; - int cores_per_node; - int cores_per_package; - u16 boot_cpu_id; - u16 reserved_cpus_mask; - enum loongson_cpu_type cputype; - enum loongson_bridge_type bridgetype; - u64 ht_control_base; - u64 pci_mem_start_addr; - u64 pci_mem_end_addr; - u64 pci_io_base; - u64 restart_addr; - u64 poweroff_addr; - u64 suspend_addr; - u64 vgabios_addr; - u32 dma_mask_bits; - char ecname[32]; - u32 nr_uarts; - struct uart_device uarts[MAX_UARTS]; - u32 nr_sensors; - struct sensor_device sensors[MAX_SENSORS]; - u64 workarounds; - void (*early_config)(void); -}; - extern struct efi_memory_map_loongson *loongson_memmap; extern struct loongson_system_configuration loongson_sysconf; diff --git a/arch/mips/include/asm/mach-loongson64/builtin_dtbs.h b/arch/mips/include/asm/mach-loongson64/builtin_dtbs.h index 839410cda621..8be710557bdb 100644 --- a/arch/mips/include/asm/mach-loongson64/builtin_dtbs.h +++ b/arch/mips/include/asm/mach-loongson64/builtin_dtbs.h @@ -8,6 +8,7 @@ #ifndef __ASM_MACH_LOONGSON64_BUILTIN_DTBS_H_ #define __ASM_MACH_LOONGSON64_BUILTIN_DTBS_H_ +extern u32 __dtb_loongson64_2core_2k1000_begin[]; extern u32 __dtb_loongson64c_4core_ls7a_begin[]; extern u32 __dtb_loongson64c_4core_rs780e_begin[]; extern u32 __dtb_loongson64c_8core_rs780e_begin[]; diff --git a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h index e4d77f4f0fe3..13373c5144f8 100644 --- a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h @@ -75,4 +75,31 @@ .set pop .endm +#define USE_KEXEC_SMP_WAIT_FINAL + .macro kexec_smp_wait_final + /* s0:prid s1:initfn */ + /* a0:base t1:cpuid t2:node t9:count */ + mfc0 t1, CP0_EBASE + andi t1, MIPS_EBASE_CPUNUM + dins a0, t1, 8, 2 /* insert core id*/ + dext t2, t1, 2, 2 + dins a0, t2, 44, 2 /* insert node id */ + mfc0 s0, CP0_PRID + andi s0, s0, (PRID_IMP_MASK | PRID_REV_MASK) + beq s0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3B_R1), 1f + beq s0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3B_R2), 1f + b 2f /* Loongson-3A1000/3A2000/3A3000/3A4000 */ +1: dins a0, t2, 14, 2 /* Loongson-3B1000/3B1500 need bit 15~14 */ +2: li t9, 0x100 /* wait for init loop */ +3: addiu t9, -1 /* limit mailbox access */ + bnez t9, 3b + lw s1, 0x20(a0) /* check PC as an indicator */ + beqz s1, 2b + ld s1, 0x20(a0) /* get PC via mailbox reg0 */ + ld sp, 0x28(a0) /* get SP via mailbox reg1 */ + ld gp, 0x30(a0) /* get GP via mailbox reg2 */ + ld a1, 0x38(a0) + jr s1 /* jump to initial PC */ + .endm + #endif /* __ASM_MACH_LOONGSON64_KERNEL_ENTRY_H */ diff --git a/arch/mips/include/asm/mach-loongson64/loongson.h b/arch/mips/include/asm/mach-loongson64/loongson.h index ac1c20e172a2..f7c3ab6d724e 100644 --- a/arch/mips/include/asm/mach-loongson64/loongson.h +++ b/arch/mips/include/asm/mach-loongson64/loongson.h @@ -12,6 +12,30 @@ #include 
<linux/irq.h> #include <boot_param.h> +enum loongson_fw_interface { + LOONGSON_LEFI, + LOONGSON_DTB, +}; + +/* machine-specific boot configuration */ +struct loongson_system_configuration { + enum loongson_fw_interface fw_interface; + u32 nr_cpus; + u32 nr_nodes; + int cores_per_node; + int cores_per_package; + u16 boot_cpu_id; + u16 reserved_cpus_mask; + enum loongson_cpu_type cputype; + enum loongson_bridge_type bridgetype; + u64 restart_addr; + u64 poweroff_addr; + u64 suspend_addr; + u64 vgabios_addr; + u32 dma_mask_bits; + u64 workarounds; + void (*early_config)(void); +}; /* machine-specific reboot/halt operation */ extern void mach_prepare_reboot(void); @@ -23,7 +47,8 @@ extern u32 memsize, highmemsize; extern const struct plat_smp_ops loongson3_smp_ops; /* loongson-specific command line, env and memory initialization */ -extern void __init prom_init_env(void); +extern void __init prom_dtb_init_env(void); +extern void __init prom_lefi_init_env(void); extern void __init szmem(unsigned int node); extern void *loongson_fdt_blob; diff --git a/arch/mips/include/asm/mach-ralink/mt7621.h b/arch/mips/include/asm/mach-ralink/mt7621.h index e1af1ba50bd8..6bbf082dd149 100644 --- a/arch/mips/include/asm/mach-ralink/mt7621.h +++ b/arch/mips/include/asm/mach-ralink/mt7621.h @@ -24,9 +24,10 @@ #define CHIP_REV_VER_SHIFT 8 #define CHIP_REV_ECO_MASK 0xf -#define MT7621_DRAM_BASE 0x0 -#define MT7621_DDR2_SIZE_MIN 32 -#define MT7621_DDR2_SIZE_MAX 256 +#define MT7621_LOWMEM_BASE 0x0 +#define MT7621_LOWMEM_MAX_SIZE 0x1C000000 +#define MT7621_HIGHMEM_BASE 0x20000000 +#define MT7621_HIGHMEM_SIZE 0x4000000 #define MT7621_CHIP_NAME0 0x3637544D #define MT7621_CHIP_NAME1 0x20203132 diff --git a/arch/mips/include/asm/mips-cps.h b/arch/mips/include/asm/mips-cps.h index fd43d876892e..35fb8ee6dd33 100644 --- a/arch/mips/include/asm/mips-cps.h +++ b/arch/mips/include/asm/mips-cps.h @@ -10,6 +10,8 @@ #include <linux/io.h> #include <linux/types.h> +#include <asm/mips-boards/launch.h> + extern unsigned long __cps_access_bad_size(void) __compiletime_error("Bad size for CPS accessor"); @@ -165,11 +167,30 @@ static inline uint64_t mips_cps_cluster_config(unsigned int cluster) */ static inline unsigned int mips_cps_numcores(unsigned int cluster) { + unsigned int ncores; + if (!mips_cm_present()) return 0; /* Add one before masking to handle 0xff indicating no cores */ - return (mips_cps_cluster_config(cluster) + 1) & CM_GCR_CONFIG_PCORES; + ncores = (mips_cps_cluster_config(cluster) + 1) & CM_GCR_CONFIG_PCORES; + + if (IS_ENABLED(CONFIG_SOC_MT7621)) { + struct cpulaunch *launch; + + /* + * Ralink MT7621S SoC is single core, but the GCR_CONFIG method + * always reports 2 cores. Check the second core's LAUNCH_FREADY + * flag to detect if the second core is missing. This method + * only works before the core has been started. 
+ */ + launch = (struct cpulaunch *)CKSEG0ADDR(CPULAUNCH); + launch += 2; /* MT7621 has 2 VPEs per core */ + if (!(launch->flags & LAUNCH_FREADY)) + ncores = 1; + } + + return ncores; } /** diff --git a/arch/mips/include/asm/octeon/cvmx-address.h b/arch/mips/include/asm/octeon/cvmx-address.h index e4444f8c4a61..5df5c90f6a5d 100644 --- a/arch/mips/include/asm/octeon/cvmx-address.h +++ b/arch/mips/include/asm/octeon/cvmx-address.h @@ -152,7 +152,7 @@ typedef union { /* physical mem address */ struct { - /* techically, <47:40> are dont-cares */ + /* technically, <47:40> are dont-cares */ uint64_t zeroes:24; /* the hardware ignores <39:36> in Octeon I */ uint64_t unaddr:4; diff --git a/arch/mips/include/asm/octeon/cvmx-bootinfo.h b/arch/mips/include/asm/octeon/cvmx-bootinfo.h index c114a7ba0bad..0e6bf220db61 100644 --- a/arch/mips/include/asm/octeon/cvmx-bootinfo.h +++ b/arch/mips/include/asm/octeon/cvmx-bootinfo.h @@ -298,6 +298,7 @@ enum cvmx_board_types_enum { CVMX_BOARD_TYPE_UBNT_E200 = 20003, CVMX_BOARD_TYPE_UBNT_E220 = 20005, CVMX_BOARD_TYPE_CUST_DSR1000N = 20006, + CVMX_BOARD_TYPE_UBNT_E300 = 20300, CVMX_BOARD_TYPE_KONTRON_S1901 = 21901, CVMX_BOARD_TYPE_CUST_PRIVATE_MAX = 30000, @@ -401,6 +402,7 @@ static inline const char *cvmx_board_type_to_string(enum ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_UBNT_E200) ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_UBNT_E220) ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DSR1000N) + ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_UBNT_E300) ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_KONTRON_S1901) ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MAX) } diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index 65acab9c41f9..195ff4e9771f 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h @@ -210,9 +210,16 @@ static inline unsigned long ___pa(unsigned long x) * also affect MIPS so we keep this one until GCC 3.x has been retired * before we can apply https://patchwork.linux-mips.org/patch/1541/ */ +#define __pa_symbol_nodebug(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) + +#ifdef CONFIG_DEBUG_VIRTUAL +extern phys_addr_t __phys_addr_symbol(unsigned long x); +#else +#define __phys_addr_symbol(x) __pa_symbol_nodebug(x) +#endif #ifndef __pa_symbol -#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) +#define __pa_symbol(x) __phys_addr_symbol((unsigned long)(x)) #endif #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h index 6f48649201c5..9ffc8192adae 100644 --- a/arch/mips/include/asm/pci.h +++ b/arch/mips/include/asm/pci.h @@ -38,7 +38,6 @@ struct pci_controller { struct resource *io_resource; unsigned long io_offset; unsigned long io_map_base; - struct resource *busn_resource; #ifndef CONFIG_PCI_DOMAINS_GENERIC unsigned int index; diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index 7834e7c0c78a..0c3550c82b72 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h @@ -32,16 +32,11 @@ extern unsigned int vced_count, vcei_count; extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); #ifdef CONFIG_32BIT -#ifdef CONFIG_KVM_GUEST -/* User space process size is limited to 1GB in KVM Guest Mode */ -#define TASK_SIZE 0x3fff8000UL -#else /* * User space process size: 2GB. This is hardcoded into a few places, * so don't change it unless you know what you are doing. 
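The CONFIG_DEBUG_VIRTUAL declarations added to io.h and page.h above (__virt_to_phys() and __phys_addr_symbol()) need out-of-line definitions, which this series adds elsewhere (not shown here). The sketch below captures the usual shape of such checkers as implemented on other architectures; treat the exact messages and helpers as assumptions rather than the literal MIPS code:

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#include <linux/export.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/page.h>

phys_addr_t __virt_to_phys(volatile const void *x)
{
	/* Catch callers handing a vmalloc/ioremap pointer to virt_to_phys();
	 * the linear translation below is only valid for lowmem/unmapped
	 * addresses. */
	WARN(!virt_addr_valid(x),
	     "non-linear address passed to virt_to_phys: %pK\n", x);

	return __pa((unsigned long)x);
}
EXPORT_SYMBOL(__virt_to_phys);

phys_addr_t __phys_addr_symbol(unsigned long x)
{
	/* Kernel symbols must lie within the kernel image. */
	VIRTUAL_BUG_ON(x < (unsigned long)_text || x > (unsigned long)_end);

	return __pa_symbol_nodebug(x);
}
EXPORT_SYMBOL(__phys_addr_symbol);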
*/ #define TASK_SIZE 0x80000000UL -#endif #define STACK_TOP_MAX TASK_SIZE @@ -226,10 +221,6 @@ struct nlm_cop2_state { #define COP2_INIT #endif -typedef struct { - unsigned long seg; -} mm_segment_t; - #ifdef CONFIG_CPU_HAS_MSA # define ARCH_MIN_TASKALIGN 16 # define FPU_ALIGN __aligned(16) diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index e2c352da3877..0b17aaa9e012 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h @@ -28,11 +28,6 @@ struct thread_info { unsigned long tp_value; /* thread pointer */ __u32 cpu; /* current CPU */ int preempt_count; /* 0 => preemptable, <0 => BUG */ - mm_segment_t addr_limit; /* - * thread address space limit: - * 0x7fffffff for user-thead - * 0xffffffff for kernel-thread - */ struct pt_regs *regs; long syscall; /* syscall number */ }; @@ -46,7 +41,6 @@ struct thread_info { .flags = _TIF_FIXADE, \ .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ - .addr_limit = KERNEL_DS, \ } /* diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 61fc01f177a6..783fecce65c8 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -16,20 +16,9 @@ #include <asm/asm-eva.h> #include <asm/extable.h> -/* - * The fs value determines whether argument validity checking should be - * performed or not. If get_fs() == USER_DS, checking is performed, with - * get_fs() == KERNEL_DS, checking is bypassed. - * - * For historical reasons, these macros are grossly misnamed. - */ #ifdef CONFIG_32BIT -#ifdef CONFIG_KVM_GUEST -#define __UA_LIMIT 0x40000000UL -#else #define __UA_LIMIT 0x80000000UL -#endif #define __UA_ADDR ".word" #define __UA_LA "la" @@ -54,43 +43,6 @@ extern u64 __ua_limit; #endif /* CONFIG_64BIT */ /* - * USER_DS is a bitmask that has the bits set that may not be set in a valid - * userspace address. Note that we limit 32-bit userspace to 0x7fff8000 but - * the arithmetic we're doing only works if the limit is a power of two, so - * we use 0x80000000 here on 32-bit kernels. If a process passes an invalid - * address in this range it's the process's problem, not ours :-) - */ - -#ifdef CONFIG_KVM_GUEST -#define KERNEL_DS ((mm_segment_t) { 0x80000000UL }) -#define USER_DS ((mm_segment_t) { 0xC0000000UL }) -#else -#define KERNEL_DS ((mm_segment_t) { 0UL }) -#define USER_DS ((mm_segment_t) { __UA_LIMIT }) -#endif - -#define get_fs() (current_thread_info()->addr_limit) -#define set_fs(x) (current_thread_info()->addr_limit = (x)) - -#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) - -/* - * eva_kernel_access() - determine whether kernel memory access on an EVA system - * - * Determines whether memory accesses should be performed to kernel memory - * on a system using Extended Virtual Addressing (EVA). - * - * Return: true if a kernel memory access on an EVA system, else false. - */ -static inline bool eva_kernel_access(void) -{ - if (!IS_ENABLED(CONFIG_EVA)) - return false; - - return uaccess_kernel(); -} - -/* * Is a address valid? This does a straightforward calculation rather * than tests. 
* @@ -127,7 +79,9 @@ static inline bool eva_kernel_access(void) static inline int __access_ok(const void __user *p, unsigned long size) { unsigned long addr = (unsigned long)p; - return (get_fs().seg & (addr | (addr + size) | __ua_size(size))) == 0; + unsigned long end = addr + size - !!size; + + return (__UA_LIMIT & (addr | end | __ua_size(size))) == 0; } #define access_ok(addr, size) \ @@ -150,8 +104,13 @@ static inline int __access_ok(const void __user *p, unsigned long size) * * Returns zero on success, or -EFAULT on error. */ -#define put_user(x,ptr) \ - __put_user_check((x), (ptr), sizeof(*(ptr))) +#define put_user(x, ptr) \ +({ \ + __typeof__(*(ptr)) __user *__p = (ptr); \ + \ + might_fault(); \ + access_ok(__p, sizeof(*__p)) ? __put_user((x), __p) : -EFAULT; \ +}) /* * get_user: - Get a simple variable from user space. @@ -171,8 +130,14 @@ static inline int __access_ok(const void __user *p, unsigned long size) * Returns zero on success, or -EFAULT on error. * On error, the variable @x is set to zero. */ -#define get_user(x,ptr) \ - __get_user_check((x), (ptr), sizeof(*(ptr))) +#define get_user(x, ptr) \ +({ \ + const __typeof__(*(ptr)) __user *__p = (ptr); \ + \ + might_fault(); \ + access_ok(__p, sizeof(*__p)) ? __get_user((x), __p) : \ + ((x) = 0, -EFAULT); \ +}) /* * __put_user: - Write a simple value into user space, with less checking. @@ -194,8 +159,32 @@ static inline int __access_ok(const void __user *p, unsigned long size) * * Returns zero on success, or -EFAULT on error. */ -#define __put_user(x,ptr) \ - __put_user_nocheck((x), (ptr), sizeof(*(ptr))) +#define __put_user(x, ptr) \ +({ \ + __typeof__(*(ptr)) __user *__pu_ptr = (ptr); \ + __typeof__(*(ptr)) __pu_val = (x); \ + int __pu_err = 0; \ + \ + __chk_user_ptr(__pu_ptr); \ + switch (sizeof(*__pu_ptr)) { \ + case 1: \ + __put_data_asm(user_sb, __pu_ptr); \ + break; \ + case 2: \ + __put_data_asm(user_sh, __pu_ptr); \ + break; \ + case 4: \ + __put_data_asm(user_sw, __pu_ptr); \ + break; \ + case 8: \ + __PUT_DW(user_sd, __pu_ptr); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ + \ + __pu_err; \ +}) /* * __get_user: - Get a simple variable from user space, with less checking. @@ -218,49 +207,35 @@ static inline int __access_ok(const void __user *p, unsigned long size) * Returns zero on success, or -EFAULT on error. * On error, the variable @x is set to zero. */ -#define __get_user(x,ptr) \ - __get_user_nocheck((x), (ptr), sizeof(*(ptr))) +#define __get_user(x, ptr) \ +({ \ + const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ + int __gu_err = 0; \ + \ + __chk_user_ptr(__gu_ptr); \ + switch (sizeof(*__gu_ptr)) { \ + case 1: \ + __get_data_asm((x), user_lb, __gu_ptr); \ + break; \ + case 2: \ + __get_data_asm((x), user_lh, __gu_ptr); \ + break; \ + case 4: \ + __get_data_asm((x), user_lw, __gu_ptr); \ + break; \ + case 8: \ + __GET_DW((x), user_ld, __gu_ptr); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ + \ + __gu_err; \ +}) struct __large_struct { unsigned long buf[100]; }; #define __m(x) (*(struct __large_struct __user *)(x)) -/* - * Yuck. We need two variants, one for 64bit operation and one - * for 32 bit mode and old iron. - */ -#ifndef CONFIG_EVA -#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr) -#else -/* - * Kernel specific functions for EVA. We need to use normal load instructions - * to read data from kernel when operating in EVA mode. We use these macros to - * avoid redefining __get_user_asm for EVA. 
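As a usage illustration for the reworked put_user()/get_user() above, here is a hypothetical driver snippet (the names are made up for this example and are not part of the patch):

#include <linux/uaccess.h>
#include <linux/errno.h>

/* Hypothetical device state, purely for illustration. */
struct my_dev {
	unsigned int threshold;
	unsigned int status;
};

static long my_ioctl(struct my_dev *dev, unsigned int __user *arg)
{
	unsigned int val;

	/* get_user() now performs might_fault() plus the access_ok()
	 * range check itself and returns -EFAULT (zeroing val) for a
	 * bad user pointer. */
	if (get_user(val, arg))
		return -EFAULT;
	dev->threshold = val;

	/* put_user() validates the pointer the same way before storing. */
	return put_user(dev->status, arg);
}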
- */ -#undef _loadd -#undef _loadw -#undef _loadh -#undef _loadb -#ifdef CONFIG_32BIT -#define _loadd _loadw -#else -#define _loadd(reg, addr) "ld " reg ", " addr -#endif -#define _loadw(reg, addr) "lw " reg ", " addr -#define _loadh(reg, addr) "lh " reg ", " addr -#define _loadb(reg, addr) "lb " reg ", " addr - -#define __get_kernel_common(val, size, ptr) \ -do { \ - switch (size) { \ - case 1: __get_data_asm(val, _loadb, ptr); break; \ - case 2: __get_data_asm(val, _loadh, ptr); break; \ - case 4: __get_data_asm(val, _loadw, ptr); break; \ - case 8: __GET_DW(val, _loadd, ptr); break; \ - default: __get_user_unknown(); break; \ - } \ -} while (0) -#endif - #ifdef CONFIG_32BIT #define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr) #endif @@ -268,49 +243,6 @@ do { \ #define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr) #endif -extern void __get_user_unknown(void); - -#define __get_user_common(val, size, ptr) \ -do { \ - switch (size) { \ - case 1: __get_data_asm(val, user_lb, ptr); break; \ - case 2: __get_data_asm(val, user_lh, ptr); break; \ - case 4: __get_data_asm(val, user_lw, ptr); break; \ - case 8: __GET_DW(val, user_ld, ptr); break; \ - default: __get_user_unknown(); break; \ - } \ -} while (0) - -#define __get_user_nocheck(x, ptr, size) \ -({ \ - int __gu_err; \ - \ - if (eva_kernel_access()) { \ - __get_kernel_common((x), size, ptr); \ - } else { \ - __chk_user_ptr(ptr); \ - __get_user_common((x), size, ptr); \ - } \ - __gu_err; \ -}) - -#define __get_user_check(x, ptr, size) \ -({ \ - int __gu_err = -EFAULT; \ - const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \ - \ - might_fault(); \ - if (likely(access_ok( __gu_ptr, size))) { \ - if (eva_kernel_access()) \ - __get_kernel_common((x), size, __gu_ptr); \ - else \ - __get_user_common((x), size, __gu_ptr); \ - } else \ - (x) = 0; \ - \ - __gu_err; \ -}) - #define __get_data_asm(val, insn, addr) \ { \ long __gu_tmp; \ @@ -364,39 +296,36 @@ do { \ (val) = __gu_tmp.t; \ } -#ifndef CONFIG_EVA -#define __put_kernel_common(ptr, size) __put_user_common(ptr, size) -#else -/* - * Kernel specific functions for EVA. We need to use normal load instructions - * to read data from kernel when operating in EVA mode. We use these macros to - * avoid redefining __get_data_asm for EVA. 
- */ -#undef _stored -#undef _storew -#undef _storeh -#undef _storeb -#ifdef CONFIG_32BIT -#define _stored _storew -#else -#define _stored(reg, addr) "ld " reg ", " addr -#endif - -#define _storew(reg, addr) "sw " reg ", " addr -#define _storeh(reg, addr) "sh " reg ", " addr -#define _storeb(reg, addr) "sb " reg ", " addr +#define HAVE_GET_KERNEL_NOFAULT -#define __put_kernel_common(ptr, size) \ +#define __get_kernel_nofault(dst, src, type, err_label) \ do { \ - switch (size) { \ - case 1: __put_data_asm(_storeb, ptr); break; \ - case 2: __put_data_asm(_storeh, ptr); break; \ - case 4: __put_data_asm(_storew, ptr); break; \ - case 8: __PUT_DW(_stored, ptr); break; \ - default: __put_user_unknown(); break; \ + int __gu_err; \ + \ + switch (sizeof(type)) { \ + case 1: \ + __get_data_asm(*(type *)(dst), kernel_lb, \ + (__force type *)(src)); \ + break; \ + case 2: \ + __get_data_asm(*(type *)(dst), kernel_lh, \ + (__force type *)(src)); \ + break; \ + case 4: \ + __get_data_asm(*(type *)(dst), kernel_lw, \ + (__force type *)(src)); \ + break; \ + case 8: \ + __GET_DW(*(type *)(dst), kernel_ld, \ + (__force type *)(src)); \ + break; \ + default: \ + BUILD_BUG(); \ + break; \ } \ -} while(0) -#endif + if (unlikely(__gu_err)) \ + goto err_label; \ +} while (0) /* * Yuck. We need two variants, one for 64bit operation and one @@ -409,49 +338,6 @@ do { \ #define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr) #endif -#define __put_user_common(ptr, size) \ -do { \ - switch (size) { \ - case 1: __put_data_asm(user_sb, ptr); break; \ - case 2: __put_data_asm(user_sh, ptr); break; \ - case 4: __put_data_asm(user_sw, ptr); break; \ - case 8: __PUT_DW(user_sd, ptr); break; \ - default: __put_user_unknown(); break; \ - } \ -} while (0) - -#define __put_user_nocheck(x, ptr, size) \ -({ \ - __typeof__(*(ptr)) __pu_val; \ - int __pu_err = 0; \ - \ - __pu_val = (x); \ - if (eva_kernel_access()) { \ - __put_kernel_common(ptr, size); \ - } else { \ - __chk_user_ptr(ptr); \ - __put_user_common(ptr, size); \ - } \ - __pu_err; \ -}) - -#define __put_user_check(x, ptr, size) \ -({ \ - __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ - __typeof__(*(ptr)) __pu_val = (x); \ - int __pu_err = -EFAULT; \ - \ - might_fault(); \ - if (likely(access_ok( __pu_addr, size))) { \ - if (eva_kernel_access()) \ - __put_kernel_common(__pu_addr, size); \ - else \ - __put_user_common(__pu_addr, size); \ - } \ - \ - __pu_err; \ -}) - #define __put_data_asm(insn, ptr) \ { \ __asm__ __volatile__( \ @@ -490,7 +376,33 @@ do { \ "i" (-EFAULT)); \ } -extern void __put_user_unknown(void); +#define __put_kernel_nofault(dst, src, type, err_label) \ +do { \ + type __pu_val; \ + int __pu_err = 0; \ + \ + __pu_val = *(__force type *)(src); \ + switch (sizeof(type)) { \ + case 1: \ + __put_data_asm(kernel_sb, (type *)(dst)); \ + break; \ + case 2: \ + __put_data_asm(kernel_sh, (type *)(dst)); \ + break; \ + case 4: \ + __put_data_asm(kernel_sw, (type *)(dst)) \ + break; \ + case 8: \ + __PUT_DW(kernel_sd, (type *)(dst)); \ + break; \ + default: \ + BUILD_BUG(); \ + break; \ + } \ + if (unlikely(__pu_err)) \ + goto err_label; \ +} while (0) + /* * We're generating jump to subroutines which will be outside the range of @@ -514,124 +426,85 @@ extern void __put_user_unknown(void); #define DADDI_SCRATCH "$0" #endif -extern size_t __copy_user(void *__to, const void *__from, size_t __n); - -#define __invoke_copy_from(func, to, from, n) \ -({ \ - register void *__cu_to_r __asm__("$4"); \ - register const void __user *__cu_from_r __asm__("$5"); \ - register 
long __cu_len_r __asm__("$6"); \ - \ - __cu_to_r = (to); \ - __cu_from_r = (from); \ - __cu_len_r = (n); \ - __asm__ __volatile__( \ - ".set\tnoreorder\n\t" \ - __MODULE_JAL(func) \ - ".set\tnoat\n\t" \ - __UA_ADDU "\t$1, %1, %2\n\t" \ - ".set\tat\n\t" \ - ".set\treorder" \ - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ - : \ - : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ - DADDI_SCRATCH, "memory"); \ - __cu_len_r; \ -}) - -#define __invoke_copy_to(func, to, from, n) \ -({ \ - register void __user *__cu_to_r __asm__("$4"); \ - register const void *__cu_from_r __asm__("$5"); \ - register long __cu_len_r __asm__("$6"); \ - \ - __cu_to_r = (to); \ - __cu_from_r = (from); \ - __cu_len_r = (n); \ - __asm__ __volatile__( \ - __MODULE_JAL(func) \ - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ - : \ - : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ - DADDI_SCRATCH, "memory"); \ - __cu_len_r; \ -}) - -#define __invoke_copy_from_kernel(to, from, n) \ - __invoke_copy_from(__copy_user, to, from, n) - -#define __invoke_copy_to_kernel(to, from, n) \ - __invoke_copy_to(__copy_user, to, from, n) - -#define ___invoke_copy_in_kernel(to, from, n) \ - __invoke_copy_from(__copy_user, to, from, n) - -#ifndef CONFIG_EVA -#define __invoke_copy_from_user(to, from, n) \ - __invoke_copy_from(__copy_user, to, from, n) - -#define __invoke_copy_to_user(to, from, n) \ - __invoke_copy_to(__copy_user, to, from, n) - -#define ___invoke_copy_in_user(to, from, n) \ - __invoke_copy_from(__copy_user, to, from, n) - -#else - -/* EVA specific functions */ - -extern size_t __copy_from_user_eva(void *__to, const void *__from, - size_t __n); -extern size_t __copy_to_user_eva(void *__to, const void *__from, - size_t __n); -extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n); - -/* - * Source or destination address is in userland. 
We need to go through - * the TLB - */ -#define __invoke_copy_from_user(to, from, n) \ - __invoke_copy_from(__copy_from_user_eva, to, from, n) - -#define __invoke_copy_to_user(to, from, n) \ - __invoke_copy_to(__copy_to_user_eva, to, from, n) - -#define ___invoke_copy_in_user(to, from, n) \ - __invoke_copy_from(__copy_in_user_eva, to, from, n) - -#endif /* CONFIG_EVA */ +extern size_t __raw_copy_from_user(void *__to, const void *__from, size_t __n); +extern size_t __raw_copy_to_user(void *__to, const void *__from, size_t __n); +extern size_t __raw_copy_in_user(void *__to, const void *__from, size_t __n); static inline unsigned long -raw_copy_to_user(void __user *to, const void *from, unsigned long n) +raw_copy_from_user(void *to, const void __user *from, unsigned long n) { - if (eva_kernel_access()) - return __invoke_copy_to_kernel(to, from, n); - else - return __invoke_copy_to_user(to, from, n); + register void *__cu_to_r __asm__("$4"); + register const void __user *__cu_from_r __asm__("$5"); + register long __cu_len_r __asm__("$6"); + + __cu_to_r = to; + __cu_from_r = from; + __cu_len_r = n; + + __asm__ __volatile__( + ".set\tnoreorder\n\t" + __MODULE_JAL(__raw_copy_from_user) + ".set\tnoat\n\t" + __UA_ADDU "\t$1, %1, %2\n\t" + ".set\tat\n\t" + ".set\treorder" + : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) + : + : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", + DADDI_SCRATCH, "memory"); + + return __cu_len_r; } static inline unsigned long -raw_copy_from_user(void *to, const void __user *from, unsigned long n) +raw_copy_to_user(void __user *to, const void *from, unsigned long n) { - if (eva_kernel_access()) - return __invoke_copy_from_kernel(to, from, n); - else - return __invoke_copy_from_user(to, from, n); + register void __user *__cu_to_r __asm__("$4"); + register const void *__cu_from_r __asm__("$5"); + register long __cu_len_r __asm__("$6"); + + __cu_to_r = (to); + __cu_from_r = (from); + __cu_len_r = (n); + + __asm__ __volatile__( + __MODULE_JAL(__raw_copy_to_user) + : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) + : + : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", + DADDI_SCRATCH, "memory"); + + return __cu_len_r; } #define INLINE_COPY_FROM_USER #define INLINE_COPY_TO_USER static inline unsigned long -raw_copy_in_user(void __user*to, const void __user *from, unsigned long n) +raw_copy_in_user(void __user *to, const void __user *from, unsigned long n) { - if (eva_kernel_access()) - return ___invoke_copy_in_kernel(to, from, n); - else - return ___invoke_copy_in_user(to, from, n); + register void __user *__cu_to_r __asm__("$4"); + register const void __user *__cu_from_r __asm__("$5"); + register long __cu_len_r __asm__("$6"); + + __cu_to_r = to; + __cu_from_r = from; + __cu_len_r = n; + + __asm__ __volatile__( + ".set\tnoreorder\n\t" + __MODULE_JAL(__raw_copy_in_user) + ".set\tnoat\n\t" + __UA_ADDU "\t$1, %1, %2\n\t" + ".set\tat\n\t" + ".set\treorder" + : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) + : + : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", + DADDI_SCRATCH, "memory"); + return __cu_len_r; } -extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size); extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size); /* @@ -657,28 +530,16 @@ __clear_user(void __user *addr, __kernel_size_t size) #define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31" #endif /* CONFIG_CPU_MICROMIPS */ - if (eva_kernel_access()) { - __asm__ __volatile__( - "move\t$4, %1\n\t" - 
"move\t$5, $0\n\t" - "move\t$6, %2\n\t" - __MODULE_JAL(__bzero_kernel) - "move\t%0, $6" - : "=r" (res) - : "r" (addr), "r" (size) - : bzero_clobbers); - } else { - might_fault(); - __asm__ __volatile__( - "move\t$4, %1\n\t" - "move\t$5, $0\n\t" - "move\t$6, %2\n\t" - __MODULE_JAL(__bzero) - "move\t%0, $6" - : "=r" (res) - : "r" (addr), "r" (size) - : bzero_clobbers); - } + might_fault(); + __asm__ __volatile__( + "move\t$4, %1\n\t" + "move\t$5, $0\n\t" + "move\t$6, %2\n\t" + __MODULE_JAL(__bzero) + "move\t%0, $6" + : "=r" (res) + : "r" (addr), "r" (size) + : bzero_clobbers); return res; } @@ -692,7 +553,6 @@ __clear_user(void __user *addr, __kernel_size_t size) __cl_size; \ }) -extern long __strncpy_from_kernel_asm(char *__to, const char __user *__from, long __len); extern long __strncpy_from_user_asm(char *__to, const char __user *__from, long __len); /* @@ -718,33 +578,23 @@ strncpy_from_user(char *__to, const char __user *__from, long __len) { long res; - if (eva_kernel_access()) { - __asm__ __volatile__( - "move\t$4, %1\n\t" - "move\t$5, %2\n\t" - "move\t$6, %3\n\t" - __MODULE_JAL(__strncpy_from_kernel_asm) - "move\t%0, $2" - : "=r" (res) - : "r" (__to), "r" (__from), "r" (__len) - : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory"); - } else { - might_fault(); - __asm__ __volatile__( - "move\t$4, %1\n\t" - "move\t$5, %2\n\t" - "move\t$6, %3\n\t" - __MODULE_JAL(__strncpy_from_user_asm) - "move\t%0, $2" - : "=r" (res) - : "r" (__to), "r" (__from), "r" (__len) - : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory"); - } + if (!access_ok(__from, __len)) + return -EFAULT; + + might_fault(); + __asm__ __volatile__( + "move\t$4, %1\n\t" + "move\t$5, %2\n\t" + "move\t$6, %3\n\t" + __MODULE_JAL(__strncpy_from_user_asm) + "move\t%0, $2" + : "=r" (res) + : "r" (__to), "r" (__from), "r" (__len) + : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory"); return res; } -extern long __strnlen_kernel_asm(const char __user *s, long n); extern long __strnlen_user_asm(const char __user *s, long n); /* @@ -764,26 +614,18 @@ static inline long strnlen_user(const char __user *s, long n) { long res; + if (!access_ok(s, 1)) + return 0; + might_fault(); - if (eva_kernel_access()) { - __asm__ __volatile__( - "move\t$4, %1\n\t" - "move\t$5, %2\n\t" - __MODULE_JAL(__strnlen_kernel_asm) - "move\t%0, $2" - : "=r" (res) - : "r" (s), "r" (n) - : "$2", "$4", "$5", __UA_t0, "$31"); - } else { - __asm__ __volatile__( - "move\t$4, %1\n\t" - "move\t$5, %2\n\t" - __MODULE_JAL(__strnlen_user_asm) - "move\t%0, $2" - : "=r" (res) - : "r" (s), "r" (n) - : "$2", "$4", "$5", __UA_t0, "$31"); - } + __asm__ __volatile__( + "move\t$4, %1\n\t" + "move\t$5, %2\n\t" + __MODULE_JAL(__strnlen_user_asm) + "move\t%0, $2" + : "=r" (res) + : "r" (s), "r" (n) + : "$2", "$4", "$5", __UA_t0, "$31"); return res; } diff --git a/arch/mips/include/asm/vdso/gettimeofday.h b/arch/mips/include/asm/vdso/gettimeofday.h index 2203e2d0ae2a..44a45f3fa4b0 100644 --- a/arch/mips/include/asm/vdso/gettimeofday.h +++ b/arch/mips/include/asm/vdso/gettimeofday.h @@ -20,6 +20,12 @@ #define VDSO_HAS_CLOCK_GETRES 1 +#if MIPS_ISA_REV < 6 +#define VDSO_SYSCALL_CLOBBERS "hi", "lo", +#else +#define VDSO_SYSCALL_CLOBBERS +#endif + static __always_inline long gettimeofday_fallback( struct __kernel_old_timeval *_tv, struct timezone *_tz) @@ -35,7 +41,9 @@ static __always_inline long gettimeofday_fallback( : "=r" (ret), "=r" (error) : "r" (tv), "r" (tz), "r" (nr) : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", - "$14", "$15", "$24", "$25", "hi", "lo", 
"memory"); + "$14", "$15", "$24", "$25", + VDSO_SYSCALL_CLOBBERS + "memory"); return error ? -ret : ret; } @@ -59,7 +67,9 @@ static __always_inline long clock_gettime_fallback( : "=r" (ret), "=r" (error) : "r" (clkid), "r" (ts), "r" (nr) : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", - "$14", "$15", "$24", "$25", "hi", "lo", "memory"); + "$14", "$15", "$24", "$25", + VDSO_SYSCALL_CLOBBERS + "memory"); return error ? -ret : ret; } @@ -83,7 +93,9 @@ static __always_inline int clock_getres_fallback( : "=r" (ret), "=r" (error) : "r" (clkid), "r" (ts), "r" (nr) : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", - "$14", "$15", "$24", "$25", "hi", "lo", "memory"); + "$14", "$15", "$24", "$25", + VDSO_SYSCALL_CLOBBERS + "memory"); return error ? -ret : ret; } @@ -105,7 +117,9 @@ static __always_inline long clock_gettime32_fallback( : "=r" (ret), "=r" (error) : "r" (clkid), "r" (ts), "r" (nr) : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", - "$14", "$15", "$24", "$25", "hi", "lo", "memory"); + "$14", "$15", "$24", "$25", + VDSO_SYSCALL_CLOBBERS + "memory"); return error ? -ret : ret; } @@ -125,7 +139,9 @@ static __always_inline int clock_getres32_fallback( : "=r" (ret), "=r" (error) : "r" (clkid), "r" (ts), "r" (nr) : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", - "$14", "$15", "$24", "$25", "hi", "lo", "memory"); + "$14", "$15", "$24", "$25", + VDSO_SYSCALL_CLOBBERS + "memory"); return error ? -ret : ret; } diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index b4a57f1de772..814b3da30501 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -17,10 +17,10 @@ obj-y += cpu-probe.o endif ifdef CONFIG_FUNCTION_TRACER -CFLAGS_REMOVE_ftrace.o = -pg -CFLAGS_REMOVE_early_printk.o = -pg -CFLAGS_REMOVE_perf_event.o = -pg -CFLAGS_REMOVE_perf_event_mipsxx.o = -pg +CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_early_printk.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_perf_event.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_perf_event_mipsxx.o = $(CC_FLAGS_FTRACE) endif obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o diff --git a/arch/mips/kernel/access-helper.h b/arch/mips/kernel/access-helper.h new file mode 100644 index 000000000000..590388031503 --- /dev/null +++ b/arch/mips/kernel/access-helper.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include <linux/uaccess.h> + +static inline int __get_addr(unsigned long *a, unsigned long *p, bool user) +{ + return user ? get_user(*a, (unsigned long __user *)p) : + get_kernel_nofault(*a, p); +} + +static inline int __get_inst16(u16 *i, u16 *p, bool user) +{ + return user ? get_user(*i, (u16 __user *)p) : get_kernel_nofault(*i, p); +} + +static inline int __get_inst32(u32 *i, u32 *p, bool user) +{ + return user ? 
get_user(*i, (u32 __user *)p) : get_kernel_nofault(*i, p); +} diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index aebfda81120a..5735b2cd6f2a 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -98,7 +98,6 @@ void output_thread_info_defines(void) OFFSET(TI_TP_VALUE, thread_info, tp_value); OFFSET(TI_CPU, thread_info, cpu); OFFSET(TI_PRE_COUNT, thread_info, preempt_count); - OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit); OFFSET(TI_REGS, thread_info, regs); DEFINE(_THREAD_SIZE, THREAD_SIZE); DEFINE(_THREAD_MASK, THREAD_MASK); diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index 995ad9e69ded..32ec67c9ab67 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c @@ -195,10 +195,6 @@ int c0_compare_int_usable(void) unsigned int delta; unsigned int cnt; -#ifdef CONFIG_KVM_GUEST - return 1; -#endif - /* * IP7 already pending? Try to clear it by acking the timer. */ diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index b71892064f27..0ef240adefb5 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -1752,7 +1752,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) set_isa(c, MIPS_CPU_ISA_M64R2); break; } - c->writecombine = _CACHE_UNCACHED_ACCELERATED; c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2); break; @@ -1782,7 +1781,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) * register, we correct it here. */ c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE; - c->writecombine = _CACHE_UNCACHED_ACCELERATED; c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM | MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2); c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */ @@ -1793,7 +1791,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) set_elf_platform(cpu, "loongson3a"); set_isa(c, MIPS_CPU_ISA_M64R2); decode_cpucfg(c); - c->writecombine = _CACHE_UNCACHED_ACCELERATED; break; default: panic("Unknown Loongson Processor ID!"); diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 666b9969c1bd..8c401e42301c 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -90,7 +90,6 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1, unsigned int new_code2) { int faulted; - mm_segment_t old_fs; safe_store_code(new_code1, ip, faulted); if (unlikely(faulted)) @@ -102,10 +101,7 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1, return -EFAULT; ip -= 4; - old_fs = get_fs(); - set_fs(KERNEL_DS); flush_icache_range(ip, ip + 8); - set_fs(old_fs); return 0; } @@ -114,7 +110,6 @@ static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1, unsigned int new_code2) { int faulted; - mm_segment_t old_fs; ip += 4; safe_store_code(new_code2, ip, faulted); @@ -126,10 +121,7 @@ static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1, if (unlikely(faulted)) return -EFAULT; - old_fs = get_fs(); - set_fs(KERNEL_DS); flush_icache_range(ip, ip + 8); - set_fs(old_fs); return 0; } diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 7efa0d1a4c2b..bff080db0294 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -124,7 +124,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, /* kernel thread */ unsigned long status = p->thread.cp0_status; 
memset(childregs, 0, sizeof(struct pt_regs)); - ti->addr_limit = KERNEL_DS; p->thread.reg16 = usp; /* fn */ p->thread.reg17 = kthread_arg; p->thread.reg29 = childksp; @@ -145,7 +144,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, childregs->regs[2] = 0; /* Child gets zero as return value */ if (usp) childregs->regs[29] = usp; - ti->addr_limit = USER_DS; p->thread.reg29 = (unsigned long) childregs; p->thread.reg31 = (unsigned long) ret_from_fork; diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S index ac870893ba2d..f3c908abdbb8 100644 --- a/arch/mips/kernel/relocate_kernel.S +++ b/arch/mips/kernel/relocate_kernel.S @@ -11,6 +11,8 @@ #include <asm/stackframe.h> #include <asm/addrspace.h> +#include <kernel-entry-init.h> + LEAF(relocate_new_kernel) PTR_L a0, arg0 PTR_L a1, arg1 @@ -125,11 +127,8 @@ LEAF(kexec_smp_wait) 1: LONG_L s0, (t0) bne s0, zero,1b -#ifdef CONFIG_CPU_CAVIUM_OCTEON - .set push - .set noreorder - synci 0($0) - .set pop +#ifdef USE_KEXEC_SMP_WAIT_FINAL + kexec_smp_wait_final #else sync #endif diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index b449b68662a9..b1b2e106f711 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -48,10 +48,8 @@ NESTED(handle_sys, PT_SIZE, sp) * We intentionally keep the kernel stack a little below the top of * userspace so we don't have to do a slower byte accurate check here. */ - lw t5, TI_ADDR_LIMIT($28) addu t4, t0, 32 - and t5, t4 - bltz t5, bad_stack # -> sp is bad + bltz t4, bad_stack # -> sp is bad /* * Ok, copy the args from the luser stack to the kernel stack. @@ -217,9 +215,9 @@ einval: li v0, -ENOSYS #define sys_sched_getaffinity mipsmt_sys_sched_getaffinity #endif /* CONFIG_MIPS_MT_FPAFF */ +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) #define __SYSCALL(nr, entry) PTR entry .align 2 .type sys_call_table, @object EXPORT(sys_call_table) -#include <asm/syscall_table_32_o32.h> -#undef __SYSCALL +#include <asm/syscall_table_o32.h> diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 35d8c86b160e..f650c55a17dc 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -104,5 +104,4 @@ not_n32_scall: #define __SYSCALL(nr, entry) PTR entry .type sysn32_call_table, @object EXPORT(sysn32_call_table) -#include <asm/syscall_table_64_n32.h> -#undef __SYSCALL +#include <asm/syscall_table_n32.h> diff --git a/arch/mips/kernel/scall64-n64.S b/arch/mips/kernel/scall64-n64.S index 5e9c497ce099..5d7bfc65e4d0 100644 --- a/arch/mips/kernel/scall64-n64.S +++ b/arch/mips/kernel/scall64-n64.S @@ -113,5 +113,4 @@ illegal_syscall: .align 3 .type sys_call_table, @object EXPORT(sys_call_table) -#include <asm/syscall_table_64_n64.h> -#undef __SYSCALL +#include <asm/syscall_table_n64.h> diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 50c9a57e0d3a..cedc8bd88804 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -213,9 +213,9 @@ einval: li v0, -ENOSYS jr ra END(sys32_syscall) +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat) #define __SYSCALL(nr, entry) PTR entry .align 3 .type sys32_call_table,@object EXPORT(sys32_call_table) -#include <asm/syscall_table_64_o32.h> -#undef __SYSCALL +#include <asm/syscall_table_o32.h> diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 359b176b665f..b6ef5f7312cf 100644 --- a/arch/mips/kernel/smp-bmips.c +++ 
b/arch/mips/kernel/smp-bmips.c @@ -134,17 +134,24 @@ static void __init bmips_smp_setup(void) if (!board_ebase_setup) board_ebase_setup = &bmips_ebase_setup; - __cpu_number_map[boot_cpu] = 0; - __cpu_logical_map[0] = boot_cpu; - - for (i = 0; i < max_cpus; i++) { - if (i != boot_cpu) { - __cpu_number_map[i] = cpu; - __cpu_logical_map[cpu] = i; - cpu++; + if (max_cpus > 1) { + __cpu_number_map[boot_cpu] = 0; + __cpu_logical_map[0] = boot_cpu; + + for (i = 0; i < max_cpus; i++) { + if (i != boot_cpu) { + __cpu_number_map[i] = cpu; + __cpu_logical_map[cpu] = i; + cpu++; + } + set_cpu_possible(i, 1); + set_cpu_present(i, 1); } - set_cpu_possible(i, 1); - set_cpu_present(i, 1); + } else { + __cpu_number_map[0] = boot_cpu; + __cpu_logical_map[0] = 0; + set_cpu_possible(0, 1); + set_cpu_present(0, 1); } } diff --git a/arch/mips/kernel/spinlock_test.c b/arch/mips/kernel/spinlock_test.c index ab4e3e1b138d..90f53e041a38 100644 --- a/arch/mips/kernel/spinlock_test.c +++ b/arch/mips/kernel/spinlock_test.c @@ -35,7 +35,7 @@ static int ss_get(void *data, u64 *val) return 0; } -DEFINE_SIMPLE_ATTRIBUTE(fops_ss, ss_get, NULL, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_ss, ss_get, NULL, "%llu\n"); @@ -114,13 +114,13 @@ static int multi_get(void *data, u64 *val) return 0; } -DEFINE_SIMPLE_ATTRIBUTE(fops_multi, multi_get, NULL, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_multi, multi_get, NULL, "%llu\n"); static int __init spinlock_test(void) { - debugfs_create_file("spin_single", S_IRUGO, mips_debugfs_dir, NULL, + debugfs_create_file_unsafe("spin_single", S_IRUGO, mips_debugfs_dir, NULL, &fops_ss); - debugfs_create_file("spin_multi", S_IRUGO, mips_debugfs_dir, NULL, + debugfs_create_file_unsafe("spin_multi", S_IRUGO, mips_debugfs_dir, NULL, &fops_multi); return 0; } diff --git a/arch/mips/kernel/syscalls/Makefile b/arch/mips/kernel/syscalls/Makefile index 51f8b805f2ed..904452992992 100644 --- a/arch/mips/kernel/syscalls/Makefile +++ b/arch/mips/kernel/syscalls/Makefile @@ -8,15 +8,12 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ syscalln32 := $(src)/syscall_n32.tbl syscalln64 := $(src)/syscall_n64.tbl syscallo32 := $(src)/syscall_o32.tbl -syshdr := $(srctree)/$(src)/syscallhdr.sh +syshdr := $(srctree)/scripts/syscallhdr.sh sysnr := $(srctree)/$(src)/syscallnr.sh -systbl := $(srctree)/$(src)/syscalltbl.sh +systbl := $(srctree)/scripts/syscalltbl.sh quiet_cmd_syshdr = SYSHDR $@ - cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ - '$(syshdr_abis_$(basetarget))' \ - '$(syshdr_pfx_$(basetarget))' \ - '$(syshdr_offset_$(basetarget))' + cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --offset __NR_Linux $< $@ quiet_cmd_sysnr = SYSNR $@ cmd_sysnr = $(CONFIG_SHELL) '$(sysnr)' '$<' '$@' \ @@ -25,20 +22,14 @@ quiet_cmd_sysnr = SYSNR $@ '$(sysnr_offset_$(basetarget))' quiet_cmd_systbl = SYSTBL $@ - cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ - '$(systbl_abis_$(basetarget))' \ - '$(systbl_abi_$(basetarget))' \ - '$(systbl_offset_$(basetarget))' + cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@ -syshdr_offset_unistd_n32 := __NR_Linux $(uapi)/unistd_n32.h: $(syscalln32) $(syshdr) FORCE $(call if_changed,syshdr) -syshdr_offset_unistd_n64 := __NR_Linux $(uapi)/unistd_n64.h: $(syscalln64) $(syshdr) FORCE $(call if_changed,syshdr) -syshdr_offset_unistd_o32 := __NR_Linux $(uapi)/unistd_o32.h: $(syscallo32) $(syshdr) FORCE $(call if_changed,syshdr) @@ -57,33 +48,21 @@ sysnr_offset_unistd_nr_o32 := 4000 $(kapi)/unistd_nr_o32.h: $(syscallo32) $(sysnr) FORCE $(call if_changed,sysnr) -systbl_abi_syscall_table_32_o32 
:= 32_o32 -systbl_offset_syscall_table_32_o32 := 4000 -$(kapi)/syscall_table_32_o32.h: $(syscallo32) $(systbl) FORCE +$(kapi)/syscall_table_n32.h: $(syscalln32) $(systbl) FORCE $(call if_changed,systbl) -systbl_abi_syscall_table_64_n32 := 64_n32 -systbl_offset_syscall_table_64_n32 := 6000 -$(kapi)/syscall_table_64_n32.h: $(syscalln32) $(systbl) FORCE +$(kapi)/syscall_table_n64.h: $(syscalln64) $(systbl) FORCE $(call if_changed,systbl) -systbl_abi_syscall_table_64_n64 := 64_n64 -systbl_offset_syscall_table_64_n64 := 5000 -$(kapi)/syscall_table_64_n64.h: $(syscalln64) $(systbl) FORCE - $(call if_changed,systbl) - -systbl_abi_syscall_table_64_o32 := 64_o32 -systbl_offset_syscall_table_64_o32 := 4000 -$(kapi)/syscall_table_64_o32.h: $(syscallo32) $(systbl) FORCE +$(kapi)/syscall_table_o32.h: $(syscallo32) $(systbl) FORCE $(call if_changed,systbl) uapisyshdr-y += unistd_n32.h \ unistd_n64.h \ unistd_o32.h -kapisyshdr-y += syscall_table_32_o32.h \ - syscall_table_64_n32.h \ - syscall_table_64_n64.h \ - syscall_table_64_o32.h \ +kapisyshdr-y += syscall_table_n32.h \ + syscall_table_n64.h \ + syscall_table_o32.h \ unistd_nr_n32.h \ unistd_nr_n64.h \ unistd_nr_o32.h diff --git a/arch/mips/kernel/syscalls/syscallhdr.sh b/arch/mips/kernel/syscalls/syscallhdr.sh deleted file mode 100644 index 2e241e713a7d..000000000000 --- a/arch/mips/kernel/syscalls/syscallhdr.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -prefix="$4" -offset="$5" - -fileguard=_UAPI_ASM_MIPS_`basename "$out" | sed \ - -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ - -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - printf "#ifndef %s\n" "${fileguard}" - printf "#define %s\n" "${fileguard}" - printf "\n" - - nxt=0 - while read nr abi name entry compat ; do - if [ -z "$offset" ]; then - printf "#define __NR_%s%s\t%s\n" \ - "${prefix}" "${name}" "${nr}" - else - printf "#define __NR_%s%s\t(%s + %s)\n" \ - "${prefix}" "${name}" "${offset}" "${nr}" - fi - nxt=$((nr+1)) - done - - printf "\n" - printf "#ifdef __KERNEL__\n" - printf "#define __NR_syscalls\t%s\n" "${nxt}" - printf "#endif\n" - printf "\n" - printf "#endif /* %s */\n" "${fileguard}" -) > "$out" diff --git a/arch/mips/kernel/syscalls/syscalltbl.sh b/arch/mips/kernel/syscalls/syscalltbl.sh deleted file mode 100644 index 1e2570740c20..000000000000 --- a/arch/mips/kernel/syscalls/syscalltbl.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -my_abi="$4" -offset="$5" - -emit() { - t_nxt="$1" - t_nr="$2" - t_entry="$3" - - while [ $t_nxt -lt $t_nr ]; do - printf "__SYSCALL(%s,sys_ni_syscall)\n" "${t_nxt}" - t_nxt=$((t_nxt+1)) - done - printf "__SYSCALL(%s,%s)\n" "${t_nxt}" "${t_entry}" -} - -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - nxt=0 - if [ -z "$offset" ]; then - offset=0 - fi - - while read nr abi name entry compat ; do - if [ "$my_abi" = "64_o32" ] && [ ! 
-z "$compat" ]; then - emit $((nxt+offset)) $((nr+offset)) $compat - else - emit $((nxt+offset)) $((nr+offset)) $entry - fi - nxt=$((nr+1)) - done -) > "$out" diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 808b8b61ded1..0b4e06303c55 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -72,6 +72,8 @@ #include <asm/mach-loongson64/cpucfg-emul.h> +#include "access-helper.h" + extern void check_wait(void); extern asmlinkage void rollback_handle_int(void); extern asmlinkage void handle_int(void); @@ -108,7 +110,8 @@ void (*board_bind_eic_interrupt)(int irq, int regset); void (*board_ebase_setup)(void); void(*board_cache_error_setup)(void); -static void show_raw_backtrace(unsigned long reg29, const char *loglvl) +static void show_raw_backtrace(unsigned long reg29, const char *loglvl, + bool user) { unsigned long *sp = (unsigned long *)(reg29 & ~3); unsigned long addr; @@ -118,9 +121,7 @@ static void show_raw_backtrace(unsigned long reg29, const char *loglvl) printk("%s\n", loglvl); #endif while (!kstack_end(sp)) { - unsigned long __user *p = - (unsigned long __user *)(unsigned long)sp++; - if (__get_user(addr, p)) { + if (__get_addr(&addr, sp++, user)) { printk("%s (Bad stack address)", loglvl); break; } @@ -141,7 +142,7 @@ __setup("raw_show_trace", set_raw_show_trace); #endif static void show_backtrace(struct task_struct *task, const struct pt_regs *regs, - const char *loglvl) + const char *loglvl, bool user) { unsigned long sp = regs->regs[29]; unsigned long ra = regs->regs[31]; @@ -151,7 +152,7 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs, task = current; if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) { - show_raw_backtrace(sp, loglvl); + show_raw_backtrace(sp, loglvl, user); return; } printk("%sCall Trace:\n", loglvl); @@ -167,12 +168,12 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs, * with at least a bit of error checking ... */ static void show_stacktrace(struct task_struct *task, - const struct pt_regs *regs, const char *loglvl) + const struct pt_regs *regs, const char *loglvl, bool user) { const int field = 2 * sizeof(unsigned long); - long stackdata; + unsigned long stackdata; int i; - unsigned long __user *sp = (unsigned long __user *)regs->regs[29]; + unsigned long *sp = (unsigned long *)regs->regs[29]; printk("%sStack :", loglvl); i = 0; @@ -186,7 +187,7 @@ static void show_stacktrace(struct task_struct *task, break; } - if (__get_user(stackdata, sp++)) { + if (__get_addr(&stackdata, sp++, user)) { pr_cont(" (Bad stack address)"); break; } @@ -195,13 +196,12 @@ static void show_stacktrace(struct task_struct *task, i++; } pr_cont("\n"); - show_backtrace(task, regs, loglvl); + show_backtrace(task, regs, loglvl, user); } void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) { struct pt_regs regs; - mm_segment_t old_fs = get_fs(); regs.cp0_status = KSU_KERNEL; if (sp) { @@ -217,33 +217,41 @@ void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) prepare_frametrace(®s); } } - /* - * show_stack() deals exclusively with kernel mode, so be sure to access - * the stack in the kernel (not user) address space. 
- */ - set_fs(KERNEL_DS); - show_stacktrace(task, ®s, loglvl); - set_fs(old_fs); + show_stacktrace(task, ®s, loglvl, false); } -static void show_code(unsigned int __user *pc) +static void show_code(void *pc, bool user) { long i; - unsigned short __user *pc16 = NULL; + unsigned short *pc16 = NULL; printk("Code:"); if ((unsigned long)pc & 1) - pc16 = (unsigned short __user *)((unsigned long)pc & ~1); + pc16 = (u16 *)((unsigned long)pc & ~1); + for(i = -3 ; i < 6 ; i++) { - unsigned int insn; - if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) { - pr_cont(" (Bad address in epc)\n"); - break; + if (pc16) { + u16 insn16; + + if (__get_inst16(&insn16, pc16 + i, user)) + goto bad_address; + + pr_cont("%c%04x%c", (i?' ':'<'), insn16, (i?' ':'>')); + } else { + u32 insn32; + + if (__get_inst32(&insn32, (u32 *)pc + i, user)) + goto bad_address; + + pr_cont("%c%08x%c", (i?' ':'<'), insn32, (i?' ':'>')); } - pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>')); } pr_cont("\n"); + return; + +bad_address: + pr_cont(" (Bad address in epc)\n\n"); } static void __show_regs(const struct pt_regs *regs) @@ -356,7 +364,6 @@ void show_regs(struct pt_regs *regs) void show_registers(struct pt_regs *regs) { const int field = 2 * sizeof(unsigned long); - mm_segment_t old_fs = get_fs(); __show_regs(regs); print_modules(); @@ -371,13 +378,9 @@ void show_registers(struct pt_regs *regs) printk("*HwTLS: %0*lx\n", field, tls); } - if (!user_mode(regs)) - /* Necessary for getting the correct stack content */ - set_fs(KERNEL_DS); - show_stacktrace(current, regs, KERN_DEFAULT); - show_code((unsigned int __user *) regs->cp0_epc); + show_stacktrace(current, regs, KERN_DEFAULT, user_mode(regs)); + show_code((void *)regs->cp0_epc, user_mode(regs)); printk("\n"); - set_fs(old_fs); } static DEFINE_RAW_SPINLOCK(die_lock); @@ -1022,18 +1025,14 @@ asmlinkage void do_bp(struct pt_regs *regs) unsigned long epc = msk_isa16_mode(exception_epc(regs)); unsigned int opcode, bcode; enum ctx_state prev_state; - mm_segment_t seg; - - seg = get_fs(); - if (!user_mode(regs)) - set_fs(KERNEL_DS); + bool user = user_mode(regs); prev_state = exception_enter(); current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; if (get_isa16_mode(regs->cp0_epc)) { u16 instr[2]; - if (__get_user(instr[0], (u16 __user *)epc)) + if (__get_inst16(&instr[0], (u16 *)epc, user)) goto out_sigsegv; if (!cpu_has_mmips) { @@ -1044,13 +1043,13 @@ asmlinkage void do_bp(struct pt_regs *regs) bcode = instr[0] & 0xf; } else { /* 32-bit microMIPS BREAK */ - if (__get_user(instr[1], (u16 __user *)(epc + 2))) + if (__get_inst16(&instr[1], (u16 *)(epc + 2), user)) goto out_sigsegv; opcode = (instr[0] << 16) | instr[1]; bcode = (opcode >> 6) & ((1 << 20) - 1); } } else { - if (__get_user(opcode, (unsigned int __user *)epc)) + if (__get_inst32(&opcode, (u32 *)epc, user)) goto out_sigsegv; bcode = (opcode >> 6) & ((1 << 20) - 1); } @@ -1100,7 +1099,6 @@ asmlinkage void do_bp(struct pt_regs *regs) do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break"); out: - set_fs(seg); exception_exit(prev_state); return; @@ -1114,25 +1112,21 @@ asmlinkage void do_tr(struct pt_regs *regs) u32 opcode, tcode = 0; enum ctx_state prev_state; u16 instr[2]; - mm_segment_t seg; + bool user = user_mode(regs); unsigned long epc = msk_isa16_mode(exception_epc(regs)); - seg = get_fs(); - if (!user_mode(regs)) - set_fs(KERNEL_DS); - prev_state = exception_enter(); current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; if (get_isa16_mode(regs->cp0_epc)) { - if (__get_user(instr[0], (u16 
__user *)(epc + 0)) || - __get_user(instr[1], (u16 __user *)(epc + 2))) + if (__get_inst16(&instr[0], (u16 *)(epc + 0), user) || + __get_inst16(&instr[1], (u16 *)(epc + 2), user)) goto out_sigsegv; opcode = (instr[0] << 16) | instr[1]; /* Immediate versions don't provide a code. */ if (!(opcode & OPCODE)) tcode = (opcode >> 12) & ((1 << 4) - 1); } else { - if (__get_user(opcode, (u32 __user *)epc)) + if (__get_inst32(&opcode, (u32 *)epc, user)) goto out_sigsegv; /* Immediate versions don't provide a code. */ if (!(opcode & OPCODE)) @@ -1142,7 +1136,6 @@ asmlinkage void do_tr(struct pt_regs *regs) do_trap_or_bp(regs, tcode, 0, "Trap"); out: - set_fs(seg); exception_exit(prev_state); return; @@ -1591,7 +1584,6 @@ asmlinkage void do_mcheck(struct pt_regs *regs) { int multi_match = regs->cp0_status & ST0_TS; enum ctx_state prev_state; - mm_segment_t old_fs = get_fs(); prev_state = exception_enter(); show_regs(regs); @@ -1602,12 +1594,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs) dump_tlb_all(); } - if (!user_mode(regs)) - set_fs(KERNEL_DS); - - show_code((unsigned int __user *) regs->cp0_epc); - - set_fs(old_fs); + show_code((void *)regs->cp0_epc, user_mode(regs)); /* * Some chips may have other causes of machine check (e.g. SB1 diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 126a5f3f4e4c..df4b708c04a9 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -93,6 +93,8 @@ #include <asm/mmu_context.h> #include <linux/uaccess.h> +#include "access-helper.h" + enum { UNALIGNED_ACTION_QUIET, UNALIGNED_ACTION_SIGNAL, @@ -107,14 +109,13 @@ static u32 unaligned_action; extern void show_registers(struct pt_regs *regs); static void emulate_load_store_insn(struct pt_regs *regs, - void __user *addr, unsigned int __user *pc) + void __user *addr, unsigned int *pc) { unsigned long origpc, orig31, value; union mips_instruction insn; unsigned int res; -#ifdef CONFIG_EVA - mm_segment_t seg; -#endif + bool user = user_mode(regs); + origpc = (unsigned long)pc; orig31 = regs->regs[31]; @@ -123,7 +124,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, /* * This load never faults. */ - __get_user(insn.word, pc); + __get_inst32(&insn.word, pc, user); switch (insn.i_format.opcode) { /* @@ -163,7 +164,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, if (insn.dsp_format.func == lx_op) { switch (insn.dsp_format.op) { case lwx_op: - if (!access_ok(addr, 4)) + if (user && !access_ok(addr, 4)) goto sigbus; LoadW(addr, value, res); if (res) @@ -172,7 +173,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, regs->regs[insn.dsp_format.rd] = value; break; case lhx_op: - if (!access_ok(addr, 2)) + if (user && !access_ok(addr, 2)) goto sigbus; LoadHW(addr, value, res); if (res) @@ -191,93 +192,66 @@ static void emulate_load_store_insn(struct pt_regs *regs, * memory, so we need to "switch" the address limit to * user space, so that address check can work properly. 
*/ - seg = force_uaccess_begin(); switch (insn.spec3_format.func) { case lhe_op: - if (!access_ok(addr, 2)) { - force_uaccess_end(seg); + if (!access_ok(addr, 2)) goto sigbus; - } LoadHWE(addr, value, res); - if (res) { - force_uaccess_end(seg); + if (res) goto fault; - } compute_return_epc(regs); regs->regs[insn.spec3_format.rt] = value; break; case lwe_op: - if (!access_ok(addr, 4)) { - force_uaccess_end(seg); + if (!access_ok(addr, 4)) goto sigbus; - } LoadWE(addr, value, res); - if (res) { - force_uaccess_end(seg); + if (res) goto fault; - } compute_return_epc(regs); regs->regs[insn.spec3_format.rt] = value; break; case lhue_op: - if (!access_ok(addr, 2)) { - force_uaccess_end(seg); + if (!access_ok(addr, 2)) goto sigbus; - } LoadHWUE(addr, value, res); - if (res) { - force_uaccess_end(seg); + if (res) goto fault; - } compute_return_epc(regs); regs->regs[insn.spec3_format.rt] = value; break; case she_op: - if (!access_ok(addr, 2)) { - force_uaccess_end(seg); + if (!access_ok(addr, 2)) goto sigbus; - } compute_return_epc(regs); value = regs->regs[insn.spec3_format.rt]; StoreHWE(addr, value, res); - if (res) { - force_uaccess_end(seg); + if (res) goto fault; - } break; case swe_op: - if (!access_ok(addr, 4)) { - force_uaccess_end(seg); + if (!access_ok(addr, 4)) goto sigbus; - } compute_return_epc(regs); value = regs->regs[insn.spec3_format.rt]; StoreWE(addr, value, res); - if (res) { - force_uaccess_end(seg); + if (res) goto fault; - } break; default: - force_uaccess_end(seg); goto sigill; } - force_uaccess_end(seg); } #endif break; case lh_op: - if (!access_ok(addr, 2)) + if (user && !access_ok(addr, 2)) goto sigbus; - if (IS_ENABLED(CONFIG_EVA)) { - if (uaccess_kernel()) - LoadHW(addr, value, res); - else - LoadHWE(addr, value, res); - } else { + if (IS_ENABLED(CONFIG_EVA) && user) + LoadHWE(addr, value, res); + else LoadHW(addr, value, res); - } if (res) goto fault; @@ -286,17 +260,13 @@ static void emulate_load_store_insn(struct pt_regs *regs, break; case lw_op: - if (!access_ok(addr, 4)) + if (user && !access_ok(addr, 4)) goto sigbus; - if (IS_ENABLED(CONFIG_EVA)) { - if (uaccess_kernel()) - LoadW(addr, value, res); - else - LoadWE(addr, value, res); - } else { + if (IS_ENABLED(CONFIG_EVA) && user) + LoadWE(addr, value, res); + else LoadW(addr, value, res); - } if (res) goto fault; @@ -305,17 +275,13 @@ static void emulate_load_store_insn(struct pt_regs *regs, break; case lhu_op: - if (!access_ok(addr, 2)) + if (user && !access_ok(addr, 2)) goto sigbus; - if (IS_ENABLED(CONFIG_EVA)) { - if (uaccess_kernel()) - LoadHWU(addr, value, res); - else - LoadHWUE(addr, value, res); - } else { + if (IS_ENABLED(CONFIG_EVA) && user) + LoadHWUE(addr, value, res); + else LoadHWU(addr, value, res); - } if (res) goto fault; @@ -332,7 +298,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ - if (!access_ok(addr, 4)) + if (user && !access_ok(addr, 4)) goto sigbus; LoadWU(addr, value, res); @@ -355,7 +321,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. 
*/ - if (!access_ok(addr, 8)) + if (user && !access_ok(addr, 8)) goto sigbus; LoadDW(addr, value, res); @@ -370,40 +336,32 @@ static void emulate_load_store_insn(struct pt_regs *regs, goto sigill; case sh_op: - if (!access_ok(addr, 2)) + if (user && !access_ok(addr, 2)) goto sigbus; compute_return_epc(regs); value = regs->regs[insn.i_format.rt]; - if (IS_ENABLED(CONFIG_EVA)) { - if (uaccess_kernel()) - StoreHW(addr, value, res); - else - StoreHWE(addr, value, res); - } else { + if (IS_ENABLED(CONFIG_EVA) && user) + StoreHWE(addr, value, res); + else StoreHW(addr, value, res); - } if (res) goto fault; break; case sw_op: - if (!access_ok(addr, 4)) + if (user && !access_ok(addr, 4)) goto sigbus; compute_return_epc(regs); value = regs->regs[insn.i_format.rt]; - if (IS_ENABLED(CONFIG_EVA)) { - if (uaccess_kernel()) - StoreW(addr, value, res); - else - StoreWE(addr, value, res); - } else { + if (IS_ENABLED(CONFIG_EVA) && user) + StoreWE(addr, value, res); + else StoreW(addr, value, res); - } if (res) goto fault; @@ -418,7 +376,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ - if (!access_ok(addr, 8)) + if (user && !access_ok(addr, 8)) goto sigbus; compute_return_epc(regs); @@ -626,6 +584,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, unsigned long origpc, contpc; union mips_instruction insn; struct mm_decoded_insn mminsn; + bool user = user_mode(regs); origpc = regs->cp0_epc; orig31 = regs->regs[31]; @@ -689,7 +648,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, if (reg == 31) goto sigbus; - if (!access_ok(addr, 8)) + if (user && !access_ok(addr, 8)) goto sigbus; LoadW(addr, value, res); @@ -708,7 +667,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, if (reg == 31) goto sigbus; - if (!access_ok(addr, 8)) + if (user && !access_ok(addr, 8)) goto sigbus; value = regs->regs[reg]; @@ -728,7 +687,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, if (reg == 31) goto sigbus; - if (!access_ok(addr, 16)) + if (user && !access_ok(addr, 16)) goto sigbus; LoadDW(addr, value, res); @@ -751,7 +710,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, if (reg == 31) goto sigbus; - if (!access_ok(addr, 16)) + if (user && !access_ok(addr, 16)) goto sigbus; value = regs->regs[reg]; @@ -774,10 +733,10 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, if ((rvar > 9) || !reg) goto sigill; if (reg & 0x10) { - if (!access_ok(addr, 4 * (rvar + 1))) + if (user && !access_ok(addr, 4 * (rvar + 1))) goto sigbus; } else { - if (!access_ok(addr, 4 * rvar)) + if (user && !access_ok(addr, 4 * rvar)) goto sigbus; } if (rvar == 9) @@ -810,10 +769,10 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, if ((rvar > 9) || !reg) goto sigill; if (reg & 0x10) { - if (!access_ok(addr, 4 * (rvar + 1))) + if (user && !access_ok(addr, 4 * (rvar + 1))) goto sigbus; } else { - if (!access_ok(addr, 4 * rvar)) + if (user && !access_ok(addr, 4 * rvar)) goto sigbus; } if (rvar == 9) @@ -847,10 +806,10 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, if ((rvar > 9) || !reg) goto sigill; if (reg & 0x10) { - if (!access_ok(addr, 8 * (rvar + 1))) + if (user && !access_ok(addr, 8 * (rvar + 1))) goto sigbus; } else { - if (!access_ok(addr, 8 * rvar)) + if (user && !access_ok(addr, 8 * rvar)) goto sigbus; } if (rvar == 9) @@ -888,10 +847,10 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, 
if ((rvar > 9) || !reg) goto sigill; if (reg & 0x10) { - if (!access_ok(addr, 8 * (rvar + 1))) + if (user && !access_ok(addr, 8 * (rvar + 1))) goto sigbus; } else { - if (!access_ok(addr, 8 * rvar)) + if (user && !access_ok(addr, 8 * rvar)) goto sigbus; } if (rvar == 9) @@ -1010,7 +969,7 @@ fpu_emul: case mm_lwm16_op: reg = insn.mm16_m_format.rlist; rvar = reg + 1; - if (!access_ok(addr, 4 * rvar)) + if (user && !access_ok(addr, 4 * rvar)) goto sigbus; for (i = 16; rvar; rvar--, i++) { @@ -1030,7 +989,7 @@ fpu_emul: case mm_swm16_op: reg = insn.mm16_m_format.rlist; rvar = reg + 1; - if (!access_ok(addr, 4 * rvar)) + if (user && !access_ok(addr, 4 * rvar)) goto sigbus; for (i = 16; rvar; rvar--, i++) { @@ -1084,7 +1043,7 @@ fpu_emul: } loadHW: - if (!access_ok(addr, 2)) + if (user && !access_ok(addr, 2)) goto sigbus; LoadHW(addr, value, res); @@ -1094,7 +1053,7 @@ loadHW: goto success; loadHWU: - if (!access_ok(addr, 2)) + if (user && !access_ok(addr, 2)) goto sigbus; LoadHWU(addr, value, res); @@ -1104,7 +1063,7 @@ loadHWU: goto success; loadW: - if (!access_ok(addr, 4)) + if (user && !access_ok(addr, 4)) goto sigbus; LoadW(addr, value, res); @@ -1122,7 +1081,7 @@ loadWU: * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ - if (!access_ok(addr, 4)) + if (user && !access_ok(addr, 4)) goto sigbus; LoadWU(addr, value, res); @@ -1144,7 +1103,7 @@ loadDW: * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ - if (!access_ok(addr, 8)) + if (user && !access_ok(addr, 8)) goto sigbus; LoadDW(addr, value, res); @@ -1158,7 +1117,7 @@ loadDW: goto sigill; storeHW: - if (!access_ok(addr, 2)) + if (user && !access_ok(addr, 2)) goto sigbus; value = regs->regs[reg]; @@ -1168,7 +1127,7 @@ storeHW: goto success; storeW: - if (!access_ok(addr, 4)) + if (user && !access_ok(addr, 4)) goto sigbus; value = regs->regs[reg]; @@ -1186,7 +1145,7 @@ storeDW: * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ - if (!access_ok(addr, 8)) + if (user && !access_ok(addr, 8)) goto sigbus; value = regs->regs[reg]; @@ -1243,6 +1202,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) union mips16e_instruction mips16inst, oldinst; unsigned int opcode; int extended = 0; + bool user = user_mode(regs); origpc = regs->cp0_epc; orig31 = regs->regs[31]; @@ -1344,7 +1304,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) goto sigbus; case MIPS16e_lh_op: - if (!access_ok(addr, 2)) + if (user && !access_ok(addr, 2)) goto sigbus; LoadHW(addr, value, res); @@ -1355,7 +1315,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) break; case MIPS16e_lhu_op: - if (!access_ok(addr, 2)) + if (user && !access_ok(addr, 2)) goto sigbus; LoadHWU(addr, value, res); @@ -1368,7 +1328,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) case MIPS16e_lw_op: case MIPS16e_lwpc_op: case MIPS16e_lwsp_op: - if (!access_ok(addr, 4)) + if (user && !access_ok(addr, 4)) goto sigbus; LoadW(addr, value, res); @@ -1387,7 +1347,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. 
*/ - if (!access_ok(addr, 4)) + if (user && !access_ok(addr, 4)) goto sigbus; LoadWU(addr, value, res); @@ -1411,7 +1371,7 @@ loadDW: * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ - if (!access_ok(addr, 8)) + if (user && !access_ok(addr, 8)) goto sigbus; LoadDW(addr, value, res); @@ -1426,7 +1386,7 @@ loadDW: goto sigill; case MIPS16e_sh_op: - if (!access_ok(addr, 2)) + if (user && !access_ok(addr, 2)) goto sigbus; MIPS16e_compute_return_epc(regs, &oldinst); @@ -1439,7 +1399,7 @@ loadDW: case MIPS16e_sw_op: case MIPS16e_swsp_op: case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */ - if (!access_ok(addr, 4)) + if (user && !access_ok(addr, 4)) goto sigbus; MIPS16e_compute_return_epc(regs, &oldinst); @@ -1459,7 +1419,7 @@ writeDW: * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ - if (!access_ok(addr, 8)) + if (user && !access_ok(addr, 8)) goto sigbus; MIPS16e_compute_return_epc(regs, &oldinst); @@ -1515,8 +1475,7 @@ sigill: asmlinkage void do_ade(struct pt_regs *regs) { enum ctx_state prev_state; - unsigned int __user *pc; - mm_segment_t seg; + unsigned int *pc; prev_state = exception_enter(); perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, @@ -1551,24 +1510,14 @@ asmlinkage void do_ade(struct pt_regs *regs) show_registers(regs); if (cpu_has_mmips) { - seg = get_fs(); - if (!user_mode(regs)) - set_fs(KERNEL_DS); emulate_load_store_microMIPS(regs, (void __user *)regs->cp0_badvaddr); - set_fs(seg); - return; } if (cpu_has_mips16) { - seg = get_fs(); - if (!user_mode(regs)) - set_fs(KERNEL_DS); emulate_load_store_MIPS16e(regs, (void __user *)regs->cp0_badvaddr); - set_fs(seg); - return; } @@ -1577,13 +1526,9 @@ asmlinkage void do_ade(struct pt_regs *regs) if (unaligned_action == UNALIGNED_ACTION_SHOW) show_registers(regs); - pc = (unsigned int __user *)exception_epc(regs); + pc = (unsigned int *)exception_epc(regs); - seg = get_fs(); - if (!user_mode(regs)) - set_fs(KERNEL_DS); emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc); - set_fs(seg); return; diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c index 7d0b91ad2581..3d0cf471f2fe 100644 --- a/arch/mips/kernel/vdso.c +++ b/arch/mips/kernel/vdso.c @@ -90,7 +90,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mips_vdso_image *image = current->thread.abi->vdso; struct mm_struct *mm = current->mm; - unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr, gic_pfn; + unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr, gic_pfn, gic_base; struct vm_area_struct *vma; int ret; @@ -158,7 +158,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) /* Map GIC user page. */ if (gic_size) { - gic_pfn = virt_to_phys(mips_gic_base + MIPS_GIC_USER_OFS) >> PAGE_SHIFT; + gic_base = (unsigned long)mips_gic_base + MIPS_GIC_USER_OFS; + gic_pfn = virt_to_phys((void *)gic_base) >> PAGE_SHIFT; ret = io_remap_pfn_range(vma, base, gic_pfn, gic_size, pgprot_noncached(vma->vm_page_prot)); diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig index 032b3fca6cbb..a77297480f56 100644 --- a/arch/mips/kvm/Kconfig +++ b/arch/mips/kvm/Kconfig @@ -30,40 +30,6 @@ config KVM help Support for hosting Guest kernels. -choice - prompt "Virtualization mode" - depends on KVM - default KVM_MIPS_TE - -config KVM_MIPS_TE - bool "Trap & Emulate" - depends on CPU_MIPS32_R2 - help - Use trap and emulate to virtualize 32-bit guests in user mode. 
This - does not require any special hardware Virtualization support beyond - standard MIPS32 r2 or later, but it does require the guest kernel - to be configured with CONFIG_KVM_GUEST=y so that it resides in the - user address segment. - -config KVM_MIPS_VZ - bool "MIPS Virtualization (VZ) ASE" - help - Use the MIPS Virtualization (VZ) ASE to virtualize guests. This - supports running unmodified guest kernels (with CONFIG_KVM_GUEST=n), - but requires hardware support. - -endchoice - -config KVM_MIPS_DYN_TRANS - bool "KVM/MIPS: Dynamic binary translation to reduce traps" - depends on KVM_MIPS_TE - default y - help - When running in Trap & Emulate mode patch privileged - instructions to reduce the number of traps. - - If unsure, say Y. - config KVM_MIPS_DEBUG_COP0_COUNTERS bool "Maintain counters for COP0 accesses" depends on KVM diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile index 506c4ac0ba1c..30cc060857c7 100644 --- a/arch/mips/kvm/Makefile +++ b/arch/mips/kvm/Makefile @@ -9,7 +9,7 @@ EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o kvm-objs := $(common-objs-y) mips.o emulate.o entry.o \ - interrupt.o stats.o commpage.o \ + interrupt.o stats.o \ fpu.o kvm-objs += hypcall.o kvm-objs += mmu.o @@ -17,11 +17,6 @@ ifdef CONFIG_CPU_LOONGSON64 kvm-objs += loongson_ipi.o endif -ifdef CONFIG_KVM_MIPS_VZ kvm-objs += vz.o -else -kvm-objs += dyntrans.o -kvm-objs += trap_emul.o -endif obj-$(CONFIG_KVM) += kvm.o obj-y += callback.o tlb.o diff --git a/arch/mips/kvm/commpage.c b/arch/mips/kvm/commpage.c deleted file mode 100644 index 5812e6145801..000000000000 --- a/arch/mips/kvm/commpage.c +++ /dev/null @@ -1,32 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * commpage, currently used for Virtual COP0 registers. - * Mapped into the guest kernel @ KVM_GUEST_COMMPAGE_ADDR. - * - * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. - * Authors: Sanjay Lal <sanjayl@kymasys.com> - */ - -#include <linux/errno.h> -#include <linux/err.h> -#include <linux/vmalloc.h> -#include <linux/fs.h> -#include <linux/memblock.h> -#include <asm/page.h> -#include <asm/cacheflush.h> -#include <asm/mmu_context.h> - -#include <linux/kvm_host.h> - -#include "commpage.h" - -void kvm_mips_commpage_init(struct kvm_vcpu *vcpu) -{ - struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage; - - /* Specific init values for fields */ - vcpu->arch.cop0 = &page->cop0; -} diff --git a/arch/mips/kvm/commpage.h b/arch/mips/kvm/commpage.h deleted file mode 100644 index 08c5fa2bbc0f..000000000000 --- a/arch/mips/kvm/commpage.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * KVM/MIPS: commpage: mapped into get kernel space - * - * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 
- * Authors: Sanjay Lal <sanjayl@kymasys.com> - */ - -#ifndef __KVM_MIPS_COMMPAGE_H__ -#define __KVM_MIPS_COMMPAGE_H__ - -struct kvm_mips_commpage { - /* COP0 state is mapped into Guest kernel via commpage */ - struct mips_coproc cop0; -}; - -#define KVM_MIPS_COMM_EIDI_OFFSET 0x0 - -extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu); - -#endif /* __KVM_MIPS_COMMPAGE_H__ */ diff --git a/arch/mips/kvm/dyntrans.c b/arch/mips/kvm/dyntrans.c deleted file mode 100644 index d77b61b3d6ee..000000000000 --- a/arch/mips/kvm/dyntrans.c +++ /dev/null @@ -1,143 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * KVM/MIPS: Binary Patching for privileged instructions, reduces traps. - * - * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. - * Authors: Sanjay Lal <sanjayl@kymasys.com> - */ - -#include <linux/errno.h> -#include <linux/err.h> -#include <linux/highmem.h> -#include <linux/kvm_host.h> -#include <linux/uaccess.h> -#include <linux/vmalloc.h> -#include <linux/fs.h> -#include <linux/memblock.h> -#include <asm/cacheflush.h> - -#include "commpage.h" - -/** - * kvm_mips_trans_replace() - Replace trapping instruction in guest memory. - * @vcpu: Virtual CPU. - * @opc: PC of instruction to replace. - * @replace: Instruction to write - */ -static int kvm_mips_trans_replace(struct kvm_vcpu *vcpu, u32 *opc, - union mips_instruction replace) -{ - unsigned long vaddr = (unsigned long)opc; - int err; - -retry: - /* The GVA page table is still active so use the Linux TLB handlers */ - kvm_trap_emul_gva_lockless_begin(vcpu); - err = put_user(replace.word, opc); - kvm_trap_emul_gva_lockless_end(vcpu); - - if (unlikely(err)) { - /* - * We write protect clean pages in GVA page table so normal - * Linux TLB mod handler doesn't silently dirty the page. - * Its also possible we raced with a GVA invalidation. - * Try to force the page to become dirty. - */ - err = kvm_trap_emul_gva_fault(vcpu, vaddr, true); - if (unlikely(err)) { - kvm_info("%s: Address unwriteable: %p\n", - __func__, opc); - return -EFAULT; - } - - /* - * Try again. This will likely trigger a TLB refill, which will - * fetch the new dirty entry from the GVA page table, which - * should then succeed. - */ - goto retry; - } - __local_flush_icache_user_range(vaddr, vaddr + 4); - - return 0; -} - -int kvm_mips_trans_cache_index(union mips_instruction inst, u32 *opc, - struct kvm_vcpu *vcpu) -{ - union mips_instruction nop_inst = { 0 }; - - /* Replace the CACHE instruction, with a NOP */ - return kvm_mips_trans_replace(vcpu, opc, nop_inst); -} - -/* - * Address based CACHE instructions are transformed into synci(s). 
A little - * heavy for just D-cache invalidates, but avoids an expensive trap - */ -int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc, - struct kvm_vcpu *vcpu) -{ - union mips_instruction synci_inst = { 0 }; - - synci_inst.i_format.opcode = bcond_op; - synci_inst.i_format.rs = inst.i_format.rs; - synci_inst.i_format.rt = synci_op; - if (cpu_has_mips_r6) - synci_inst.i_format.simmediate = inst.spec3_format.simmediate; - else - synci_inst.i_format.simmediate = inst.i_format.simmediate; - - return kvm_mips_trans_replace(vcpu, opc, synci_inst); -} - -int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc, - struct kvm_vcpu *vcpu) -{ - union mips_instruction mfc0_inst = { 0 }; - u32 rd, sel; - - rd = inst.c0r_format.rd; - sel = inst.c0r_format.sel; - - if (rd == MIPS_CP0_ERRCTL && sel == 0) { - mfc0_inst.r_format.opcode = spec_op; - mfc0_inst.r_format.rd = inst.c0r_format.rt; - mfc0_inst.r_format.func = add_op; - } else { - mfc0_inst.i_format.opcode = lw_op; - mfc0_inst.i_format.rt = inst.c0r_format.rt; - mfc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR | - offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]); -#ifdef CONFIG_CPU_BIG_ENDIAN - if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8) - mfc0_inst.i_format.simmediate |= 4; -#endif - } - - return kvm_mips_trans_replace(vcpu, opc, mfc0_inst); -} - -int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc, - struct kvm_vcpu *vcpu) -{ - union mips_instruction mtc0_inst = { 0 }; - u32 rd, sel; - - rd = inst.c0r_format.rd; - sel = inst.c0r_format.sel; - - mtc0_inst.i_format.opcode = sw_op; - mtc0_inst.i_format.rt = inst.c0r_format.rt; - mtc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR | - offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]); -#ifdef CONFIG_CPU_BIG_ENDIAN - if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8) - mtc0_inst.i_format.simmediate |= 4; -#endif - - return kvm_mips_trans_replace(vcpu, opc, mtc0_inst); -} diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index d70c4f8e14e2..22e745e49b0a 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c @@ -30,7 +30,6 @@ #define CONFIG_MIPS_MT #include "interrupt.h" -#include "commpage.h" #include "trace.h" @@ -276,7 +275,8 @@ int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out) *out = vcpu->arch.host_cp0_badinstr; return 0; } else { - return kvm_get_inst(opc, vcpu, out); + WARN_ONCE(1, "CPU doesn't have BadInstr register\n"); + return -EINVAL; } } @@ -297,7 +297,8 @@ int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out) *out = vcpu->arch.host_cp0_badinstrp; return 0; } else { - return kvm_get_inst(opc, vcpu, out); + WARN_ONCE(1, "CPU doesn't have BadInstrp register\n"); + return -EINVAL; } } @@ -721,7 +722,7 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack) * preemption until the new value is written to prevent restore of a * GTOffset corresponding to the old CP0_Compare value. */ - if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta > 0) { + if (delta > 0) { preempt_disable(); write_c0_gtoffset(compare - read_c0_count()); back_to_back_c0_hazard(); @@ -734,7 +735,7 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack) if (ack) kvm_mips_callbacks->dequeue_timer_int(vcpu); - else if (IS_ENABLED(CONFIG_KVM_MIPS_VZ)) + else /* * With VZ, writing CP0_Compare acks (clears) CP0_Cause.TI, so * preserve guest CP0_Cause.TI if we don't want to ack it. 
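/*
 * Aside: a minimal sketch of the accessor pattern this series adopts in
 * place of set_fs(KERNEL_DS).  fetch_inst32() mirrors __get_inst32() from
 * arch/mips/kernel/access-helper.h shown earlier in this patch, selecting
 * get_user() or get_kernel_nofault() from user_mode(regs); the caller
 * dump_faulting_insn() is hypothetical and only illustrates how the
 * traps.c hunks use such helpers.
 */
#include <linux/types.h>
#include <linux/uaccess.h>	/* get_user(), get_kernel_nofault() */
#include <linux/ptrace.h>	/* struct pt_regs, user_mode() */
#include <linux/printk.h>

static int fetch_inst32(u32 *inst, u32 *pc, bool user)
{
	/* Pick the accessor from the faulting context instead of widening
	 * the address limit for the whole exception handler. */
	return user ? get_user(*inst, (u32 __user *)pc)
		    : get_kernel_nofault(*inst, pc);
}

/* Hypothetical caller: dump the instruction at the exception PC. */
static void dump_faulting_insn(struct pt_regs *regs)
{
	u32 word;

	if (fetch_inst32(&word, (u32 *)regs->cp0_epc, user_mode(regs)))
		pr_cont(" (Bad address in epc)\n");
	else
		pr_cont(" <%08x>\n", word);
}
/*
 * Because the kernel-side read goes through get_kernel_nofault(), the
 * handlers no longer need the get_fs()/set_fs(KERNEL_DS) save/restore
 * pairs that the traps.c and unaligned.c hunks delete.
 */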
@@ -743,15 +744,13 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack) kvm_write_c0_guest_compare(cop0, compare); - if (IS_ENABLED(CONFIG_KVM_MIPS_VZ)) { - if (delta > 0) - preempt_enable(); + if (delta > 0) + preempt_enable(); - back_to_back_c0_hazard(); + back_to_back_c0_hazard(); - if (!ack && cause & CAUSEF_TI) - kvm_write_c0_guest_cause(cop0, cause); - } + if (!ack && cause & CAUSEF_TI) + kvm_write_c0_guest_cause(cop0, cause); /* resume_hrtimer() takes care of timer interrupts > count */ if (!dc) @@ -762,7 +761,7 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack) * until after the new CP0_Compare is written, otherwise new guest * CP0_Count could hit new guest CP0_Compare. */ - if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta <= 0) + if (delta <= 0) write_c0_gtoffset(compare - read_c0_count()); } @@ -943,29 +942,6 @@ enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu) return HRTIMER_RESTART; } -enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - enum emulation_result er = EMULATE_DONE; - - if (kvm_read_c0_guest_status(cop0) & ST0_ERL) { - kvm_clear_c0_guest_status(cop0, ST0_ERL); - vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0); - } else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) { - kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc, - kvm_read_c0_guest_epc(cop0)); - kvm_clear_c0_guest_status(cop0, ST0_EXL); - vcpu->arch.pc = kvm_read_c0_guest_epc(cop0); - - } else { - kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n", - vcpu->arch.pc); - er = EMULATE_FAIL; - } - - return er; -} - enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) { kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc, @@ -991,609 +967,6 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) return EMULATE_DONE; } -static void kvm_mips_change_entryhi(struct kvm_vcpu *vcpu, - unsigned long entryhi) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; - int cpu, i; - u32 nasid = entryhi & KVM_ENTRYHI_ASID; - - if (((kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID) != nasid)) { - trace_kvm_asid_change(vcpu, kvm_read_c0_guest_entryhi(cop0) & - KVM_ENTRYHI_ASID, nasid); - - /* - * Flush entries from the GVA page tables. - * Guest user page table will get flushed lazily on re-entry to - * guest user if the guest ASID actually changes. - */ - kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_KERN); - - /* - * Regenerate/invalidate kernel MMU context. - * The user MMU context will be regenerated lazily on re-entry - * to guest user if the guest ASID actually changes. 
- */ - preempt_disable(); - cpu = smp_processor_id(); - get_new_mmu_context(kern_mm); - for_each_possible_cpu(i) - if (i != cpu) - set_cpu_context(i, kern_mm, 0); - preempt_enable(); - } - kvm_write_c0_guest_entryhi(cop0, entryhi); -} - -enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_mips_tlb *tlb; - unsigned long pc = vcpu->arch.pc; - int index; - - index = kvm_read_c0_guest_index(cop0); - if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) { - /* UNDEFINED */ - kvm_debug("[%#lx] TLBR Index %#x out of range\n", pc, index); - index &= KVM_MIPS_GUEST_TLB_SIZE - 1; - } - - tlb = &vcpu->arch.guest_tlb[index]; - kvm_write_c0_guest_pagemask(cop0, tlb->tlb_mask); - kvm_write_c0_guest_entrylo0(cop0, tlb->tlb_lo[0]); - kvm_write_c0_guest_entrylo1(cop0, tlb->tlb_lo[1]); - kvm_mips_change_entryhi(vcpu, tlb->tlb_hi); - - return EMULATE_DONE; -} - -/** - * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map. - * @vcpu: VCPU with changed mappings. - * @tlb: TLB entry being removed. - * - * This is called to indicate a single change in guest MMU mappings, so that we - * can arrange TLB flushes on this and other CPUs. - */ -static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu, - struct kvm_mips_tlb *tlb) -{ - struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; - struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; - int cpu, i; - bool user; - - /* No need to flush for entries which are already invalid */ - if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V)) - return; - /* Don't touch host kernel page tables or TLB mappings */ - if ((unsigned long)tlb->tlb_hi > 0x7fffffff) - return; - /* User address space doesn't need flushing for KSeg2/3 changes */ - user = tlb->tlb_hi < KVM_GUEST_KSEG0; - - preempt_disable(); - - /* Invalidate page table entries */ - kvm_trap_emul_invalidate_gva(vcpu, tlb->tlb_hi & VPN2_MASK, user); - - /* - * Probe the shadow host TLB for the entry being overwritten, if one - * matches, invalidate it - */ - kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi, user, true); - - /* Invalidate the whole ASID on other CPUs */ - cpu = smp_processor_id(); - for_each_possible_cpu(i) { - if (i == cpu) - continue; - if (user) - set_cpu_context(i, user_mm, 0); - set_cpu_context(i, kern_mm, 0); - } - - preempt_enable(); -} - -/* Write Guest TLB Entry @ Index */ -enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - int index = kvm_read_c0_guest_index(cop0); - struct kvm_mips_tlb *tlb = NULL; - unsigned long pc = vcpu->arch.pc; - - if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) { - kvm_debug("%s: illegal index: %d\n", __func__, index); - kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", - pc, index, kvm_read_c0_guest_entryhi(cop0), - kvm_read_c0_guest_entrylo0(cop0), - kvm_read_c0_guest_entrylo1(cop0), - kvm_read_c0_guest_pagemask(cop0)); - index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE; - } - - tlb = &vcpu->arch.guest_tlb[index]; - - kvm_mips_invalidate_guest_tlb(vcpu, tlb); - - tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); - tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); - tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0); - tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0); - - kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", - pc, index, kvm_read_c0_guest_entryhi(cop0), - kvm_read_c0_guest_entrylo0(cop0), - 
kvm_read_c0_guest_entrylo1(cop0), - kvm_read_c0_guest_pagemask(cop0)); - - return EMULATE_DONE; -} - -/* Write Guest TLB Entry @ Random Index */ -enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_mips_tlb *tlb = NULL; - unsigned long pc = vcpu->arch.pc; - int index; - - index = prandom_u32_max(KVM_MIPS_GUEST_TLB_SIZE); - tlb = &vcpu->arch.guest_tlb[index]; - - kvm_mips_invalidate_guest_tlb(vcpu, tlb); - - tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); - tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); - tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0); - tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0); - - kvm_debug("[%#lx] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n", - pc, index, kvm_read_c0_guest_entryhi(cop0), - kvm_read_c0_guest_entrylo0(cop0), - kvm_read_c0_guest_entrylo1(cop0)); - - return EMULATE_DONE; -} - -enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - long entryhi = kvm_read_c0_guest_entryhi(cop0); - unsigned long pc = vcpu->arch.pc; - int index = -1; - - index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); - - kvm_write_c0_guest_index(cop0, index); - - kvm_debug("[%#lx] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi, - index); - - return EMULATE_DONE; -} - -/** - * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1 - * @vcpu: Virtual CPU. - * - * Finds the mask of bits which are writable in the guest's Config1 CP0 - * register, by userland (currently read-only to the guest). - */ -unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu) -{ - unsigned int mask = 0; - - /* Permit FPU to be present if FPU is supported */ - if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) - mask |= MIPS_CONF1_FP; - - return mask; -} - -/** - * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3 - * @vcpu: Virtual CPU. - * - * Finds the mask of bits which are writable in the guest's Config3 CP0 - * register, by userland (currently read-only to the guest). - */ -unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu) -{ - /* Config4 and ULRI are optional */ - unsigned int mask = MIPS_CONF_M | MIPS_CONF3_ULRI; - - /* Permit MSA to be present if MSA is supported */ - if (kvm_mips_guest_can_have_msa(&vcpu->arch)) - mask |= MIPS_CONF3_MSA; - - return mask; -} - -/** - * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4 - * @vcpu: Virtual CPU. - * - * Finds the mask of bits which are writable in the guest's Config4 CP0 - * register, by userland (currently read-only to the guest). - */ -unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu) -{ - /* Config5 is optional */ - unsigned int mask = MIPS_CONF_M; - - /* KScrExist */ - mask |= 0xfc << MIPS_CONF4_KSCREXIST_SHIFT; - - return mask; -} - -/** - * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5 - * @vcpu: Virtual CPU. - * - * Finds the mask of bits which are writable in the guest's Config5 CP0 - * register, by the guest itself. - */ -unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu) -{ - unsigned int mask = 0; - - /* Permit MSAEn changes if MSA supported and enabled */ - if (kvm_mips_guest_has_msa(&vcpu->arch)) - mask |= MIPS_CONF5_MSAEN; - - /* - * Permit guest FPU mode changes if FPU is enabled and the relevant - * feature exists according to FIR register. 
- */ - if (kvm_mips_guest_has_fpu(&vcpu->arch)) { - if (cpu_has_fre) - mask |= MIPS_CONF5_FRE; - /* We don't support UFR or UFE */ - } - - return mask; -} - -enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst, - u32 *opc, u32 cause, - struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - enum emulation_result er = EMULATE_DONE; - u32 rt, rd, sel; - unsigned long curr_pc; - - /* - * Update PC and hold onto current PC in case there is - * an error and we want to rollback the PC - */ - curr_pc = vcpu->arch.pc; - er = update_pc(vcpu, cause); - if (er == EMULATE_FAIL) - return er; - - if (inst.co_format.co) { - switch (inst.co_format.func) { - case tlbr_op: /* Read indexed TLB entry */ - er = kvm_mips_emul_tlbr(vcpu); - break; - case tlbwi_op: /* Write indexed */ - er = kvm_mips_emul_tlbwi(vcpu); - break; - case tlbwr_op: /* Write random */ - er = kvm_mips_emul_tlbwr(vcpu); - break; - case tlbp_op: /* TLB Probe */ - er = kvm_mips_emul_tlbp(vcpu); - break; - case rfe_op: - kvm_err("!!!COP0_RFE!!!\n"); - break; - case eret_op: - er = kvm_mips_emul_eret(vcpu); - goto dont_update_pc; - case wait_op: - er = kvm_mips_emul_wait(vcpu); - break; - case hypcall_op: - er = kvm_mips_emul_hypcall(vcpu, inst); - break; - } - } else { - rt = inst.c0r_format.rt; - rd = inst.c0r_format.rd; - sel = inst.c0r_format.sel; - - switch (inst.c0r_format.rs) { - case mfc_op: -#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS - cop0->stat[rd][sel]++; -#endif - /* Get reg */ - if ((rd == MIPS_CP0_COUNT) && (sel == 0)) { - vcpu->arch.gprs[rt] = - (s32)kvm_mips_read_count(vcpu); - } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) { - vcpu->arch.gprs[rt] = 0x0; -#ifdef CONFIG_KVM_MIPS_DYN_TRANS - kvm_mips_trans_mfc0(inst, opc, vcpu); -#endif - } else { - vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel]; - -#ifdef CONFIG_KVM_MIPS_DYN_TRANS - kvm_mips_trans_mfc0(inst, opc, vcpu); -#endif - } - - trace_kvm_hwr(vcpu, KVM_TRACE_MFC0, - KVM_TRACE_COP0(rd, sel), - vcpu->arch.gprs[rt]); - break; - - case dmfc_op: - vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; - - trace_kvm_hwr(vcpu, KVM_TRACE_DMFC0, - KVM_TRACE_COP0(rd, sel), - vcpu->arch.gprs[rt]); - break; - - case mtc_op: -#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS - cop0->stat[rd][sel]++; -#endif - trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, - KVM_TRACE_COP0(rd, sel), - vcpu->arch.gprs[rt]); - - if ((rd == MIPS_CP0_TLB_INDEX) - && (vcpu->arch.gprs[rt] >= - KVM_MIPS_GUEST_TLB_SIZE)) { - kvm_err("Invalid TLB Index: %ld", - vcpu->arch.gprs[rt]); - er = EMULATE_FAIL; - break; - } - if ((rd == MIPS_CP0_PRID) && (sel == 1)) { - /* - * Preserve core number, and keep the exception - * base in guest KSeg0. 
- */ - kvm_change_c0_guest_ebase(cop0, 0x1ffff000, - vcpu->arch.gprs[rt]); - } else if (rd == MIPS_CP0_TLB_HI && sel == 0) { - kvm_mips_change_entryhi(vcpu, - vcpu->arch.gprs[rt]); - } - /* Are we writing to COUNT */ - else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) { - kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]); - goto done; - } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) { - /* If we are writing to COMPARE */ - /* Clear pending timer interrupt, if any */ - kvm_mips_write_compare(vcpu, - vcpu->arch.gprs[rt], - true); - } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { - unsigned int old_val, val, change; - - old_val = kvm_read_c0_guest_status(cop0); - val = vcpu->arch.gprs[rt]; - change = val ^ old_val; - - /* Make sure that the NMI bit is never set */ - val &= ~ST0_NMI; - - /* - * Don't allow CU1 or FR to be set unless FPU - * capability enabled and exists in guest - * configuration. - */ - if (!kvm_mips_guest_has_fpu(&vcpu->arch)) - val &= ~(ST0_CU1 | ST0_FR); - - /* - * Also don't allow FR to be set if host doesn't - * support it. - */ - if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64)) - val &= ~ST0_FR; - - - /* Handle changes in FPU mode */ - preempt_disable(); - - /* - * FPU and Vector register state is made - * UNPREDICTABLE by a change of FR, so don't - * even bother saving it. - */ - if (change & ST0_FR) - kvm_drop_fpu(vcpu); - - /* - * If MSA state is already live, it is undefined - * how it interacts with FR=0 FPU state, and we - * don't want to hit reserved instruction - * exceptions trying to save the MSA state later - * when CU=1 && FR=1, so play it safe and save - * it first. - */ - if (change & ST0_CU1 && !(val & ST0_FR) && - vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) - kvm_lose_fpu(vcpu); - - /* - * Propagate CU1 (FPU enable) changes - * immediately if the FPU context is already - * loaded. When disabling we leave the context - * loaded so it can be quickly enabled again in - * the near future. - */ - if (change & ST0_CU1 && - vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) - change_c0_status(ST0_CU1, val); - - preempt_enable(); - - kvm_write_c0_guest_status(cop0, val); - -#ifdef CONFIG_KVM_MIPS_DYN_TRANS - /* - * If FPU present, we need CU1/FR bits to take - * effect fairly soon. - */ - if (!kvm_mips_guest_has_fpu(&vcpu->arch)) - kvm_mips_trans_mtc0(inst, opc, vcpu); -#endif - } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) { - unsigned int old_val, val, change, wrmask; - - old_val = kvm_read_c0_guest_config5(cop0); - val = vcpu->arch.gprs[rt]; - - /* Only a few bits are writable in Config5 */ - wrmask = kvm_mips_config5_wrmask(vcpu); - change = (val ^ old_val) & wrmask; - val = old_val ^ change; - - - /* Handle changes in FPU/MSA modes */ - preempt_disable(); - - /* - * Propagate FRE changes immediately if the FPU - * context is already loaded. - */ - if (change & MIPS_CONF5_FRE && - vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) - change_c0_config5(MIPS_CONF5_FRE, val); - - /* - * Propagate MSAEn changes immediately if the - * MSA context is already loaded. When disabling - * we leave the context loaded so it can be - * quickly enabled again in the near future. 
- */ - if (change & MIPS_CONF5_MSAEN && - vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) - change_c0_config5(MIPS_CONF5_MSAEN, - val); - - preempt_enable(); - - kvm_write_c0_guest_config5(cop0, val); - } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) { - u32 old_cause, new_cause; - - old_cause = kvm_read_c0_guest_cause(cop0); - new_cause = vcpu->arch.gprs[rt]; - /* Update R/W bits */ - kvm_change_c0_guest_cause(cop0, 0x08800300, - new_cause); - /* DC bit enabling/disabling timer? */ - if ((old_cause ^ new_cause) & CAUSEF_DC) { - if (new_cause & CAUSEF_DC) - kvm_mips_count_disable_cause(vcpu); - else - kvm_mips_count_enable_cause(vcpu); - } - } else if ((rd == MIPS_CP0_HWRENA) && (sel == 0)) { - u32 mask = MIPS_HWRENA_CPUNUM | - MIPS_HWRENA_SYNCISTEP | - MIPS_HWRENA_CC | - MIPS_HWRENA_CCRES; - - if (kvm_read_c0_guest_config3(cop0) & - MIPS_CONF3_ULRI) - mask |= MIPS_HWRENA_ULR; - cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask; - } else { - cop0->reg[rd][sel] = vcpu->arch.gprs[rt]; -#ifdef CONFIG_KVM_MIPS_DYN_TRANS - kvm_mips_trans_mtc0(inst, opc, vcpu); -#endif - } - break; - - case dmtc_op: - kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n", - vcpu->arch.pc, rt, rd, sel); - trace_kvm_hwr(vcpu, KVM_TRACE_DMTC0, - KVM_TRACE_COP0(rd, sel), - vcpu->arch.gprs[rt]); - er = EMULATE_FAIL; - break; - - case mfmc0_op: -#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS - cop0->stat[MIPS_CP0_STATUS][0]++; -#endif - if (rt != 0) - vcpu->arch.gprs[rt] = - kvm_read_c0_guest_status(cop0); - /* EI */ - if (inst.mfmc0_format.sc) { - kvm_debug("[%#lx] mfmc0_op: EI\n", - vcpu->arch.pc); - kvm_set_c0_guest_status(cop0, ST0_IE); - } else { - kvm_debug("[%#lx] mfmc0_op: DI\n", - vcpu->arch.pc); - kvm_clear_c0_guest_status(cop0, ST0_IE); - } - - break; - - case wrpgpr_op: - { - u32 css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf; - u32 pss = - (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf; - /* - * We don't support any shadow register sets, so - * SRSCtl[PSS] == SRSCtl[CSS] = 0 - */ - if (css || pss) { - er = EMULATE_FAIL; - break; - } - kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd, - vcpu->arch.gprs[rt]); - vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt]; - } - break; - default: - kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n", - vcpu->arch.pc, inst.c0r_format.rs); - er = EMULATE_FAIL; - break; - } - } - -done: - /* Rollback PC only if emulation was unsuccessful */ - if (er == EMULATE_FAIL) - vcpu->arch.pc = curr_pc; - -dont_update_pc: - /* - * This is for special instructions whose emulation - * updates the PC, so do not overwrite the PC under - * any circumstances - */ - - return er; -} - enum emulation_result kvm_mips_emulate_store(union mips_instruction inst, u32 cause, struct kvm_vcpu *vcpu) @@ -1623,7 +996,7 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst, goto out_fail; switch (inst.i_format.opcode) { -#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ) +#if defined(CONFIG_64BIT) case sd_op: run->mmio.len = 8; *(u64 *)data = vcpu->arch.gprs[rt]; @@ -1721,7 +1094,7 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst, vcpu->arch.gprs[rt], *(u32 *)data); break; -#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ) +#if defined(CONFIG_64BIT) case sdl_op: run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( vcpu->arch.host_cp0_badvaddr) & (~0x7); @@ -1928,7 +1301,7 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst, vcpu->mmio_needed = 2; /* signed */ switch (op) { -#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ) +#if 
defined(CONFIG_64BIT) case ld_op: run->mmio.len = 8; break; @@ -2003,7 +1376,7 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst, } break; -#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ) +#if defined(CONFIG_64BIT) case ldl_op: run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( vcpu->arch.host_cp0_badvaddr) & (~0x7); @@ -2135,815 +1508,6 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst, return EMULATE_DO_MMIO; } -#ifndef CONFIG_KVM_MIPS_VZ -static enum emulation_result kvm_mips_guest_cache_op(int (*fn)(unsigned long), - unsigned long curr_pc, - unsigned long addr, - struct kvm_vcpu *vcpu, - u32 cause) -{ - int err; - - for (;;) { - /* Carefully attempt the cache operation */ - kvm_trap_emul_gva_lockless_begin(vcpu); - err = fn(addr); - kvm_trap_emul_gva_lockless_end(vcpu); - - if (likely(!err)) - return EMULATE_DONE; - - /* - * Try to handle the fault and retry, maybe we just raced with a - * GVA invalidation. - */ - switch (kvm_trap_emul_gva_fault(vcpu, addr, false)) { - case KVM_MIPS_GVA: - case KVM_MIPS_GPA: - /* bad virtual or physical address */ - return EMULATE_FAIL; - case KVM_MIPS_TLB: - /* no matching guest TLB */ - vcpu->arch.host_cp0_badvaddr = addr; - vcpu->arch.pc = curr_pc; - kvm_mips_emulate_tlbmiss_ld(cause, NULL, vcpu); - return EMULATE_EXCEPT; - case KVM_MIPS_TLBINV: - /* invalid matching guest TLB */ - vcpu->arch.host_cp0_badvaddr = addr; - vcpu->arch.pc = curr_pc; - kvm_mips_emulate_tlbinv_ld(cause, NULL, vcpu); - return EMULATE_EXCEPT; - default: - break; - } - } -} - -enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst, - u32 *opc, u32 cause, - struct kvm_vcpu *vcpu) -{ - enum emulation_result er = EMULATE_DONE; - u32 cache, op_inst, op, base; - s16 offset; - struct kvm_vcpu_arch *arch = &vcpu->arch; - unsigned long va; - unsigned long curr_pc; - - /* - * Update PC and hold onto current PC in case there is - * an error and we want to rollback the PC - */ - curr_pc = vcpu->arch.pc; - er = update_pc(vcpu, cause); - if (er == EMULATE_FAIL) - return er; - - base = inst.i_format.rs; - op_inst = inst.i_format.rt; - if (cpu_has_mips_r6) - offset = inst.spec3_format.simmediate; - else - offset = inst.i_format.simmediate; - cache = op_inst & CacheOp_Cache; - op = op_inst & CacheOp_Op; - - va = arch->gprs[base] + offset; - - kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", - cache, op, base, arch->gprs[base], offset); - - /* - * Treat INDEX_INV as a nop, basically issued by Linux on startup to - * invalidate the caches entirely by stepping through all the - * ways/indexes - */ - if (op == Index_Writeback_Inv) { - kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", - vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base, - arch->gprs[base], offset); - - if (cache == Cache_D) { -#ifdef CONFIG_CPU_R4K_CACHE_TLB - r4k_blast_dcache(); -#else - switch (boot_cpu_type()) { - case CPU_CAVIUM_OCTEON3: - /* locally flush icache */ - local_flush_icache_range(0, 0); - break; - default: - __flush_cache_all(); - break; - } -#endif - } else if (cache == Cache_I) { -#ifdef CONFIG_CPU_R4K_CACHE_TLB - r4k_blast_icache(); -#else - switch (boot_cpu_type()) { - case CPU_CAVIUM_OCTEON3: - /* locally flush icache */ - local_flush_icache_range(0, 0); - break; - default: - flush_icache_all(); - break; - } -#endif - } else { - kvm_err("%s: unsupported CACHE INDEX operation\n", - __func__); - return EMULATE_FAIL; - } - -#ifdef CONFIG_KVM_MIPS_DYN_TRANS - 
kvm_mips_trans_cache_index(inst, opc, vcpu); -#endif - goto done; - } - - /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */ - if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) { - /* - * Perform the dcache part of icache synchronisation on the - * guest's behalf. - */ - er = kvm_mips_guest_cache_op(protected_writeback_dcache_line, - curr_pc, va, vcpu, cause); - if (er != EMULATE_DONE) - goto done; -#ifdef CONFIG_KVM_MIPS_DYN_TRANS - /* - * Replace the CACHE instruction, with a SYNCI, not the same, - * but avoids a trap - */ - kvm_mips_trans_cache_va(inst, opc, vcpu); -#endif - } else if (op_inst == Hit_Invalidate_I) { - /* Perform the icache synchronisation on the guest's behalf */ - er = kvm_mips_guest_cache_op(protected_writeback_dcache_line, - curr_pc, va, vcpu, cause); - if (er != EMULATE_DONE) - goto done; - er = kvm_mips_guest_cache_op(protected_flush_icache_line, - curr_pc, va, vcpu, cause); - if (er != EMULATE_DONE) - goto done; - -#ifdef CONFIG_KVM_MIPS_DYN_TRANS - /* Replace the CACHE instruction, with a SYNCI */ - kvm_mips_trans_cache_va(inst, opc, vcpu); -#endif - } else { - kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", - cache, op, base, arch->gprs[base], offset); - er = EMULATE_FAIL; - } - -done: - /* Rollback PC only if emulation was unsuccessful */ - if (er == EMULATE_FAIL) - vcpu->arch.pc = curr_pc; - /* Guest exception needs guest to resume */ - if (er == EMULATE_EXCEPT) - er = EMULATE_DONE; - - return er; -} - -enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc, - struct kvm_vcpu *vcpu) -{ - union mips_instruction inst; - enum emulation_result er = EMULATE_DONE; - int err; - - /* Fetch the instruction. */ - if (cause & CAUSEF_BD) - opc += 1; - err = kvm_get_badinstr(opc, vcpu, &inst.word); - if (err) - return EMULATE_FAIL; - - switch (inst.r_format.opcode) { - case cop0_op: - er = kvm_mips_emulate_CP0(inst, opc, cause, vcpu); - break; - -#ifndef CONFIG_CPU_MIPSR6 - case cache_op: - ++vcpu->stat.cache_exits; - trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE); - er = kvm_mips_emulate_cache(inst, opc, cause, vcpu); - break; -#else - case spec3_op: - switch (inst.spec3_format.func) { - case cache6_op: - ++vcpu->stat.cache_exits; - trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE); - er = kvm_mips_emulate_cache(inst, opc, cause, - vcpu); - break; - default: - goto unknown; - } - break; -unknown: -#endif - - default: - kvm_err("Instruction emulation not supported (%p/%#x)\n", opc, - inst.word); - kvm_arch_vcpu_dump_regs(vcpu); - er = EMULATE_FAIL; - break; - } - - return er; -} -#endif /* CONFIG_KVM_MIPS_VZ */ - -/** - * kvm_mips_guest_exception_base() - Find guest exception vector base address. - * - * Returns: The base address of the current guest exception vector, taking - * both Guest.CP0_Status.BEV and Guest.CP0_EBase into account. 
- */ -long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - - if (kvm_read_c0_guest_status(cop0) & ST0_BEV) - return KVM_GUEST_CKSEG1ADDR(0x1fc00200); - else - return kvm_read_c0_guest_ebase(cop0) & MIPS_EBASE_BASE; -} - -enum emulation_result kvm_mips_emulate_syscall(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_vcpu_arch *arch = &vcpu->arch; - enum emulation_result er = EMULATE_DONE; - - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { - /* save old pc */ - kvm_write_c0_guest_epc(cop0, arch->pc); - kvm_set_c0_guest_status(cop0, ST0_EXL); - - if (cause & CAUSEF_BD) - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); - else - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); - - kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc); - - kvm_change_c0_guest_cause(cop0, (0xff), - (EXCCODE_SYS << CAUSEB_EXCCODE)); - - /* Set PC to the exception entry point */ - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; - - } else { - kvm_err("Trying to deliver SYSCALL when EXL is already set\n"); - er = EMULATE_FAIL; - } - - return er; -} - -enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_vcpu_arch *arch = &vcpu->arch; - unsigned long entryhi = (vcpu->arch. host_cp0_badvaddr & VPN2_MASK) | - (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); - - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { - /* save old pc */ - kvm_write_c0_guest_epc(cop0, arch->pc); - kvm_set_c0_guest_status(cop0, ST0_EXL); - - if (cause & CAUSEF_BD) - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); - else - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); - - kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n", - arch->pc); - - /* set pc to the exception entry point */ - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0; - - } else { - kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n", - arch->pc); - - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; - } - - kvm_change_c0_guest_cause(cop0, (0xff), - (EXCCODE_TLBL << CAUSEB_EXCCODE)); - - /* setup badvaddr, context and entryhi registers for the guest */ - kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); - /* XXXKYMA: is the context register used by linux??? 
*/ - kvm_write_c0_guest_entryhi(cop0, entryhi); - - return EMULATE_DONE; -} - -enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_vcpu_arch *arch = &vcpu->arch; - unsigned long entryhi = - (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | - (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); - - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { - /* save old pc */ - kvm_write_c0_guest_epc(cop0, arch->pc); - kvm_set_c0_guest_status(cop0, ST0_EXL); - - if (cause & CAUSEF_BD) - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); - else - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); - - kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n", - arch->pc); - } else { - kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n", - arch->pc); - } - - /* set pc to the exception entry point */ - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; - - kvm_change_c0_guest_cause(cop0, (0xff), - (EXCCODE_TLBL << CAUSEB_EXCCODE)); - - /* setup badvaddr, context and entryhi registers for the guest */ - kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); - /* XXXKYMA: is the context register used by linux??? */ - kvm_write_c0_guest_entryhi(cop0, entryhi); - - return EMULATE_DONE; -} - -enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_vcpu_arch *arch = &vcpu->arch; - unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | - (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); - - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { - /* save old pc */ - kvm_write_c0_guest_epc(cop0, arch->pc); - kvm_set_c0_guest_status(cop0, ST0_EXL); - - if (cause & CAUSEF_BD) - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); - else - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); - - kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n", - arch->pc); - - /* Set PC to the exception entry point */ - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0; - } else { - kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", - arch->pc); - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; - } - - kvm_change_c0_guest_cause(cop0, (0xff), - (EXCCODE_TLBS << CAUSEB_EXCCODE)); - - /* setup badvaddr, context and entryhi registers for the guest */ - kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); - /* XXXKYMA: is the context register used by linux??? 
*/ - kvm_write_c0_guest_entryhi(cop0, entryhi); - - return EMULATE_DONE; -} - -enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_vcpu_arch *arch = &vcpu->arch; - unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | - (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); - - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { - /* save old pc */ - kvm_write_c0_guest_epc(cop0, arch->pc); - kvm_set_c0_guest_status(cop0, ST0_EXL); - - if (cause & CAUSEF_BD) - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); - else - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); - - kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n", - arch->pc); - } else { - kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", - arch->pc); - } - - /* Set PC to the exception entry point */ - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; - - kvm_change_c0_guest_cause(cop0, (0xff), - (EXCCODE_TLBS << CAUSEB_EXCCODE)); - - /* setup badvaddr, context and entryhi registers for the guest */ - kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); - /* XXXKYMA: is the context register used by linux??? */ - kvm_write_c0_guest_entryhi(cop0, entryhi); - - return EMULATE_DONE; -} - -enum emulation_result kvm_mips_emulate_tlbmod(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | - (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); - struct kvm_vcpu_arch *arch = &vcpu->arch; - - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { - /* save old pc */ - kvm_write_c0_guest_epc(cop0, arch->pc); - kvm_set_c0_guest_status(cop0, ST0_EXL); - - if (cause & CAUSEF_BD) - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); - else - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); - - kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n", - arch->pc); - } else { - kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n", - arch->pc); - } - - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; - - kvm_change_c0_guest_cause(cop0, (0xff), - (EXCCODE_MOD << CAUSEB_EXCCODE)); - - /* setup badvaddr, context and entryhi registers for the guest */ - kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); - /* XXXKYMA: is the context register used by linux??? 
*/ - kvm_write_c0_guest_entryhi(cop0, entryhi); - - return EMULATE_DONE; -} - -enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_vcpu_arch *arch = &vcpu->arch; - - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { - /* save old pc */ - kvm_write_c0_guest_epc(cop0, arch->pc); - kvm_set_c0_guest_status(cop0, ST0_EXL); - - if (cause & CAUSEF_BD) - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); - else - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); - - } - - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; - - kvm_change_c0_guest_cause(cop0, (0xff), - (EXCCODE_CPU << CAUSEB_EXCCODE)); - kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE)); - - return EMULATE_DONE; -} - -enum emulation_result kvm_mips_emulate_ri_exc(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_vcpu_arch *arch = &vcpu->arch; - enum emulation_result er = EMULATE_DONE; - - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { - /* save old pc */ - kvm_write_c0_guest_epc(cop0, arch->pc); - kvm_set_c0_guest_status(cop0, ST0_EXL); - - if (cause & CAUSEF_BD) - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); - else - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); - - kvm_debug("Delivering RI @ pc %#lx\n", arch->pc); - - kvm_change_c0_guest_cause(cop0, (0xff), - (EXCCODE_RI << CAUSEB_EXCCODE)); - - /* Set PC to the exception entry point */ - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; - - } else { - kvm_err("Trying to deliver RI when EXL is already set\n"); - er = EMULATE_FAIL; - } - - return er; -} - -enum emulation_result kvm_mips_emulate_bp_exc(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_vcpu_arch *arch = &vcpu->arch; - enum emulation_result er = EMULATE_DONE; - - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { - /* save old pc */ - kvm_write_c0_guest_epc(cop0, arch->pc); - kvm_set_c0_guest_status(cop0, ST0_EXL); - - if (cause & CAUSEF_BD) - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); - else - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); - - kvm_debug("Delivering BP @ pc %#lx\n", arch->pc); - - kvm_change_c0_guest_cause(cop0, (0xff), - (EXCCODE_BP << CAUSEB_EXCCODE)); - - /* Set PC to the exception entry point */ - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; - - } else { - kvm_err("Trying to deliver BP when EXL is already set\n"); - er = EMULATE_FAIL; - } - - return er; -} - -enum emulation_result kvm_mips_emulate_trap_exc(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_vcpu_arch *arch = &vcpu->arch; - enum emulation_result er = EMULATE_DONE; - - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { - /* save old pc */ - kvm_write_c0_guest_epc(cop0, arch->pc); - kvm_set_c0_guest_status(cop0, ST0_EXL); - - if (cause & CAUSEF_BD) - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); - else - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); - - kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc); - - kvm_change_c0_guest_cause(cop0, (0xff), - (EXCCODE_TR << CAUSEB_EXCCODE)); - - /* Set PC to the exception entry point */ - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; - - } else { - kvm_err("Trying to deliver TRAP when EXL is already set\n"); - er = EMULATE_FAIL; - } - - return er; -} - -enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = 
vcpu->arch.cop0; - struct kvm_vcpu_arch *arch = &vcpu->arch; - enum emulation_result er = EMULATE_DONE; - - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { - /* save old pc */ - kvm_write_c0_guest_epc(cop0, arch->pc); - kvm_set_c0_guest_status(cop0, ST0_EXL); - - if (cause & CAUSEF_BD) - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); - else - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); - - kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc); - - kvm_change_c0_guest_cause(cop0, (0xff), - (EXCCODE_MSAFPE << CAUSEB_EXCCODE)); - - /* Set PC to the exception entry point */ - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; - - } else { - kvm_err("Trying to deliver MSAFPE when EXL is already set\n"); - er = EMULATE_FAIL; - } - - return er; -} - -enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_vcpu_arch *arch = &vcpu->arch; - enum emulation_result er = EMULATE_DONE; - - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { - /* save old pc */ - kvm_write_c0_guest_epc(cop0, arch->pc); - kvm_set_c0_guest_status(cop0, ST0_EXL); - - if (cause & CAUSEF_BD) - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); - else - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); - - kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc); - - kvm_change_c0_guest_cause(cop0, (0xff), - (EXCCODE_FPE << CAUSEB_EXCCODE)); - - /* Set PC to the exception entry point */ - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; - - } else { - kvm_err("Trying to deliver FPE when EXL is already set\n"); - er = EMULATE_FAIL; - } - - return er; -} - -enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_vcpu_arch *arch = &vcpu->arch; - enum emulation_result er = EMULATE_DONE; - - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { - /* save old pc */ - kvm_write_c0_guest_epc(cop0, arch->pc); - kvm_set_c0_guest_status(cop0, ST0_EXL); - - if (cause & CAUSEF_BD) - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); - else - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); - - kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc); - - kvm_change_c0_guest_cause(cop0, (0xff), - (EXCCODE_MSADIS << CAUSEB_EXCCODE)); - - /* Set PC to the exception entry point */ - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; - - } else { - kvm_err("Trying to deliver MSADIS when EXL is already set\n"); - er = EMULATE_FAIL; - } - - return er; -} - -enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc, - struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_vcpu_arch *arch = &vcpu->arch; - enum emulation_result er = EMULATE_DONE; - unsigned long curr_pc; - union mips_instruction inst; - int err; - - /* - * Update PC and hold onto current PC in case there is - * an error and we want to rollback the PC - */ - curr_pc = vcpu->arch.pc; - er = update_pc(vcpu, cause); - if (er == EMULATE_FAIL) - return er; - - /* Fetch the instruction. 
*/ - if (cause & CAUSEF_BD) - opc += 1; - err = kvm_get_badinstr(opc, vcpu, &inst.word); - if (err) { - kvm_err("%s: Cannot get inst @ %p (%d)\n", __func__, opc, err); - return EMULATE_FAIL; - } - - if (inst.r_format.opcode == spec3_op && - inst.r_format.func == rdhwr_op && - inst.r_format.rs == 0 && - (inst.r_format.re >> 3) == 0) { - int usermode = !KVM_GUEST_KERNEL_MODE(vcpu); - int rd = inst.r_format.rd; - int rt = inst.r_format.rt; - int sel = inst.r_format.re & 0x7; - - /* If usermode, check RDHWR rd is allowed by guest HWREna */ - if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) { - kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n", - rd, opc); - goto emulate_ri; - } - switch (rd) { - case MIPS_HWR_CPUNUM: /* CPU number */ - arch->gprs[rt] = vcpu->vcpu_id; - break; - case MIPS_HWR_SYNCISTEP: /* SYNCI length */ - arch->gprs[rt] = min(current_cpu_data.dcache.linesz, - current_cpu_data.icache.linesz); - break; - case MIPS_HWR_CC: /* Read count register */ - arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu); - break; - case MIPS_HWR_CCRES: /* Count register resolution */ - switch (current_cpu_data.cputype) { - case CPU_20KC: - case CPU_25KF: - arch->gprs[rt] = 1; - break; - default: - arch->gprs[rt] = 2; - } - break; - case MIPS_HWR_ULR: /* Read UserLocal register */ - arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0); - break; - - default: - kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc); - goto emulate_ri; - } - - trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, KVM_TRACE_HWR(rd, sel), - vcpu->arch.gprs[rt]); - } else { - kvm_debug("Emulate RI not supported @ %p: %#x\n", - opc, inst.word); - goto emulate_ri; - } - - return EMULATE_DONE; - -emulate_ri: - /* - * Rollback PC (if in branch delay slot then the PC already points to - * branch target), and pass the RI exception to the guest OS. 
- */ - vcpu->arch.pc = curr_pc; - return kvm_mips_emulate_ri_exc(cause, opc, vcpu); -} - enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; @@ -3086,207 +1650,3 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu) done: return er; } - -static enum emulation_result kvm_mips_emulate_exc(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu) -{ - u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_vcpu_arch *arch = &vcpu->arch; - enum emulation_result er = EMULATE_DONE; - - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { - /* save old pc */ - kvm_write_c0_guest_epc(cop0, arch->pc); - kvm_set_c0_guest_status(cop0, ST0_EXL); - - if (cause & CAUSEF_BD) - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); - else - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); - - kvm_change_c0_guest_cause(cop0, (0xff), - (exccode << CAUSEB_EXCCODE)); - - /* Set PC to the exception entry point */ - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; - kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); - - kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n", - exccode, kvm_read_c0_guest_epc(cop0), - kvm_read_c0_guest_badvaddr(cop0)); - } else { - kvm_err("Trying to deliver EXC when EXL is already set\n"); - er = EMULATE_FAIL; - } - - return er; -} - -enum emulation_result kvm_mips_check_privilege(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu) -{ - enum emulation_result er = EMULATE_DONE; - u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; - unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; - - int usermode = !KVM_GUEST_KERNEL_MODE(vcpu); - - if (usermode) { - switch (exccode) { - case EXCCODE_INT: - case EXCCODE_SYS: - case EXCCODE_BP: - case EXCCODE_RI: - case EXCCODE_TR: - case EXCCODE_MSAFPE: - case EXCCODE_FPE: - case EXCCODE_MSADIS: - break; - - case EXCCODE_CPU: - if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0) - er = EMULATE_PRIV_FAIL; - break; - - case EXCCODE_MOD: - break; - - case EXCCODE_TLBL: - /* - * We we are accessing Guest kernel space, then send an - * address error exception to the guest - */ - if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { - kvm_debug("%s: LD MISS @ %#lx\n", __func__, - badvaddr); - cause &= ~0xff; - cause |= (EXCCODE_ADEL << CAUSEB_EXCCODE); - er = EMULATE_PRIV_FAIL; - } - break; - - case EXCCODE_TLBS: - /* - * We we are accessing Guest kernel space, then send an - * address error exception to the guest - */ - if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { - kvm_debug("%s: ST MISS @ %#lx\n", __func__, - badvaddr); - cause &= ~0xff; - cause |= (EXCCODE_ADES << CAUSEB_EXCCODE); - er = EMULATE_PRIV_FAIL; - } - break; - - case EXCCODE_ADES: - kvm_debug("%s: address error ST @ %#lx\n", __func__, - badvaddr); - if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { - cause &= ~0xff; - cause |= (EXCCODE_TLBS << CAUSEB_EXCCODE); - } - er = EMULATE_PRIV_FAIL; - break; - case EXCCODE_ADEL: - kvm_debug("%s: address error LD @ %#lx\n", __func__, - badvaddr); - if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { - cause &= ~0xff; - cause |= (EXCCODE_TLBL << CAUSEB_EXCCODE); - } - er = EMULATE_PRIV_FAIL; - break; - default: - er = EMULATE_PRIV_FAIL; - break; - } - } - - if (er == EMULATE_PRIV_FAIL) - kvm_mips_emulate_exc(cause, opc, vcpu); - - return er; -} - -/* - * User Address (UA) fault, this could happen if - * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this - * case we pass on the fault to the guest 
kernel and let it handle it. - * (2) TLB entry is present in the Guest TLB but not in the shadow, in this - * case we inject the TLB from the Guest TLB into the shadow host TLB - */ -enum emulation_result kvm_mips_handle_tlbmiss(u32 cause, - u32 *opc, - struct kvm_vcpu *vcpu, - bool write_fault) -{ - enum emulation_result er = EMULATE_DONE; - u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; - unsigned long va = vcpu->arch.host_cp0_badvaddr; - int index; - - kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx\n", - vcpu->arch.host_cp0_badvaddr); - - /* - * KVM would not have got the exception if this entry was valid in the - * shadow host TLB. Check the Guest TLB, if the entry is not there then - * send the guest an exception. The guest exc handler should then inject - * an entry into the guest TLB. - */ - index = kvm_mips_guest_tlb_lookup(vcpu, - (va & VPN2_MASK) | - (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) & - KVM_ENTRYHI_ASID)); - if (index < 0) { - if (exccode == EXCCODE_TLBL) { - er = kvm_mips_emulate_tlbmiss_ld(cause, opc, vcpu); - } else if (exccode == EXCCODE_TLBS) { - er = kvm_mips_emulate_tlbmiss_st(cause, opc, vcpu); - } else { - kvm_err("%s: invalid exc code: %d\n", __func__, - exccode); - er = EMULATE_FAIL; - } - } else { - struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; - - /* - * Check if the entry is valid, if not then setup a TLB invalid - * exception to the guest - */ - if (!TLB_IS_VALID(*tlb, va)) { - if (exccode == EXCCODE_TLBL) { - er = kvm_mips_emulate_tlbinv_ld(cause, opc, - vcpu); - } else if (exccode == EXCCODE_TLBS) { - er = kvm_mips_emulate_tlbinv_st(cause, opc, - vcpu); - } else { - kvm_err("%s: invalid exc code: %d\n", __func__, - exccode); - er = EMULATE_FAIL; - } - } else { - kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n", - tlb->tlb_hi, tlb->tlb_lo[0], tlb->tlb_lo[1]); - /* - * OK we have a Guest TLB entry, now inject it into the - * shadow host TLB - */ - if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, va, - write_fault)) { - kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n", - __func__, va, index, vcpu, - read_c0_entryhi()); - er = EMULATE_FAIL; - } - } - } - - return er; -} diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c index 832475bf2055..8131fb2bdf97 100644 --- a/arch/mips/kvm/entry.c +++ b/arch/mips/kvm/entry.c @@ -305,7 +305,6 @@ static void *kvm_mips_build_enter_guest(void *addr) UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1); UASM_i_MTC0(&p, T0, C0_EPC); -#ifdef CONFIG_KVM_MIPS_VZ /* Save normal linux process pgd (VZ guarantees pgd_reg is set) */ if (cpu_has_ldpte) UASM_i_MFC0(&p, K0, C0_PWBASE); @@ -367,21 +366,6 @@ static void *kvm_mips_build_enter_guest(void *addr) /* Set the root ASID for the Guest */ UASM_i_ADDIU(&p, T1, S0, offsetof(struct kvm, arch.gpa_mm.context.asid)); -#else - /* Set the ASID for the Guest Kernel or User */ - UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1); - UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]), - T0); - uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL); - uasm_i_xori(&p, T0, T0, KSU_USER); - uasm_il_bnez(&p, &r, T0, label_kernel_asid); - UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch, - guest_kernel_mm.context.asid)); - /* else user */ - UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch, - guest_user_mm.context.asid)); - uasm_l_kernel_asid(&l, p); -#endif /* t1: contains the base of the ASID array, need to get the cpu id */ /* smp_processor_id */ @@ -406,24 
+390,9 @@ static void *kvm_mips_build_enter_guest(void *addr) uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID); #endif -#ifndef CONFIG_KVM_MIPS_VZ - /* - * Set up KVM T&E GVA pgd. - * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD(): - * - call tlbmiss_handler_setup_pgd(mm->pgd) - * - but skips write into CP0_PWBase for now - */ - UASM_i_LW(&p, A0, (int)offsetof(struct mm_struct, pgd) - - (int)offsetof(struct mm_struct, context.asid), T1); - - UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd); - uasm_i_jalr(&p, RA, T9); - uasm_i_mtc0(&p, K0, C0_ENTRYHI); -#else /* Set up KVM VZ root ASID (!guestid) */ uasm_i_mtc0(&p, K0, C0_ENTRYHI); skip_asid_restore: -#endif uasm_i_ehb(&p); /* Disable RDHWR access */ @@ -720,7 +689,6 @@ void *kvm_mips_build_exit(void *addr) uasm_l_msa_1(&l, p); } -#ifdef CONFIG_KVM_MIPS_VZ /* Restore host ASID */ if (!cpu_has_guestid) { UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi), @@ -764,7 +732,6 @@ void *kvm_mips_build_exit(void *addr) MIPS_GCTL1_RID_WIDTH); uasm_i_mtc0(&p, T0, C0_GUESTCTL1); } -#endif /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */ uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE)); diff --git a/arch/mips/kvm/interrupt.c b/arch/mips/kvm/interrupt.c index d28c2c9c343e..0277942279ea 100644 --- a/arch/mips/kvm/interrupt.c +++ b/arch/mips/kvm/interrupt.c @@ -21,119 +21,6 @@ #include "interrupt.h" -void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority) -{ - set_bit(priority, &vcpu->arch.pending_exceptions); -} - -void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority) -{ - clear_bit(priority, &vcpu->arch.pending_exceptions); -} - -void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu) -{ - /* - * Cause bits to reflect the pending timer interrupt, - * the EXC code will be set when we are actually - * delivering the interrupt: - */ - kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI)); - - /* Queue up an INT exception for the core */ - kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER); - -} - -void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu) -{ - kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI)); - kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER); -} - -void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, - struct kvm_mips_interrupt *irq) -{ - int intr = (int)irq->irq; - - /* - * Cause bits to reflect the pending IO interrupt, - * the EXC code will be set when we are actually - * delivering the interrupt: - */ - kvm_set_c0_guest_cause(vcpu->arch.cop0, 1 << (intr + 8)); - kvm_mips_queue_irq(vcpu, kvm_irq_to_priority(intr)); -} - -void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu, - struct kvm_mips_interrupt *irq) -{ - int intr = (int)irq->irq; - - kvm_clear_c0_guest_cause(vcpu->arch.cop0, 1 << (-intr + 8)); - kvm_mips_dequeue_irq(vcpu, kvm_irq_to_priority(-intr)); -} - -/* Deliver the interrupt of the corresponding priority, if possible. */ -int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, - u32 cause) -{ - int allowed = 0; - u32 exccode, ie; - - struct kvm_vcpu_arch *arch = &vcpu->arch; - struct mips_coproc *cop0 = vcpu->arch.cop0; - - if (priority == MIPS_EXC_MAX) - return 0; - - ie = 1 << (kvm_priority_to_irq[priority] + 8); - if ((kvm_read_c0_guest_status(cop0) & ST0_IE) - && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL))) - && (kvm_read_c0_guest_status(cop0) & ie)) { - allowed = 1; - exccode = EXCCODE_INT; - } - - /* Are we allowed to deliver the interrupt ??? 
*/ - if (allowed) { - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { - /* save old pc */ - kvm_write_c0_guest_epc(cop0, arch->pc); - kvm_set_c0_guest_status(cop0, ST0_EXL); - - if (cause & CAUSEF_BD) - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); - else - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); - - kvm_debug("Delivering INT @ pc %#lx\n", arch->pc); - - } else - kvm_err("Trying to deliver interrupt when EXL is already set\n"); - - kvm_change_c0_guest_cause(cop0, CAUSEF_EXCCODE, - (exccode << CAUSEB_EXCCODE)); - - /* XXXSL Set PC to the interrupt exception entry point */ - arch->pc = kvm_mips_guest_exception_base(vcpu); - if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV) - arch->pc += 0x200; - else - arch->pc += 0x180; - - clear_bit(priority, &vcpu->arch.pending_exceptions); - } - - return allowed; -} - -int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority, - u32 cause) -{ - return 1; -} - void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause) { unsigned long *pending = &vcpu->arch.pending_exceptions; @@ -145,10 +32,7 @@ void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause) priority = __ffs(*pending_clr); while (priority <= MIPS_EXC_MAX) { - if (kvm_mips_callbacks->irq_clear(vcpu, priority, cause)) { - if (!KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE) - break; - } + kvm_mips_callbacks->irq_clear(vcpu, priority, cause); priority = find_next_bit(pending_clr, BITS_PER_BYTE * sizeof(*pending_clr), @@ -157,10 +41,7 @@ void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause) priority = __ffs(*pending); while (priority <= MIPS_EXC_MAX) { - if (kvm_mips_callbacks->irq_deliver(vcpu, priority, cause)) { - if (!KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE) - break; - } + kvm_mips_callbacks->irq_deliver(vcpu, priority, cause); priority = find_next_bit(pending, BITS_PER_BYTE * sizeof(*pending), diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h index c3e878ca3e07..e529ea2bb34b 100644 --- a/arch/mips/kvm/interrupt.h +++ b/arch/mips/kvm/interrupt.h @@ -31,29 +31,9 @@ #define C_TI (_ULCAST_(1) << 30) -#ifdef CONFIG_KVM_MIPS_VZ -#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (1) -#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (1) -#else -#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0) -#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (0) -#endif - extern u32 *kvm_priority_to_irq; u32 kvm_irq_to_priority(u32 irq); -void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority); -void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority); int kvm_mips_pending_timer(struct kvm_vcpu *vcpu); -void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu); -void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu); -void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, - struct kvm_mips_interrupt *irq); -void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu, - struct kvm_mips_interrupt *irq); -int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, - u32 cause); -int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority, - u32 cause); void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause); diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 58a8812e2fa5..29d37ba1bea2 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -30,7 +30,6 @@ #include <linux/kvm_host.h> #include "interrupt.h" -#include "commpage.h" #define CREATE_TRACE_POINTS #include "trace.h" @@ -58,7 +57,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { VCPU_STAT("fpe", fpe_exits), VCPU_STAT("msa_disabled", msa_disabled_exits), 
VCPU_STAT("flush_dcache", flush_dcache_exits), -#ifdef CONFIG_KVM_MIPS_VZ VCPU_STAT("vz_gpsi", vz_gpsi_exits), VCPU_STAT("vz_gsfc", vz_gsfc_exits), VCPU_STAT("vz_hc", vz_hc_exits), @@ -70,7 +68,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { #ifdef CONFIG_CPU_LOONGSON64 VCPU_STAT("vz_cpucfg", vz_cpucfg_exits), #endif -#endif VCPU_STAT("halt_successful_poll", halt_successful_poll), VCPU_STAT("halt_attempted_poll", halt_attempted_poll), VCPU_STAT("halt_poll_invalid", halt_poll_invalid), @@ -139,11 +136,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) switch (type) { case KVM_VM_MIPS_AUTO: break; -#ifdef CONFIG_KVM_MIPS_VZ case KVM_VM_MIPS_VZ: -#else - case KVM_VM_MIPS_TE: -#endif break; default: /* Unsupported KVM type */ @@ -361,7 +354,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) /* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */ refill_start = gebase; - if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && IS_ENABLED(CONFIG_64BIT)) + if (IS_ENABLED(CONFIG_64BIT)) refill_start += 0x080; refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler); @@ -397,20 +390,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) flush_icache_range((unsigned long)gebase, (unsigned long)gebase + ALIGN(size, PAGE_SIZE)); - /* - * Allocate comm page for guest kernel, a TLB will be reserved for - * mapping GVA @ 0xFFFF8000 to this page - */ - vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL); - - if (!vcpu->arch.kseg0_commpage) { - err = -ENOMEM; - goto out_free_gebase; - } - - kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage); - kvm_mips_commpage_init(vcpu); - /* Init */ vcpu->arch.last_sched_cpu = -1; vcpu->arch.last_exec_cpu = -1; @@ -418,12 +397,10 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) /* Initial guest state */ err = kvm_mips_callbacks->vcpu_setup(vcpu); if (err) - goto out_free_commpage; + goto out_free_gebase; return 0; -out_free_commpage: - kfree(vcpu->arch.kseg0_commpage); out_free_gebase: kfree(gebase); out_uninit_vcpu: @@ -439,7 +416,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) kvm_mmu_free_memory_caches(vcpu); kfree(vcpu->arch.guest_ebase); - kfree(vcpu->arch.kseg0_commpage); kvm_mips_callbacks->vcpu_uninit(vcpu); } @@ -1212,10 +1188,6 @@ int kvm_mips_handle_exit(struct kvm_vcpu *vcpu) vcpu->mode = OUTSIDE_GUEST_MODE; - /* re-enable HTW before enabling interrupts */ - if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) - htw_start(); - /* Set a default exit reason */ run->exit_reason = KVM_EXIT_UNKNOWN; run->ready_for_interrupt_injection = 1; @@ -1232,22 +1204,6 @@ int kvm_mips_handle_exit(struct kvm_vcpu *vcpu) cause, opc, run, vcpu); trace_kvm_exit(vcpu, exccode); - if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) { - /* - * Do a privilege check, if in UM most of these exit conditions - * end up causing an exception to be delivered to the Guest - * Kernel - */ - er = kvm_mips_check_privilege(cause, opc, vcpu); - if (er == EMULATE_PRIV_FAIL) { - goto skip_emul; - } else if (er == EMULATE_FAIL) { - run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - ret = RESUME_HOST; - goto skip_emul; - } - } - switch (exccode) { case EXCCODE_INT: kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc); @@ -1357,7 +1313,6 @@ int kvm_mips_handle_exit(struct kvm_vcpu *vcpu) } -skip_emul: local_irq_disable(); if (ret == RESUME_GUEST) @@ -1406,11 +1361,6 @@ skip_emul: read_c0_config5() & MIPS_CONF5_MSAEN) __kvm_restore_msacsr(&vcpu->arch); } - - /* Disable HTW before returning to guest or host */ - if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) - htw_stop(); - return ret; } 
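
The kvm_own_fpu()/kvm_lose_fpu() hunks that follow only drop the trap-and-emulate branches; the lazy-ownership bookkeeping itself is unchanged: a per-vcpu aux_inuse bitmask records whether guest FPU and/or MSA state is live in hardware, the state is written back when the hardware context is lost and simply discarded when a mode change (such as flipping Status.FR) makes it unpredictable. The standalone model below is a reduced sketch of that scheme, not the kernel implementation; the flag values and the save/restore stubs are assumptions for illustration.

#include <stdio.h>

/*
 * Reduced model (not the kernel implementation) of the lazy FPU/MSA
 * ownership flags: aux_inuse records which guest register state is
 * currently live in hardware, so it is only saved when the guest has
 * actually used it, and discarded outright when it becomes invalid.
 */
#define AUX_FPU 0x1
#define AUX_MSA 0x2

struct vcpu {
        unsigned int aux_inuse;
};

static void save_fpu(void) { printf("write back FPU registers\n"); }
static void save_msa(void) { printf("write back MSA registers\n"); }

/* Guest touched the FPU: restore its state and mark it live. */
static void own_fpu(struct vcpu *v)
{
        printf("restore FPU registers\n");
        v->aux_inuse |= AUX_FPU;
}

/* The hardware context is being taken away: write back whatever is live. */
static void lose_fpu(struct vcpu *v)
{
        if (v->aux_inuse & AUX_MSA) {
                /* the MSA vector registers contain the FP registers too */
                save_msa();
                v->aux_inuse &= ~(AUX_FPU | AUX_MSA);
        } else if (v->aux_inuse & AUX_FPU) {
                save_fpu();
                v->aux_inuse &= ~AUX_FPU;
        }
}

/* A mode change (e.g. a Status.FR flip) made the live state unpredictable. */
static void drop_fpu(struct vcpu *v)
{
        v->aux_inuse &= ~(AUX_FPU | AUX_MSA);
}

int main(void)
{
        struct vcpu v = { 0 };

        own_fpu(&v);
        lose_fpu(&v);   /* state was live, so it gets written back */
        drop_fpu(&v);   /* nothing live any more, nothing to discard */
        return 0;
}
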
@@ -1429,10 +1379,6 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu) * FR=0 FPU state, and we don't want to hit reserved instruction * exceptions trying to save the MSA state later when CU=1 && FR=1, so * play it safe and save it first. - * - * In theory we shouldn't ever hit this case since kvm_lose_fpu() should - * get called when guest CU1 is set, however we can't trust the guest - * not to clobber the status register directly via the commpage. */ if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) @@ -1553,11 +1499,6 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu) preempt_disable(); if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { - if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) { - set_c0_config5(MIPS_CONF5_MSAEN); - enable_fpu_hazard(); - } - __kvm_save_msa(&vcpu->arch); trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA); @@ -1569,11 +1510,6 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu) } vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA); } else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { - if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) { - set_c0_status(ST0_CU1); - enable_fpu_hazard(); - } - __kvm_save_fpu(&vcpu->arch); vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU; trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU); diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c index 3dabeda82458..190ca2451851 100644 --- a/arch/mips/kvm/mmu.c +++ b/arch/mips/kvm/mmu.c @@ -756,209 +756,6 @@ out: return err; } -static pte_t *kvm_trap_emul_pte_for_gva(struct kvm_vcpu *vcpu, - unsigned long addr) -{ - struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; - pgd_t *pgdp; - int ret; - - /* We need a minimum of cached pages ready for page table creation */ - ret = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES); - if (ret) - return NULL; - - if (KVM_GUEST_KERNEL_MODE(vcpu)) - pgdp = vcpu->arch.guest_kernel_mm.pgd; - else - pgdp = vcpu->arch.guest_user_mm.pgd; - - return kvm_mips_walk_pgd(pgdp, memcache, addr); -} - -void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr, - bool user) -{ - pgd_t *pgdp; - pte_t *ptep; - - addr &= PAGE_MASK << 1; - - pgdp = vcpu->arch.guest_kernel_mm.pgd; - ptep = kvm_mips_walk_pgd(pgdp, NULL, addr); - if (ptep) { - ptep[0] = pfn_pte(0, __pgprot(0)); - ptep[1] = pfn_pte(0, __pgprot(0)); - } - - if (user) { - pgdp = vcpu->arch.guest_user_mm.pgd; - ptep = kvm_mips_walk_pgd(pgdp, NULL, addr); - if (ptep) { - ptep[0] = pfn_pte(0, __pgprot(0)); - ptep[1] = pfn_pte(0, __pgprot(0)); - } - } -} - -/* - * kvm_mips_flush_gva_{pte,pmd,pud,pgd,pt}. - * Flush a range of guest physical address space from the VM's GPA page tables. - */ - -static bool kvm_mips_flush_gva_pte(pte_t *pte, unsigned long start_gva, - unsigned long end_gva) -{ - int i_min = pte_index(start_gva); - int i_max = pte_index(end_gva); - bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1); - int i; - - /* - * There's no freeing to do, so there's no point clearing individual - * entries unless only part of the last level page table needs flushing. 
- */ - if (safe_to_remove) - return true; - - for (i = i_min; i <= i_max; ++i) { - if (!pte_present(pte[i])) - continue; - - set_pte(pte + i, __pte(0)); - } - return false; -} - -static bool kvm_mips_flush_gva_pmd(pmd_t *pmd, unsigned long start_gva, - unsigned long end_gva) -{ - pte_t *pte; - unsigned long end = ~0ul; - int i_min = pmd_index(start_gva); - int i_max = pmd_index(end_gva); - bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1); - int i; - - for (i = i_min; i <= i_max; ++i, start_gva = 0) { - if (!pmd_present(pmd[i])) - continue; - - pte = pte_offset_kernel(pmd + i, 0); - if (i == i_max) - end = end_gva; - - if (kvm_mips_flush_gva_pte(pte, start_gva, end)) { - pmd_clear(pmd + i); - pte_free_kernel(NULL, pte); - } else { - safe_to_remove = false; - } - } - return safe_to_remove; -} - -static bool kvm_mips_flush_gva_pud(pud_t *pud, unsigned long start_gva, - unsigned long end_gva) -{ - pmd_t *pmd; - unsigned long end = ~0ul; - int i_min = pud_index(start_gva); - int i_max = pud_index(end_gva); - bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1); - int i; - - for (i = i_min; i <= i_max; ++i, start_gva = 0) { - if (!pud_present(pud[i])) - continue; - - pmd = pmd_offset(pud + i, 0); - if (i == i_max) - end = end_gva; - - if (kvm_mips_flush_gva_pmd(pmd, start_gva, end)) { - pud_clear(pud + i); - pmd_free(NULL, pmd); - } else { - safe_to_remove = false; - } - } - return safe_to_remove; -} - -static bool kvm_mips_flush_gva_pgd(pgd_t *pgd, unsigned long start_gva, - unsigned long end_gva) -{ - p4d_t *p4d; - pud_t *pud; - unsigned long end = ~0ul; - int i_min = pgd_index(start_gva); - int i_max = pgd_index(end_gva); - bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1); - int i; - - for (i = i_min; i <= i_max; ++i, start_gva = 0) { - if (!pgd_present(pgd[i])) - continue; - - p4d = p4d_offset(pgd, 0); - pud = pud_offset(p4d + i, 0); - if (i == i_max) - end = end_gva; - - if (kvm_mips_flush_gva_pud(pud, start_gva, end)) { - pgd_clear(pgd + i); - pud_free(NULL, pud); - } else { - safe_to_remove = false; - } - } - return safe_to_remove; -} - -void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags) -{ - if (flags & KMF_GPA) { - /* all of guest virtual address space could be affected */ - if (flags & KMF_KERN) - /* useg, kseg0, seg2/3 */ - kvm_mips_flush_gva_pgd(pgd, 0, 0x7fffffff); - else - /* useg */ - kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff); - } else { - /* useg */ - kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff); - - /* kseg2/3 */ - if (flags & KMF_KERN) - kvm_mips_flush_gva_pgd(pgd, 0x60000000, 0x7fffffff); - } -} - -static pte_t kvm_mips_gpa_pte_to_gva_unmapped(pte_t pte) -{ - /* - * Don't leak writeable but clean entries from GPA page tables. We don't - * want the normal Linux tlbmod handler to handle dirtying when KVM - * accesses guest memory. 
- */ - if (!pte_dirty(pte)) - pte = pte_wrprotect(pte); - - return pte; -} - -static pte_t kvm_mips_gpa_pte_to_gva_mapped(pte_t pte, long entrylo) -{ - /* Guest EntryLo overrides host EntryLo */ - if (!(entrylo & ENTRYLO_D)) - pte = pte_mkclean(pte); - - return kvm_mips_gpa_pte_to_gva_unmapped(pte); -} - -#ifdef CONFIG_KVM_MIPS_VZ int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr, struct kvm_vcpu *vcpu, bool write_fault) @@ -972,125 +769,6 @@ int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr, /* Invalidate this entry in the TLB */ return kvm_vz_host_tlb_inv(vcpu, badvaddr); } -#endif - -/* XXXKYMA: Must be called with interrupts disabled */ -int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, - struct kvm_vcpu *vcpu, - bool write_fault) -{ - unsigned long gpa; - pte_t pte_gpa[2], *ptep_gva; - int idx; - - if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) { - kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr); - kvm_mips_dump_host_tlbs(); - return -1; - } - - /* Get the GPA page table entry */ - gpa = KVM_GUEST_CPHYSADDR(badvaddr); - idx = (badvaddr >> PAGE_SHIFT) & 1; - if (kvm_mips_map_page(vcpu, gpa, write_fault, &pte_gpa[idx], - &pte_gpa[!idx]) < 0) - return -1; - - /* Get the GVA page table entry */ - ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, badvaddr & ~PAGE_SIZE); - if (!ptep_gva) { - kvm_err("No ptep for gva %lx\n", badvaddr); - return -1; - } - - /* Copy a pair of entries from GPA page table to GVA page table */ - ptep_gva[0] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[0]); - ptep_gva[1] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[1]); - - /* Invalidate this entry in the TLB, guest kernel ASID only */ - kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true); - return 0; -} - -int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, - struct kvm_mips_tlb *tlb, - unsigned long gva, - bool write_fault) -{ - struct kvm *kvm = vcpu->kvm; - long tlb_lo[2]; - pte_t pte_gpa[2], *ptep_buddy, *ptep_gva; - unsigned int idx = TLB_LO_IDX(*tlb, gva); - bool kernel = KVM_GUEST_KERNEL_MODE(vcpu); - - tlb_lo[0] = tlb->tlb_lo[0]; - tlb_lo[1] = tlb->tlb_lo[1]; - - /* - * The commpage address must not be mapped to anything else if the guest - * TLB contains entries nearby, or commpage accesses will break. 
- */ - if (!((gva ^ KVM_GUEST_COMMPAGE_ADDR) & VPN2_MASK & (PAGE_MASK << 1))) - tlb_lo[TLB_LO_IDX(*tlb, KVM_GUEST_COMMPAGE_ADDR)] = 0; - - /* Get the GPA page table entry */ - if (kvm_mips_map_page(vcpu, mips3_tlbpfn_to_paddr(tlb_lo[idx]), - write_fault, &pte_gpa[idx], NULL) < 0) - return -1; - - /* And its GVA buddy's GPA page table entry if it also exists */ - pte_gpa[!idx] = pfn_pte(0, __pgprot(0)); - if (tlb_lo[!idx] & ENTRYLO_V) { - spin_lock(&kvm->mmu_lock); - ptep_buddy = kvm_mips_pte_for_gpa(kvm, NULL, - mips3_tlbpfn_to_paddr(tlb_lo[!idx])); - if (ptep_buddy) - pte_gpa[!idx] = *ptep_buddy; - spin_unlock(&kvm->mmu_lock); - } - - /* Get the GVA page table entry pair */ - ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, gva & ~PAGE_SIZE); - if (!ptep_gva) { - kvm_err("No ptep for gva %lx\n", gva); - return -1; - } - - /* Copy a pair of entries from GPA page table to GVA page table */ - ptep_gva[0] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[0], tlb_lo[0]); - ptep_gva[1] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[1], tlb_lo[1]); - - /* Invalidate this entry in the TLB, current guest mode ASID only */ - kvm_mips_host_tlb_inv(vcpu, gva, !kernel, kernel); - - kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc, - tlb->tlb_lo[0], tlb->tlb_lo[1]); - - return 0; -} - -int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, - struct kvm_vcpu *vcpu) -{ - kvm_pfn_t pfn; - pte_t *ptep; - pgprot_t prot; - - ptep = kvm_trap_emul_pte_for_gva(vcpu, badvaddr); - if (!ptep) { - kvm_err("No ptep for commpage %lx\n", badvaddr); - return -1; - } - - pfn = PFN_DOWN(virt_to_phys(vcpu->arch.kseg0_commpage)); - /* Also set valid and dirty, so refill handler doesn't have to */ - prot = vm_get_page_prot(VM_READ|VM_WRITE|VM_SHARED); - *ptep = pte_mkyoung(pte_mkdirty(pfn_pte(pfn, prot))); - - /* Invalidate this entry in the TLB, guest kernel ASID only */ - kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true); - return 0; -} /** * kvm_mips_migrate_count() - Migrate timer. @@ -1153,86 +831,3 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) local_irq_restore(flags); } - -/** - * kvm_trap_emul_gva_fault() - Safely attempt to handle a GVA access fault. - * @vcpu: Virtual CPU. - * @gva: Guest virtual address to be accessed. - * @write: True if write attempted (must be dirtied and made writable). - * - * Safely attempt to handle a GVA fault, mapping GVA pages if necessary, and - * dirtying the page if @write so that guest instructions can be modified. - * - * Returns: KVM_MIPS_MAPPED on success. - * KVM_MIPS_GVA if bad guest virtual address. - * KVM_MIPS_GPA if bad guest physical address. - * KVM_MIPS_TLB if guest TLB not present. - * KVM_MIPS_TLBINV if guest TLB present but not valid. - * KVM_MIPS_TLBMOD if guest TLB read only. 
- */ -enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu, - unsigned long gva, - bool write) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_mips_tlb *tlb; - int index; - - if (KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG0) { - if (kvm_mips_handle_kseg0_tlb_fault(gva, vcpu, write) < 0) - return KVM_MIPS_GPA; - } else if ((KVM_GUEST_KSEGX(gva) < KVM_GUEST_KSEG0) || - KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG23) { - /* Address should be in the guest TLB */ - index = kvm_mips_guest_tlb_lookup(vcpu, (gva & VPN2_MASK) | - (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID)); - if (index < 0) - return KVM_MIPS_TLB; - tlb = &vcpu->arch.guest_tlb[index]; - - /* Entry should be valid, and dirty for writes */ - if (!TLB_IS_VALID(*tlb, gva)) - return KVM_MIPS_TLBINV; - if (write && !TLB_IS_DIRTY(*tlb, gva)) - return KVM_MIPS_TLBMOD; - - if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, gva, write)) - return KVM_MIPS_GPA; - } else { - return KVM_MIPS_GVA; - } - - return KVM_MIPS_MAPPED; -} - -int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out) -{ - int err; - - if (WARN(IS_ENABLED(CONFIG_KVM_MIPS_VZ), - "Expect BadInstr/BadInstrP registers to be used with VZ\n")) - return -EINVAL; - -retry: - kvm_trap_emul_gva_lockless_begin(vcpu); - err = get_user(*out, opc); - kvm_trap_emul_gva_lockless_end(vcpu); - - if (unlikely(err)) { - /* - * Try to handle the fault, maybe we just raced with a GVA - * invalidation. - */ - err = kvm_trap_emul_gva_fault(vcpu, (unsigned long)opc, - false); - if (unlikely(err)) { - kvm_err("%s: illegal address: %p\n", - __func__, opc); - return -EFAULT; - } - - /* Hopefully it'll work now */ - goto retry; - } - return 0; -} diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c index 1c1fbce3f566..1088114e5482 100644 --- a/arch/mips/kvm/tlb.c +++ b/arch/mips/kvm/tlb.c @@ -30,10 +30,6 @@ #include <asm/r4kcache.h> #define CONFIG_MIPS_MT -#define KVM_GUEST_PC_TLB 0 -#define KVM_GUEST_SP_TLB 1 - -#ifdef CONFIG_KVM_MIPS_VZ unsigned long GUESTID_MASK; EXPORT_SYMBOL_GPL(GUESTID_MASK); unsigned long GUESTID_FIRST_VERSION; @@ -50,91 +46,6 @@ static u32 kvm_mips_get_root_asid(struct kvm_vcpu *vcpu) else return cpu_asid(smp_processor_id(), gpa_mm); } -#endif - -static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) -{ - struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; - int cpu = smp_processor_id(); - - return cpu_asid(cpu, kern_mm); -} - -static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) -{ - struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; - int cpu = smp_processor_id(); - - return cpu_asid(cpu, user_mm); -} - -/* Structure defining an tlb entry data set. */ - -void kvm_mips_dump_host_tlbs(void) -{ - unsigned long flags; - - local_irq_save(flags); - - kvm_info("HOST TLBs:\n"); - dump_tlb_regs(); - pr_info("\n"); - dump_tlb_all(); - - local_irq_restore(flags); -} -EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs); - -void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - struct kvm_mips_tlb tlb; - int i; - - kvm_info("Guest TLBs:\n"); - kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0)); - - for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { - tlb = vcpu->arch.guest_tlb[i]; - kvm_info("TLB%c%3d Hi 0x%08lx ", - (tlb.tlb_lo[0] | tlb.tlb_lo[1]) & ENTRYLO_V - ? ' ' : '*', - i, tlb.tlb_hi); - kvm_info("Lo0=0x%09llx %c%c attr %lx ", - (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[0]), - (tlb.tlb_lo[0] & ENTRYLO_D) ? 'D' : ' ', - (tlb.tlb_lo[0] & ENTRYLO_G) ? 
'G' : ' ', - (tlb.tlb_lo[0] & ENTRYLO_C) >> ENTRYLO_C_SHIFT); - kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n", - (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[1]), - (tlb.tlb_lo[1] & ENTRYLO_D) ? 'D' : ' ', - (tlb.tlb_lo[1] & ENTRYLO_G) ? 'G' : ' ', - (tlb.tlb_lo[1] & ENTRYLO_C) >> ENTRYLO_C_SHIFT, - tlb.tlb_mask); - } -} -EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs); - -int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi) -{ - int i; - int index = -1; - struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb; - - for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { - if (TLB_HI_VPN2_HIT(tlb[i], entryhi) && - TLB_HI_ASID_HIT(tlb[i], entryhi)) { - index = i; - break; - } - } - - kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n", - __func__, entryhi, index, tlb[i].tlb_lo[0], tlb[i].tlb_lo[1]); - - return index; -} -EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup); static int _kvm_mips_host_tlb_inv(unsigned long entryhi) { @@ -163,54 +74,6 @@ static int _kvm_mips_host_tlb_inv(unsigned long entryhi) return idx; } -int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va, - bool user, bool kernel) -{ - /* - * Initialize idx_user and idx_kernel to workaround bogus - * maybe-initialized warning when using GCC 6. - */ - int idx_user = 0, idx_kernel = 0; - unsigned long flags, old_entryhi; - - local_irq_save(flags); - - old_entryhi = read_c0_entryhi(); - - if (user) - idx_user = _kvm_mips_host_tlb_inv((va & VPN2_MASK) | - kvm_mips_get_user_asid(vcpu)); - if (kernel) - idx_kernel = _kvm_mips_host_tlb_inv((va & VPN2_MASK) | - kvm_mips_get_kernel_asid(vcpu)); - - write_c0_entryhi(old_entryhi); - mtc0_tlbw_hazard(); - - local_irq_restore(flags); - - /* - * We don't want to get reserved instruction exceptions for missing tlb - * entries. - */ - if (cpu_has_vtag_icache) - flush_icache_all(); - - if (user && idx_user >= 0) - kvm_debug("%s: Invalidated guest user entryhi %#lx @ idx %d\n", - __func__, (va & VPN2_MASK) | - kvm_mips_get_user_asid(vcpu), idx_user); - if (kernel && idx_kernel >= 0) - kvm_debug("%s: Invalidated guest kernel entryhi %#lx @ idx %d\n", - __func__, (va & VPN2_MASK) | - kvm_mips_get_kernel_asid(vcpu), idx_kernel); - - return 0; -} -EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv); - -#ifdef CONFIG_KVM_MIPS_VZ - /* GuestID management */ /** @@ -661,40 +524,3 @@ void kvm_loongson_clear_guest_ftlb(void) } EXPORT_SYMBOL_GPL(kvm_loongson_clear_guest_ftlb); #endif - -#endif - -/** - * kvm_mips_suspend_mm() - Suspend the active mm. - * @cpu The CPU we're running on. - * - * Suspend the active_mm, ready for a switch to a KVM guest virtual address - * space. This is left active for the duration of guest context, including time - * with interrupts enabled, so we need to be careful not to confuse e.g. cache - * management IPIs. - * - * kvm_mips_resume_mm() should be called before context switching to a different - * process so we don't need to worry about reference counting. - * - * This needs to be in static kernel code to avoid exporting init_mm. - */ -void kvm_mips_suspend_mm(int cpu) -{ - cpumask_clear_cpu(cpu, mm_cpumask(current->active_mm)); - current->active_mm = &init_mm; -} -EXPORT_SYMBOL_GPL(kvm_mips_suspend_mm); - -/** - * kvm_mips_resume_mm() - Resume the current process mm. - * @cpu The CPU we're running on. - * - * Resume the mm of the current process, after a switch back from a KVM guest - * virtual address space (see kvm_mips_suspend_mm()). 
- */ -void kvm_mips_resume_mm(int cpu) -{ - cpumask_set_cpu(cpu, mm_cpumask(current->mm)); - current->active_mm = current->mm; -} -EXPORT_SYMBOL_GPL(kvm_mips_resume_mm); diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c deleted file mode 100644 index 0788c00d7e94..000000000000 --- a/arch/mips/kvm/trap_emul.c +++ /dev/null @@ -1,1306 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel - * - * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. - * Authors: Sanjay Lal <sanjayl@kymasys.com> - */ - -#include <linux/errno.h> -#include <linux/err.h> -#include <linux/kvm_host.h> -#include <linux/log2.h> -#include <linux/uaccess.h> -#include <linux/vmalloc.h> -#include <asm/mmu_context.h> -#include <asm/pgalloc.h> - -#include "interrupt.h" - -static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva) -{ - gpa_t gpa; - gva_t kseg = KSEGX(gva); - gva_t gkseg = KVM_GUEST_KSEGX(gva); - - if ((kseg == CKSEG0) || (kseg == CKSEG1)) - gpa = CPHYSADDR(gva); - else if (gkseg == KVM_GUEST_KSEG0) - gpa = KVM_GUEST_CPHYSADDR(gva); - else { - kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva); - kvm_mips_dump_host_tlbs(); - gpa = KVM_INVALID_ADDR; - } - - kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa); - - return gpa; -} - -static int kvm_trap_emul_no_handler(struct kvm_vcpu *vcpu) -{ - u32 __user *opc = (u32 __user *) vcpu->arch.pc; - u32 cause = vcpu->arch.host_cp0_cause; - u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE; - unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; - u32 inst = 0; - - /* - * Fetch the instruction. 
- */ - if (cause & CAUSEF_BD) - opc += 1; - kvm_get_badinstr(opc, vcpu, &inst); - - kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n", - exccode, opc, inst, badvaddr, - kvm_read_c0_guest_status(vcpu->arch.cop0)); - kvm_arch_vcpu_dump_regs(vcpu); - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - return RESUME_HOST; -} - -static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - u32 __user *opc = (u32 __user *) vcpu->arch.pc; - u32 cause = vcpu->arch.host_cp0_cause; - enum emulation_result er = EMULATE_DONE; - int ret = RESUME_GUEST; - - if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) { - /* FPU Unusable */ - if (!kvm_mips_guest_has_fpu(&vcpu->arch) || - (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) { - /* - * Unusable/no FPU in guest: - * deliver guest COP1 Unusable Exception - */ - er = kvm_mips_emulate_fpu_exc(cause, opc, vcpu); - } else { - /* Restore FPU state */ - kvm_own_fpu(vcpu); - er = EMULATE_DONE; - } - } else { - er = kvm_mips_emulate_inst(cause, opc, vcpu); - } - - switch (er) { - case EMULATE_DONE: - ret = RESUME_GUEST; - break; - - case EMULATE_FAIL: - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - ret = RESUME_HOST; - break; - - case EMULATE_WAIT: - vcpu->run->exit_reason = KVM_EXIT_INTR; - ret = RESUME_HOST; - break; - - case EMULATE_HYPERCALL: - ret = kvm_mips_handle_hypcall(vcpu); - break; - - default: - BUG(); - } - return ret; -} - -static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_vcpu *vcpu) -{ - enum emulation_result er; - union mips_instruction inst; - int err; - - /* A code fetch fault doesn't count as an MMIO */ - if (kvm_is_ifetch_fault(&vcpu->arch)) { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - return RESUME_HOST; - } - - /* Fetch the instruction. */ - if (cause & CAUSEF_BD) - opc += 1; - err = kvm_get_badinstr(opc, vcpu, &inst.word); - if (err) { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - return RESUME_HOST; - } - - /* Emulate the load */ - er = kvm_mips_emulate_load(inst, cause, vcpu); - if (er == EMULATE_FAIL) { - kvm_err("Emulate load from MMIO space failed\n"); - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - } else { - vcpu->run->exit_reason = KVM_EXIT_MMIO; - } - return RESUME_HOST; -} - -static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_vcpu *vcpu) -{ - enum emulation_result er; - union mips_instruction inst; - int err; - - /* Fetch the instruction. 
*/ - if (cause & CAUSEF_BD) - opc += 1; - err = kvm_get_badinstr(opc, vcpu, &inst.word); - if (err) { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - return RESUME_HOST; - } - - /* Emulate the store */ - er = kvm_mips_emulate_store(inst, cause, vcpu); - if (er == EMULATE_FAIL) { - kvm_err("Emulate store to MMIO space failed\n"); - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - } else { - vcpu->run->exit_reason = KVM_EXIT_MMIO; - } - return RESUME_HOST; -} - -static int kvm_mips_bad_access(u32 cause, u32 *opc, - struct kvm_vcpu *vcpu, bool store) -{ - if (store) - return kvm_mips_bad_store(cause, opc, vcpu); - else - return kvm_mips_bad_load(cause, opc, vcpu); -} - -static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - u32 __user *opc = (u32 __user *) vcpu->arch.pc; - unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; - u32 cause = vcpu->arch.host_cp0_cause; - struct kvm_mips_tlb *tlb; - unsigned long entryhi; - int index; - - if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 - || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { - /* - * First find the mapping in the guest TLB. If the failure to - * write was due to the guest TLB, it should be up to the guest - * to handle it. - */ - entryhi = (badvaddr & VPN2_MASK) | - (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); - index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); - - /* - * These should never happen. - * They would indicate stale host TLB entries. - */ - if (unlikely(index < 0)) { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - return RESUME_HOST; - } - tlb = vcpu->arch.guest_tlb + index; - if (unlikely(!TLB_IS_VALID(*tlb, badvaddr))) { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - return RESUME_HOST; - } - - /* - * Guest entry not dirty? That would explain the TLB modified - * exception. Relay that on to the guest so it can handle it. - */ - if (!TLB_IS_DIRTY(*tlb, badvaddr)) { - kvm_mips_emulate_tlbmod(cause, opc, vcpu); - return RESUME_GUEST; - } - - if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, badvaddr, - true)) - /* Not writable, needs handling as MMIO */ - return kvm_mips_bad_store(cause, opc, vcpu); - return RESUME_GUEST; - } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { - if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, true) < 0) - /* Not writable, needs handling as MMIO */ - return kvm_mips_bad_store(cause, opc, vcpu); - return RESUME_GUEST; - } else { - /* host kernel addresses are all handled as MMIO */ - return kvm_mips_bad_store(cause, opc, vcpu); - } -} - -static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store) -{ - struct kvm_run *run = vcpu->run; - u32 __user *opc = (u32 __user *) vcpu->arch.pc; - unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; - u32 cause = vcpu->arch.host_cp0_cause; - enum emulation_result er = EMULATE_DONE; - int ret = RESUME_GUEST; - - if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) - && KVM_GUEST_KERNEL_MODE(vcpu)) { - if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) { - run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - ret = RESUME_HOST; - } - } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 - || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { - kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n", - store ? 
"ST" : "LD", cause, opc, badvaddr); - - /* - * User Address (UA) fault, this could happen if - * (1) TLB entry not present/valid in both Guest and shadow host - * TLBs, in this case we pass on the fault to the guest - * kernel and let it handle it. - * (2) TLB entry is present in the Guest TLB but not in the - * shadow, in this case we inject the TLB from the Guest TLB - * into the shadow host TLB - */ - - er = kvm_mips_handle_tlbmiss(cause, opc, vcpu, store); - if (er == EMULATE_DONE) - ret = RESUME_GUEST; - else { - run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - ret = RESUME_HOST; - } - } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { - /* - * All KSEG0 faults are handled by KVM, as the guest kernel does - * not expect to ever get them - */ - if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, store) < 0) - ret = kvm_mips_bad_access(cause, opc, vcpu, store); - } else if (KVM_GUEST_KERNEL_MODE(vcpu) - && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) { - /* - * With EVA we may get a TLB exception instead of an address - * error when the guest performs MMIO to KSeg1 addresses. - */ - ret = kvm_mips_bad_access(cause, opc, vcpu, store); - } else { - kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n", - store ? "ST" : "LD", cause, opc, badvaddr); - kvm_mips_dump_host_tlbs(); - kvm_arch_vcpu_dump_regs(vcpu); - run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - ret = RESUME_HOST; - } - return ret; -} - -static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu) -{ - return kvm_trap_emul_handle_tlb_miss(vcpu, true); -} - -static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu) -{ - return kvm_trap_emul_handle_tlb_miss(vcpu, false); -} - -static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu) -{ - u32 __user *opc = (u32 __user *) vcpu->arch.pc; - unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; - u32 cause = vcpu->arch.host_cp0_cause; - int ret = RESUME_GUEST; - - if (KVM_GUEST_KERNEL_MODE(vcpu) - && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) { - ret = kvm_mips_bad_store(cause, opc, vcpu); - } else { - kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n", - cause, opc, badvaddr); - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - ret = RESUME_HOST; - } - return ret; -} - -static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu) -{ - u32 __user *opc = (u32 __user *) vcpu->arch.pc; - unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; - u32 cause = vcpu->arch.host_cp0_cause; - int ret = RESUME_GUEST; - - if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) { - ret = kvm_mips_bad_load(cause, opc, vcpu); - } else { - kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n", - cause, opc, badvaddr); - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - ret = RESUME_HOST; - } - return ret; -} - -static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu) -{ - u32 __user *opc = (u32 __user *) vcpu->arch.pc; - u32 cause = vcpu->arch.host_cp0_cause; - enum emulation_result er = EMULATE_DONE; - int ret = RESUME_GUEST; - - er = kvm_mips_emulate_syscall(cause, opc, vcpu); - if (er == EMULATE_DONE) - ret = RESUME_GUEST; - else { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - ret = RESUME_HOST; - } - return ret; -} - -static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu) -{ - u32 __user *opc = (u32 __user *) vcpu->arch.pc; - u32 cause = vcpu->arch.host_cp0_cause; - enum emulation_result er = EMULATE_DONE; - int ret = RESUME_GUEST; - - er = 
kvm_mips_handle_ri(cause, opc, vcpu); - if (er == EMULATE_DONE) - ret = RESUME_GUEST; - else { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - ret = RESUME_HOST; - } - return ret; -} - -static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu) -{ - u32 __user *opc = (u32 __user *) vcpu->arch.pc; - u32 cause = vcpu->arch.host_cp0_cause; - enum emulation_result er = EMULATE_DONE; - int ret = RESUME_GUEST; - - er = kvm_mips_emulate_bp_exc(cause, opc, vcpu); - if (er == EMULATE_DONE) - ret = RESUME_GUEST; - else { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - ret = RESUME_HOST; - } - return ret; -} - -static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu) -{ - u32 __user *opc = (u32 __user *)vcpu->arch.pc; - u32 cause = vcpu->arch.host_cp0_cause; - enum emulation_result er = EMULATE_DONE; - int ret = RESUME_GUEST; - - er = kvm_mips_emulate_trap_exc(cause, opc, vcpu); - if (er == EMULATE_DONE) { - ret = RESUME_GUEST; - } else { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - ret = RESUME_HOST; - } - return ret; -} - -static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu) -{ - u32 __user *opc = (u32 __user *)vcpu->arch.pc; - u32 cause = vcpu->arch.host_cp0_cause; - enum emulation_result er = EMULATE_DONE; - int ret = RESUME_GUEST; - - er = kvm_mips_emulate_msafpe_exc(cause, opc, vcpu); - if (er == EMULATE_DONE) { - ret = RESUME_GUEST; - } else { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - ret = RESUME_HOST; - } - return ret; -} - -static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu) -{ - u32 __user *opc = (u32 __user *)vcpu->arch.pc; - u32 cause = vcpu->arch.host_cp0_cause; - enum emulation_result er = EMULATE_DONE; - int ret = RESUME_GUEST; - - er = kvm_mips_emulate_fpe_exc(cause, opc, vcpu); - if (er == EMULATE_DONE) { - ret = RESUME_GUEST; - } else { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - ret = RESUME_HOST; - } - return ret; -} - -/** - * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root. - * @vcpu: Virtual CPU context. - * - * Handle when the guest attempts to use MSA when it is disabled. 
- */ -static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - u32 __user *opc = (u32 __user *) vcpu->arch.pc; - u32 cause = vcpu->arch.host_cp0_cause; - enum emulation_result er = EMULATE_DONE; - int ret = RESUME_GUEST; - - if (!kvm_mips_guest_has_msa(&vcpu->arch) || - (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) { - /* - * No MSA in guest, or FPU enabled and not in FR=1 mode, - * guest reserved instruction exception - */ - er = kvm_mips_emulate_ri_exc(cause, opc, vcpu); - } else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) { - /* MSA disabled by guest, guest MSA disabled exception */ - er = kvm_mips_emulate_msadis_exc(cause, opc, vcpu); - } else { - /* Restore MSA/FPU state */ - kvm_own_msa(vcpu); - er = EMULATE_DONE; - } - - switch (er) { - case EMULATE_DONE: - ret = RESUME_GUEST; - break; - - case EMULATE_FAIL: - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - ret = RESUME_HOST; - break; - - default: - BUG(); - } - return ret; -} - -static int kvm_trap_emul_hardware_enable(void) -{ - return 0; -} - -static void kvm_trap_emul_hardware_disable(void) -{ -} - -static int kvm_trap_emul_check_extension(struct kvm *kvm, long ext) -{ - int r; - - switch (ext) { - case KVM_CAP_MIPS_TE: - r = 1; - break; - case KVM_CAP_IOEVENTFD: - r = 1; - break; - default: - r = 0; - break; - } - - return r; -} - -static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu) -{ - struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; - struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; - - /* - * Allocate GVA -> HPA page tables. - * MIPS doesn't use the mm_struct pointer argument. - */ - kern_mm->pgd = pgd_alloc(kern_mm); - if (!kern_mm->pgd) - return -ENOMEM; - - user_mm->pgd = pgd_alloc(user_mm); - if (!user_mm->pgd) { - pgd_free(kern_mm, kern_mm->pgd); - return -ENOMEM; - } - - return 0; -} - -static void kvm_mips_emul_free_gva_pt(pgd_t *pgd) -{ - /* Don't free host kernel page tables copied from init_mm.pgd */ - const unsigned long end = 0x80000000; - unsigned long pgd_va, pud_va, pmd_va; - p4d_t *p4d; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; - int i, j, k; - - for (i = 0; i < USER_PTRS_PER_PGD; i++) { - if (pgd_none(pgd[i])) - continue; - - pgd_va = (unsigned long)i << PGDIR_SHIFT; - if (pgd_va >= end) - break; - p4d = p4d_offset(pgd, 0); - pud = pud_offset(p4d + i, 0); - for (j = 0; j < PTRS_PER_PUD; j++) { - if (pud_none(pud[j])) - continue; - - pud_va = pgd_va | ((unsigned long)j << PUD_SHIFT); - if (pud_va >= end) - break; - pmd = pmd_offset(pud + j, 0); - for (k = 0; k < PTRS_PER_PMD; k++) { - if (pmd_none(pmd[k])) - continue; - - pmd_va = pud_va | (k << PMD_SHIFT); - if (pmd_va >= end) - break; - pte = pte_offset_kernel(pmd + k, 0); - pte_free_kernel(NULL, pte); - } - pmd_free(NULL, pmd); - } - pud_free(NULL, pud); - } - pgd_free(NULL, pgd); -} - -static void kvm_trap_emul_vcpu_uninit(struct kvm_vcpu *vcpu) -{ - kvm_mips_emul_free_gva_pt(vcpu->arch.guest_kernel_mm.pgd); - kvm_mips_emul_free_gva_pt(vcpu->arch.guest_user_mm.pgd); -} - -static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - u32 config, config1; - int vcpu_id = vcpu->vcpu_id; - - /* Start off the timer at 100 MHz */ - kvm_mips_init_count(vcpu, 100*1000*1000); - - /* - * Arch specific stuff, set up config registers properly so that the - * guest will come up as expected - */ -#ifndef CONFIG_CPU_MIPSR6 - /* r2-r5, simulate a MIPS 24kc */ - kvm_write_c0_guest_prid(cop0, 0x00019300); 
-#else - /* r6+, simulate a generic QEMU machine */ - kvm_write_c0_guest_prid(cop0, 0x00010000); -#endif - /* - * Have config1, Cacheable, noncoherent, write-back, write allocate. - * Endianness, arch revision & virtually tagged icache should match - * host. - */ - config = read_c0_config() & MIPS_CONF_AR; - config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB; -#ifdef CONFIG_CPU_BIG_ENDIAN - config |= CONF_BE; -#endif - if (cpu_has_vtag_icache) - config |= MIPS_CONF_VI; - kvm_write_c0_guest_config(cop0, config); - - /* Read the cache characteristics from the host Config1 Register */ - config1 = (read_c0_config1() & ~0x7f); - - /* DCache line size not correctly reported in Config1 on Octeon CPUs */ - if (cpu_dcache_line_size()) { - config1 &= ~MIPS_CONF1_DL; - config1 |= ((ilog2(cpu_dcache_line_size()) - 1) << - MIPS_CONF1_DL_SHF) & MIPS_CONF1_DL; - } - - /* Set up MMU size */ - config1 &= ~(0x3f << 25); - config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25); - - /* We unset some bits that we aren't emulating */ - config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC | - MIPS_CONF1_WR | MIPS_CONF1_CA); - kvm_write_c0_guest_config1(cop0, config1); - - /* Have config3, no tertiary/secondary caches implemented */ - kvm_write_c0_guest_config2(cop0, MIPS_CONF_M); - /* MIPS_CONF_M | (read_c0_config2() & 0xfff) */ - - /* Have config4, UserLocal */ - kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI); - - /* Have config5 */ - kvm_write_c0_guest_config4(cop0, MIPS_CONF_M); - - /* No config6 */ - kvm_write_c0_guest_config5(cop0, 0); - - /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */ - kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10)); - - /* Status */ - kvm_write_c0_guest_status(cop0, ST0_BEV | ST0_ERL); - - /* - * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5) - */ - kvm_write_c0_guest_intctl(cop0, 0xFC000000); - - /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */ - kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | - (vcpu_id & MIPS_EBASE_CPUNUM)); - - /* Put PC at guest reset vector */ - vcpu->arch.pc = KVM_GUEST_CKSEG1ADDR(0x1fc00000); - - return 0; -} - -static void kvm_trap_emul_flush_shadow_all(struct kvm *kvm) -{ - /* Flush GVA page tables and invalidate GVA ASIDs on all VCPUs */ - kvm_flush_remote_tlbs(kvm); -} - -static void kvm_trap_emul_flush_shadow_memslot(struct kvm *kvm, - const struct kvm_memory_slot *slot) -{ - kvm_trap_emul_flush_shadow_all(kvm); -} - -static u64 kvm_trap_emul_get_one_regs[] = { - KVM_REG_MIPS_CP0_INDEX, - KVM_REG_MIPS_CP0_ENTRYLO0, - KVM_REG_MIPS_CP0_ENTRYLO1, - KVM_REG_MIPS_CP0_CONTEXT, - KVM_REG_MIPS_CP0_USERLOCAL, - KVM_REG_MIPS_CP0_PAGEMASK, - KVM_REG_MIPS_CP0_WIRED, - KVM_REG_MIPS_CP0_HWRENA, - KVM_REG_MIPS_CP0_BADVADDR, - KVM_REG_MIPS_CP0_COUNT, - KVM_REG_MIPS_CP0_ENTRYHI, - KVM_REG_MIPS_CP0_COMPARE, - KVM_REG_MIPS_CP0_STATUS, - KVM_REG_MIPS_CP0_INTCTL, - KVM_REG_MIPS_CP0_CAUSE, - KVM_REG_MIPS_CP0_EPC, - KVM_REG_MIPS_CP0_PRID, - KVM_REG_MIPS_CP0_EBASE, - KVM_REG_MIPS_CP0_CONFIG, - KVM_REG_MIPS_CP0_CONFIG1, - KVM_REG_MIPS_CP0_CONFIG2, - KVM_REG_MIPS_CP0_CONFIG3, - KVM_REG_MIPS_CP0_CONFIG4, - KVM_REG_MIPS_CP0_CONFIG5, - KVM_REG_MIPS_CP0_CONFIG7, - KVM_REG_MIPS_CP0_ERROREPC, - KVM_REG_MIPS_CP0_KSCRATCH1, - KVM_REG_MIPS_CP0_KSCRATCH2, - KVM_REG_MIPS_CP0_KSCRATCH3, - KVM_REG_MIPS_CP0_KSCRATCH4, - KVM_REG_MIPS_CP0_KSCRATCH5, - KVM_REG_MIPS_CP0_KSCRATCH6, - - KVM_REG_MIPS_COUNT_CTL, - KVM_REG_MIPS_COUNT_RESUME, - KVM_REG_MIPS_COUNT_HZ, -}; - -static unsigned long 
kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu) -{ - return ARRAY_SIZE(kvm_trap_emul_get_one_regs); -} - -static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu, - u64 __user *indices) -{ - if (copy_to_user(indices, kvm_trap_emul_get_one_regs, - sizeof(kvm_trap_emul_get_one_regs))) - return -EFAULT; - indices += ARRAY_SIZE(kvm_trap_emul_get_one_regs); - - return 0; -} - -static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu, - const struct kvm_one_reg *reg, - s64 *v) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - - switch (reg->id) { - case KVM_REG_MIPS_CP0_INDEX: - *v = (long)kvm_read_c0_guest_index(cop0); - break; - case KVM_REG_MIPS_CP0_ENTRYLO0: - *v = kvm_read_c0_guest_entrylo0(cop0); - break; - case KVM_REG_MIPS_CP0_ENTRYLO1: - *v = kvm_read_c0_guest_entrylo1(cop0); - break; - case KVM_REG_MIPS_CP0_CONTEXT: - *v = (long)kvm_read_c0_guest_context(cop0); - break; - case KVM_REG_MIPS_CP0_USERLOCAL: - *v = (long)kvm_read_c0_guest_userlocal(cop0); - break; - case KVM_REG_MIPS_CP0_PAGEMASK: - *v = (long)kvm_read_c0_guest_pagemask(cop0); - break; - case KVM_REG_MIPS_CP0_WIRED: - *v = (long)kvm_read_c0_guest_wired(cop0); - break; - case KVM_REG_MIPS_CP0_HWRENA: - *v = (long)kvm_read_c0_guest_hwrena(cop0); - break; - case KVM_REG_MIPS_CP0_BADVADDR: - *v = (long)kvm_read_c0_guest_badvaddr(cop0); - break; - case KVM_REG_MIPS_CP0_ENTRYHI: - *v = (long)kvm_read_c0_guest_entryhi(cop0); - break; - case KVM_REG_MIPS_CP0_COMPARE: - *v = (long)kvm_read_c0_guest_compare(cop0); - break; - case KVM_REG_MIPS_CP0_STATUS: - *v = (long)kvm_read_c0_guest_status(cop0); - break; - case KVM_REG_MIPS_CP0_INTCTL: - *v = (long)kvm_read_c0_guest_intctl(cop0); - break; - case KVM_REG_MIPS_CP0_CAUSE: - *v = (long)kvm_read_c0_guest_cause(cop0); - break; - case KVM_REG_MIPS_CP0_EPC: - *v = (long)kvm_read_c0_guest_epc(cop0); - break; - case KVM_REG_MIPS_CP0_PRID: - *v = (long)kvm_read_c0_guest_prid(cop0); - break; - case KVM_REG_MIPS_CP0_EBASE: - *v = (long)kvm_read_c0_guest_ebase(cop0); - break; - case KVM_REG_MIPS_CP0_CONFIG: - *v = (long)kvm_read_c0_guest_config(cop0); - break; - case KVM_REG_MIPS_CP0_CONFIG1: - *v = (long)kvm_read_c0_guest_config1(cop0); - break; - case KVM_REG_MIPS_CP0_CONFIG2: - *v = (long)kvm_read_c0_guest_config2(cop0); - break; - case KVM_REG_MIPS_CP0_CONFIG3: - *v = (long)kvm_read_c0_guest_config3(cop0); - break; - case KVM_REG_MIPS_CP0_CONFIG4: - *v = (long)kvm_read_c0_guest_config4(cop0); - break; - case KVM_REG_MIPS_CP0_CONFIG5: - *v = (long)kvm_read_c0_guest_config5(cop0); - break; - case KVM_REG_MIPS_CP0_CONFIG7: - *v = (long)kvm_read_c0_guest_config7(cop0); - break; - case KVM_REG_MIPS_CP0_COUNT: - *v = kvm_mips_read_count(vcpu); - break; - case KVM_REG_MIPS_COUNT_CTL: - *v = vcpu->arch.count_ctl; - break; - case KVM_REG_MIPS_COUNT_RESUME: - *v = ktime_to_ns(vcpu->arch.count_resume); - break; - case KVM_REG_MIPS_COUNT_HZ: - *v = vcpu->arch.count_hz; - break; - case KVM_REG_MIPS_CP0_ERROREPC: - *v = (long)kvm_read_c0_guest_errorepc(cop0); - break; - case KVM_REG_MIPS_CP0_KSCRATCH1: - *v = (long)kvm_read_c0_guest_kscratch1(cop0); - break; - case KVM_REG_MIPS_CP0_KSCRATCH2: - *v = (long)kvm_read_c0_guest_kscratch2(cop0); - break; - case KVM_REG_MIPS_CP0_KSCRATCH3: - *v = (long)kvm_read_c0_guest_kscratch3(cop0); - break; - case KVM_REG_MIPS_CP0_KSCRATCH4: - *v = (long)kvm_read_c0_guest_kscratch4(cop0); - break; - case KVM_REG_MIPS_CP0_KSCRATCH5: - *v = (long)kvm_read_c0_guest_kscratch5(cop0); - break; - case KVM_REG_MIPS_CP0_KSCRATCH6: - *v = 
(long)kvm_read_c0_guest_kscratch6(cop0); - break; - default: - return -EINVAL; - } - return 0; -} - -static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu, - const struct kvm_one_reg *reg, - s64 v) -{ - struct mips_coproc *cop0 = vcpu->arch.cop0; - int ret = 0; - unsigned int cur, change; - - switch (reg->id) { - case KVM_REG_MIPS_CP0_INDEX: - kvm_write_c0_guest_index(cop0, v); - break; - case KVM_REG_MIPS_CP0_ENTRYLO0: - kvm_write_c0_guest_entrylo0(cop0, v); - break; - case KVM_REG_MIPS_CP0_ENTRYLO1: - kvm_write_c0_guest_entrylo1(cop0, v); - break; - case KVM_REG_MIPS_CP0_CONTEXT: - kvm_write_c0_guest_context(cop0, v); - break; - case KVM_REG_MIPS_CP0_USERLOCAL: - kvm_write_c0_guest_userlocal(cop0, v); - break; - case KVM_REG_MIPS_CP0_PAGEMASK: - kvm_write_c0_guest_pagemask(cop0, v); - break; - case KVM_REG_MIPS_CP0_WIRED: - kvm_write_c0_guest_wired(cop0, v); - break; - case KVM_REG_MIPS_CP0_HWRENA: - kvm_write_c0_guest_hwrena(cop0, v); - break; - case KVM_REG_MIPS_CP0_BADVADDR: - kvm_write_c0_guest_badvaddr(cop0, v); - break; - case KVM_REG_MIPS_CP0_ENTRYHI: - kvm_write_c0_guest_entryhi(cop0, v); - break; - case KVM_REG_MIPS_CP0_STATUS: - kvm_write_c0_guest_status(cop0, v); - break; - case KVM_REG_MIPS_CP0_INTCTL: - /* No VInt, so no VS, read-only for now */ - break; - case KVM_REG_MIPS_CP0_EPC: - kvm_write_c0_guest_epc(cop0, v); - break; - case KVM_REG_MIPS_CP0_PRID: - kvm_write_c0_guest_prid(cop0, v); - break; - case KVM_REG_MIPS_CP0_EBASE: - /* - * Allow core number to be written, but the exception base must - * remain in guest KSeg0. - */ - kvm_change_c0_guest_ebase(cop0, 0x1ffff000 | MIPS_EBASE_CPUNUM, - v); - break; - case KVM_REG_MIPS_CP0_COUNT: - kvm_mips_write_count(vcpu, v); - break; - case KVM_REG_MIPS_CP0_COMPARE: - kvm_mips_write_compare(vcpu, v, false); - break; - case KVM_REG_MIPS_CP0_CAUSE: - /* - * If the timer is stopped or started (DC bit) it must look - * atomic with changes to the interrupt pending bits (TI, IRQ5). - * A timer interrupt should not happen in between. 
- */ - if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) { - if (v & CAUSEF_DC) { - /* disable timer first */ - kvm_mips_count_disable_cause(vcpu); - kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC, - v); - } else { - /* enable timer last */ - kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC, - v); - kvm_mips_count_enable_cause(vcpu); - } - } else { - kvm_write_c0_guest_cause(cop0, v); - } - break; - case KVM_REG_MIPS_CP0_CONFIG: - /* read-only for now */ - break; - case KVM_REG_MIPS_CP0_CONFIG1: - cur = kvm_read_c0_guest_config1(cop0); - change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu); - if (change) { - v = cur ^ change; - kvm_write_c0_guest_config1(cop0, v); - } - break; - case KVM_REG_MIPS_CP0_CONFIG2: - /* read-only for now */ - break; - case KVM_REG_MIPS_CP0_CONFIG3: - cur = kvm_read_c0_guest_config3(cop0); - change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu); - if (change) { - v = cur ^ change; - kvm_write_c0_guest_config3(cop0, v); - } - break; - case KVM_REG_MIPS_CP0_CONFIG4: - cur = kvm_read_c0_guest_config4(cop0); - change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu); - if (change) { - v = cur ^ change; - kvm_write_c0_guest_config4(cop0, v); - } - break; - case KVM_REG_MIPS_CP0_CONFIG5: - cur = kvm_read_c0_guest_config5(cop0); - change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu); - if (change) { - v = cur ^ change; - kvm_write_c0_guest_config5(cop0, v); - } - break; - case KVM_REG_MIPS_CP0_CONFIG7: - /* writes ignored */ - break; - case KVM_REG_MIPS_COUNT_CTL: - ret = kvm_mips_set_count_ctl(vcpu, v); - break; - case KVM_REG_MIPS_COUNT_RESUME: - ret = kvm_mips_set_count_resume(vcpu, v); - break; - case KVM_REG_MIPS_COUNT_HZ: - ret = kvm_mips_set_count_hz(vcpu, v); - break; - case KVM_REG_MIPS_CP0_ERROREPC: - kvm_write_c0_guest_errorepc(cop0, v); - break; - case KVM_REG_MIPS_CP0_KSCRATCH1: - kvm_write_c0_guest_kscratch1(cop0, v); - break; - case KVM_REG_MIPS_CP0_KSCRATCH2: - kvm_write_c0_guest_kscratch2(cop0, v); - break; - case KVM_REG_MIPS_CP0_KSCRATCH3: - kvm_write_c0_guest_kscratch3(cop0, v); - break; - case KVM_REG_MIPS_CP0_KSCRATCH4: - kvm_write_c0_guest_kscratch4(cop0, v); - break; - case KVM_REG_MIPS_CP0_KSCRATCH5: - kvm_write_c0_guest_kscratch5(cop0, v); - break; - case KVM_REG_MIPS_CP0_KSCRATCH6: - kvm_write_c0_guest_kscratch6(cop0, v); - break; - default: - return -EINVAL; - } - return ret; -} - -static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu) -{ - struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; - struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; - struct mm_struct *mm; - - /* - * Were we in guest context? If so, restore the appropriate ASID based - * on the mode of the Guest (Kernel/User). - */ - if (current->flags & PF_VCPU) { - mm = KVM_GUEST_KERNEL_MODE(vcpu) ? 
kern_mm : user_mm; - check_switch_mmu_context(mm); - kvm_mips_suspend_mm(cpu); - ehb(); - } - - return 0; -} - -static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu) -{ - kvm_lose_fpu(vcpu); - - if (current->flags & PF_VCPU) { - /* Restore normal Linux process memory map */ - check_switch_mmu_context(current->mm); - kvm_mips_resume_mm(cpu); - ehb(); - } - - return 0; -} - -static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu, - bool reload_asid) -{ - struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; - struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; - struct mm_struct *mm; - int i; - - if (likely(!kvm_request_pending(vcpu))) - return; - - if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { - /* - * Both kernel & user GVA mappings must be invalidated. The - * caller is just about to check whether the ASID is stale - * anyway so no need to reload it here. - */ - kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN); - kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER); - for_each_possible_cpu(i) { - set_cpu_context(i, kern_mm, 0); - set_cpu_context(i, user_mm, 0); - } - - /* Generate new ASID for current mode */ - if (reload_asid) { - mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm; - get_new_mmu_context(mm); - htw_stop(); - write_c0_entryhi(cpu_asid(cpu, mm)); - TLBMISS_HANDLER_SETUP_PGD(mm->pgd); - htw_start(); - } - } -} - -/** - * kvm_trap_emul_gva_lockless_begin() - Begin lockless access to GVA space. - * @vcpu: VCPU pointer. - * - * Call before a GVA space access outside of guest mode, to ensure that - * asynchronous TLB flush requests are handled or delayed until completion of - * the GVA access (as indicated by a matching kvm_trap_emul_gva_lockless_end()). - * - * Should be called with IRQs already enabled. - */ -void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu) -{ - /* We re-enable IRQs in kvm_trap_emul_gva_lockless_end() */ - WARN_ON_ONCE(irqs_disabled()); - - /* - * The caller is about to access the GVA space, so we set the mode to - * force TLB flush requests to send an IPI, and also disable IRQs to - * delay IPI handling until kvm_trap_emul_gva_lockless_end(). - */ - local_irq_disable(); - - /* - * Make sure the read of VCPU requests is not reordered ahead of the - * write to vcpu->mode, or we could miss a TLB flush request while - * the requester sees the VCPU as outside of guest mode and not needing - * an IPI. - */ - smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES); - - /* - * If a TLB flush has been requested (potentially while - * OUTSIDE_GUEST_MODE and assumed immediately effective), perform it - * before accessing the GVA space, and be sure to reload the ASID if - * necessary as it'll be immediately used. - * - * TLB flush requests after this check will trigger an IPI due to the - * mode change above, which will be delayed due to IRQs disabled. - */ - kvm_trap_emul_check_requests(vcpu, smp_processor_id(), true); -} - -/** - * kvm_trap_emul_gva_lockless_end() - End lockless access to GVA space. - * @vcpu: VCPU pointer. - * - * Called after a GVA space access outside of guest mode. Should have a matching - * call to kvm_trap_emul_gva_lockless_begin(). - */ -void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu) -{ - /* - * Make sure the write to vcpu->mode is not reordered in front of GVA - * accesses, or a TLB flush requester may not think it necessary to send - * an IPI. 
- */ - smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE); - - /* - * Now that the access to GVA space is complete, its safe for pending - * TLB flush request IPIs to be handled (which indicates completion). - */ - local_irq_enable(); -} - -static void kvm_trap_emul_vcpu_reenter(struct kvm_vcpu *vcpu) -{ - struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; - struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; - struct mm_struct *mm; - struct mips_coproc *cop0 = vcpu->arch.cop0; - int i, cpu = smp_processor_id(); - unsigned int gasid; - - /* - * No need to reload ASID, IRQs are disabled already so there's no rush, - * and we'll check if we need to regenerate below anyway before - * re-entering the guest. - */ - kvm_trap_emul_check_requests(vcpu, cpu, false); - - if (KVM_GUEST_KERNEL_MODE(vcpu)) { - mm = kern_mm; - } else { - mm = user_mm; - - /* - * Lazy host ASID regeneration / PT flush for guest user mode. - * If the guest ASID has changed since the last guest usermode - * execution, invalidate the stale TLB entries and flush GVA PT - * entries too. - */ - gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID; - if (gasid != vcpu->arch.last_user_gasid) { - kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER); - for_each_possible_cpu(i) - set_cpu_context(i, user_mm, 0); - vcpu->arch.last_user_gasid = gasid; - } - } - - /* - * Check if ASID is stale. This may happen due to a TLB flush request or - * a lazy user MM invalidation. - */ - check_mmu_context(mm); -} - -static int kvm_trap_emul_vcpu_run(struct kvm_vcpu *vcpu) -{ - int cpu = smp_processor_id(); - int r; - - /* Check if we have any exceptions/interrupts pending */ - kvm_mips_deliver_interrupts(vcpu, - kvm_read_c0_guest_cause(vcpu->arch.cop0)); - - kvm_trap_emul_vcpu_reenter(vcpu); - - /* - * We use user accessors to access guest memory, but we don't want to - * invoke Linux page faulting. - */ - pagefault_disable(); - - /* Disable hardware page table walking while in guest */ - htw_stop(); - - /* - * While in guest context we're in the guest's address space, not the - * host process address space, so we need to be careful not to confuse - * e.g. cache management IPIs. 
- */ - kvm_mips_suspend_mm(cpu); - - r = vcpu->arch.vcpu_run(vcpu); - - /* We may have migrated while handling guest exits */ - cpu = smp_processor_id(); - - /* Restore normal Linux process memory map */ - check_switch_mmu_context(current->mm); - kvm_mips_resume_mm(cpu); - - htw_start(); - - pagefault_enable(); - - return r; -} - -static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { - /* exit handlers */ - .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable, - .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod, - .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss, - .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss, - .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st, - .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld, - .handle_syscall = kvm_trap_emul_handle_syscall, - .handle_res_inst = kvm_trap_emul_handle_res_inst, - .handle_break = kvm_trap_emul_handle_break, - .handle_trap = kvm_trap_emul_handle_trap, - .handle_msa_fpe = kvm_trap_emul_handle_msa_fpe, - .handle_fpe = kvm_trap_emul_handle_fpe, - .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled, - .handle_guest_exit = kvm_trap_emul_no_handler, - - .hardware_enable = kvm_trap_emul_hardware_enable, - .hardware_disable = kvm_trap_emul_hardware_disable, - .check_extension = kvm_trap_emul_check_extension, - .vcpu_init = kvm_trap_emul_vcpu_init, - .vcpu_uninit = kvm_trap_emul_vcpu_uninit, - .vcpu_setup = kvm_trap_emul_vcpu_setup, - .flush_shadow_all = kvm_trap_emul_flush_shadow_all, - .flush_shadow_memslot = kvm_trap_emul_flush_shadow_memslot, - .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb, - .queue_timer_int = kvm_mips_queue_timer_int_cb, - .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb, - .queue_io_int = kvm_mips_queue_io_int_cb, - .dequeue_io_int = kvm_mips_dequeue_io_int_cb, - .irq_deliver = kvm_mips_irq_deliver_cb, - .irq_clear = kvm_mips_irq_clear_cb, - .num_regs = kvm_trap_emul_num_regs, - .copy_reg_indices = kvm_trap_emul_copy_reg_indices, - .get_one_reg = kvm_trap_emul_get_one_reg, - .set_one_reg = kvm_trap_emul_set_one_reg, - .vcpu_load = kvm_trap_emul_vcpu_load, - .vcpu_put = kvm_trap_emul_vcpu_put, - .vcpu_run = kvm_trap_emul_vcpu_run, - .vcpu_reenter = kvm_trap_emul_vcpu_reenter, -}; - -int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks) -{ - *install_callbacks = &kvm_trap_emul_callbacks; - return 0; -} diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c index 2ffbe9264a31..d0d03bddbbba 100644 --- a/arch/mips/kvm/vz.c +++ b/arch/mips/kvm/vz.c @@ -292,9 +292,8 @@ static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority, switch (priority) { case MIPS_EXC_INT_TIMER: /* - * Call to kvm_write_c0_guest_compare() clears Cause.TI in - * kvm_mips_emulate_CP0(). Explicitly clear irq associated with - * Cause.IP[IPTI] if GuestCtl2 virtual interrupt register not + * Explicitly clear irq associated with Cause.IP[IPTI] + * if GuestCtl2 virtual interrupt register not * supported or if not using GuestCtl2 Hardware Clear. 
*/ if (cpu_has_guestctl2) { diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S index 88065ee433cd..e19fb98b5d38 100644 --- a/arch/mips/lib/memcpy.S +++ b/arch/mips/lib/memcpy.S @@ -661,8 +661,14 @@ LEAF(memcpy) /* a0=dst a1=src a2=len */ EXPORT_SYMBOL(memcpy) move v0, dst /* return value */ .L__memcpy: -FEXPORT(__copy_user) -EXPORT_SYMBOL(__copy_user) +#ifndef CONFIG_EVA +FEXPORT(__raw_copy_from_user) +EXPORT_SYMBOL(__raw_copy_from_user) +FEXPORT(__raw_copy_to_user) +EXPORT_SYMBOL(__raw_copy_to_user) +FEXPORT(__raw_copy_in_user) +EXPORT_SYMBOL(__raw_copy_in_user) +#endif /* Legacy Mode, user <-> user */ __BUILD_COPY_USER LEGACY_MODE USEROP USEROP @@ -681,10 +687,10 @@ EXPORT_SYMBOL(__copy_user) * __copy_from_user (EVA) */ -LEAF(__copy_from_user_eva) -EXPORT_SYMBOL(__copy_from_user_eva) +LEAF(__raw_copy_from_user) +EXPORT_SYMBOL(__raw_copy_from_user) __BUILD_COPY_USER EVA_MODE USEROP KERNELOP -END(__copy_from_user_eva) +END(__raw_copy_from_user) @@ -692,18 +698,18 @@ END(__copy_from_user_eva) * __copy_to_user (EVA) */ -LEAF(__copy_to_user_eva) -EXPORT_SYMBOL(__copy_to_user_eva) +LEAF(__raw_copy_to_user) +EXPORT_SYMBOL(__raw_copy_to_user) __BUILD_COPY_USER EVA_MODE KERNELOP USEROP -END(__copy_to_user_eva) +END(__raw_copy_to_user) /* * __copy_in_user (EVA) */ -LEAF(__copy_in_user_eva) -EXPORT_SYMBOL(__copy_in_user_eva) +LEAF(__raw_copy_in_user) +EXPORT_SYMBOL(__raw_copy_in_user) __BUILD_COPY_USER EVA_MODE USEROP USEROP -END(__copy_in_user_eva) +END(__raw_copy_in_user) #endif diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S index d5449e8a3dfc..b0baa3c79fad 100644 --- a/arch/mips/lib/memset.S +++ b/arch/mips/lib/memset.S @@ -314,9 +314,6 @@ EXPORT_SYMBOL(memset) #ifndef CONFIG_EVA FEXPORT(__bzero) EXPORT_SYMBOL(__bzero) -#else -FEXPORT(__bzero_kernel) -EXPORT_SYMBOL(__bzero_kernel) #endif __BUILD_BZERO LEGACY_MODE diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S index acdff66bd5d2..556acf684d7b 100644 --- a/arch/mips/lib/strncpy_user.S +++ b/arch/mips/lib/strncpy_user.S @@ -29,19 +29,17 @@ * it happens at most some bytes of the exceptions handlers will be copied. */ - .macro __BUILD_STRNCPY_ASM func -LEAF(__strncpy_from_\func\()_asm) - LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok? 
- and v0, a1 - bnez v0, .Lfault\@ - +LEAF(__strncpy_from_user_asm) move t0, zero move v1, a1 -.ifeqs "\func","kernel" -1: EX(lbu, v0, (v1), .Lfault\@) -.else -1: EX(lbue, v0, (v1), .Lfault\@) -.endif +#ifdef CONFIG_EVA + .set push + .set eva +1: EX(lbue, v0, (v1), .Lfault) + .set pop +#else +1: EX(lbu, v0, (v1), .Lfault) +#endif PTR_ADDIU v1, 1 R10KCBARRIER(0(ra)) sb v0, (a0) @@ -51,35 +49,17 @@ LEAF(__strncpy_from_\func\()_asm) bne t0, a2, 1b 2: PTR_ADDU v0, a1, t0 xor v0, a1 - bltz v0, .Lfault\@ + bltz v0, .Lfault move v0, t0 jr ra # return n - END(__strncpy_from_\func\()_asm) + END(__strncpy_from_user_asm) -.Lfault\@: +.Lfault: li v0, -EFAULT jr ra .section __ex_table,"a" - PTR 1b, .Lfault\@ + PTR 1b, .Lfault .previous - .endm - -#ifndef CONFIG_EVA - /* Set aliases */ - .global __strncpy_from_user_asm - .set __strncpy_from_user_asm, __strncpy_from_kernel_asm -EXPORT_SYMBOL(__strncpy_from_user_asm) -#endif - -__BUILD_STRNCPY_ASM kernel -EXPORT_SYMBOL(__strncpy_from_kernel_asm) - -#ifdef CONFIG_EVA - .set push - .set eva -__BUILD_STRNCPY_ASM user - .set pop -EXPORT_SYMBOL(__strncpy_from_user_asm) -#endif + EXPORT_SYMBOL(__strncpy_from_user_asm) diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S index e1bacf5a3abe..92b63f20ec05 100644 --- a/arch/mips/lib/strnlen_user.S +++ b/arch/mips/lib/strnlen_user.S @@ -26,12 +26,7 @@ * bytes. There's nothing secret there. On 64-bit accessing beyond * the maximum is a tad hairier ... */ - .macro __BUILD_STRNLEN_ASM func -LEAF(__strnlen_\func\()_asm) - LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok? - and v0, a0 - bnez v0, .Lfault\@ - +LEAF(__strnlen_user_asm) move v0, a0 PTR_ADDU a1, a0 # stop pointer 1: @@ -40,11 +35,14 @@ LEAF(__strnlen_\func\()_asm) li AT, 1 #endif beq v0, a1, 1f # limit reached? 
-.ifeqs "\func", "kernel" - EX(lb, t0, (v0), .Lfault\@) -.else - EX(lbe, t0, (v0), .Lfault\@) -.endif +#ifdef CONFIG_EVA + .set push + .set eva + EX(lbe, t0, (v0), .Lfault) + .set pop +#else + EX(lb, t0, (v0), .Lfault) +#endif .set noreorder bnez t0, 1b 1: @@ -57,28 +55,10 @@ LEAF(__strnlen_\func\()_asm) .set reorder PTR_SUBU v0, a0 jr ra - END(__strnlen_\func\()_asm) + END(__strnlen_user_asm) -.Lfault\@: +.Lfault: move v0, zero jr ra - .endm - -#ifndef CONFIG_EVA - /* Set aliases */ - .global __strnlen_user_asm - .set __strnlen_user_asm, __strnlen_kernel_asm -EXPORT_SYMBOL(__strnlen_user_asm) -#endif - -__BUILD_STRNLEN_ASM kernel -EXPORT_SYMBOL(__strnlen_kernel_asm) - -#ifdef CONFIG_EVA - .set push - .set eva -__BUILD_STRNLEN_ASM user - .set pop -EXPORT_SYMBOL(__strnlen_user_asm) -#endif + EXPORT_SYMBOL(__strnlen_user_asm) diff --git a/arch/mips/loongson64/Makefile b/arch/mips/loongson64/Makefile index cc76944b1a9d..e806280bbb85 100644 --- a/arch/mips/loongson64/Makefile +++ b/arch/mips/loongson64/Makefile @@ -2,7 +2,7 @@ # # Makefile for Loongson-3 family machines # -obj-$(CONFIG_MACH_LOONGSON64) += cop2-ex.o platform.o dma.o \ +obj-$(CONFIG_MACH_LOONGSON64) += cop2-ex.o dma.o \ setup.o init.o env.o time.o reset.o \ obj-$(CONFIG_SMP) += smp.o diff --git a/arch/mips/loongson64/env.c b/arch/mips/loongson64/env.c index 51a5d050a94c..c8bb75d58f17 100644 --- a/arch/mips/loongson64/env.c +++ b/arch/mips/loongson64/env.c @@ -43,7 +43,18 @@ const char *get_system_type(void) return "Generic Loongson64 System"; } -void __init prom_init_env(void) + +void __init prom_dtb_init_env(void) +{ + if ((fw_arg2 < CKSEG0 || fw_arg2 > CKSEG1) + && (fw_arg2 < XKPHYS || fw_arg2 > XKSEG)) + + loongson_fdt_blob = __dtb_loongson64_2core_2k1000_begin; + else + loongson_fdt_blob = (void *)fw_arg2; +} + +void __init prom_lefi_init_env(void) { struct boot_params *boot_p; struct loongson_params *loongson_p; @@ -95,7 +106,6 @@ void __init prom_init_env(void) loongson_freqctrl[1] = 0x900010001fe001d0; loongson_freqctrl[2] = 0x900020001fe001d0; loongson_freqctrl[3] = 0x900030001fe001d0; - loongson_sysconf.ht_control_base = 0x90000EFDFB000000; loongson_sysconf.workarounds = WORKAROUND_CPUFREQ; break; case Legacy_3B: @@ -118,7 +128,6 @@ void __init prom_init_env(void) loongson_freqctrl[1] = 0x900020001fe001d0; loongson_freqctrl[2] = 0x900040001fe001d0; loongson_freqctrl[3] = 0x900060001fe001d0; - loongson_sysconf.ht_control_base = 0x90001EFDFB000000; loongson_sysconf.workarounds = WORKAROUND_CPUHOTPLUG; break; default: @@ -136,9 +145,6 @@ void __init prom_init_env(void) loongson_sysconf.cores_per_node - 1) / loongson_sysconf.cores_per_node; - loongson_sysconf.pci_mem_start_addr = eirq_source->pci_mem_start_addr; - loongson_sysconf.pci_mem_end_addr = eirq_source->pci_mem_end_addr; - loongson_sysconf.pci_io_base = eirq_source->pci_io_start_addr; loongson_sysconf.dma_mask_bits = eirq_source->dma_mask_bits; if (loongson_sysconf.dma_mask_bits < 32 || loongson_sysconf.dma_mask_bits > 64) @@ -153,23 +159,8 @@ void __init prom_init_env(void) loongson_sysconf.poweroff_addr, loongson_sysconf.restart_addr, loongson_sysconf.vgabios_addr); - memset(loongson_sysconf.ecname, 0, 32); - if (esys->has_ec) - memcpy(loongson_sysconf.ecname, esys->ec_name, 32); loongson_sysconf.workarounds |= esys->workarounds; - loongson_sysconf.nr_uarts = esys->nr_uarts; - if (esys->nr_uarts < 1 || esys->nr_uarts > MAX_UARTS) - loongson_sysconf.nr_uarts = 1; - memcpy(loongson_sysconf.uarts, esys->uarts, - sizeof(struct uart_device) * 
loongson_sysconf.nr_uarts); - - loongson_sysconf.nr_sensors = esys->nr_sensors; - if (loongson_sysconf.nr_sensors > MAX_SENSORS) - loongson_sysconf.nr_sensors = 0; - if (loongson_sysconf.nr_sensors) - memcpy(loongson_sysconf.sensors, esys->sensors, - sizeof(struct sensor_device) * loongson_sysconf.nr_sensors); pr_info("CpuClock = %u\n", cpu_clock_freq); /* Read the ID of PCI host bridge to detect bridge type */ diff --git a/arch/mips/loongson64/init.c b/arch/mips/loongson64/init.c index cfa788bca871..76e0a9636a0e 100644 --- a/arch/mips/loongson64/init.c +++ b/arch/mips/loongson64/init.c @@ -52,6 +52,10 @@ void __init szmem(unsigned int node) static unsigned long num_physpages; u64 node_id, node_psize, start_pfn, end_pfn, mem_start, mem_size; + /* Otherwise come from DTB */ + if (loongson_sysconf.fw_interface != LOONGSON_LEFI) + return; + /* Parse memory information and activate */ for (i = 0; i < loongson_memmap->nr_map; i++) { node_id = loongson_memmap->map[i].node_id; @@ -94,12 +98,20 @@ static void __init prom_init_memory(void) void __init prom_init(void) { fw_init_cmdline(); - prom_init_env(); + + if (fw_arg2 == 0 || (fdt_magic(fw_arg2) == FDT_MAGIC)) { + loongson_sysconf.fw_interface = LOONGSON_DTB; + prom_dtb_init_env(); + } else { + loongson_sysconf.fw_interface = LOONGSON_LEFI; + prom_lefi_init_env(); + } /* init base address of io space */ set_io_port_base(PCI_IOBASE); - loongson_sysconf.early_config(); + if (loongson_sysconf.early_config) + loongson_sysconf.early_config(); #ifdef CONFIG_NUMA prom_init_numa_memory(); @@ -108,7 +120,10 @@ void __init prom_init(void) #endif /* Hardcode to CPU UART 0 */ - setup_8250_early_printk_port(TO_UNCAC(LOONGSON_REG_BASE + 0x1e0), 0, 1024); + if ((read_c0_prid() & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R) + setup_8250_early_printk_port(TO_UNCAC(LOONGSON_REG_BASE), 0, 1024); + else + setup_8250_early_printk_port(TO_UNCAC(LOONGSON_REG_BASE + 0x1e0), 0, 1024); register_smp_ops(&loongson3_smp_ops); board_nmi_handler_setup = mips_nmi_setup; @@ -126,7 +141,7 @@ static int __init add_legacy_isa_io(struct fwnode_handle *fwnode, resource_size_ return -ENOMEM; range->fwnode = fwnode; - range->size = size; + range->size = size = round_up(size, PAGE_SIZE); range->hw_start = hw_start; range->flags = LOGIC_PIO_CPU_MMIO; diff --git a/arch/mips/loongson64/numa.c b/arch/mips/loongson64/numa.c index a8f57bf01285..8315c871c435 100644 --- a/arch/mips/loongson64/numa.c +++ b/arch/mips/loongson64/numa.c @@ -27,7 +27,6 @@ #include <boot_param.h> #include <loongson.h> -static struct pglist_data prealloc__node_data[MAX_NUMNODES]; unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES]; EXPORT_SYMBOL(__node_distances); struct pglist_data *__node_data[MAX_NUMNODES]; @@ -84,8 +83,12 @@ static void __init init_topology_matrix(void) static void __init node_mem_init(unsigned int node) { + struct pglist_data *nd; unsigned long node_addrspace_offset; unsigned long start_pfn, end_pfn; + unsigned long nd_pa; + int tnid; + const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES); node_addrspace_offset = nid_to_addrbase(node); pr_info("Node%d's addrspace_offset is 0x%lx\n", @@ -95,8 +98,16 @@ static void __init node_mem_init(unsigned int node) pr_info("Node%d: start_pfn=0x%lx, end_pfn=0x%lx\n", node, start_pfn, end_pfn); - __node_data[node] = prealloc__node_data + node; - + nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, node); + if (!nd_pa) + panic("Cannot allocate %zu bytes for node %d data\n", + nd_size, node); + nd = __va(nd_pa); + memset(nd, 0, 
sizeof(struct pglist_data)); + tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); + if (tnid != node) + pr_info("NODE_DATA(%d) on node %d\n", node, tnid); + __node_data[node] = nd; NODE_DATA(node)->node_start_pfn = start_pfn; NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn; diff --git a/arch/mips/loongson64/platform.c b/arch/mips/loongson64/platform.c deleted file mode 100644 index 9674ae1361a8..000000000000 --- a/arch/mips/loongson64/platform.c +++ /dev/null @@ -1,42 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright (C) 2009 Lemote Inc. - * Author: Wu Zhangjin, wuzhangjin@gmail.com - * Xiang Yu, xiangy@lemote.com - * Chen Huacai, chenhc@lemote.com - */ - -#include <linux/err.h> -#include <linux/slab.h> -#include <linux/platform_device.h> -#include <asm/bootinfo.h> -#include <boot_param.h> -#include <loongson_hwmon.h> -#include <workarounds.h> - -static int __init loongson3_platform_init(void) -{ - int i; - struct platform_device *pdev; - - if (loongson_sysconf.ecname[0] != '\0') - platform_device_register_simple(loongson_sysconf.ecname, -1, NULL, 0); - - for (i = 0; i < loongson_sysconf.nr_sensors; i++) { - if (loongson_sysconf.sensors[i].type > SENSOR_FAN) - continue; - - pdev = kzalloc(sizeof(struct platform_device), GFP_KERNEL); - if (!pdev) - return -ENOMEM; - - pdev->name = loongson_sysconf.sensors[i].name; - pdev->id = loongson_sysconf.sensors[i].id; - pdev->dev.platform_data = &loongson_sysconf.sensors[i]; - platform_device_register(pdev); - } - - return 0; -} - -arch_initcall(loongson3_platform_init); diff --git a/arch/mips/loongson64/reset.c b/arch/mips/loongson64/reset.c index 3bb8a1ed9348..c97bfdc8c922 100644 --- a/arch/mips/loongson64/reset.c +++ b/arch/mips/loongson64/reset.c @@ -6,9 +6,14 @@ * Copyright (C) 2009 Lemote, Inc. * Author: Zhangjin Wu, wuzhangjin@gmail.com */ +#include <linux/cpu.h> +#include <linux/delay.h> #include <linux/init.h> +#include <linux/kexec.h> #include <linux/pm.h> +#include <linux/slab.h> +#include <asm/bootinfo.h> #include <asm/idle.h> #include <asm/reboot.h> @@ -47,12 +52,120 @@ static void loongson_halt(void) } } +#ifdef CONFIG_KEXEC + +/* 0X80000000~0X80200000 is safe */ +#define MAX_ARGS 64 +#define KEXEC_CTRL_CODE 0xFFFFFFFF80100000UL +#define KEXEC_ARGV_ADDR 0xFFFFFFFF80108000UL +#define KEXEC_ARGV_SIZE COMMAND_LINE_SIZE +#define KEXEC_ENVP_SIZE 4800 + +static int kexec_argc; +static int kdump_argc; +static void *kexec_argv; +static void *kdump_argv; +static void *kexec_envp; + +static int loongson_kexec_prepare(struct kimage *image) +{ + int i, argc = 0; + unsigned int *argv; + char *str, *ptr, *bootloader = "kexec"; + + /* argv at offset 0, argv[] at offset KEXEC_ARGV_SIZE/2 */ + if (image->type == KEXEC_TYPE_DEFAULT) + argv = (unsigned int *)kexec_argv; + else + argv = (unsigned int *)kdump_argv; + + argv[argc++] = (unsigned int)(KEXEC_ARGV_ADDR + KEXEC_ARGV_SIZE/2); + + for (i = 0; i < image->nr_segments; i++) { + if (!strncmp(bootloader, (char *)image->segment[i].buf, + strlen(bootloader))) { + /* + * convert command line string to array + * of parameters (as bootloader does). 
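The comment here notes that the kexec command line string is converted into an array of parameters, the same way a bootloader would. A compact sketch of that tokenization step in plain C (buffer handling is simplified; the real loongson_kexec_prepare() also relocates the strings into a fixed low-memory window for the next kernel):

/*
 * Split a command line into argc/argv in place: every run of spaces is
 * turned into string terminators and each following non-space character
 * starts a new argument. Simplified sketch for illustration.
 */
static int example_cmdline_to_argv(char *str, char **argv, int max_args)
{
        int argc = 0;
        char *p = str;

        while (*p && argc < max_args) {
                while (*p == ' ')
                        *p++ = '\0';            /* terminate the previous argument */
                if (*p)
                        argv[argc++] = p;       /* start of the next argument */
                while (*p && *p != ' ')
                        p++;
        }
        return argc;
}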
+ */ + int offt; + str = (char *)argv + KEXEC_ARGV_SIZE/2; + memcpy(str, image->segment[i].buf, KEXEC_ARGV_SIZE/2); + ptr = strchr(str, ' '); + + while (ptr && (argc < MAX_ARGS)) { + *ptr = '\0'; + if (ptr[1] != ' ') { + offt = (int)(ptr - str + 1); + argv[argc] = KEXEC_ARGV_ADDR + KEXEC_ARGV_SIZE/2 + offt; + argc++; + } + ptr = strchr(ptr + 1, ' '); + } + break; + } + } + + if (image->type == KEXEC_TYPE_DEFAULT) + kexec_argc = argc; + else + kdump_argc = argc; + + /* kexec/kdump need a safe page to save reboot_code_buffer */ + image->control_code_page = virt_to_page((void *)KEXEC_CTRL_CODE); + + return 0; +} + +static void loongson_kexec_shutdown(void) +{ +#ifdef CONFIG_SMP + int cpu; + + /* All CPUs go to reboot_code_buffer */ + for_each_possible_cpu(cpu) + if (!cpu_online(cpu)) + cpu_device_up(get_cpu_device(cpu)); +#endif + kexec_args[0] = kexec_argc; + kexec_args[1] = fw_arg1; + kexec_args[2] = fw_arg2; + secondary_kexec_args[0] = TO_UNCAC(0x3ff01000); + memcpy((void *)fw_arg1, kexec_argv, KEXEC_ARGV_SIZE); + memcpy((void *)fw_arg2, kexec_envp, KEXEC_ENVP_SIZE); +} + +static void loongson_crash_shutdown(struct pt_regs *regs) +{ + default_machine_crash_shutdown(regs); + kexec_args[0] = kdump_argc; + kexec_args[1] = fw_arg1; + kexec_args[2] = fw_arg2; + secondary_kexec_args[0] = TO_UNCAC(0x3ff01000); + memcpy((void *)fw_arg1, kdump_argv, KEXEC_ARGV_SIZE); + memcpy((void *)fw_arg2, kexec_envp, KEXEC_ENVP_SIZE); +} + +#endif + static int __init mips_reboot_setup(void) { _machine_restart = loongson_restart; _machine_halt = loongson_halt; pm_power_off = loongson_poweroff; +#ifdef CONFIG_KEXEC + kexec_argv = kmalloc(KEXEC_ARGV_SIZE, GFP_KERNEL); + kdump_argv = kmalloc(KEXEC_ARGV_SIZE, GFP_KERNEL); + kexec_envp = kmalloc(KEXEC_ENVP_SIZE, GFP_KERNEL); + fw_arg1 = KEXEC_ARGV_ADDR; + memcpy(kexec_envp, (void *)fw_arg2, KEXEC_ENVP_SIZE); + + _machine_kexec_prepare = loongson_kexec_prepare; + _machine_kexec_shutdown = loongson_kexec_shutdown; + _machine_crash_shutdown = loongson_crash_shutdown; +#endif + return 0; } diff --git a/arch/mips/loongson64/time.c b/arch/mips/loongson64/time.c index 91e842b58365..f6d2c1e30570 100644 --- a/arch/mips/loongson64/time.c +++ b/arch/mips/loongson64/time.c @@ -11,9 +11,33 @@ #include <asm/hpet.h> #include <loongson.h> +#include <linux/clk.h> +#include <linux/of_clk.h> void __init plat_time_init(void) { + struct clk *clk; + struct device_node *np; + + if (loongson_sysconf.fw_interface == LOONGSON_DTB) { + of_clk_init(NULL); + + np = of_get_cpu_node(0, NULL); + if (!np) { + pr_err("Failed to get CPU node\n"); + return; + } + + clk = of_clk_get(np, 0); + if (IS_ERR(clk)) { + pr_err("Failed to get CPU clock: %ld\n", PTR_ERR(clk)); + return; + } + + cpu_clock_freq = clk_get_rate(clk); + clk_put(clk); + } + /* setup mips r4k timer */ mips_hpt_frequency = cpu_clock_freq / 2; diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile index 865926a37775..4acc4f3d31f8 100644 --- a/arch/mips/mm/Makefile +++ b/arch/mips/mm/Makefile @@ -22,6 +22,10 @@ else obj-y += uasm-mips.o endif +ifndef CONFIG_EVA +obj-y += maccess.o +endif + obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o obj-$(CONFIG_64BIT) += ioremap64.o pgtable-64.o obj-$(CONFIG_HIGHMEM) += highmem.o @@ -40,3 +44,5 @@ obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o obj-$(CONFIG_SCACHE_DEBUGFS) += sc-debugfs.o + +obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o diff --git a/arch/mips/mm/maccess.c b/arch/mips/mm/maccess.c new file mode 100644 
index 000000000000..58173842c6be --- /dev/null +++ b/arch/mips/mm/maccess.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/uaccess.h> +#include <linux/kernel.h> + +bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size) +{ + /* highest bit set means kernel space */ + return (unsigned long)unsafe_src >> (BITS_PER_LONG - 1); +} diff --git a/arch/mips/mm/physaddr.c b/arch/mips/mm/physaddr.c new file mode 100644 index 000000000000..a1ced5e44951 --- /dev/null +++ b/arch/mips/mm/physaddr.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bug.h> +#include <linux/export.h> +#include <linux/types.h> +#include <linux/mmdebug.h> +#include <linux/mm.h> + +#include <asm/sections.h> +#include <asm/io.h> +#include <asm/page.h> +#include <asm/dma.h> + +static inline bool __debug_virt_addr_valid(unsigned long x) +{ + /* high_memory does not get immediately defined, and there + * are early callers of __pa() against PAGE_OFFSET + */ + if (!high_memory && x >= PAGE_OFFSET) + return true; + + if (high_memory && x >= PAGE_OFFSET && x < (unsigned long)high_memory) + return true; + + /* + * MAX_DMA_ADDRESS is a virtual address that may not correspond to an + * actual physical address. Enough code relies on + * virt_to_phys(MAX_DMA_ADDRESS) that we just need to work around it + * and always return true. + */ + if (x == MAX_DMA_ADDRESS) + return true; + + return false; +} + +phys_addr_t __virt_to_phys(volatile const void *x) +{ + WARN(!__debug_virt_addr_valid((unsigned long)x), + "virt_to_phys used for non-linear address: %pK (%pS)\n", + x, x); + + return __virt_to_phys_nodebug(x); +} +EXPORT_SYMBOL(__virt_to_phys); + +phys_addr_t __phys_addr_symbol(unsigned long x) +{ + /* This is bounds checking against the kernel image only. + * __pa_symbol should only be used on kernel symbol addresses. + */ + VIRTUAL_BUG_ON(x < (unsigned long)_text || + x > (unsigned long)_end); + + return __pa_symbol_nodebug(x); +} +EXPORT_SYMBOL(__phys_addr_symbol); diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 0fb1db8a8ef7..cd4afcdf3725 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -849,8 +849,8 @@ void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, /* Clear lower 23 bits of context. 
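maccess.c and physaddr.c above back the new obj-$(CONFIG_DEBUG_VIRTUAL) entry in the mm Makefile: with CONFIG_DEBUG_VIRTUAL enabled, virt_to_phys() on an address outside the linear map now warns instead of silently returning a bogus physical address, and __pa_symbol() is bounds-checked against the kernel image. A small usage sketch, assuming an illustrative function that is not part of the patch:

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/printk.h>

static void example_debug_virtual(void)
{
        void *linear = kmalloc(64, GFP_KERNEL); /* linear-mapped memory */
        void *mapped = vmalloc(64);             /* not in the linear map */
        phys_addr_t ok, bad;

        if (linear && mapped) {
                ok  = virt_to_phys(linear);     /* valid translation */
                bad = virt_to_phys(mapped);     /* expected to WARN with DEBUG_VIRTUAL */
                pr_info("linear %pa, vmalloc %pa\n", &ok, &bad);
        }
        kfree(linear);
        vfree(mapped);
}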
*/ uasm_i_dins(p, ptr, 0, 0, 23); - /* 1 0 1 0 1 << 6 xkphys cached */ - uasm_i_ori(p, ptr, ptr, 0x540); + /* insert bit[63:59] of CAC_BASE into bit[11:6] of ptr */ + uasm_i_ori(p, ptr, ptr, ((u64)(CAC_BASE) >> 53)); uasm_i_drotr(p, ptr, ptr, 11); #elif defined(CONFIG_SMP) UASM_i_CPUID_MFC0(p, ptr, SMP_CPUID_REG); @@ -1165,8 +1165,9 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, if (pgd_reg == -1) { vmalloc_branch_delay_filled = 1; - /* 1 0 1 0 1 << 6 xkphys cached */ - uasm_i_ori(p, ptr, ptr, 0x540); + /* insert bit[63:59] of CAC_BASE into bit[11:6] of ptr */ + uasm_i_ori(p, ptr, ptr, ((u64)(CAC_BASE) >> 53)); + uasm_i_drotr(p, ptr, ptr, 11); } diff --git a/arch/mips/mti-malta/Platform b/arch/mips/mti-malta/Platform index 41e0d2a2d325..f4616934d950 100644 --- a/arch/mips/mti-malta/Platform +++ b/arch/mips/mti-malta/Platform @@ -2,9 +2,5 @@ # MIPS Malta board # cflags-$(CONFIG_MIPS_MALTA) += -I$(srctree)/arch/mips/include/asm/mach-malta -ifdef CONFIG_KVM_GUEST - load-$(CONFIG_MIPS_MALTA) += 0x0000000040100000 -else - load-$(CONFIG_MIPS_MALTA) += 0xffffffff80100000 -endif +load-$(CONFIG_MIPS_MALTA) += 0xffffffff80100000 all-$(CONFIG_MIPS_MALTA) := $(COMPRESSION_FNAME).bin diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c index 567720374d57..bbf1e38e1431 100644 --- a/arch/mips/mti-malta/malta-time.c +++ b/arch/mips/mti-malta/malta-time.c @@ -66,11 +66,6 @@ static void __init estimate_frequencies(void) int secs; u64 giccount = 0, gicstart = 0; -#if defined(CONFIG_KVM_GUEST) && CONFIG_KVM_GUEST_TIMER_FREQ - mips_hpt_frequency = CONFIG_KVM_GUEST_TIMER_FREQ * 1000000; - return; -#endif - local_irq_save(flags); if (mips_gic_present()) diff --git a/arch/mips/pci/pci-ar2315.c b/arch/mips/pci/pci-ar2315.c index f741b8c528e4..c1a655aee599 100644 --- a/arch/mips/pci/pci-ar2315.c +++ b/arch/mips/pci/pci-ar2315.c @@ -31,6 +31,7 @@ #include <linux/platform_device.h> #include <linux/kernel.h> #include <linux/init.h> +#include <linux/dma-direct.h> #include <linux/mm.h> #include <linux/delay.h> #include <linux/bitops.h> diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c index 39052de915f3..468722c8a5c6 100644 --- a/arch/mips/pci/pci-legacy.c +++ b/arch/mips/pci/pci-legacy.c @@ -89,7 +89,6 @@ static void pcibios_scanbus(struct pci_controller *hose) hose->mem_resource, hose->mem_offset); pci_add_resource_offset(&resources, hose->io_resource, hose->io_offset); - pci_add_resource(&resources, hose->busn_resource); list_splice_init(&resources, &bridge->windows); bridge->dev.parent = NULL; bridge->sysdata = hose; @@ -140,7 +139,6 @@ void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node) struct of_pci_range range; struct of_pci_range_parser parser; - pr_info("PCI host bridge %pOF ranges:\n", node); hose->of_node = node; if (of_pci_range_parser_init(&parser, node)) @@ -151,23 +149,22 @@ void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node) switch (range.flags & IORESOURCE_TYPE_BITS) { case IORESOURCE_IO: - pr_info(" IO 0x%016llx..0x%016llx\n", - range.cpu_addr, - range.cpu_addr + range.size - 1); hose->io_map_base = (unsigned long)ioremap(range.cpu_addr, range.size); res = hose->io_resource; break; case IORESOURCE_MEM: - pr_info(" MEM 0x%016llx..0x%016llx\n", - range.cpu_addr, - range.cpu_addr + range.size - 1); res = hose->mem_resource; break; } - if (res != NULL) - of_pci_range_to_resource(&range, node, res); + if (res != NULL) { + res->name = node->full_name; + res->flags = range.flags; + 
res->start = range.cpu_addr; + res->end = range.cpu_addr + range.size - 1; + res->parent = res->child = res->sibling = NULL; + } } } @@ -252,7 +249,7 @@ static int pcibios_enable_resources(struct pci_dev *dev, int mask) pci_read_config_word(dev, PCI_COMMAND, &cmd); old_cmd = cmd; - for (idx=0; idx < PCI_NUM_RESOURCES; idx++) { + for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) { /* Only set up the requested stuff */ if (!(mask & (1<<idx))) continue; @@ -282,9 +279,9 @@ static int pcibios_enable_resources(struct pci_dev *dev, int mask) int pcibios_enable_device(struct pci_dev *dev, int mask) { - int err; + int err = pcibios_enable_resources(dev, mask); - if ((err = pcibios_enable_resources(dev, mask)) < 0) + if (err < 0) return err; return pcibios_plat_dev_init(dev); diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c index d36061603752..e032932348d6 100644 --- a/arch/mips/pci/pci-mt7620.c +++ b/arch/mips/pci/pci-mt7620.c @@ -30,6 +30,7 @@ #define RALINK_GPIOMODE 0x60 #define PPLL_CFG1 0x9c +#define PPLL_LD BIT(23) #define PPLL_DRV 0xa0 #define PDRV_SW_SET BIT(31) @@ -239,8 +240,8 @@ static int mt7620_pci_hw_init(struct platform_device *pdev) rt_sysc_m32(0, RALINK_PCIE0_CLK_EN, RALINK_CLKCFG1); mdelay(100); - if (!(rt_sysc_r32(PPLL_CFG1) & PDRV_SW_SET)) { - dev_err(&pdev->dev, "MT7620 PPLL unlock\n"); + if (!(rt_sysc_r32(PPLL_CFG1) & PPLL_LD)) { + dev_err(&pdev->dev, "pcie PLL not locked, aborting init\n"); reset_control_assert(rstpcie0); rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1); return -1; diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c index e1f12e398136..e9dd01431f21 100644 --- a/arch/mips/pci/pci-rt2880.c +++ b/arch/mips/pci/pci-rt2880.c @@ -41,7 +41,6 @@ #define RT2880_PCI_REG_ARBCTL 0x80 static void __iomem *rt2880_pci_base; -static DEFINE_SPINLOCK(rt2880_pci_lock); static u32 rt2880_pci_reg_read(u32 reg) { @@ -63,17 +62,14 @@ static inline u32 rt2880_pci_get_cfgaddr(unsigned int bus, unsigned int slot, static int rt2880_pci_config_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { - unsigned long flags; u32 address; u32 data; address = rt2880_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), where); - spin_lock_irqsave(&rt2880_pci_lock, flags); rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR); data = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA); - spin_unlock_irqrestore(&rt2880_pci_lock, flags); switch (size) { case 1: @@ -93,14 +89,12 @@ static int rt2880_pci_config_read(struct pci_bus *bus, unsigned int devfn, static int rt2880_pci_config_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { - unsigned long flags; u32 address; u32 data; address = rt2880_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), where); - spin_lock_irqsave(&rt2880_pci_lock, flags); rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR); data = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA); @@ -119,7 +113,6 @@ static int rt2880_pci_config_write(struct pci_bus *bus, unsigned int devfn, } rt2880_pci_reg_write(data, RT2880_PCI_REG_CONFIG_DATA); - spin_unlock_irqrestore(&rt2880_pci_lock, flags); return PCIBIOS_SUCCESSFUL; } @@ -151,36 +144,29 @@ static struct pci_controller rt2880_pci_controller = { static inline u32 rt2880_pci_read_u32(unsigned long reg) { - unsigned long flags; u32 address; u32 ret; address = rt2880_pci_get_cfgaddr(0, 0, 0, reg); - spin_lock_irqsave(&rt2880_pci_lock, flags); rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR); ret = 
rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA); - spin_unlock_irqrestore(&rt2880_pci_lock, flags); return ret; } static inline void rt2880_pci_write_u32(unsigned long reg, u32 val) { - unsigned long flags; u32 address; address = rt2880_pci_get_cfgaddr(0, 0, 0, reg); - spin_lock_irqsave(&rt2880_pci_lock, flags); rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR); rt2880_pci_reg_write(val, RT2880_PCI_REG_CONFIG_DATA); - spin_unlock_irqrestore(&rt2880_pci_lock, flags); } int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { - u16 cmd; int irq = -1; if (dev->bus->number != 0) @@ -188,8 +174,6 @@ int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) switch (PCI_SLOT(dev->devfn)) { case 0x00: - rt2880_pci_write_u32(PCI_BASE_ADDRESS_0, 0x08000000); - (void) rt2880_pci_read_u32(PCI_BASE_ADDRESS_0); break; case 0x11: irq = RT288X_CPU_IRQ_PCI; @@ -201,16 +185,6 @@ int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) break; } - pci_write_config_byte((struct pci_dev *) dev, - PCI_CACHE_LINE_SIZE, 0x14); - pci_write_config_byte((struct pci_dev *) dev, PCI_LATENCY_TIMER, 0xFF); - pci_read_config_word((struct pci_dev *) dev, PCI_COMMAND, &cmd); - cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY | - PCI_COMMAND_INVALIDATE | PCI_COMMAND_FAST_BACK | - PCI_COMMAND_SERR | PCI_COMMAND_WAIT | PCI_COMMAND_PARITY; - pci_write_config_word((struct pci_dev *) dev, PCI_COMMAND, cmd); - pci_write_config_byte((struct pci_dev *) dev, PCI_INTERRUPT_LINE, - dev->irq); return irq; } @@ -251,6 +225,30 @@ static int rt288x_pci_probe(struct platform_device *pdev) int pcibios_plat_dev_init(struct pci_dev *dev) { + static bool slot0_init; + + /* + * Nobody seems to initialize slot 0, but this platform requires it, so + * do it once when some other slot is being enabled. The PCI subsystem + * should configure other slots properly, so no need to do anything + * special for those. 
+ */ + if (!slot0_init && dev->bus->number == 0) { + u16 cmd; + u32 bar0; + + slot0_init = true; + + pci_bus_write_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0, + 0x08000000); + pci_bus_read_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0, + &bar0); + + pci_bus_read_config_word(dev->bus, 0, PCI_COMMAND, &cmd); + cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY; + pci_bus_write_config_word(dev->bus, 0, PCI_COMMAND, cmd); + } + return 0; } diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c index 0ac6346026d0..aebd4964ea34 100644 --- a/arch/mips/pci/pci-rt3883.c +++ b/arch/mips/pci/pci-rt3883.c @@ -100,7 +100,6 @@ static u32 rt3883_pci_read_cfg32(struct rt3883_pci_controller *rpc, unsigned bus, unsigned slot, unsigned func, unsigned reg) { - unsigned long flags; u32 address; u32 ret; @@ -116,7 +115,6 @@ static void rt3883_pci_write_cfg32(struct rt3883_pci_controller *rpc, unsigned bus, unsigned slot, unsigned func, unsigned reg, u32 val) { - unsigned long flags; u32 address; address = rt3883_pci_get_cfgaddr(bus, slot, func, reg); @@ -229,7 +227,6 @@ static int rt3883_pci_config_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { struct rt3883_pci_controller *rpc; - unsigned long flags; u32 address; u32 data; @@ -263,7 +260,6 @@ static int rt3883_pci_config_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { struct rt3883_pci_controller *rpc; - unsigned long flags; u32 address; u32 data; @@ -435,8 +431,7 @@ static int rt3883_pci_probe(struct platform_device *pdev) if (!rpc->intc_of_node) { dev_err(dev, "%pOF has no %s child node", - rpc->intc_of_node, - "interrupt controller"); + np, "interrupt controller"); return -EINVAL; } @@ -450,8 +445,7 @@ static int rt3883_pci_probe(struct platform_device *pdev) if (!rpc->pci_controller.of_node) { dev_err(dev, "%pOF has no %s child node", - rpc->intc_of_node, - "PCI host bridge"); + np, "PCI host bridge"); err = -EINVAL; goto err_put_intc_node; } diff --git a/arch/mips/pci/pci-xtalk-bridge.c b/arch/mips/pci/pci-xtalk-bridge.c index 50f7d42cca5a..d2216942af18 100644 --- a/arch/mips/pci/pci-xtalk-bridge.c +++ b/arch/mips/pci/pci-xtalk-bridge.c @@ -385,7 +385,7 @@ static int bridge_domain_activate(struct irq_domain *domain, bridge_set(bc, b_int_enable, 0x7ffffe00); /* more stuff in int_enable */ /* - * Enable sending of an interrupt clear packt to the hub on a high to + * Enable sending of an interrupt clear packet to the hub on a high to * low transition of the interrupt pin. 
* * IRIX sets additional bits in the address which are documented as diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig index c10d8b233ab1..ec4daa63c5e3 100644 --- a/arch/mips/ralink/Kconfig +++ b/arch/mips/ralink/Kconfig @@ -26,6 +26,7 @@ choice config SOC_RT288X bool "RT288x" + select MIPS_AUTO_PFN_OFFSET select MIPS_L1_CACHE_SHIFT_4 select HAVE_LEGACY_CLK select HAVE_PCI diff --git a/arch/mips/ralink/clk.c b/arch/mips/ralink/clk.c index 2f9d5acb38ea..f0bcb1051c30 100644 --- a/arch/mips/ralink/clk.c +++ b/arch/mips/ralink/clk.c @@ -70,6 +70,20 @@ long clk_round_rate(struct clk *clk, unsigned long rate) } EXPORT_SYMBOL_GPL(clk_round_rate); +int clk_set_parent(struct clk *clk, struct clk *parent) +{ + WARN_ON(clk); + return -1; +} +EXPORT_SYMBOL_GPL(clk_set_parent); + +struct clk *clk_get_parent(struct clk *clk) +{ + WARN_ON(clk); + return NULL; +} +EXPORT_SYMBOL_GPL(clk_get_parent); + void __init plat_time_init(void) { struct clk *clk; diff --git a/arch/mips/ralink/common.h b/arch/mips/ralink/common.h index 4bc65b7a3241..87fc16751281 100644 --- a/arch/mips/ralink/common.h +++ b/arch/mips/ralink/common.h @@ -17,6 +17,7 @@ struct ralink_soc_info { unsigned long mem_size; unsigned long mem_size_min; unsigned long mem_size_max; + void (*mem_detect)(void); }; extern struct ralink_soc_info soc_info; @@ -27,7 +28,7 @@ extern void ralink_clk_add(const char *dev, unsigned long rate); extern void ralink_rst_init(void); -extern void prom_soc_init(struct ralink_soc_info *soc_info); +extern void __init prom_soc_init(struct ralink_soc_info *soc_info); __iomem void *plat_of_remap_node(const char *node); diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c index fcf010038054..53a5969e61af 100644 --- a/arch/mips/ralink/mt7620.c +++ b/arch/mips/ralink/mt7620.c @@ -639,7 +639,7 @@ mt7628_dram_init(struct ralink_soc_info *soc_info) } } -void prom_soc_init(struct ralink_soc_info *soc_info) +void __init prom_soc_init(struct ralink_soc_info *soc_info) { void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7620_SYSC_BASE); unsigned char *name = NULL; diff --git a/arch/mips/ralink/mt7621.c b/arch/mips/ralink/mt7621.c index 5d74fc1c96ac..f82ad2a621f6 100644 --- a/arch/mips/ralink/mt7621.c +++ b/arch/mips/ralink/mt7621.c @@ -9,7 +9,9 @@ #include <linux/init.h> #include <linux/slab.h> #include <linux/sys_soc.h> +#include <linux/memblock.h> +#include <asm/bootinfo.h> #include <asm/mipsregs.h> #include <asm/smp-ops.h> #include <asm/mips-cps.h> @@ -49,6 +51,8 @@ #define MT7621_GPIO_MODE_SDHCI_SHIFT 18 #define MT7621_GPIO_MODE_SDHCI_GPIO 1 +static void *detect_magic __initdata = detect_memory_region; + static struct rt2880_pmx_func uart1_grp[] = { FUNC("uart1", 0, 1, 2) }; static struct rt2880_pmx_func i2c_grp[] = { FUNC("i2c", 0, 3, 2) }; static struct rt2880_pmx_func uart3_grp[] = { @@ -110,6 +114,26 @@ phys_addr_t mips_cpc_default_phys_base(void) panic("Cannot detect cpc address"); } +static void __init mt7621_memory_detect(void) +{ + void *dm = &detect_magic; + phys_addr_t size; + + for (size = 32 * SZ_1M; size < 256 * SZ_1M; size <<= 1) { + if (!__builtin_memcmp(dm, dm + size, sizeof(detect_magic))) + break; + } + + if ((size == 256 * SZ_1M) && + (CPHYSADDR(dm + size) < MT7621_LOWMEM_MAX_SIZE) && + __builtin_memcmp(dm, dm + size, sizeof(detect_magic))) { + memblock_add(MT7621_LOWMEM_BASE, MT7621_LOWMEM_MAX_SIZE); + memblock_add(MT7621_HIGHMEM_BASE, MT7621_HIGHMEM_SIZE); + } else { + memblock_add(MT7621_LOWMEM_BASE, size); + } +} + void __init ralink_of_remap(void) { rt_sysc_membase = 
plat_of_remap_node("mediatek,mt7621-sysc"); @@ -146,7 +170,7 @@ static void soc_dev_init(struct ralink_soc_info *soc_info, u32 rev) } } -void prom_soc_init(struct ralink_soc_info *soc_info) +void __init prom_soc_init(struct ralink_soc_info *soc_info) { void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7621_SYSC_BASE); unsigned char *name = NULL; @@ -194,10 +218,7 @@ void prom_soc_init(struct ralink_soc_info *soc_info) (rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK, (rev & CHIP_REV_ECO_MASK)); - soc_info->mem_size_min = MT7621_DDR2_SIZE_MIN; - soc_info->mem_size_max = MT7621_DDR2_SIZE_MAX; - soc_info->mem_base = MT7621_DRAM_BASE; - + soc_info->mem_detect = mt7621_memory_detect; rt2880_pinmux_data = mt7621_pinmux_data; soc_dev_init(soc_info, rev); diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c index 8286c3521476..0c5de07da097 100644 --- a/arch/mips/ralink/of.c +++ b/arch/mips/ralink/of.c @@ -78,6 +78,8 @@ void __init plat_mem_setup(void) of_scan_flat_dt(early_init_dt_find_memory, NULL); if (memory_dtb) of_scan_flat_dt(early_init_dt_scan_memory, NULL); + else if (soc_info.mem_detect) + soc_info.mem_detect(); else if (soc_info.mem_size) memblock_add(soc_info.mem_base, soc_info.mem_size * SZ_1M); else diff --git a/arch/mips/ralink/rt288x.c b/arch/mips/ralink/rt288x.c index 3f096897858c..34083c70ec68 100644 --- a/arch/mips/ralink/rt288x.c +++ b/arch/mips/ralink/rt288x.c @@ -77,7 +77,7 @@ void __init ralink_of_remap(void) panic("Failed to remap core resources"); } -void prom_soc_init(struct ralink_soc_info *soc_info) +void __init prom_soc_init(struct ralink_soc_info *soc_info) { void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT2880_SYSC_BASE); const char *name; diff --git a/arch/mips/ralink/rt305x.c b/arch/mips/ralink/rt305x.c index 496f966c05f9..c5b63c142705 100644 --- a/arch/mips/ralink/rt305x.c +++ b/arch/mips/ralink/rt305x.c @@ -214,7 +214,7 @@ void __init ralink_of_remap(void) panic("Failed to remap core resources"); } -void prom_soc_init(struct ralink_soc_info *soc_info) +void __init prom_soc_init(struct ralink_soc_info *soc_info) { void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE); unsigned char *name; diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c index 8f3fe3106708..ff91f3531ad0 100644 --- a/arch/mips/ralink/rt3883.c +++ b/arch/mips/ralink/rt3883.c @@ -113,7 +113,7 @@ void __init ralink_of_remap(void) panic("Failed to remap core resources"); } -void prom_soc_init(struct ralink_soc_info *soc_info) +void __init prom_soc_init(struct ralink_soc_info *soc_info) { void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT3883_SYSC_BASE); const char *name; diff --git a/arch/mips/sgi-ip27/TODO b/arch/mips/sgi-ip27/TODO deleted file mode 100644 index 160857ff1483..000000000000 --- a/arch/mips/sgi-ip27/TODO +++ /dev/null @@ -1,19 +0,0 @@ -1. Need to figure out why PCI writes to the IOC3 hang, and if it is okay -not to write to the IOC3 ever. -2. Need to figure out RRB allocation in bridge_startup(). -3. Need to figure out why address swaizzling is needed in inw/outw for -Qlogic scsi controllers. -4. Need to integrate ip27-klconfig.c:find_lboard and -ip27-init.c:find_lbaord_real. DONE -5. Is it okay to set calias space on all nodes as 0, instead of 8k as -in irix? -6. Investigate why things do not work without the setup_test() call -being invoked on all nodes in ip27-memory.c. -8. Too many do_page_faults invoked - investigate. -9. start_thread must turn off UX64 ... and define tlb_refill_debug. -10. Need a bad pmd table, bad pte table. 
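mt7621_memory_detect(), added in the mt7621.c hunk further up, sizes DRAM by looking for an alias of a known location: it compares the bytes at detect_magic with the bytes at each power-of-two offset, and the first offset at which they match is where the address space wraps, i.e. the installed memory size. A self-contained sketch of the same aliasing idea, with an explicitly planted pattern (the probe window, pattern and bounds are illustrative assumptions):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/*
 * Probe power-of-two sizes between lo and hi: plant a pattern at the
 * start of the RAM window and report the first size at which the same
 * pattern is visible again (the alias). Illustrative only; real code
 * must use uncached accesses and a pattern unlikely to occur by chance.
 */
static size_t detect_ram_by_alias(volatile uint8_t *base, size_t lo, size_t hi)
{
        static const uint8_t magic[8] = {
                0xde, 0xad, 0xbe, 0xef, 0x4d, 0x49, 0x50, 0x53
        };
        size_t size;

        memcpy((void *)base, magic, sizeof(magic));

        for (size = lo; size < hi; size <<= 1) {
                if (!memcmp((void *)base, (void *)(base + size), sizeof(magic)))
                        break;          /* wrapped around: size bytes installed */
        }
        return size;    /* equals hi if no alias was found below the upper bound */
}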
__bad_pmd_table/__bad_pagetable -does not agree with pgd_bad/pmd_bad. -11. All intrs (ip27_do_irq handlers) are targeted at cpu A on the node. -This might need to change later. Only the timer intr is set up to be -received on both Cpu A and B. (ip27_do_irq()/bridge_startup()) -13. Cache flushing (specially the SMP version) has to be investigated. diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c index 79c434fece52..444b5e0e935f 100644 --- a/arch/mips/sgi-ip27/ip27-timer.c +++ b/arch/mips/sgi-ip27/ip27-timer.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copytight (C) 1999, 2000, 05, 06 Ralf Baechle (ralf@linux-mips.org) - * Copytight (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 1999, 2000, 05, 06 Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. */ #include <linux/bcd.h> #include <linux/clockchips.h> diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile index 2131d3fd7333..1b2ea34c3d3b 100644 --- a/arch/mips/vdso/Makefile +++ b/arch/mips/vdso/Makefile @@ -46,7 +46,7 @@ CFLAGS_vgettimeofday-o32.o = -include $(srctree)/$(src)/config-n32-o32-env.c -in CFLAGS_vgettimeofday-n32.o = -include $(srctree)/$(src)/config-n32-o32-env.c -include $(c-gettimeofday-y) endif -CFLAGS_REMOVE_vgettimeofday.o = -pg +CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) ifdef CONFIG_MIPS_DISABLE_VDSO ifndef CONFIG_MIPS_LD_CAN_LINK_VDSO @@ -60,7 +60,7 @@ ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \ $(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared \ -G 0 --eh-frame-hdr --hash-style=sysv --build-id=sha1 -T -CFLAGS_REMOVE_vdso.o = -pg +CFLAGS_REMOVE_vdso.o = $(CC_FLAGS_FTRACE) GCOV_PROFILE := n UBSAN_SANITIZE := n diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c index 835ece9c00f1..bd235833b687 100644 --- a/drivers/firmware/broadcom/bcm47xx_nvram.c +++ b/drivers/firmware/broadcom/bcm47xx_nvram.c @@ -34,26 +34,46 @@ static char nvram_buf[NVRAM_SPACE]; static size_t nvram_len; static const u32 nvram_sizes[] = {0x6000, 0x8000, 0xF000, 0x10000}; -static u32 find_nvram_size(void __iomem *end) +/** + * bcm47xx_nvram_is_valid - check for a valid NVRAM at specified memory + */ +static bool bcm47xx_nvram_is_valid(void __iomem *nvram) { - struct nvram_header __iomem *header; - int i; + return ((struct nvram_header *)nvram)->magic == NVRAM_MAGIC; +} + +/** + * bcm47xx_nvram_copy - copy NVRAM to internal buffer + */ +static void bcm47xx_nvram_copy(void __iomem *nvram_start, size_t res_size) +{ + struct nvram_header __iomem *header = nvram_start; + size_t copy_size; - for (i = 0; i < ARRAY_SIZE(nvram_sizes); i++) { - header = (struct nvram_header *)(end - nvram_sizes[i]); - if (header->magic == NVRAM_MAGIC) - return nvram_sizes[i]; + copy_size = header->len; + if (copy_size > res_size) { + pr_err("The nvram size according to the header seems to be bigger than the partition on flash\n"); + copy_size = res_size; + } + if (copy_size >= NVRAM_SPACE) { + pr_err("nvram on flash (%zu bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n", + copy_size, NVRAM_SPACE - 1); + copy_size = NVRAM_SPACE - 1; } - return 0; + __ioread32_copy(nvram_buf, nvram_start, DIV_ROUND_UP(copy_size, 4)); + nvram_buf[NVRAM_SPACE - 1] = '\0'; + nvram_len = copy_size; } -/* Probe for NVRAM header */ -static int nvram_find_and_copy(void __iomem *iobase, u32 lim) +/** + * bcm47xx_nvram_find_and_copy - find NVRAM on flash mapping & copy it + */ +static int 
bcm47xx_nvram_find_and_copy(void __iomem *flash_start, size_t res_size) { - struct nvram_header __iomem *header; - u32 off; - u32 size; + size_t flash_size; + size_t offset; + int i; if (nvram_len) { pr_warn("nvram already initialized\n"); @@ -61,49 +81,31 @@ static int nvram_find_and_copy(void __iomem *iobase, u32 lim) } /* TODO: when nvram is on nand flash check for bad blocks first. */ - off = FLASH_MIN; - while (off <= lim) { - /* Windowed flash access */ - size = find_nvram_size(iobase + off); - if (size) { - header = (struct nvram_header *)(iobase + off - size); - goto found; + + /* Try every possible flash size and check for NVRAM at its end */ + for (flash_size = FLASH_MIN; flash_size <= res_size; flash_size <<= 1) { + for (i = 0; i < ARRAY_SIZE(nvram_sizes); i++) { + offset = flash_size - nvram_sizes[i]; + if (bcm47xx_nvram_is_valid(flash_start + offset)) + goto found; } - off <<= 1; } /* Try embedded NVRAM at 4 KB and 1 KB as last resorts */ - header = (struct nvram_header *)(iobase + 4096); - if (header->magic == NVRAM_MAGIC) { - size = NVRAM_SPACE; + + offset = 4096; + if (bcm47xx_nvram_is_valid(flash_start + offset)) goto found; - } - header = (struct nvram_header *)(iobase + 1024); - if (header->magic == NVRAM_MAGIC) { - size = NVRAM_SPACE; + offset = 1024; + if (bcm47xx_nvram_is_valid(flash_start + offset)) goto found; - } pr_err("no nvram found\n"); return -ENXIO; found: - __ioread32_copy(nvram_buf, header, sizeof(*header) / 4); - nvram_len = ((struct nvram_header *)(nvram_buf))->len; - if (nvram_len > size) { - pr_err("The nvram size according to the header seems to be bigger than the partition on flash\n"); - nvram_len = size; - } - if (nvram_len >= NVRAM_SPACE) { - pr_err("nvram on flash (%zu bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n", - nvram_len, NVRAM_SPACE - 1); - nvram_len = NVRAM_SPACE - 1; - } - /* proceed reading data after header */ - __ioread32_copy(nvram_buf + sizeof(*header), header + 1, - DIV_ROUND_UP(nvram_len, 4)); - nvram_buf[NVRAM_SPACE - 1] = '\0'; + bcm47xx_nvram_copy(flash_start + offset, res_size - offset); return 0; } @@ -124,7 +126,7 @@ int bcm47xx_nvram_init_from_mem(u32 base, u32 lim) if (!iobase) return -ENOMEM; - err = nvram_find_and_copy(iobase, lim); + err = bcm47xx_nvram_find_and_copy(iobase, lim); iounmap(iobase); diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c index 09b91b81851c..8ccb30421806 100644 --- a/drivers/irqchip/irq-loongson-liointc.c +++ b/drivers/irqchip/irq-loongson-liointc.c @@ -16,10 +16,11 @@ #include <linux/smp.h> #include <linux/irqchip/chained_irq.h> -#include <boot_param.h> +#include <loongson.h> #define LIOINTC_CHIP_IRQ 32 #define LIOINTC_NUM_PARENT 4 +#define LIOINTC_NUM_CORES 4 #define LIOINTC_INTC_CHIP_START 0x20 @@ -42,6 +43,7 @@ struct liointc_handler_data { struct liointc_priv { struct irq_chip_generic *gc; struct liointc_handler_data handler[LIOINTC_NUM_PARENT]; + void __iomem *core_isr[LIOINTC_NUM_CORES]; u8 map_cache[LIOINTC_CHIP_IRQ]; bool has_lpc_irq_errata; }; @@ -51,11 +53,12 @@ static void liointc_chained_handle_irq(struct irq_desc *desc) struct liointc_handler_data *handler = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); struct irq_chip_generic *gc = handler->priv->gc; + int core = get_ebase_cpunum() % LIOINTC_NUM_CORES; u32 pending; chained_irq_enter(chip, desc); - pending = readl(gc->reg_base + LIOINTC_REG_INTC_STATUS); + pending = readl(handler->priv->core_isr[core]); if 
(!pending) { /* Always blame LPC IRQ if we have that bug */ @@ -141,6 +144,18 @@ static void liointc_resume(struct irq_chip_generic *gc) } static const char * const parent_names[] = {"int0", "int1", "int2", "int3"}; +static const char * const core_reg_names[] = {"isr0", "isr1", "isr2", "isr3"}; + +static void __iomem *liointc_get_reg_byname(struct device_node *node, + const char *name) +{ + int index = of_property_match_string(node, "reg-names", name); + + if (index < 0) + return NULL; + + return of_iomap(node, index); +} static int __init liointc_of_init(struct device_node *node, struct device_node *parent) @@ -159,10 +174,28 @@ static int __init liointc_of_init(struct device_node *node, if (!priv) return -ENOMEM; - base = of_iomap(node, 0); - if (!base) { - err = -ENODEV; - goto out_free_priv; + if (of_device_is_compatible(node, "loongson,liointc-2.0")) { + base = liointc_get_reg_byname(node, "main"); + if (!base) { + err = -ENODEV; + goto out_free_priv; + } + + for (i = 0; i < LIOINTC_NUM_CORES; i++) + priv->core_isr[i] = liointc_get_reg_byname(node, core_reg_names[i]); + if (!priv->core_isr[0]) { + err = -ENODEV; + goto out_iounmap_base; + } + } else { + base = of_iomap(node, 0); + if (!base) { + err = -ENODEV; + goto out_free_priv; + } + + for (i = 0; i < LIOINTC_NUM_CORES; i++) + priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS; } for (i = 0; i < LIOINTC_NUM_PARENT; i++) { @@ -172,7 +205,7 @@ static int __init liointc_of_init(struct device_node *node, } if (!have_parent) { err = -ENODEV; - goto out_iounmap; + goto out_iounmap_isr; } sz = of_property_read_variable_u32_array(node, @@ -183,7 +216,7 @@ static int __init liointc_of_init(struct device_node *node, if (sz < 4) { pr_err("loongson-liointc: No parent_int_map\n"); err = -ENODEV; - goto out_iounmap; + goto out_iounmap_isr; } for (i = 0; i < LIOINTC_NUM_PARENT; i++) @@ -195,7 +228,7 @@ static int __init liointc_of_init(struct device_node *node, if (!domain) { pr_err("loongson-liointc: cannot add IRQ domain\n"); err = -EINVAL; - goto out_iounmap; + goto out_iounmap_isr; } err = irq_alloc_domain_generic_chips(domain, 32, 1, @@ -260,7 +293,13 @@ static int __init liointc_of_init(struct device_node *node, out_free_domain: irq_domain_remove(domain); -out_iounmap: +out_iounmap_isr: + for (i = 0; i < LIOINTC_NUM_CORES; i++) { + if (!priv->core_isr[i]) + continue; + iounmap(priv->core_isr[i]); + } +out_iounmap_base: iounmap(base); out_free_priv: kfree(priv); @@ -270,3 +309,4 @@ out_free_priv: IRQCHIP_DECLARE(loongson_liointc_1_0, "loongson,liointc-1.0", liointc_of_init); IRQCHIP_DECLARE(loongson_liointc_1_0a, "loongson,liointc-1.0a", liointc_of_init); +IRQCHIP_DECLARE(loongson_liointc_2_0, "loongson,liointc-2.0", liointc_of_init); diff --git a/include/asm-generic/div64.h b/include/asm-generic/div64.h index a3b98c86f077..cd905b44a630 100644 --- a/include/asm-generic/div64.h +++ b/include/asm-generic/div64.h @@ -8,12 +8,14 @@ * Optimization for constant divisors on 32-bit machines: * Copyright (C) 2006-2015 Nicolas Pitre * - * The semantics of do_div() are: + * The semantics of do_div() is, in C++ notation, observing that the name + * is a function-like macro and the n parameter has the semantics of a C++ + * reference: * - * uint32_t do_div(uint64_t *n, uint32_t base) + * uint32_t do_div(uint64_t &n, uint32_t base) * { - * uint32_t remainder = *n % base; - * *n = *n / base; + * uint32_t remainder = n % base; + * n = n / base; * return remainder; * } * diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 5918e1686736..5443da4e5e8a 
100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -2066,6 +2066,16 @@ config TEST_SORT If unsure, say N. +config TEST_DIV64 + tristate "64bit/32bit division and modulo test" + depends on DEBUG_KERNEL || m + help + Enable this to turn on 'do_div()' function test. This test is + executed only once during system boot (so affects only boot time), + or at module load time. + + If unsure, say N. + config KPROBES_SANITY_TEST bool "Kprobes sanity tests" depends on DEBUG_KERNEL diff --git a/lib/math/Makefile b/lib/math/Makefile index be6909e943bd..7456edb864fc 100644 --- a/lib/math/Makefile +++ b/lib/math/Makefile @@ -4,3 +4,5 @@ obj-y += div64.o gcd.o lcm.o int_pow.o int_sqrt.o reciprocal_div.o obj-$(CONFIG_CORDIC) += cordic.o obj-$(CONFIG_PRIME_NUMBERS) += prime_numbers.o obj-$(CONFIG_RATIONAL) += rational.o + +obj-$(CONFIG_TEST_DIV64) += test_div64.o diff --git a/lib/math/test_div64.c b/lib/math/test_div64.c new file mode 100644 index 000000000000..c15edd688dd2 --- /dev/null +++ b/lib/math/test_div64.c @@ -0,0 +1,249 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Maciej W. Rozycki + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/init.h> +#include <linux/ktime.h> +#include <linux/module.h> +#include <linux/printk.h> +#include <linux/time64.h> +#include <linux/types.h> + +#include <asm/div64.h> + +#define TEST_DIV64_N_ITER 1024 + +static const u64 test_div64_dividends[] = { + 0x00000000ab275080, + 0x0000000fe73c1959, + 0x000000e54c0a74b1, + 0x00000d4398ff1ef9, + 0x0000a18c2ee1c097, + 0x00079fb80b072e4a, + 0x0072db27380dd689, + 0x0842f488162e2284, + 0xf66745411d8ab063, +}; +#define SIZE_DIV64_DIVIDENDS ARRAY_SIZE(test_div64_dividends) + +#define TEST_DIV64_DIVISOR_0 0x00000009 +#define TEST_DIV64_DIVISOR_1 0x0000007c +#define TEST_DIV64_DIVISOR_2 0x00000204 +#define TEST_DIV64_DIVISOR_3 0x0000cb5b +#define TEST_DIV64_DIVISOR_4 0x00010000 +#define TEST_DIV64_DIVISOR_5 0x0008a880 +#define TEST_DIV64_DIVISOR_6 0x003fd3ae +#define TEST_DIV64_DIVISOR_7 0x0b658fac +#define TEST_DIV64_DIVISOR_8 0xdc08b349 + +static const u32 test_div64_divisors[] = { + TEST_DIV64_DIVISOR_0, + TEST_DIV64_DIVISOR_1, + TEST_DIV64_DIVISOR_2, + TEST_DIV64_DIVISOR_3, + TEST_DIV64_DIVISOR_4, + TEST_DIV64_DIVISOR_5, + TEST_DIV64_DIVISOR_6, + TEST_DIV64_DIVISOR_7, + TEST_DIV64_DIVISOR_8, +}; +#define SIZE_DIV64_DIVISORS ARRAY_SIZE(test_div64_divisors) + +static const struct { + u64 quotient; + u32 remainder; +} test_div64_results[SIZE_DIV64_DIVISORS][SIZE_DIV64_DIVIDENDS] = { + { + { 0x0000000013045e47, 0x00000001 }, + { 0x000000000161596c, 0x00000030 }, + { 0x000000000054e9d4, 0x00000130 }, + { 0x000000000000d776, 0x0000278e }, + { 0x000000000000ab27, 0x00005080 }, + { 0x00000000000013c4, 0x0004ce80 }, + { 0x00000000000002ae, 0x001e143c }, + { 0x000000000000000f, 0x0033e56c }, + { 0x0000000000000000, 0xab275080 }, + }, { + { 0x00000001c45c02d1, 0x00000000 }, + { 0x0000000020d5213c, 0x00000049 }, + { 0x0000000007e3d65f, 0x000001dd }, + { 0x0000000000140531, 0x000065ee }, + { 0x00000000000fe73c, 0x00001959 }, + { 0x000000000001d637, 0x0004e5d9 }, + { 0x0000000000003fc9, 0x000713bb }, + { 0x0000000000000165, 0x029abe7d }, + { 0x0000000000000012, 0x6e9f7e37 }, + }, { + { 0x000000197a3a0cf7, 0x00000002 }, + { 0x00000001d9632e5c, 0x00000021 }, + { 0x0000000071c28039, 0x000001cd }, + { 0x000000000120a844, 0x0000b885 }, + { 0x0000000000e54c0a, 0x000074b1 }, + { 0x00000000001a7bb3, 0x00072331 }, + { 0x00000000000397ad, 0x0002c61b }, + { 0x000000000000141e, 0x06ea2e89 }, + { 
0x000000000000010a, 0xab002ad7 }, + }, { + { 0x0000017949e37538, 0x00000001 }, + { 0x0000001b62441f37, 0x00000055 }, + { 0x0000000694a3391d, 0x00000085 }, + { 0x0000000010b2a5d2, 0x0000a753 }, + { 0x000000000d4398ff, 0x00001ef9 }, + { 0x0000000001882ec6, 0x0005cbf9 }, + { 0x000000000035333b, 0x0017abdf }, + { 0x00000000000129f1, 0x0ab4520d }, + { 0x0000000000000f6e, 0x8ac0ce9b }, + }, { + { 0x000011f321a74e49, 0x00000006 }, + { 0x0000014d8481d211, 0x0000005b }, + { 0x0000005025cbd92d, 0x000001e3 }, + { 0x00000000cb5e71e3, 0x000043e6 }, + { 0x00000000a18c2ee1, 0x0000c097 }, + { 0x0000000012a88828, 0x00036c97 }, + { 0x000000000287f16f, 0x002c2a25 }, + { 0x00000000000e2cc7, 0x02d581e3 }, + { 0x000000000000bbf4, 0x1ba08c03 }, + }, { + { 0x0000d8db8f72935d, 0x00000005 }, + { 0x00000fbd5aed7a2e, 0x00000002 }, + { 0x000003c84b6ea64a, 0x00000122 }, + { 0x0000000998fa8829, 0x000044b7 }, + { 0x000000079fb80b07, 0x00002e4a }, + { 0x00000000e16b20fa, 0x0002a14a }, + { 0x000000001e940d22, 0x00353b2e }, + { 0x0000000000ab40ac, 0x06fba6ba }, + { 0x000000000008debd, 0x72d98365 }, + }, { + { 0x000cc3045b8fc281, 0x00000000 }, + { 0x0000ed1f48b5c9fc, 0x00000079 }, + { 0x000038fb9c63406a, 0x000000e1 }, + { 0x000000909705b825, 0x00000a62 }, + { 0x00000072db27380d, 0x0000d689 }, + { 0x0000000d43fce827, 0x00082b09 }, + { 0x00000001ccaba11a, 0x0037e8dd }, + { 0x000000000a13f729, 0x0566dffd }, + { 0x000000000085a14b, 0x23d36726 }, + }, { + { 0x00eafeb9c993592b, 0x00000001 }, + { 0x00110e5befa9a991, 0x00000048 }, + { 0x00041947b4a1d36a, 0x000000dc }, + { 0x00000a6679327311, 0x0000c079 }, + { 0x00000842f488162e, 0x00002284 }, + { 0x000000f4459740fc, 0x00084484 }, + { 0x0000002122c47bf9, 0x002ca446 }, + { 0x00000000b9936290, 0x004979c4 }, + { 0x00000000099ca89d, 0x9db446bf }, + }, { + { 0x1b60cece589da1d2, 0x00000001 }, + { 0x01fcb42be1453f5b, 0x0000004f }, + { 0x007a3f2457df0749, 0x0000013f }, + { 0x0001363130e3ec7b, 0x000017aa }, + { 0x0000f66745411d8a, 0x0000b063 }, + { 0x00001c757dfab350, 0x00048863 }, + { 0x000003dc4979c652, 0x00224ea7 }, + { 0x000000159edc3144, 0x06409ab3 }, + { 0x000000011eadfee3, 0xa99c48a8 }, + }, +}; + +static inline bool test_div64_verify(u64 quotient, u32 remainder, int i, int j) +{ + return (quotient == test_div64_results[i][j].quotient && + remainder == test_div64_results[i][j].remainder); +} + +/* + * This needs to be a macro, because we don't want to rely on the compiler + * to do constant propagation, and `do_div' may take a different path for + * constants, so we do want to verify that as well. + */ +#define test_div64_one(dividend, divisor, i, j) ({ \ + bool result = true; \ + u64 quotient; \ + u32 remainder; \ + \ + quotient = dividend; \ + remainder = do_div(quotient, divisor); \ + if (!test_div64_verify(quotient, remainder, i, j)) { \ + pr_err("ERROR: %016llx / %08x => %016llx,%08x\n", \ + dividend, divisor, quotient, remainder); \ + pr_err("ERROR: expected value => %016llx,%08x\n",\ + test_div64_results[i][j].quotient, \ + test_div64_results[i][j].remainder); \ + result = false; \ + } \ + result; \ +}) + +/* + * Run calculation for the same divisor value expressed as a constant + * and as a variable, so as to verify the implementation for both cases + * should they be handled by different code execution paths. 
+ */ +static bool __init test_div64(void) +{ + u64 dividend; + int i, j; + + for (i = 0; i < SIZE_DIV64_DIVIDENDS; i++) { + dividend = test_div64_dividends[i]; + if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_0, i, 0)) + return false; + if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_1, i, 1)) + return false; + if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_2, i, 2)) + return false; + if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_3, i, 3)) + return false; + if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_4, i, 4)) + return false; + if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_5, i, 5)) + return false; + if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_6, i, 6)) + return false; + if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_7, i, 7)) + return false; + if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_8, i, 8)) + return false; + for (j = 0; j < SIZE_DIV64_DIVISORS; j++) { + if (!test_div64_one(dividend, test_div64_divisors[j], + i, j)) + return false; + } + } + return true; +} + +static int __init test_div64_init(void) +{ + struct timespec64 ts, ts0, ts1; + int i; + + pr_info("Starting 64bit/32bit division and modulo test\n"); + ktime_get_ts64(&ts0); + + for (i = 0; i < TEST_DIV64_N_ITER; i++) + if (!test_div64()) + break; + + ktime_get_ts64(&ts1); + ts = timespec64_sub(ts1, ts0); + pr_info("Completed 64bit/32bit division and modulo test, " + "%llu.%09lus elapsed\n", ts.tv_sec, ts.tv_nsec); + + return 0; +} + +static void __exit test_div64_exit(void) +{ +} + +module_init(test_div64_init); +module_exit(test_div64_exit); + +MODULE_AUTHOR("Maciej W. Rozycki <macro@orcam.me.uk>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("64bit/32bit division and modulo test module"); |
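The module above exercises do_div() against precomputed quotient/remainder pairs; the first table entry can be checked by hand: 0xab275080 is 2871480448, 2871480448 / 9 = 319053383 remainder 1, and 319053383 is 0x13045e47, matching { 0x0000000013045e47, 0x00000001 }. A minimal sketch of a do_div() caller (the helper shown is illustrative, not part of the patch); note that the macro divides its 64-bit argument in place and returns the 32-bit remainder:

#include <linux/types.h>
#include <asm/div64.h>

/* Illustrative helper: divide *val by base and return the remainder. */
static inline u32 example_do_div(u64 *val, u32 base)
{
        u64 n = *val;
        u32 rem = do_div(n, base);      /* n now holds the quotient */

        *val = n;
        return rem;
}

When CONFIG_TEST_DIV64 is built as a module, loading it (for instance with modprobe test_div64) runs the whole table TEST_DIV64_N_ITER times and prints the elapsed time; built in, the same run happens once during boot.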