path: root/arch
author    David S. Miller  2017-04-20 10:35:33 -0400
committer David S. Miller  2017-04-20 10:35:33 -0400
commit    7b9f6da175f9387ebfc202f35e0d39514899ab19 (patch)
tree      a884c05aaeff40d8f80831549cccc820dcdd0f4f /arch
parent    9868879f293c599ce13b584c5bd8800312970781 (diff)
parent    1debdc8f9ebd07daf140e417b3841596911e0066 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
A function in kernel/bpf/syscall.c which got a bug fix in 'net' was moved to
kernel/bpf/verifier.c in 'net-next'.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch')
-rw-r--r-- arch/arm/boot/dts/am335x-baltos.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/am335x-evmsk.dts | 1
-rw-r--r-- arch/arm/boot/dts/dra7.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/logicpd-torpedo-som.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/sun8i-a33.dtsi | 12
-rw-r--r-- arch/arm/mach-omap2/common.h | 1
-rw-r--r-- arch/arm/mach-omap2/omap-hotplug.c | 2
-rw-r--r-- arch/arm/mach-omap2/omap-mpuss-lowpower.c | 22
-rw-r--r-- arch/arm/mach-omap2/omap-smc.S | 1
-rw-r--r-- arch/arm/mach-omap2/omap-smp.c | 90
-rw-r--r-- arch/arm/mach-omap2/omap_device.c | 8
-rw-r--r-- arch/arm/mach-orion5x/Kconfig | 1
-rw-r--r-- arch/arm/plat-orion/common.c | 5
-rw-r--r-- arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi | 2
-rw-r--r-- arch/parisc/include/asm/uaccess.h | 86
-rw-r--r-- arch/parisc/lib/lusercopy.S | 27
-rw-r--r-- arch/sparc/Kconfig | 2
-rw-r--r-- arch/sparc/mm/hugetlbpage.c | 16
-rw-r--r-- arch/x86/include/asm/pmem.h | 42
19 files changed, 241 insertions(+), 83 deletions(-)
diff --git a/arch/arm/boot/dts/am335x-baltos.dtsi b/arch/arm/boot/dts/am335x-baltos.dtsi
index efb5eae290a8..d42b98f15e8b 100644
--- a/arch/arm/boot/dts/am335x-baltos.dtsi
+++ b/arch/arm/boot/dts/am335x-baltos.dtsi
@@ -371,6 +371,8 @@
phy1: ethernet-phy@1 {
reg = <7>;
+ eee-broken-100tx;
+ eee-broken-1000t;
};
};
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index 9e43c443738a..9ba4b18c0cb2 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -672,6 +672,7 @@
ti,non-removable;
bus-width = <4>;
cap-power-off-card;
+ keep-power-in-suspend;
pinctrl-names = "default";
pinctrl-0 = <&mmc2_pins>;
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 2c9e56f4aac5..bbfb9d5a70a9 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -283,6 +283,7 @@
device_type = "pci";
ranges = <0x81000000 0 0 0x03000 0 0x00010000
0x82000000 0 0x20013000 0x13000 0 0xffed000>;
+ bus-range = <0x00 0xff>;
#interrupt-cells = <1>;
num-lanes = <1>;
linux,pci-domain = <0>;
@@ -319,6 +320,7 @@
device_type = "pci";
ranges = <0x81000000 0 0 0x03000 0 0x00010000
0x82000000 0 0x30013000 0x13000 0 0xffed000>;
+ bus-range = <0x00 0xff>;
#interrupt-cells = <1>;
num-lanes = <1>;
linux,pci-domain = <1>;
diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
index 8f9a69ca818c..efe53998c961 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
@@ -121,7 +121,7 @@
&i2c3 {
clock-frequency = <400000>;
at24@50 {
- compatible = "at24,24c02";
+ compatible = "atmel,24c64";
readonly;
reg = <0x50>;
};
diff --git a/arch/arm/boot/dts/sun8i-a33.dtsi b/arch/arm/boot/dts/sun8i-a33.dtsi
index 0467fb365bfc..306af6cadf26 100644
--- a/arch/arm/boot/dts/sun8i-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a33.dtsi
@@ -66,12 +66,6 @@
opp-microvolt = <1200000>;
clock-latency-ns = <244144>; /* 8 32k periods */
};
-
- opp@1200000000 {
- opp-hz = /bits/ 64 <1200000000>;
- opp-microvolt = <1320000>;
- clock-latency-ns = <244144>; /* 8 32k periods */
- };
};
cpus {
@@ -81,16 +75,22 @@
operating-points-v2 = <&cpu0_opp_table>;
};
+ cpu@1 {
+ operating-points-v2 = <&cpu0_opp_table>;
+ };
+
cpu@2 {
compatible = "arm,cortex-a7";
device_type = "cpu";
reg = <2>;
+ operating-points-v2 = <&cpu0_opp_table>;
};
cpu@3 {
compatible = "arm,cortex-a7";
device_type = "cpu";
reg = <3>;
+ operating-points-v2 = <&cpu0_opp_table>;
};
};
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index c4f2ace91ea2..3089d3bfa19b 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -270,6 +270,7 @@ extern const struct smp_operations omap4_smp_ops;
extern int omap4_mpuss_init(void);
extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state);
extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state);
+extern u32 omap4_get_cpu1_ns_pa_addr(void);
#else
static inline int omap4_enter_lowpower(unsigned int cpu,
unsigned int power_state)
diff --git a/arch/arm/mach-omap2/omap-hotplug.c b/arch/arm/mach-omap2/omap-hotplug.c
index d3fb5661bb5d..433db6d0b073 100644
--- a/arch/arm/mach-omap2/omap-hotplug.c
+++ b/arch/arm/mach-omap2/omap-hotplug.c
@@ -50,7 +50,7 @@ void omap4_cpu_die(unsigned int cpu)
omap4_hotplug_cpu(cpu, PWRDM_POWER_OFF);
if (omap_secure_apis_support())
- boot_cpu = omap_read_auxcoreboot0();
+ boot_cpu = omap_read_auxcoreboot0() >> 9;
else
boot_cpu =
readl_relaxed(base + OMAP_AUX_CORE_BOOT_0) >> 5;
diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
index 113ab2dd2ee9..03ec6d307c82 100644
--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
@@ -64,6 +64,7 @@
#include "prm-regbits-44xx.h"
static void __iomem *sar_base;
+static u32 old_cpu1_ns_pa_addr;
#if defined(CONFIG_PM) && defined(CONFIG_SMP)
@@ -212,6 +213,11 @@ static void __init save_l2x0_context(void)
{}
#endif
+u32 omap4_get_cpu1_ns_pa_addr(void)
+{
+ return old_cpu1_ns_pa_addr;
+}
+
/**
* omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function
* The purpose of this function is to manage low power programming
@@ -460,22 +466,30 @@ int __init omap4_mpuss_init(void)
void __init omap4_mpuss_early_init(void)
{
unsigned long startup_pa;
+ void __iomem *ns_pa_addr;
- if (!(cpu_is_omap44xx() || soc_is_omap54xx()))
+ if (!(soc_is_omap44xx() || soc_is_omap54xx()))
return;
sar_base = omap4_get_sar_ram_base();
- if (cpu_is_omap443x())
+ /* Save old NS_PA_ADDR for validity checks later on */
+ if (soc_is_omap44xx())
+ ns_pa_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
+ else
+ ns_pa_addr = sar_base + OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
+ old_cpu1_ns_pa_addr = readl_relaxed(ns_pa_addr);
+
+ if (soc_is_omap443x())
startup_pa = __pa_symbol(omap4_secondary_startup);
- else if (cpu_is_omap446x())
+ else if (soc_is_omap446x())
startup_pa = __pa_symbol(omap4460_secondary_startup);
else if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
startup_pa = __pa_symbol(omap5_secondary_hyp_startup);
else
startup_pa = __pa_symbol(omap5_secondary_startup);
- if (cpu_is_omap44xx())
+ if (soc_is_omap44xx())
writel_relaxed(startup_pa, sar_base +
CPU1_WAKEUP_NS_PA_ADDR_OFFSET);
else
diff --git a/arch/arm/mach-omap2/omap-smc.S b/arch/arm/mach-omap2/omap-smc.S
index fd90125bffc7..72506e6cf9e7 100644
--- a/arch/arm/mach-omap2/omap-smc.S
+++ b/arch/arm/mach-omap2/omap-smc.S
@@ -94,6 +94,5 @@ ENTRY(omap_read_auxcoreboot0)
ldr r12, =0x103
dsb
smc #0
- mov r0, r0, lsr #9
ldmfd sp!, {r2-r12, pc}
ENDPROC(omap_read_auxcoreboot0)
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 003353b0b794..3faf454ba487 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -21,6 +21,7 @@
#include <linux/io.h>
#include <linux/irqchip/arm-gic.h>
+#include <asm/sections.h>
#include <asm/smp_scu.h>
#include <asm/virt.h>
@@ -40,10 +41,14 @@
#define OMAP5_CORE_COUNT 0x2
+#define AUX_CORE_BOOT0_GP_RELEASE 0x020
+#define AUX_CORE_BOOT0_HS_RELEASE 0x200
+
struct omap_smp_config {
unsigned long cpu1_rstctrl_pa;
void __iomem *cpu1_rstctrl_va;
void __iomem *scu_base;
+ void __iomem *wakeupgen_base;
void *startup_addr;
};
@@ -140,7 +145,6 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
static struct clockdomain *cpu1_clkdm;
static bool booted;
static struct powerdomain *cpu1_pwrdm;
- void __iomem *base = omap_get_wakeupgen_base();
/*
* Set synchronisation state between this boot processor
@@ -155,9 +159,11 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
* A barrier is added to ensure that write buffer is drained
*/
if (omap_secure_apis_support())
- omap_modify_auxcoreboot0(0x200, 0xfffffdff);
+ omap_modify_auxcoreboot0(AUX_CORE_BOOT0_HS_RELEASE,
+ 0xfffffdff);
else
- writel_relaxed(0x20, base + OMAP_AUX_CORE_BOOT_0);
+ writel_relaxed(AUX_CORE_BOOT0_GP_RELEASE,
+ cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
if (!cpu1_clkdm && !cpu1_pwrdm) {
cpu1_clkdm = clkdm_lookup("mpu1_clkdm");
@@ -261,9 +267,72 @@ static void __init omap4_smp_init_cpus(void)
set_cpu_possible(i, true);
}
+/*
+ * For now, just make sure the start-up address is not within the booting
+ * kernel space as that means we just overwrote whatever secondary_startup()
+ * code there was.
+ */
+static bool __init omap4_smp_cpu1_startup_valid(unsigned long addr)
+{
+ if ((addr >= __pa(PAGE_OFFSET)) && (addr <= __pa(__bss_start)))
+ return false;
+
+ return true;
+}
+
+/*
+ * We may need to reset CPU1 before configuring, otherwise kexec boot can end
+ * up trying to use old kernel startup address or suspend-resume will
+ * occasionally fail to bring up CPU1 on 4430 if CPU1 fails to enter deeper
+ * idle states.
+ */
+static void __init omap4_smp_maybe_reset_cpu1(struct omap_smp_config *c)
+{
+ unsigned long cpu1_startup_pa, cpu1_ns_pa_addr;
+ bool needs_reset = false;
+ u32 released;
+
+ if (omap_secure_apis_support())
+ released = omap_read_auxcoreboot0() & AUX_CORE_BOOT0_HS_RELEASE;
+ else
+ released = readl_relaxed(cfg.wakeupgen_base +
+ OMAP_AUX_CORE_BOOT_0) &
+ AUX_CORE_BOOT0_GP_RELEASE;
+ if (released) {
+ pr_warn("smp: CPU1 not parked?\n");
+
+ return;
+ }
+
+ cpu1_startup_pa = readl_relaxed(cfg.wakeupgen_base +
+ OMAP_AUX_CORE_BOOT_1);
+ cpu1_ns_pa_addr = omap4_get_cpu1_ns_pa_addr();
+
+ /* Did the configured secondary_startup() get overwritten? */
+ if (!omap4_smp_cpu1_startup_valid(cpu1_startup_pa))
+ needs_reset = true;
+
+ /*
+ * If omap4 or 5 has NS_PA_ADDR configured, CPU1 may be in a
+ * deeper idle state in WFI and will wake to an invalid address.
+ */
+ if ((soc_is_omap44xx() || soc_is_omap54xx()) &&
+ !omap4_smp_cpu1_startup_valid(cpu1_ns_pa_addr))
+ needs_reset = true;
+
+ if (!needs_reset || !c->cpu1_rstctrl_va)
+ return;
+
+ pr_info("smp: CPU1 parked within kernel, needs reset (0x%lx 0x%lx)\n",
+ cpu1_startup_pa, cpu1_ns_pa_addr);
+
+ writel_relaxed(1, c->cpu1_rstctrl_va);
+ readl_relaxed(c->cpu1_rstctrl_va);
+ writel_relaxed(0, c->cpu1_rstctrl_va);
+}
+
static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
{
- void __iomem *base = omap_get_wakeupgen_base();
const struct omap_smp_config *c = NULL;
if (soc_is_omap443x())
@@ -281,6 +350,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
/* Must preserve cfg.scu_base set earlier */
cfg.cpu1_rstctrl_pa = c->cpu1_rstctrl_pa;
cfg.startup_addr = c->startup_addr;
+ cfg.wakeupgen_base = omap_get_wakeupgen_base();
if (soc_is_dra74x() || soc_is_omap54xx()) {
if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
@@ -299,15 +369,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
if (cfg.scu_base)
scu_enable(cfg.scu_base);
- /*
- * Reset CPU1 before configuring, otherwise kexec will
- * end up trying to use old kernel startup address.
- */
- if (cfg.cpu1_rstctrl_va) {
- writel_relaxed(1, cfg.cpu1_rstctrl_va);
- readl_relaxed(cfg.cpu1_rstctrl_va);
- writel_relaxed(0, cfg.cpu1_rstctrl_va);
- }
+ omap4_smp_maybe_reset_cpu1(&cfg);
/*
* Write the address of secondary startup routine into the
@@ -319,7 +381,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
omap_auxcoreboot_addr(__pa_symbol(cfg.startup_addr));
else
writel_relaxed(__pa_symbol(cfg.startup_addr),
- base + OMAP_AUX_CORE_BOOT_1);
+ cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
}
const struct smp_operations omap4_smp_ops __initconst = {
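
The two constants introduced in this file encode where the CPU1 release flag sits in AUX_CORE_BOOT_0: bit 5 (0x020) on general-purpose devices, which are programmed through the wakeupgen register directly, and bit 9 (0x200) on high-security devices, which go through the secure monitor call. That is also why omap-hotplug.c now shifts the raw omap_read_auxcoreboot0() value right by 9 itself instead of having omap-smc.S do it. A rough sketch of that relationship, not part of the patch and using an invented helper name:

static bool cpu1_released(u32 auxboot0, bool hs_device)
{
	/* 0x200 is bit 9 (HS devices, read via the secure API);
	 * 0x020 is bit 5 (GP devices, read from AUX_CORE_BOOT_0). */
	u32 mask = hs_device ? AUX_CORE_BOOT0_HS_RELEASE
			     : AUX_CORE_BOOT0_GP_RELEASE;

	return auxboot0 & mask;
}

omap4_smp_maybe_reset_cpu1() above performs exactly this masking before deciding whether CPU1 still needs a reset.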
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index e920dd83e443..f989145480c8 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -222,6 +222,14 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
dev_err(dev, "failed to idle\n");
}
break;
+ case BUS_NOTIFY_BIND_DRIVER:
+ od = to_omap_device(pdev);
+ if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) &&
+ pm_runtime_status_suspended(dev)) {
+ od->_driver_status = BUS_NOTIFY_BIND_DRIVER;
+ pm_runtime_set_active(dev);
+ }
+ break;
case BUS_NOTIFY_ADD_DEVICE:
if (pdev->dev.of_node)
omap_device_build_from_dt(pdev);
diff --git a/arch/arm/mach-orion5x/Kconfig b/arch/arm/mach-orion5x/Kconfig
index 633442ad4e4c..2a7bb6ccdcb7 100644
--- a/arch/arm/mach-orion5x/Kconfig
+++ b/arch/arm/mach-orion5x/Kconfig
@@ -6,6 +6,7 @@ menuconfig ARCH_ORION5X
select GPIOLIB
select MVEBU_MBUS
select PCI
+ select PHYLIB if NETDEVICES
select PLAT_ORION_LEGACY
help
Support for the following Marvell Orion 5x series SoCs:
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index 9255b6d67ba5..aff6994950ba 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -468,6 +468,7 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
eth_data, &orion_ge11);
}
+#ifdef CONFIG_ARCH_ORION5X
/*****************************************************************************
* Ethernet switch
****************************************************************************/
@@ -480,6 +481,9 @@ void __init orion_ge00_switch_init(struct dsa_chip_data *d)
struct mdio_board_info *bd;
unsigned int i;
+ if (!IS_BUILTIN(CONFIG_PHYLIB))
+ return;
+
for (i = 0; i < ARRAY_SIZE(d->port_names); i++)
if (!strcmp(d->port_names[i], "cpu"))
break;
@@ -493,6 +497,7 @@ void __init orion_ge00_switch_init(struct dsa_chip_data *d)
mdiobus_register_board_info(&orion_ge00_switch_board_info, 1);
}
+#endif
/*****************************************************************************
* I2C
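
The new runtime guard relies on IS_BUILTIN() from <linux/kconfig.h>, which evaluates to 1 only when the option is built in (=y) and to 0 when it is =m or unset, unlike IS_ENABLED(), which also accepts =m. Together with the Kconfig hunk that selects PHYLIB if NETDEVICES, the switch setup is skipped whenever no built-in phylib is present to consume it. A hedged illustration of those semantics, with a made-up surrounding function:

/*
 * CONFIG_PHYLIB=y      -> IS_BUILTIN(CONFIG_PHYLIB) == 1
 * CONFIG_PHYLIB=m      -> IS_BUILTIN(CONFIG_PHYLIB) == 0
 * CONFIG_PHYLIB unset  -> IS_BUILTIN(CONFIG_PHYLIB) == 0
 */
static void __init example_switch_setup(struct dsa_chip_data *d)
{
	if (!IS_BUILTIN(CONFIG_PHYLIB))
		return;	/* bail out, as orion_ge00_switch_init() now does */

	/* ... continue with mdio board-info registration ... */
}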
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
index 1c64ea2d23f9..0565779e66fa 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
@@ -179,8 +179,10 @@
usbphy: phy@01c19400 {
compatible = "allwinner,sun50i-a64-usb-phy";
reg = <0x01c19400 0x14>,
+ <0x01c1a800 0x4>,
<0x01c1b800 0x4>;
reg-names = "phy_ctrl",
+ "pmu0",
"pmu1";
clocks = <&ccu CLK_USB_PHY0>,
<&ccu CLK_USB_PHY1>;
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 8442727f28d2..cbd4f4af8108 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -39,10 +39,10 @@
#define get_user __get_user
#if !defined(CONFIG_64BIT)
-#define LDD_USER(ptr) __get_user_asm64(ptr)
+#define LDD_USER(val, ptr) __get_user_asm64(val, ptr)
#define STD_USER(x, ptr) __put_user_asm64(x, ptr)
#else
-#define LDD_USER(ptr) __get_user_asm("ldd", ptr)
+#define LDD_USER(val, ptr) __get_user_asm(val, "ldd", ptr)
#define STD_USER(x, ptr) __put_user_asm("std", x, ptr)
#endif
@@ -97,63 +97,87 @@ struct exception_data {
" mtsp %0,%%sr2\n\t" \
: : "r"(get_fs()) : )
-#define __get_user(x, ptr) \
-({ \
- register long __gu_err __asm__ ("r8") = 0; \
- register long __gu_val; \
- \
- load_sr2(); \
- switch (sizeof(*(ptr))) { \
- case 1: __get_user_asm("ldb", ptr); break; \
- case 2: __get_user_asm("ldh", ptr); break; \
- case 4: __get_user_asm("ldw", ptr); break; \
- case 8: LDD_USER(ptr); break; \
- default: BUILD_BUG(); break; \
- } \
- \
- (x) = (__force __typeof__(*(ptr))) __gu_val; \
- __gu_err; \
+#define __get_user_internal(val, ptr) \
+({ \
+ register long __gu_err __asm__ ("r8") = 0; \
+ \
+ switch (sizeof(*(ptr))) { \
+ case 1: __get_user_asm(val, "ldb", ptr); break; \
+ case 2: __get_user_asm(val, "ldh", ptr); break; \
+ case 4: __get_user_asm(val, "ldw", ptr); break; \
+ case 8: LDD_USER(val, ptr); break; \
+ default: BUILD_BUG(); \
+ } \
+ \
+ __gu_err; \
})
-#define __get_user_asm(ldx, ptr) \
+#define __get_user(val, ptr) \
+({ \
+ load_sr2(); \
+ __get_user_internal(val, ptr); \
+})
+
+#define __get_user_asm(val, ldx, ptr) \
+{ \
+ register long __gu_val; \
+ \
__asm__("1: " ldx " 0(%%sr2,%2),%0\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
: "=r"(__gu_val), "=r"(__gu_err) \
- : "r"(ptr), "1"(__gu_err));
+ : "r"(ptr), "1"(__gu_err)); \
+ \
+ (val) = (__force __typeof__(*(ptr))) __gu_val; \
+}
#if !defined(CONFIG_64BIT)
-#define __get_user_asm64(ptr) \
+#define __get_user_asm64(val, ptr) \
+{ \
+ union { \
+ unsigned long long l; \
+ __typeof__(*(ptr)) t; \
+ } __gu_tmp; \
+ \
__asm__(" copy %%r0,%R0\n" \
"1: ldw 0(%%sr2,%2),%0\n" \
"2: ldw 4(%%sr2,%2),%R0\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
- : "=r"(__gu_val), "=r"(__gu_err) \
- : "r"(ptr), "1"(__gu_err));
+ : "=&r"(__gu_tmp.l), "=r"(__gu_err) \
+ : "r"(ptr), "1"(__gu_err)); \
+ \
+ (val) = __gu_tmp.t; \
+}
#endif /* !defined(CONFIG_64BIT) */
-#define __put_user(x, ptr) \
+#define __put_user_internal(x, ptr) \
({ \
register long __pu_err __asm__ ("r8") = 0; \
__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \
\
- load_sr2(); \
switch (sizeof(*(ptr))) { \
- case 1: __put_user_asm("stb", __x, ptr); break; \
- case 2: __put_user_asm("sth", __x, ptr); break; \
- case 4: __put_user_asm("stw", __x, ptr); break; \
- case 8: STD_USER(__x, ptr); break; \
- default: BUILD_BUG(); break; \
- } \
+ case 1: __put_user_asm("stb", __x, ptr); break; \
+ case 2: __put_user_asm("sth", __x, ptr); break; \
+ case 4: __put_user_asm("stw", __x, ptr); break; \
+ case 8: STD_USER(__x, ptr); break; \
+ default: BUILD_BUG(); \
+ } \
\
__pu_err; \
})
+#define __put_user(x, ptr) \
+({ \
+ load_sr2(); \
+ __put_user_internal(x, ptr); \
+})
+
+
/*
* The "__put_user/kernel_asm()" macros tell gcc they read from memory
* instead of writing. This is because they do not write to any memory
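
The rework above keeps the calling convention of __get_user()/__put_user() intact: load_sr2() is now issued once in the outer macro, the size dispatch moves into the new __get_user_internal()/__put_user_internal() helpers, and the 32-bit 8-byte load assembles its result in an unsigned long long union member before assigning it to the target type. Callers still receive 0 on success or -EFAULT on a fault, with the value assigned as a side effect. A minimal usage sketch with invented names:

static int fetch_user_u64(const u64 __user *uptr, u64 *out)
{
	u64 val;

	/* As usual for the double-underscore variants, access_ok()
	 * is assumed to have been done by the caller. */
	if (__get_user(val, uptr))
		return -EFAULT;

	*out = val;
	return 0;
}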
diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S
index f01188c044ee..85c28bb80fb7 100644
--- a/arch/parisc/lib/lusercopy.S
+++ b/arch/parisc/lib/lusercopy.S
@@ -201,7 +201,7 @@ ENTRY_CFI(pa_memcpy)
add dst,len,end
/* short copy with less than 16 bytes? */
- cmpib,>>=,n 15,len,.Lbyte_loop
+ cmpib,COND(>>=),n 15,len,.Lbyte_loop
/* same alignment? */
xor src,dst,t0
@@ -216,7 +216,7 @@ ENTRY_CFI(pa_memcpy)
/* loop until we are 64-bit aligned */
.Lalign_loop64:
extru dst,31,3,t1
- cmpib,=,n 0,t1,.Lcopy_loop_16
+ cmpib,=,n 0,t1,.Lcopy_loop_16_start
20: ldb,ma 1(srcspc,src),t1
21: stb,ma t1,1(dstspc,dst)
b .Lalign_loop64
@@ -225,6 +225,7 @@ ENTRY_CFI(pa_memcpy)
ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+.Lcopy_loop_16_start:
ldi 31,t0
.Lcopy_loop_16:
cmpb,COND(>>=),n t0,len,.Lword_loop
@@ -267,7 +268,7 @@ ENTRY_CFI(pa_memcpy)
/* loop until we are 32-bit aligned */
.Lalign_loop32:
extru dst,31,2,t1
- cmpib,=,n 0,t1,.Lcopy_loop_4
+ cmpib,=,n 0,t1,.Lcopy_loop_8
20: ldb,ma 1(srcspc,src),t1
21: stb,ma t1,1(dstspc,dst)
b .Lalign_loop32
@@ -277,7 +278,7 @@ ENTRY_CFI(pa_memcpy)
ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
-.Lcopy_loop_4:
+.Lcopy_loop_8:
cmpib,COND(>>=),n 15,len,.Lbyte_loop
10: ldw 0(srcspc,src),t1
@@ -299,7 +300,7 @@ ENTRY_CFI(pa_memcpy)
ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
- b .Lcopy_loop_4
+ b .Lcopy_loop_8
ldo -16(len),len
.Lbyte_loop:
@@ -324,7 +325,7 @@ ENTRY_CFI(pa_memcpy)
.Lunaligned_copy:
/* align until dst is 32bit-word-aligned */
extru dst,31,2,t1
- cmpib,COND(=),n 0,t1,.Lcopy_dstaligned
+ cmpib,=,n 0,t1,.Lcopy_dstaligned
20: ldb 0(srcspc,src),t1
ldo 1(src),src
21: stb,ma t1,1(dstspc,dst)
@@ -362,7 +363,7 @@ ENTRY_CFI(pa_memcpy)
cmpiclr,<> 1,t0,%r0
b,n .Lcase1
.Lcase0:
- cmpb,= %r0,len,.Lcda_finish
+ cmpb,COND(=) %r0,len,.Lcda_finish
nop
1: ldw,ma 4(srcspc,src), a3
@@ -376,7 +377,7 @@ ENTRY_CFI(pa_memcpy)
1: ldw,ma 4(srcspc,src), a3
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
ldo -1(len),len
- cmpb,=,n %r0,len,.Ldo0
+ cmpb,COND(=),n %r0,len,.Ldo0
.Ldo4:
1: ldw,ma 4(srcspc,src), a0
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
@@ -402,7 +403,7 @@ ENTRY_CFI(pa_memcpy)
1: stw,ma t0, 4(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
ldo -4(len),len
- cmpb,<> %r0,len,.Ldo4
+ cmpb,COND(<>) %r0,len,.Ldo4
nop
.Ldo0:
shrpw a2, a3, %sar, t0
@@ -436,14 +437,14 @@ ENTRY_CFI(pa_memcpy)
/* fault exception fixup handlers: */
#ifdef CONFIG_64BIT
.Lcopy16_fault:
-10: b .Lcopy_done
- std,ma t1,8(dstspc,dst)
+ b .Lcopy_done
+10: std,ma t1,8(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
#endif
.Lcopy8_fault:
-10: b .Lcopy_done
- stw,ma t1,4(dstspc,dst)
+ b .Lcopy_done
+10: stw,ma t1,4(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
.exit
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 68ac5c7cd982..a59deaef21e5 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -43,7 +43,7 @@ config SPARC
select ARCH_HAS_SG_CHAIN
select CPU_NO_EFFICIENT_FFS
select HAVE_ARCH_HARDENED_USERCOPY
- select PROVE_LOCKING_SMALL if PROVE_LOCKING
+ select LOCKDEP_SMALL if LOCKDEP
select ARCH_WANT_RELAX_ORDER
config SPARC32
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index ee5273ad918d..7c29d38e6b99 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -461,6 +461,22 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
pgd_t *pgd;
unsigned long next;
+ addr &= PMD_MASK;
+ if (addr < floor) {
+ addr += PMD_SIZE;
+ if (!addr)
+ return;
+ }
+ if (ceiling) {
+ ceiling &= PMD_MASK;
+ if (!ceiling)
+ return;
+ }
+ if (end - 1 > ceiling - 1)
+ end -= PMD_SIZE;
+ if (addr > end - 1)
+ return;
+
pgd = pgd_offset(tlb->mm, addr);
do {
next = pgd_addr_end(addr, end);
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index 2c1ebeb4d737..529bb4a6487a 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -55,7 +55,8 @@ static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n)
* @size: number of bytes to write back
*
* Write back a cache range using the CLWB (cache line write back)
- * instruction.
+ * instruction. Note that @size is internally rounded up to be cache
+ * line size aligned.
*/
static inline void arch_wb_cache_pmem(void *addr, size_t size)
{
@@ -69,15 +70,6 @@ static inline void arch_wb_cache_pmem(void *addr, size_t size)
clwb(p);
}
-/*
- * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
- * iterators, so for other types (bvec & kvec) we must do a cache write-back.
- */
-static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
-{
- return iter_is_iovec(i) == false;
-}
-
/**
* arch_copy_from_iter_pmem - copy data from an iterator to PMEM
* @addr: PMEM destination address
@@ -94,7 +86,35 @@ static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
/* TODO: skip the write-back by always using non-temporal stores */
len = copy_from_iter_nocache(addr, bytes, i);
- if (__iter_needs_pmem_wb(i))
+ /*
+ * In the iovec case on x86_64 copy_from_iter_nocache() uses
+ * non-temporal stores for the bulk of the transfer, but we need
+ * to manually flush if the transfer is unaligned. A cached
+ * memory copy is used when destination or size is not naturally
+ * aligned. That is:
+ * - Require 8-byte alignment when size is 8 bytes or larger.
+ * - Require 4-byte alignment when size is 4 bytes.
+ *
+ * In the non-iovec case the entire destination needs to be
+ * flushed.
+ */
+ if (iter_is_iovec(i)) {
+ unsigned long flushed, dest = (unsigned long) addr;
+
+ if (bytes < 8) {
+ if (!IS_ALIGNED(dest, 4) || (bytes != 4))
+ arch_wb_cache_pmem(addr, 1);
+ } else {
+ if (!IS_ALIGNED(dest, 8)) {
+ dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
+ arch_wb_cache_pmem(addr, 1);
+ }
+
+ flushed = dest - (unsigned long) addr;
+ if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
+ arch_wb_cache_pmem(addr + bytes - 1, 1);
+ }
+ } else
arch_wb_cache_pmem(addr, bytes);
return len;
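
The replacement logic flushes only the cache lines that copy_from_iter_nocache() may have written with cached stores: for transfers of 8 bytes or more, the line at the head of the buffer when the destination is not 8-byte aligned and the line holding the last byte when the leftover length is not a multiple of 8; smaller transfers are flushed unless they are exactly 4 bytes at a 4-byte-aligned destination. A worked example under the same rules, assuming a 64-byte cache line:

/*
 * dest = 0x1004 (4-byte but not 8-byte aligned), bytes = 100:
 *
 *   head:  !IS_ALIGNED(0x1004, 8)  -> flush the line containing dest;
 *          dest is then rounded up to the next 64-byte boundary, 0x1040
 *   flushed = 0x1040 - 0x1004 = 60
 *   tail:  bytes - flushed = 40, which is a multiple of 8 -> no tail flush
 *
 * With bytes = 99 instead, bytes - flushed = 39 is not a multiple of 8,
 * so the line containing the last byte (addr + 98) is flushed as well.
 */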