Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig            23
-rw-r--r--  arch/arm/mm/Makefile            4
-rw-r--r--  arch/arm/mm/dma-mapping.c       2
-rw-r--r--  arch/arm/mm/fault.c             3
-rw-r--r--  arch/arm/mm/nommu.c            32
-rw-r--r--  arch/arm/mm/pmsa-v7.c          59
-rw-r--r--  arch/arm/mm/pmsa-v8.c         307
-rw-r--r--  arch/arm/mm/proc-macros.S       3
-rw-r--r--  arch/arm/mm/proc-v7-2level.S    6
-rw-r--r--  arch/arm/mm/proc-v7-bugs.c    174
-rw-r--r--  arch/arm/mm/proc-v7.S         154
11 files changed, 690 insertions, 77 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 5a016bc80e26..96a7b6cf459b 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -415,6 +415,7 @@ config CPU_V7
select CPU_CP15_MPU if !MMU
select CPU_HAS_ASID if MMU
select CPU_PABRT_V7
+ select CPU_SPECTRE if MMU
select CPU_THUMB_CAPABLE
select CPU_TLB_V7 if MMU
@@ -821,6 +822,28 @@ config CPU_BPREDICT_DISABLE
help
Say Y here to disable branch prediction. If unsure, say N.
+config CPU_SPECTRE
+ bool
+
+config HARDEN_BRANCH_PREDICTOR
+ bool "Harden the branch predictor against aliasing attacks" if EXPERT
+ depends on CPU_SPECTRE
+ default y
+ help
+ Speculation attacks against some high-performance processors rely
+ on being able to manipulate the branch predictor for a victim
+ context by executing aliasing branches in the attacker context.
+ Such attacks can be partially mitigated by clearing
+ internal branch predictor state and limiting the prediction
+ logic in some situations.
+
+ This config option will take CPU-specific actions to harden
+ the branch predictor against aliasing attacks and may rely on
+ specific instruction sequences or control bits being set by
+ the system firmware.
+
+ If unsure, say Y.
+
config TLS_REG_EMUL
bool
select NEED_KUSER_HELPERS
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 9dbb84923e12..7cb1699fbfc4 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \
ifneq ($(CONFIG_MMU),y)
obj-y += nommu.o
-obj-$(CONFIG_ARM_MPU) += pmsa-v7.o
+obj-$(CONFIG_ARM_MPU) += pmsa-v7.o pmsa-v8.o
endif
obj-$(CONFIG_ARM_PTDUMP_CORE) += dump.o
@@ -97,7 +97,7 @@ obj-$(CONFIG_CPU_MOHAWK) += proc-mohawk.o
obj-$(CONFIG_CPU_FEROCEON) += proc-feroceon.o
obj-$(CONFIG_CPU_V6) += proc-v6.o
obj-$(CONFIG_CPU_V6K) += proc-v6.o
-obj-$(CONFIG_CPU_V7) += proc-v7.o
+obj-$(CONFIG_CPU_V7) += proc-v7.o proc-v7-bugs.o
obj-$(CONFIG_CPU_V7M) += proc-v7m.o
AFLAGS_proc-v6.o :=-Wa,-march=armv6
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 4b6613b5e042..af27f1c22d93 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -831,7 +831,7 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
unsigned long attrs)
{
int ret;
- unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ unsigned long nr_vma_pages = vma_pages(vma);
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long pfn = dma_to_pfn(dev, dma_addr);
unsigned long off = vma->vm_pgoff;
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 32034543f49c..84becc911ee3 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -163,6 +163,9 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
{
struct siginfo si;
+ if (addr > TASK_SIZE)
+ harden_branch_predictor();
+
clear_siginfo(&si);
#ifdef CONFIG_DEBUG_USER
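The harden_branch_predictor() call added above dispatches through a per-CPU
function pointer installed by the new proc-v7-bugs.c code further down. A
minimal sketch of the helper, assuming it lives in <asm/system_misc.h> (that
header is not part of this diff):

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
typedef void (*harden_branch_predictor_fn_t)(void);
DECLARE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);

static inline void harden_branch_predictor(void)
{
	harden_branch_predictor_fn_t fn = per_cpu(harden_branch_predictor_fn,
						  smp_processor_id());
	if (fn)
		fn();
}
#else
#define harden_branch_predictor() do { } while (0)
#endif

The addr > TASK_SIZE test catches a user-mode fault taken at a kernel
address, i.e. a mispredicted or malicious branch into kernel space, and
answers it by invalidating the branch predictor.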
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 7c087961b7ce..5dd6c58d653b 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -99,6 +99,38 @@ void __init arm_mm_memblock_reserve(void)
memblock_reserve(0, 1);
}
+static void __init adjust_lowmem_bounds_mpu(void)
+{
+ unsigned long pmsa = read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA;
+
+ switch (pmsa) {
+ case MMFR0_PMSAv7:
+ pmsav7_adjust_lowmem_bounds();
+ break;
+ case MMFR0_PMSAv8:
+ pmsav8_adjust_lowmem_bounds();
+ break;
+ default:
+ break;
+ }
+}
+
+static void __init mpu_setup(void)
+{
+ unsigned long pmsa = read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA;
+
+ switch (pmsa) {
+ case MMFR0_PMSAv7:
+ pmsav7_setup();
+ break;
+ case MMFR0_PMSAv8:
+ pmsav8_setup();
+ break;
+ default:
+ break;
+ }
+}
+
void __init adjust_lowmem_bounds(void)
{
phys_addr_t end;
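Both dispatchers above key off the PMSA field of ID_MMFR0 (bits [7:4]). A
sketch of the mask and values assumed by this code (defined in <asm/mpu.h>,
not in this hunk):

#define MMFR0_PMSA	(0xF << 4)	/* ID_MMFR0[7:4]: PMSA support */
#define MMFR0_PMSAv7	(3 << 4)	/* 0b0011: PMSAv7 MPU */
#define MMFR0_PMSAv8	(4 << 4)	/* 0b0100: PMSAv8 MPU */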
diff --git a/arch/arm/mm/pmsa-v7.c b/arch/arm/mm/pmsa-v7.c
index e2853bfff74e..699fa2e88725 100644
--- a/arch/arm/mm/pmsa-v7.c
+++ b/arch/arm/mm/pmsa-v7.c
@@ -102,7 +102,7 @@ static inline u32 irbar_read(void)
static inline void rgnr_write(u32 v)
{
- writel_relaxed(v, BASEADDR_V7M_SCB + MPU_RNR);
+ writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv7_RNR);
}
/* Data-side / unified region attributes */
@@ -110,28 +110,28 @@ static inline void rgnr_write(u32 v)
/* Region access control register */
static inline void dracr_write(u32 v)
{
- u32 rsr = readl_relaxed(BASEADDR_V7M_SCB + MPU_RASR) & GENMASK(15, 0);
+ u32 rsr = readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RASR) & GENMASK(15, 0);
- writel_relaxed((v << 16) | rsr, BASEADDR_V7M_SCB + MPU_RASR);
+ writel_relaxed((v << 16) | rsr, BASEADDR_V7M_SCB + PMSAv7_RASR);
}
/* Region size register */
static inline void drsr_write(u32 v)
{
- u32 racr = readl_relaxed(BASEADDR_V7M_SCB + MPU_RASR) & GENMASK(31, 16);
+ u32 racr = readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RASR) & GENMASK(31, 16);
- writel_relaxed(v | racr, BASEADDR_V7M_SCB + MPU_RASR);
+ writel_relaxed(v | racr, BASEADDR_V7M_SCB + PMSAv7_RASR);
}
/* Region base address register */
static inline void drbar_write(u32 v)
{
- writel_relaxed(v, BASEADDR_V7M_SCB + MPU_RBAR);
+ writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv7_RBAR);
}
static inline u32 drbar_read(void)
{
- return readl_relaxed(BASEADDR_V7M_SCB + MPU_RBAR);
+ return readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RBAR);
}
/* ARMv7-M only supports a unified MPU, so I-side operations are nop */
@@ -143,11 +143,6 @@ static inline unsigned long irbar_read(void) {return 0;}
#endif
-static int __init mpu_present(void)
-{
- return ((read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA) == MMFR0_PMSAv7);
-}
-
static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct region *region)
{
unsigned long subreg, bslots, sslots;
@@ -161,7 +156,7 @@ static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct r
bdiff = base - abase;
sdiff = p2size - asize;
- subreg = p2size / MPU_NR_SUBREGS;
+ subreg = p2size / PMSAv7_NR_SUBREGS;
if ((bdiff % subreg) || (sdiff % subreg))
return false;
@@ -172,17 +167,17 @@ static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct r
if (bslots || sslots) {
int i;
- if (subreg < MPU_MIN_SUBREG_SIZE)
+ if (subreg < PMSAv7_MIN_SUBREG_SIZE)
return false;
- if (bslots + sslots > MPU_NR_SUBREGS)
+ if (bslots + sslots > PMSAv7_NR_SUBREGS)
return false;
for (i = 0; i < bslots; i++)
_set_bit(i, &region->subreg);
for (i = 1; i <= sslots; i++)
- _set_bit(MPU_NR_SUBREGS - i, &region->subreg);
+ _set_bit(PMSAv7_NR_SUBREGS - i, &region->subreg);
}
region->base = abase;
@@ -233,7 +228,7 @@ static int __init allocate_region(phys_addr_t base, phys_addr_t size,
}
/* MPU initialisation functions */
-void __init adjust_lowmem_bounds_mpu(void)
+void __init pmsav7_adjust_lowmem_bounds(void)
{
phys_addr_t specified_mem_size = 0, total_mem_size = 0;
struct memblock_region *reg;
@@ -243,10 +238,7 @@ void __init adjust_lowmem_bounds_mpu(void)
unsigned int mem_max_regions;
int num, i;
- if (!mpu_present())
- return;
-
- /* Free-up MPU_PROBE_REGION */
+ /* Free-up PMSAv7_PROBE_REGION */
mpu_min_region_order = __mpu_min_region_order();
/* How many regions are supported */
@@ -301,12 +293,12 @@ void __init adjust_lowmem_bounds_mpu(void)
num = allocate_region(mem_start, specified_mem_size, mem_max_regions, mem);
for (i = 0; i < num; i++) {
- unsigned long subreg = mem[i].size / MPU_NR_SUBREGS;
+ unsigned long subreg = mem[i].size / PMSAv7_NR_SUBREGS;
total_mem_size += mem[i].size - subreg * hweight_long(mem[i].subreg);
pr_debug("MPU: base %pa size %pa disable subregions: %*pbl\n",
- &mem[i].base, &mem[i].size, MPU_NR_SUBREGS, &mem[i].subreg);
+ &mem[i].base, &mem[i].size, PMSAv7_NR_SUBREGS, &mem[i].subreg);
}
if (total_mem_size != specified_mem_size) {
@@ -349,7 +341,7 @@ static int __init __mpu_min_region_order(void)
u32 drbar_result, irbar_result;
/* We've kept a region free for this probing */
- rgnr_write(MPU_PROBE_REGION);
+ rgnr_write(PMSAv7_PROBE_REGION);
isb();
/*
* As per ARM ARM, write 0xFFFFFFFC to DRBAR to find the minimum
@@ -388,8 +380,8 @@ static int __init mpu_setup_region(unsigned int number, phys_addr_t start,
return -ENOMEM;
/* Writing N to bits 5:1 (RSR_SZ) specifies region size 2^N+1 */
- size_data = ((size_order - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN;
- size_data |= subregions << MPU_RSR_SD;
+ size_data = ((size_order - 1) << PMSAv7_RSR_SZ) | 1 << PMSAv7_RSR_EN;
+ size_data |= subregions << PMSAv7_RSR_SD;
if (need_flush)
flush_cache_all();
@@ -424,18 +416,15 @@ static int __init mpu_setup_region(unsigned int number, phys_addr_t start,
/*
* Set up default MPU regions, doing nothing if there is no MPU
*/
-void __init mpu_setup(void)
+void __init pmsav7_setup(void)
{
int i, region = 0, err = 0;
- if (!mpu_present())
- return;
-
/* Setup MPU (order is important) */
/* Background */
err |= mpu_setup_region(region++, 0, 32,
- MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0RW,
+ PMSAv7_ACR_XN | PMSAv7_RGN_STRONGLY_ORDERED | PMSAv7_AP_PL1RW_PL0RW,
0, false);
#ifdef CONFIG_XIP_KERNEL
@@ -448,13 +437,13 @@ void __init mpu_setup(void)
* with BG region (which is uncachable), thus we need
* to clean and invalidate cache.
*/
- bool need_flush = region == MPU_RAM_REGION;
+ bool need_flush = region == PMSAv7_RAM_REGION;
if (!xip[i].size)
continue;
err |= mpu_setup_region(region++, xip[i].base, ilog2(xip[i].size),
- MPU_AP_PL1RO_PL0NA | MPU_RGN_NORMAL,
+ PMSAv7_AP_PL1RO_PL0NA | PMSAv7_RGN_NORMAL,
xip[i].subreg, need_flush);
}
#endif
@@ -465,14 +454,14 @@ void __init mpu_setup(void)
continue;
err |= mpu_setup_region(region++, mem[i].base, ilog2(mem[i].size),
- MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL,
+ PMSAv7_AP_PL1RW_PL0RW | PMSAv7_RGN_NORMAL,
mem[i].subreg, false);
}
/* Vectors */
#ifndef CONFIG_CPU_V7M
err |= mpu_setup_region(region++, vectors_base, ilog2(2 * PAGE_SIZE),
- MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL,
+ PMSAv7_AP_PL1RW_PL0NA | PMSAv7_RGN_NORMAL,
0, false);
#endif
if (err) {
diff --git a/arch/arm/mm/pmsa-v8.c b/arch/arm/mm/pmsa-v8.c
new file mode 100644
index 000000000000..617a83def88a
--- /dev/null
+++ b/arch/arm/mm/pmsa-v8.c
@@ -0,0 +1,307 @@
+/*
+ * Based on linux/arch/arm/pmsa-v7.c
+ *
+ * ARM PMSAv8 supporting functions.
+ */
+
+#include <linux/memblock.h>
+#include <linux/range.h>
+
+#include <asm/cp15.h>
+#include <asm/cputype.h>
+#include <asm/mpu.h>
+
+#include <asm/memory.h>
+#include <asm/sections.h>
+
+#include "mm.h"
+
+#ifndef CONFIG_CPU_V7M
+
+#define PRSEL __ACCESS_CP15(c6, 0, c2, 1)
+#define PRBAR __ACCESS_CP15(c6, 0, c3, 0)
+#define PRLAR __ACCESS_CP15(c6, 0, c3, 1)
+
+static inline u32 prlar_read(void)
+{
+ return read_sysreg(PRLAR);
+}
+
+static inline u32 prbar_read(void)
+{
+ return read_sysreg(PRBAR);
+}
+
+static inline void prsel_write(u32 v)
+{
+ write_sysreg(v, PRSEL);
+}
+
+static inline void prbar_write(u32 v)
+{
+ write_sysreg(v, PRBAR);
+}
+
+static inline void prlar_write(u32 v)
+{
+ write_sysreg(v, PRLAR);
+}
+#else
+
+static inline u32 prlar_read(void)
+{
+ return readl_relaxed(BASEADDR_V7M_SCB + PMSAv8_RLAR);
+}
+
+static inline u32 prbar_read(void)
+{
+ return readl_relaxed(BASEADDR_V7M_SCB + PMSAv8_RBAR);
+}
+
+static inline void prsel_write(u32 v)
+{
+ writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv8_RNR);
+}
+
+static inline void prbar_write(u32 v)
+{
+ writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv8_RBAR);
+}
+
+static inline void prlar_write(u32 v)
+{
+ writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv8_RLAR);
+}
+
+#endif
+
+static struct range __initdata io[MPU_MAX_REGIONS];
+static struct range __initdata mem[MPU_MAX_REGIONS];
+
+static unsigned int __initdata mpu_max_regions;
+
+static __init bool is_region_fixed(int number)
+{
+ switch (number) {
+ case PMSAv8_XIP_REGION:
+ case PMSAv8_KERNEL_REGION:
+ return true;
+ default:
+ return false;
+ }
+}
+
+void __init pmsav8_adjust_lowmem_bounds(void)
+{
+ phys_addr_t mem_end;
+ struct memblock_region *reg;
+ bool first = true;
+
+ for_each_memblock(memory, reg) {
+ if (first) {
+ phys_addr_t phys_offset = PHYS_OFFSET;
+
+ /*
+ * Initially only use memory contiguous from
+ * PHYS_OFFSET
+ */
+ if (reg->base != phys_offset)
+ panic("First memory bank must be contiguous from PHYS_OFFSET");
+ mem_end = reg->base + reg->size;
+ first = false;
+ } else {
+ /*
+ * memblock auto merges contiguous blocks, remove
+ * all blocks afterwards in one go (we can't remove
+ * blocks separately while iterating)
+ */
+ pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",
+ &mem_end, &reg->base);
+ memblock_remove(reg->base, 0 - reg->base);
+ break;
+ }
+ }
+}
+
+static int __init __mpu_max_regions(void)
+{
+ static int max_regions;
+ u32 mpuir;
+
+ if (max_regions)
+ return max_regions;
+
+ mpuir = read_cpuid_mputype();
+
+ max_regions = (mpuir & MPUIR_DREGION_SZMASK) >> MPUIR_DREGION;
+
+ return max_regions;
+}
+
+static int __init __pmsav8_setup_region(unsigned int number, u32 bar, u32 lar)
+{
+ if (number > mpu_max_regions ||
+     number >= MPU_MAX_REGIONS)
+ return -ENOENT;
+
+ dsb();
+ prsel_write(number);
+ isb();
+ prbar_write(bar);
+ prlar_write(lar);
+
+ mpu_rgn_info.rgns[number].prbar = bar;
+ mpu_rgn_info.rgns[number].prlar = lar;
+
+ mpu_rgn_info.used++;
+
+ return 0;
+}
+
+static int __init pmsav8_setup_ram(unsigned int number, phys_addr_t start, phys_addr_t end)
+{
+ u32 bar, lar;
+
+ if (is_region_fixed(number))
+ return -EINVAL;
+
+ bar = start;
+ lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);
+
+ bar |= PMSAv8_AP_PL1RW_PL0RW | PMSAv8_RGN_SHARED;
+ lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN;
+
+ return __pmsav8_setup_region(number, bar, lar);
+}
+
+static int __init pmsav8_setup_io(unsigned int number, phys_addr_t start, phys_addr_t end)
+{
+ u32 bar, lar;
+
+ if (is_region_fixed(number))
+ return -EINVAL;
+
+ bar = start;
+ lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);
+
+ bar |= PMSAv8_AP_PL1RW_PL0RW | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN;
+ lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN;
+
+ return __pmsav8_setup_region(number, bar, lar);
+}
+
+static int __init pmsav8_setup_fixed(unsigned int number, phys_addr_t start, phys_addr_t end)
+{
+ u32 bar, lar;
+
+ if (!is_region_fixed(number))
+ return -EINVAL;
+
+ bar = start;
+ lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);
+
+ bar |= PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED;
+ lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN;
+
+ prsel_write(number);
+ isb();
+
+ if (prbar_read() != bar || prlar_read() != lar)
+ return -EINVAL;
+
+ /* Reserved region was set up early, we just need a record for secondaries */
+ mpu_rgn_info.rgns[number].prbar = bar;
+ mpu_rgn_info.rgns[number].prlar = lar;
+
+ mpu_rgn_info.used++;
+
+ return 0;
+}
+
+#ifndef CONFIG_CPU_V7M
+static int __init pmsav8_setup_vector(unsigned int number, phys_addr_t start, phys_addr_t end)
+{
+ u32 bar, lar;
+
+ if (number == PMSAv8_KERNEL_REGION)
+ return -EINVAL;
+
+ bar = start;
+ lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);
+
+ bar |= PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED;
+ lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN;
+
+ return __pmsav8_setup_region(number, bar, lar);
+}
+#endif
+
+void __init pmsav8_setup(void)
+{
+ int i, err = 0;
+ int region = PMSAv8_KERNEL_REGION;
+
+ /* How many regions are supported? */
+ mpu_max_regions = __mpu_max_regions();
+
+ /* RAM: single chunk of memory */
+ add_range(mem, ARRAY_SIZE(mem), 0, memblock.memory.regions[0].base,
+ memblock.memory.regions[0].base + memblock.memory.regions[0].size);
+
+ /* IO: cover full 4G range */
+ add_range(io, ARRAY_SIZE(io), 0, 0, 0xffffffff);
+
+ /* RAM and IO: exclude kernel */
+ subtract_range(mem, ARRAY_SIZE(mem), __pa(KERNEL_START), __pa(KERNEL_END));
+ subtract_range(io, ARRAY_SIZE(io), __pa(KERNEL_START), __pa(KERNEL_END));
+
+#ifdef CONFIG_XIP_KERNEL
+ /* RAM and IO: exclude xip */
+ subtract_range(mem, ARRAY_SIZE(mem), CONFIG_XIP_PHYS_ADDR, __pa(_exiprom));
+ subtract_range(io, ARRAY_SIZE(io), CONFIG_XIP_PHYS_ADDR, __pa(_exiprom));
+#endif
+
+#ifndef CONFIG_CPU_V7M
+ /* RAM and IO: exclude vectors */
+ subtract_range(mem, ARRAY_SIZE(mem), vectors_base, vectors_base + 2 * PAGE_SIZE);
+ subtract_range(io, ARRAY_SIZE(io), vectors_base, vectors_base + 2 * PAGE_SIZE);
+#endif
+ /* IO: exclude RAM */
+ for (i = 0; i < ARRAY_SIZE(mem); i++)
+ subtract_range(io, ARRAY_SIZE(io), mem[i].start, mem[i].end);
+
+ /* Now program MPU */
+
+#ifdef CONFIG_XIP_KERNEL
+ /* ROM */
+ err |= pmsav8_setup_fixed(PMSAv8_XIP_REGION, CONFIG_XIP_PHYS_ADDR, __pa(_exiprom));
+#endif
+ /* Kernel */
+ err |= pmsav8_setup_fixed(region++, __pa(KERNEL_START), __pa(KERNEL_END));
+
+ /* IO */
+ for (i = 0; i < ARRAY_SIZE(io); i++) {
+ if (!io[i].end)
+ continue;
+
+ err |= pmsav8_setup_io(region++, io[i].start, io[i].end);
+ }
+
+ /* RAM */
+ for (i = 0; i < ARRAY_SIZE(mem); i++) {
+ if (!mem[i].end)
+ continue;
+
+ err |= pmsav8_setup_ram(region++, mem[i].start, mem[i].end);
+ }
+
+ /* Vectors */
+#ifndef CONFIG_CPU_V7M
+ err |= pmsav8_setup_vector(region++, vectors_base, vectors_base + 2 * PAGE_SIZE);
+#endif
+ if (err)
+ pr_warn("MPU region initialization failure! %d", err);
+ else
+ pr_info("Using ARM PMSAv8 Compliant MPU. Used %d of %d regions\n",
+ mpu_rgn_info.used, mpu_max_regions);
+}
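To make the PRBAR/PRLAR encoding in pmsav8_setup_ram()/pmsav8_setup_io()
concrete: the base register carries the region start with access-permission
and shareability attributes OR'd into its low bits, while the limit register
carries the inclusive end address rounded down to the region granule. A
standalone sketch, assuming PMSAv8_MINALIGN is the architectural 64-byte
granule:

#include <stdio.h>

#define PMSAV8_MINALIGN 64u	/* assumed: PMSAv8 region granule */

int main(void)
{
	/* hypothetical RAM region [0x80000000, 0x88000000) */
	unsigned int start = 0x80000000u, end = 0x88000000u;

	unsigned int bar = start;	/* attribute bits get OR'd into the low bits */
	unsigned int lar = (end - 1) & ~(PMSAV8_MINALIGN - 1);

	/* prints: PRBAR base 0x80000000, PRLAR limit 0x87ffffc0 */
	printf("PRBAR base 0x%08x, PRLAR limit 0x%08x\n", bar, lar);
	return 0;
}

Because the limit is inclusive, end - 1 rather than end is masked; the low
six bits of PRLAR are then free to hold the MAIR attribute index and the
enable bit.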
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index f10e31d0730a..81d0efb055c6 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -273,13 +273,14 @@
mcr p15, 0, ip, c7, c10, 4 @ data write barrier
.endm
-.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0
+.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0
.type \name\()_processor_functions, #object
.align 2
ENTRY(\name\()_processor_functions)
.word \dabort
.word \pabort
.word cpu_\name\()_proc_init
+ .word \bugs
.word cpu_\name\()_proc_fin
.word cpu_\name\()_reset
.word cpu_\name\()_do_idle
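The new .word \bugs slot lands immediately after the proc_init entry in the
processor_functions table, so the C-side struct processor in
<asm/proc-fns.h> (changed elsewhere in this series, not in this diff) gains
a matching member; roughly:

struct processor {
	void (*_data_abort)(unsigned long pc);
	unsigned long (*_prefetch_abort)(unsigned long lr);
	void (*_proc_init)(void);
	void (*check_bugs)(void);	/* new: per-CPU bug-workaround hook */
	void (*_proc_fin)(void);
	void (*reset)(unsigned long addr);
	int (*_do_idle)(void);
	/* ... remaining members unchanged ... */
};

Keeping the assembly table and the C struct in the same order is what lets
the bugs= macro argument used in proc-v7.S below wire up cpu_v7_bugs_init
and the IBE-checking variants.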
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index c6141a5435c3..f8d45ad2a515 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -41,11 +41,6 @@
* even on Cortex-A8 revisions not affected by 430973.
* If IBE is not set, the flush BTAC/BTB won't do anything.
*/
-ENTRY(cpu_ca8_switch_mm)
-#ifdef CONFIG_MMU
- mov r2, #0
- mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
-#endif
ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
mmid r1, r1 @ get mm->context.id
@@ -66,7 +61,6 @@ ENTRY(cpu_v7_switch_mm)
#endif
bx lr
ENDPROC(cpu_v7_switch_mm)
-ENDPROC(cpu_ca8_switch_mm)
/*
* cpu_v7_set_pte_ext(ptep, pte)
diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
new file mode 100644
index 000000000000..5544b82a2e7a
--- /dev/null
+++ b/arch/arm/mm/proc-v7-bugs.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/arm-smccc.h>
+#include <linux/kernel.h>
+#include <linux/psci.h>
+#include <linux/smp.h>
+
+#include <asm/cp15.h>
+#include <asm/cputype.h>
+#include <asm/proc-fns.h>
+#include <asm/system_misc.h>
+
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
+
+extern void cpu_v7_iciallu_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+extern void cpu_v7_bpiall_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+extern void cpu_v7_smc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+extern void cpu_v7_hvc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+
+static void harden_branch_predictor_bpiall(void)
+{
+ write_sysreg(0, BPIALL);
+}
+
+static void harden_branch_predictor_iciallu(void)
+{
+ write_sysreg(0, ICIALLU);
+}
+
+static void __maybe_unused call_smc_arch_workaround_1(void)
+{
+ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+}
+
+static void __maybe_unused call_hvc_arch_workaround_1(void)
+{
+ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+}
+
+static void cpu_v7_spectre_init(void)
+{
+ const char *spectre_v2_method = NULL;
+ int cpu = smp_processor_id();
+
+ if (per_cpu(harden_branch_predictor_fn, cpu))
+ return;
+
+ switch (read_cpuid_part()) {
+ case ARM_CPU_PART_CORTEX_A8:
+ case ARM_CPU_PART_CORTEX_A9:
+ case ARM_CPU_PART_CORTEX_A12:
+ case ARM_CPU_PART_CORTEX_A17:
+ case ARM_CPU_PART_CORTEX_A73:
+ case ARM_CPU_PART_CORTEX_A75:
+ if (processor.switch_mm != cpu_v7_bpiall_switch_mm)
+ goto bl_error;
+ per_cpu(harden_branch_predictor_fn, cpu) =
+ harden_branch_predictor_bpiall;
+ spectre_v2_method = "BPIALL";
+ break;
+
+ case ARM_CPU_PART_CORTEX_A15:
+ case ARM_CPU_PART_BRAHMA_B15:
+ if (processor.switch_mm != cpu_v7_iciallu_switch_mm)
+ goto bl_error;
+ per_cpu(harden_branch_predictor_fn, cpu) =
+ harden_branch_predictor_iciallu;
+ spectre_v2_method = "ICIALLU";
+ break;
+
+#ifdef CONFIG_ARM_PSCI
+ default:
+ /* Other ARM CPUs require no workaround */
+ if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
+ break;
+ /* fallthrough */
+ /* Cortex A57/A72 require firmware workaround */
+ case ARM_CPU_PART_CORTEX_A57:
+ case ARM_CPU_PART_CORTEX_A72: {
+ struct arm_smccc_res res;
+
+ if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
+ break;
+
+ switch (psci_ops.conduit) {
+ case PSCI_CONDUIT_HVC:
+ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+ if ((int)res.a0 != 0)
+ break;
+ if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu)
+ goto bl_error;
+ per_cpu(harden_branch_predictor_fn, cpu) =
+ call_hvc_arch_workaround_1;
+ processor.switch_mm = cpu_v7_hvc_switch_mm;
+ spectre_v2_method = "hypervisor";
+ break;
+
+ case PSCI_CONDUIT_SMC:
+ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+ if ((int)res.a0 != 0)
+ break;
+ if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu)
+ goto bl_error;
+ per_cpu(harden_branch_predictor_fn, cpu) =
+ call_smc_arch_workaround_1;
+ processor.switch_mm = cpu_v7_smc_switch_mm;
+ spectre_v2_method = "firmware";
+ break;
+
+ default:
+ break;
+ }
+ }
+#endif
+ }
+
+ if (spectre_v2_method)
+ pr_info("CPU%u: Spectre v2: using %s workaround\n",
+ smp_processor_id(), spectre_v2_method);
+ return;
+
+bl_error:
+ pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n",
+ cpu);
+}
+#else
+static void cpu_v7_spectre_init(void)
+{
+}
+#endif
+
+static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
+ u32 mask, const char *msg)
+{
+ u32 aux_cr;
+
+ asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (aux_cr));
+
+ if ((aux_cr & mask) != mask) {
+ if (!*warned)
+ pr_err("CPU%u: %s", smp_processor_id(), msg);
+ *warned = true;
+ return false;
+ }
+ return true;
+}
+
+static DEFINE_PER_CPU(bool, spectre_warned);
+
+static bool check_spectre_auxcr(bool *warned, u32 bit)
+{
+ return IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR) &&
+ cpu_v7_check_auxcr_set(warned, bit,
+ "Spectre v2: firmware did not set auxiliary control register IBE bit, system vulnerable\n");
+}
+
+void cpu_v7_ca8_ibe(void)
+{
+ if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6)))
+ cpu_v7_spectre_init();
+}
+
+void cpu_v7_ca15_ibe(void)
+{
+ if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
+ cpu_v7_spectre_init();
+}
+
+void cpu_v7_bugs_init(void)
+{
+ cpu_v7_spectre_init();
+}
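The cpu_v7_*_ibe() and cpu_v7_bugs_init() hooks above are invoked from the
generic ARM bug-checking path; elsewhere in this series
(arch/arm/kernel/bugs.c, not part of this diff) the call site looks roughly
like:

void check_other_bugs(void)
{
#ifdef MULTI_CPU
	if (processor.check_bugs)
		processor.check_bugs();
#endif
}

void __init check_bugs(void)
{
	check_writebuffer_bugs();
	check_other_bugs();
}

check_other_bugs() also runs on each secondary CPU as it comes online, which
is why cpu_v7_spectre_init() installs the workaround into a per-CPU function
pointer rather than a single global.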
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index b528a15f460d..6fe52819e014 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -9,6 +9,7 @@
*
* This is the "shell" of the ARMv7 processor support.
*/
+#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
@@ -93,6 +94,37 @@ ENTRY(cpu_v7_dcache_clean_area)
ret lr
ENDPROC(cpu_v7_dcache_clean_area)
+#ifdef CONFIG_ARM_PSCI
+ .arch_extension sec
+ENTRY(cpu_v7_smc_switch_mm)
+ stmfd sp!, {r0 - r3}
+ movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
+ movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
+ smc #0
+ ldmfd sp!, {r0 - r3}
+ b cpu_v7_switch_mm
+ENDPROC(cpu_v7_smc_switch_mm)
+ .arch_extension virt
+ENTRY(cpu_v7_hvc_switch_mm)
+ stmfd sp!, {r0 - r3}
+ movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
+ movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
+ hvc #0
+ ldmfd sp!, {r0 - r3}
+ b cpu_v7_switch_mm
+ENDPROC(cpu_v7_hvc_switch_mm)
+#endif
+ENTRY(cpu_v7_iciallu_switch_mm)
+ mov r3, #0
+ mcr p15, 0, r3, c7, c5, 0 @ ICIALLU
+ b cpu_v7_switch_mm
+ENDPROC(cpu_v7_iciallu_switch_mm)
+ENTRY(cpu_v7_bpiall_switch_mm)
+ mov r3, #0
+ mcr p15, 0, r3, c7, c5, 6 @ flush BTAC/BTB
+ b cpu_v7_switch_mm
+ENDPROC(cpu_v7_bpiall_switch_mm)
+
string cpu_v7_name, "ARMv7 Processor"
.align
@@ -158,31 +190,6 @@ ENTRY(cpu_v7_do_resume)
ENDPROC(cpu_v7_do_resume)
#endif
-/*
- * Cortex-A8
- */
- globl_equ cpu_ca8_proc_init, cpu_v7_proc_init
- globl_equ cpu_ca8_proc_fin, cpu_v7_proc_fin
- globl_equ cpu_ca8_reset, cpu_v7_reset
- globl_equ cpu_ca8_do_idle, cpu_v7_do_idle
- globl_equ cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area
- globl_equ cpu_ca8_set_pte_ext, cpu_v7_set_pte_ext
- globl_equ cpu_ca8_suspend_size, cpu_v7_suspend_size
-#ifdef CONFIG_ARM_CPU_SUSPEND
- globl_equ cpu_ca8_do_suspend, cpu_v7_do_suspend
- globl_equ cpu_ca8_do_resume, cpu_v7_do_resume
-#endif
-
-/*
- * Cortex-A9 processor functions
- */
- globl_equ cpu_ca9mp_proc_init, cpu_v7_proc_init
- globl_equ cpu_ca9mp_proc_fin, cpu_v7_proc_fin
- globl_equ cpu_ca9mp_reset, cpu_v7_reset
- globl_equ cpu_ca9mp_do_idle, cpu_v7_do_idle
- globl_equ cpu_ca9mp_dcache_clean_area, cpu_v7_dcache_clean_area
- globl_equ cpu_ca9mp_switch_mm, cpu_v7_switch_mm
- globl_equ cpu_ca9mp_set_pte_ext, cpu_v7_set_pte_ext
.globl cpu_ca9mp_suspend_size
.equ cpu_ca9mp_suspend_size, cpu_v7_suspend_size + 4 * 2
#ifdef CONFIG_ARM_CPU_SUSPEND
@@ -547,12 +554,79 @@ __v7_setup_stack:
__INITDATA
+ .weak cpu_v7_bugs_init
+
@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
- define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
+ define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
+
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+ @ generic v7 bpiall on context switch
+ globl_equ cpu_v7_bpiall_proc_init, cpu_v7_proc_init
+ globl_equ cpu_v7_bpiall_proc_fin, cpu_v7_proc_fin
+ globl_equ cpu_v7_bpiall_reset, cpu_v7_reset
+ globl_equ cpu_v7_bpiall_do_idle, cpu_v7_do_idle
+ globl_equ cpu_v7_bpiall_dcache_clean_area, cpu_v7_dcache_clean_area
+ globl_equ cpu_v7_bpiall_set_pte_ext, cpu_v7_set_pte_ext
+ globl_equ cpu_v7_bpiall_suspend_size, cpu_v7_suspend_size
+#ifdef CONFIG_ARM_CPU_SUSPEND
+ globl_equ cpu_v7_bpiall_do_suspend, cpu_v7_do_suspend
+ globl_equ cpu_v7_bpiall_do_resume, cpu_v7_do_resume
+#endif
+ define_processor_functions v7_bpiall, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
+
+#define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_bpiall_processor_functions
+#else
+#define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_processor_functions
+#endif
+
#ifndef CONFIG_ARM_LPAE
- define_processor_functions ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
- define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
+ @ Cortex-A8 - always needs bpiall switch_mm implementation
+ globl_equ cpu_ca8_proc_init, cpu_v7_proc_init
+ globl_equ cpu_ca8_proc_fin, cpu_v7_proc_fin
+ globl_equ cpu_ca8_reset, cpu_v7_reset
+ globl_equ cpu_ca8_do_idle, cpu_v7_do_idle
+ globl_equ cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area
+ globl_equ cpu_ca8_set_pte_ext, cpu_v7_set_pte_ext
+ globl_equ cpu_ca8_switch_mm, cpu_v7_bpiall_switch_mm
+ globl_equ cpu_ca8_suspend_size, cpu_v7_suspend_size
+#ifdef CONFIG_ARM_CPU_SUSPEND
+ globl_equ cpu_ca8_do_suspend, cpu_v7_do_suspend
+ globl_equ cpu_ca8_do_resume, cpu_v7_do_resume
+#endif
+ define_processor_functions ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_ca8_ibe
+
+ @ Cortex-A9 - needs more registers preserved across suspend/resume
+ @ and bpiall switch_mm for hardening
+ globl_equ cpu_ca9mp_proc_init, cpu_v7_proc_init
+ globl_equ cpu_ca9mp_proc_fin, cpu_v7_proc_fin
+ globl_equ cpu_ca9mp_reset, cpu_v7_reset
+ globl_equ cpu_ca9mp_do_idle, cpu_v7_do_idle
+ globl_equ cpu_ca9mp_dcache_clean_area, cpu_v7_dcache_clean_area
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+ globl_equ cpu_ca9mp_switch_mm, cpu_v7_bpiall_switch_mm
+#else
+ globl_equ cpu_ca9mp_switch_mm, cpu_v7_switch_mm
+#endif
+ globl_equ cpu_ca9mp_set_pte_ext, cpu_v7_set_pte_ext
+ define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
#endif
+
+ @ Cortex-A15 - needs iciallu switch_mm for hardening
+ globl_equ cpu_ca15_proc_init, cpu_v7_proc_init
+ globl_equ cpu_ca15_proc_fin, cpu_v7_proc_fin
+ globl_equ cpu_ca15_reset, cpu_v7_reset
+ globl_equ cpu_ca15_do_idle, cpu_v7_do_idle
+ globl_equ cpu_ca15_dcache_clean_area, cpu_v7_dcache_clean_area
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+ globl_equ cpu_ca15_switch_mm, cpu_v7_iciallu_switch_mm
+#else
+ globl_equ cpu_ca15_switch_mm, cpu_v7_switch_mm
+#endif
+ globl_equ cpu_ca15_set_pte_ext, cpu_v7_set_pte_ext
+ globl_equ cpu_ca15_suspend_size, cpu_v7_suspend_size
+ globl_equ cpu_ca15_do_suspend, cpu_v7_do_suspend
+ globl_equ cpu_ca15_do_resume, cpu_v7_do_resume
+ define_processor_functions ca15, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_ca15_ibe
#ifdef CONFIG_CPU_PJ4B
define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
#endif
@@ -669,7 +743,7 @@ __v7_ca7mp_proc_info:
__v7_ca12mp_proc_info:
.long 0x410fc0d0
.long 0xff0ffff0
- __v7_proc __v7_ca12mp_proc_info, __v7_ca12mp_setup
+ __v7_proc __v7_ca12mp_proc_info, __v7_ca12mp_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
.size __v7_ca12mp_proc_info, . - __v7_ca12mp_proc_info
/*
@@ -679,7 +753,7 @@ __v7_ca12mp_proc_info:
__v7_ca15mp_proc_info:
.long 0x410fc0f0
.long 0xff0ffff0
- __v7_proc __v7_ca15mp_proc_info, __v7_ca15mp_setup
+ __v7_proc __v7_ca15mp_proc_info, __v7_ca15mp_setup, proc_fns = ca15_processor_functions
.size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
/*
@@ -689,7 +763,7 @@ __v7_ca15mp_proc_info:
__v7_b15mp_proc_info:
.long 0x420f00f0
.long 0xff0ffff0
- __v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup, cache_fns = b15_cache_fns
+ __v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup, proc_fns = ca15_processor_functions, cache_fns = b15_cache_fns
.size __v7_b15mp_proc_info, . - __v7_b15mp_proc_info
/*
@@ -699,9 +773,25 @@ __v7_b15mp_proc_info:
__v7_ca17mp_proc_info:
.long 0x410fc0e0
.long 0xff0ffff0
- __v7_proc __v7_ca17mp_proc_info, __v7_ca17mp_setup
+ __v7_proc __v7_ca17mp_proc_info, __v7_ca17mp_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
.size __v7_ca17mp_proc_info, . - __v7_ca17mp_proc_info
+ /* ARM Ltd. Cortex A73 processor */
+ .type __v7_ca73_proc_info, #object
+__v7_ca73_proc_info:
+ .long 0x410fd090
+ .long 0xff0ffff0
+ __v7_proc __v7_ca73_proc_info, __v7_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
+ .size __v7_ca73_proc_info, . - __v7_ca73_proc_info
+
+ /* ARM Ltd. Cortex A75 processor */
+ .type __v7_ca75_proc_info, #object
+__v7_ca75_proc_info:
+ .long 0x410fd0a0
+ .long 0xff0ffff0
+ __v7_proc __v7_ca75_proc_info, __v7_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
+ .size __v7_ca75_proc_info, . - __v7_ca75_proc_info
+
/*
* Qualcomm Inc. Krait processors.
*/