path: root/arch/arm64/kernel
author     Maarten Lankhorst    2019-05-28 09:38:59 +0200
committer  Maarten Lankhorst    2019-05-28 09:39:08 +0200
commit     4672b1d65fc9b5a5ded911fbebb4853b892d5d89 (patch)
tree       4ddeeb9fd195ce85f0f49525b94c39bdee8b94d9 /arch/arm64/kernel
parent     cf57fdc8e42bd88dae1213a2bbe683d4b6c2190b (diff)
parent     14ee642c2ab0a3d8a1ded11fade692d8b77172b9 (diff)
Merge remote-tracking branch 'drm/drm-next' into drm-misc-next
This picks up rc2 for us as well.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/cpu_errata.c   |  48
-rw-r--r--  arch/arm64/kernel/entry.S        |   4
-rw-r--r--  arch/arm64/kernel/kaslr.c        |   6
-rw-r--r--  arch/arm64/kernel/module.c       |  18
-rw-r--r--  arch/arm64/kernel/syscall.c      |  31
-rw-r--r--  arch/arm64/kernel/traps.c        |   4
-rw-r--r--  arch/arm64/kernel/vdso/Makefile  |   4
7 files changed, 91 insertions, 24 deletions
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index e88d4e7bdfc7..d61beedba101 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -502,6 +502,22 @@ static const struct midr_range arm64_ssb_cpus[] = {
{},
};
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
+
+static bool
+has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ u32 midr = read_cpuid_id();
+ /* Cortex-A76 r0p0 - r3p1 */
+ struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);
+
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+ return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
+}
+#endif
+
static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
@@ -682,12 +698,16 @@ static const struct midr_range workaround_clean_cache[] = {
};
#endif
-#ifdef CONFIG_ARM64_ERRATUM_1188873
-static const struct midr_range erratum_1188873_list[] = {
- /* Cortex-A76 r0p0 to r2p0 */
- MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
- /* Neoverse-N1 r0p0 to r2p0 */
- MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 2, 0),
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+/*
+ * - 1188873 affects r0p0 to r2p0
+ * - 1418040 affects r0p0 to r3p1
+ */
+static const struct midr_range erratum_1418040_list[] = {
+ /* Cortex-A76 r0p0 to r3p1 */
+ MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
+ /* Neoverse-N1 r0p0 to r3p1 */
+ MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
{},
};
#endif
@@ -809,11 +829,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.matches = has_ssbd_mitigation,
.midr_range_list = arm64_ssb_cpus,
},
-#ifdef CONFIG_ARM64_ERRATUM_1188873
+#ifdef CONFIG_ARM64_ERRATUM_1418040
{
- .desc = "ARM erratum 1188873",
- .capability = ARM64_WORKAROUND_1188873,
- ERRATA_MIDR_RANGE_LIST(erratum_1188873_list),
+ .desc = "ARM erratum 1418040",
+ .capability = ARM64_WORKAROUND_1418040,
+ ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1165522
@@ -824,6 +844,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
},
#endif
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+ {
+ .desc = "ARM erratum 1463225",
+ .capability = ARM64_WORKAROUND_1463225,
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = has_cortex_a76_erratum_1463225,
+ },
+#endif
{
}
};
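
A note on the matcher added above: once implementer and part number have matched, is_midr_in_range() reduces to comparing the variant and revision fields of MIDR_EL1 against a window such as r0p0 - r3p1. A minimal user-space sketch of that comparison follows; the helper names and midr_in_rev_range() are illustrative, not the kernel's API.

#include <stdbool.h>
#include <stdint.h>

/* MIDR_EL1 layout: variant in bits [23:20], revision in bits [3:0]. */
#define MIDR_VARIANT(midr)	(((midr) >> 20) & 0xf)
#define MIDR_REVISION(midr)	((midr) & 0xf)

/* An "rVpR" window compares (variant, revision) as one packed value. */
static bool midr_in_rev_range(uint32_t midr,
			      unsigned int v_min, unsigned int r_min,
			      unsigned int v_max, unsigned int r_max)
{
	unsigned int rev = (MIDR_VARIANT(midr) << 4) | MIDR_REVISION(midr);

	return rev >= ((v_min << 4) | r_min) && rev <= ((v_max << 4) | r_max);
}

Read this way, MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1) in the hunk above covers Cortex-A76 r0p0 through r3p1 inclusive.
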
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 1a7811b7e3c4..cd0c7af8e4a8 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -336,8 +336,8 @@ alternative_if ARM64_WORKAROUND_845719
alternative_else_nop_endif
#endif
3:
-#ifdef CONFIG_ARM64_ERRATUM_1188873
-alternative_if_not ARM64_WORKAROUND_1188873
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+alternative_if_not ARM64_WORKAROUND_1418040
b 4f
alternative_else_nop_endif
/*
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index b09b6f75f759..06941c1fe418 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -145,15 +145,15 @@ u64 __init kaslr_early_init(u64 dt_phys)
if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
/*
- * Randomize the module region over a 4 GB window covering the
+ * Randomize the module region over a 2 GB window covering the
* kernel. This reduces the risk of modules leaking information
* about the address of the kernel itself, but results in
* branches between modules and the core kernel that are
* resolved via PLTs. (Branches between modules will be
* resolved normally.)
*/
- module_range = SZ_4G - (u64)(_end - _stext);
- module_alloc_base = max((u64)_end + offset - SZ_4G,
+ module_range = SZ_2G - (u64)(_end - _stext);
+ module_alloc_base = max((u64)_end + offset - SZ_2G,
(u64)MODULES_VADDR);
} else {
/*
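
The window arithmetic in the hunk above can be checked in isolation: the 2 GB span is shrunk by the kernel image size so that the kernel and the module region together always fit inside a window addressable by signed 32-bit place-relative references, and the window is never allowed to start below the module area floor. A stand-alone sketch with made-up example addresses (all names and values below are illustrative, not taken from the kernel):

#include <stdint.h>
#include <stdio.h>

#define SZ_2G	0x80000000ULL

int main(void)
{
	uint64_t modules_vaddr = 0xffff800008000000ULL;	/* example region floor   */
	uint64_t kernel_end    = 0xffff800012000000ULL;	/* example _end           */
	uint64_t kernel_size   = 0x02000000ULL;		/* example _end - _stext  */
	uint64_t offset        = 0;			/* example KASLR offset   */

	/* Shrink the window by the image size so kernel + modules both fit. */
	uint64_t module_range = SZ_2G - kernel_size;

	/*
	 * Lowest base that still keeps the kernel inside the 2 GB window,
	 * clamped so the region never starts below the module area floor.
	 */
	uint64_t base = kernel_end + offset - SZ_2G;
	uint64_t module_alloc_base = base > modules_vaddr ? base : modules_vaddr;

	printf("module window: [%#llx, %#llx)\n",
	       (unsigned long long)module_alloc_base,
	       (unsigned long long)(module_alloc_base + module_range));
	return 0;
}
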
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index f713e2fc4d75..f32359cffb01 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -56,7 +56,7 @@ void *module_alloc(unsigned long size)
* can simply omit this fallback in that case.
*/
p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
- module_alloc_base + SZ_4G, GFP_KERNEL,
+ module_alloc_base + SZ_2G, GFP_KERNEL,
PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
__builtin_return_address(0));
@@ -96,15 +96,27 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
s64 sval = do_reloc(op, place, val);
+ /*
+ * The ELF psABI for AArch64 documents the 16-bit and 32-bit place
+ * relative relocations as having a range of [-2^15, 2^16) or
+ * [-2^31, 2^32), respectively. However, in order to be able to detect
+ * overflows reliably, we have to choose whether we interpret such
+ * quantities as signed or as unsigned, and stick with it.
+ * The way we organize our address space requires a signed
+ * interpretation of 32-bit relative references, so let's use that
+ * for all R_AARCH64_PRELxx relocations. This means our upper
+ * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX.
+ */
+
switch (len) {
case 16:
*(s16 *)place = sval;
- if (sval < S16_MIN || sval > U16_MAX)
+ if (sval < S16_MIN || sval > S16_MAX)
return -ERANGE;
break;
case 32:
*(s32 *)place = sval;
- if (sval < S32_MIN || sval > U32_MAX)
+ if (sval < S32_MIN || sval > S32_MAX)
return -ERANGE;
break;
case 64:
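
The effect of the tightened bounds above can be illustrated outside the kernel: a place-relative value is now interpreted as signed, so an offset that would still fit an unsigned 32-bit field is rejected once it exceeds S32_MAX. A hedged stand-alone sketch (store_prel32() is illustrative, not the kernel's helper):

#include <stdint.h>
#include <stdio.h>

/* Mirror of the 32-bit case above: write first, then range-check as signed. */
static int store_prel32(int32_t *place, int64_t sval)
{
	*place = (int32_t)sval;
	if (sval < INT32_MIN || sval > INT32_MAX)
		return -1;	/* the kernel returns -ERANGE here */
	return 0;
}

int main(void)
{
	int32_t slot;

	printf("+1 GB offset: %d\n", store_prel32(&slot, (int64_t)1 << 30)); /* fits     */
	printf("+3 GB offset: %d\n", store_prel32(&slot, (int64_t)3 << 30)); /* overflow */
	return 0;
}

The +3 GB case is exactly the one the old U32_MAX bound would have accepted: it fits an unsigned 32-bit field but not a signed one.
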
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index 5610ac01c1ec..871c739f060a 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -8,6 +8,7 @@
#include <linux/syscalls.h>
#include <asm/daifflags.h>
+#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/syscall.h>
#include <asm/thread_info.h>
@@ -60,6 +61,35 @@ static inline bool has_syscall_work(unsigned long flags)
int syscall_trace_enter(struct pt_regs *regs);
void syscall_trace_exit(struct pt_regs *regs);
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
+
+static void cortex_a76_erratum_1463225_svc_handler(void)
+{
+ u32 reg, val;
+
+ if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
+ return;
+
+ if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
+ return;
+
+ __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
+ reg = read_sysreg(mdscr_el1);
+ val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
+ write_sysreg(val, mdscr_el1);
+ asm volatile("msr daifclr, #8");
+ isb();
+
+ /* We will have taken a single-step exception by this point */
+
+ write_sysreg(reg, mdscr_el1);
+ __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
+}
+#else
+static void cortex_a76_erratum_1463225_svc_handler(void) { }
+#endif /* CONFIG_ARM64_ERRATUM_1463225 */
+
static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
const syscall_fn_t syscall_table[])
{
@@ -68,6 +98,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
regs->orig_x0 = regs->regs[0];
regs->syscallno = scno;
+ cortex_a76_erratum_1463225_svc_handler();
local_daif_restore(DAIF_PROCCTX);
user_exit();
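
The per-cpu flag set and cleared around the MDSCR_EL1 sequence above is what lets the rest of the kernel tell a workaround-induced single-step exception apart from a real one. The consumer is not part of this diff; a purely hypothetical kernel-context sketch of what such a check could look like:

/* Hypothetical sketch only; the actual handler is not shown in this diff. */
static int erratum_1463225_induced_step(void)
{
	if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
		return 0;	/* genuine single-step, report it normally */

	/* Induced by the SVC-entry workaround above: swallow it silently. */
	return 1;
}
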
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index ade32046f3fe..e6be1a6efc0a 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -168,7 +168,6 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
static int __die(const char *str, int err, struct pt_regs *regs)
{
- struct task_struct *tsk = current;
static int die_counter;
int ret;
@@ -181,9 +180,6 @@ static int __die(const char *str, int err, struct pt_regs *regs)
return ret;
print_modules();
- pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
- TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
- end_of_stack(tsk));
show_regs(regs);
if (!user_mode(regs))
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index 744b9dbaba03..fa230ff09aa1 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -12,8 +12,8 @@ obj-vdso := gettimeofday.o note.o sigreturn.o
targets := $(obj-vdso) vdso.so vdso.so.dbg
obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
-ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 \
- $(call ld-option, --hash-style=sysv) -n -T
+ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \
+ --build-id -n -T
# Disable gcov profiling for VDSO code
GCOV_PROFILE := n