author    | Linus Torvalds | 2016-10-20 10:17:13 -0700
committer | Linus Torvalds | 2016-10-20 10:17:13 -0700
commit    | f4814e61836f5571d2b89169ba2e9ea59e2bc8c8 (patch)
tree      | bef84dfb86c1207af9136322437200f2a34b1e9a /arch
parent    | bdcff41597e823a4b5117698fb3ad0cf833e10ac (diff)
parent    | f7881bd644474a4a62d7bd1ec801176f635f59ae (diff)
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Will Deacon:
"Most of these are CC'd for stable, but there are a few fixing issues
introduced during the recent merge window too.
There's also a fix for the xgene PMU driver, but it seemed daft to
send as a separate pull request, so I've included it here with the
rest of the fixes.
- Fix ACPI boot due to recent broken NUMA changes
- Fix remote enabling of CPU features requiring PSTATE bit manipulation
- Add address range check when emulating user cache maintenance
- Fix LL/SC loops that allow compiler to introduce memory accesses
- Fix recently added write_sysreg_s macro
- Ensure MDCR_EL2 is initialised on qemu targets without a PMU
- Avoid kaslr breakage due to MODVERSIONs and DYNAMIC_FTRACE
- Correctly drive recent ld when building relocatable Image
- Remove junk IS_ERR check from xgene PMU driver added during merge window
- pr_cont fixes after core changes in the merge window"
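
One of the items above, the LL/SC loop fix, addresses C do/while loops wrapped around ldxr/stxr inline assembly: the compiler may emit register spills or other memory accesses between the exclusive load and store, clearing the exclusive monitor so the store-exclusive never succeeds. A minimal sketch of the fixed pattern (illustrative names, not the kernel's exact macros), with the retry branch kept entirely inside the asm block:

	/* Sketch only: the whole retry loop lives inside the asm statement,
	 * so no compiler-generated load/store can land between ldxr and stxr. */
	static inline unsigned long ll_sc_add_return_sketch(unsigned long *ptr,
							    unsigned long val)
	{
		unsigned long ret, tmp;

		asm volatile(
		"1:	ldxr	%[ret], %[mem]\n"		/* load-exclusive */
		"	add	%[ret], %[ret], %[val]\n"
		"	stxr	%w[tmp], %[ret], %[mem]\n"	/* store-exclusive */
		"	cbnz	%w[tmp], 1b"			/* retry on failure */
		: [ret] "=&r" (ret), [tmp] "=&r" (tmp), [mem] "+Q" (*ptr)
		: [val] "Ir" (val));

		return ret;
	}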
* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
arm64: remove pr_cont abuse from mem_init
arm64: fix show_regs fallout from KERN_CONT changes
arm64: kernel: force ET_DYN ELF type for CONFIG_RELOCATABLE=y
arm64: suspend: Reconfigure PSTATE after resume from idle
arm64: mm: Set PSTATE.PAN from the cpu_enable_pan() call
arm64: cpufeature: Schedule enable() calls instead of calling them via IPI
arm64: Cortex-A53 errata workaround: check for kernel addresses
arm64: percpu: rewrite ll/sc loops in assembly
arm64: swp emulation: bound LL/SC retries before rescheduling
arm64: sysreg: Fix use of XZR in write_sysreg_s
arm64: kaslr: keep modules close to the kernel when DYNAMIC_FTRACE=y
arm64: kernel: Init MDCR_EL2 even in the absence of a PMU
perf: xgene: Remove bogus IS_ERR() check
arm64: kernel: numa: fix ACPI boot cpu numa node mapping
arm64: kaslr: fix breakage with CONFIG_MODVERSIONS=y
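
Two of the fixes listed above (the user cache maintenance emulation and the Cortex-A53 errata workaround) add the same guard before operating on a user-supplied pointer: strip any TBI tag from the top byte and reject anything outside the user address range. A simplified sketch of that check, assuming the helpers visible in the diff below (sign_extend64() and user_addr_max()); the real kernel performs it inside the __user_cache_maint() macro:

	/* Bits 63:56 may carry a TBI tag; sign-extending from bit 55 drops it
	 * and turns kernel addresses into values that fail the range check. */
	#define untagged_addr_sketch(addr)	sign_extend64((addr), 55)

	static int emulate_user_cache_op_sketch(unsigned long address)
	{
		if (untagged_addr_sketch(address) >= user_addr_max())
			return -EFAULT;	/* never emulate on kernel addresses */

		/* ...fixup-protected cache maintenance on the user address... */
		return 0;
	}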
Diffstat (limited to 'arch')
-rw-r--r-- | arch/arm64/Kconfig                   |   2
-rw-r--r-- | arch/arm64/Makefile                  |   2
-rw-r--r-- | arch/arm64/include/asm/cpufeature.h  |   2
-rw-r--r-- | arch/arm64/include/asm/exec.h        |   3
-rw-r--r-- | arch/arm64/include/asm/module.h      |   5
-rw-r--r-- | arch/arm64/include/asm/percpu.h      | 120
-rw-r--r-- | arch/arm64/include/asm/processor.h   |   6
-rw-r--r-- | arch/arm64/include/asm/sysreg.h      |   2
-rw-r--r-- | arch/arm64/include/asm/uaccess.h     |   8
-rw-r--r-- | arch/arm64/kernel/armv8_deprecated.c |  36
-rw-r--r-- | arch/arm64/kernel/cpu_errata.c       |   3
-rw-r--r-- | arch/arm64/kernel/cpufeature.c       |  10
-rw-r--r-- | arch/arm64/kernel/head.S             |   3
-rw-r--r-- | arch/arm64/kernel/process.c          |  18
-rw-r--r-- | arch/arm64/kernel/smp.c              |   1
-rw-r--r-- | arch/arm64/kernel/suspend.c          |  11
-rw-r--r-- | arch/arm64/kernel/traps.c            |  30
-rw-r--r-- | arch/arm64/mm/fault.c                |  15
-rw-r--r-- | arch/arm64/mm/init.c                 |  26
19 files changed, 183 insertions, 120 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 30398dbc940a..969ef880d234 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -915,7 +915,7 @@ config RANDOMIZE_BASE
 
 config RANDOMIZE_MODULE_REGION_FULL
 	bool "Randomize the module region independently from the core kernel"
-	depends on RANDOMIZE_BASE
+	depends on RANDOMIZE_BASE && !DYNAMIC_FTRACE
 	default y
 	help
 	  Randomizes the location of the module region without considering the
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index ab51aed6b6c1..3635b8662724 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -15,7 +15,7 @@ CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
 GZFLAGS		:=-9
 
 ifneq ($(CONFIG_RELOCATABLE),)
-LDFLAGS_vmlinux		+= -pie -Bsymbolic
+LDFLAGS_vmlinux		+= -pie -shared -Bsymbolic
 endif
 
 ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 758d74fedfad..a27c3245ba21 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -94,7 +94,7 @@ struct arm64_cpu_capabilities {
 	u16 capability;
 	int def_scope;			/* default scope */
 	bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
-	void (*enable)(void *);		/* Called on all active CPUs */
+	int (*enable)(void *);		/* Called on all active CPUs */
 	union {
 		struct {	/* To be used for erratum handling only */
 			u32 midr_model;
diff --git a/arch/arm64/include/asm/exec.h b/arch/arm64/include/asm/exec.h
index db0563c23482..f7865dd9d868 100644
--- a/arch/arm64/include/asm/exec.h
+++ b/arch/arm64/include/asm/exec.h
@@ -18,6 +18,9 @@
 #ifndef __ASM_EXEC_H
 #define __ASM_EXEC_H
 
+#include <linux/sched.h>
+
 extern unsigned long arch_align_stack(unsigned long sp);
+void uao_thread_switch(struct task_struct *next);
 
 #endif	/* __ASM_EXEC_H */
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
index e12af6754634..06ff7fd9e81f 100644
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -17,6 +17,7 @@
 #define __ASM_MODULE_H
 
 #include <asm-generic/module.h>
+#include <asm/memory.h>
 
 #define MODULE_ARCH_VERMAGIC	"aarch64"
 
@@ -32,6 +33,10 @@ u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela,
 			  Elf64_Sym *sym);
 
 #ifdef CONFIG_RANDOMIZE_BASE
+#ifdef CONFIG_MODVERSIONS
+#define ARCH_RELOCATES_KCRCTAB
+#define reloc_start		(kimage_vaddr - KIMAGE_VADDR)
+#endif
 extern u64 module_alloc_base;
 #else
 #define module_alloc_base	((u64)_etext - MODULES_VSIZE)
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 2fee2f59288c..5394c8405e66 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -44,48 +44,44 @@ static inline unsigned long __percpu_##op(void *ptr, \
 	 \
 	switch (size) { \
 	case 1: \
-		do { \
-			asm ("//__per_cpu_" #op "_1\n" \
-			"ldxrb %w[ret], %[ptr]\n" \
+		asm ("//__per_cpu_" #op "_1\n" \
+		"1:	ldxrb %w[ret], %[ptr]\n" \
 			#asm_op " %w[ret], %w[ret], %w[val]\n" \
-			"stxrb %w[loop], %w[ret], %[ptr]\n" \
-			: [loop] "=&r" (loop), [ret] "=&r" (ret), \
-			  [ptr] "+Q"(*(u8 *)ptr) \
-			: [val] "Ir" (val)); \
-		} while (loop); \
+		"	stxrb %w[loop], %w[ret], %[ptr]\n" \
+		"	cbnz %w[loop], 1b" \
+		: [loop] "=&r" (loop), [ret] "=&r" (ret), \
+		  [ptr] "+Q"(*(u8 *)ptr) \
+		: [val] "Ir" (val)); \
 		break; \
 	case 2: \
-		do { \
-			asm ("//__per_cpu_" #op "_2\n" \
-			"ldxrh %w[ret], %[ptr]\n" \
+		asm ("//__per_cpu_" #op "_2\n" \
+		"1:	ldxrh %w[ret], %[ptr]\n" \
 			#asm_op " %w[ret], %w[ret], %w[val]\n" \
-			"stxrh %w[loop], %w[ret], %[ptr]\n" \
-			: [loop] "=&r" (loop), [ret] "=&r" (ret), \
-			  [ptr] "+Q"(*(u16 *)ptr) \
-			: [val] "Ir" (val)); \
-		} while (loop); \
+		"	stxrh %w[loop], %w[ret], %[ptr]\n" \
+		"	cbnz %w[loop], 1b" \
+		: [loop] "=&r" (loop), [ret] "=&r" (ret), \
+		  [ptr] "+Q"(*(u16 *)ptr) \
+		: [val] "Ir" (val)); \
 		break; \
 	case 4: \
-		do { \
-			asm ("//__per_cpu_" #op "_4\n" \
-			"ldxr %w[ret], %[ptr]\n" \
+		asm ("//__per_cpu_" #op "_4\n" \
+		"1:	ldxr %w[ret], %[ptr]\n" \
 			#asm_op " %w[ret], %w[ret], %w[val]\n" \
-			"stxr %w[loop], %w[ret], %[ptr]\n" \
-			: [loop] "=&r" (loop), [ret] "=&r" (ret), \
-			  [ptr] "+Q"(*(u32 *)ptr) \
-			: [val] "Ir" (val)); \
-		} while (loop); \
+		"	stxr %w[loop], %w[ret], %[ptr]\n" \
+		"	cbnz %w[loop], 1b" \
+		: [loop] "=&r" (loop), [ret] "=&r" (ret), \
+		  [ptr] "+Q"(*(u32 *)ptr) \
+		: [val] "Ir" (val)); \
 		break; \
 	case 8: \
-		do { \
-			asm ("//__per_cpu_" #op "_8\n" \
-			"ldxr %[ret], %[ptr]\n" \
+		asm ("//__per_cpu_" #op "_8\n" \
+		"1:	ldxr %[ret], %[ptr]\n" \
 			#asm_op " %[ret], %[ret], %[val]\n" \
-			"stxr %w[loop], %[ret], %[ptr]\n" \
-			: [loop] "=&r" (loop), [ret] "=&r" (ret), \
-			  [ptr] "+Q"(*(u64 *)ptr) \
-			: [val] "Ir" (val)); \
-		} while (loop); \
+		"	stxr %w[loop], %[ret], %[ptr]\n" \
+		"	cbnz %w[loop], 1b" \
+		: [loop] "=&r" (loop), [ret] "=&r" (ret), \
+		  [ptr] "+Q"(*(u64 *)ptr) \
+		: [val] "Ir" (val)); \
 		break; \
 	default: \
 		BUILD_BUG(); \
@@ -150,44 +146,40 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
 
 	switch (size) {
 	case 1:
-		do {
-			asm ("//__percpu_xchg_1\n"
-			"ldxrb %w[ret], %[ptr]\n"
-			"stxrb %w[loop], %w[val], %[ptr]\n"
-			: [loop] "=&r"(loop), [ret] "=&r"(ret),
-			  [ptr] "+Q"(*(u8 *)ptr)
-			: [val] "r" (val));
-		} while (loop);
+		asm ("//__percpu_xchg_1\n"
+		"1:	ldxrb %w[ret], %[ptr]\n"
+		"	stxrb %w[loop], %w[val], %[ptr]\n"
+		"	cbnz %w[loop], 1b"
+		: [loop] "=&r"(loop), [ret] "=&r"(ret),
+		  [ptr] "+Q"(*(u8 *)ptr)
+		: [val] "r" (val));
 		break;
 	case 2:
-		do {
-			asm ("//__percpu_xchg_2\n"
-			"ldxrh %w[ret], %[ptr]\n"
-			"stxrh %w[loop], %w[val], %[ptr]\n"
-			: [loop] "=&r"(loop), [ret] "=&r"(ret),
-			  [ptr] "+Q"(*(u16 *)ptr)
-			: [val] "r" (val));
-		} while (loop);
+		asm ("//__percpu_xchg_2\n"
+		"1:	ldxrh %w[ret], %[ptr]\n"
+		"	stxrh %w[loop], %w[val], %[ptr]\n"
+		"	cbnz %w[loop], 1b"
+		: [loop] "=&r"(loop), [ret] "=&r"(ret),
+		  [ptr] "+Q"(*(u16 *)ptr)
+		: [val] "r" (val));
 		break;
 	case 4:
-		do {
-			asm ("//__percpu_xchg_4\n"
-			"ldxr %w[ret], %[ptr]\n"
-			"stxr %w[loop], %w[val], %[ptr]\n"
-			: [loop] "=&r"(loop), [ret] "=&r"(ret),
-			  [ptr] "+Q"(*(u32 *)ptr)
-			: [val] "r" (val));
-		} while (loop);
+		asm ("//__percpu_xchg_4\n"
+		"1:	ldxr %w[ret], %[ptr]\n"
+		"	stxr %w[loop], %w[val], %[ptr]\n"
+		"	cbnz %w[loop], 1b"
+		: [loop] "=&r"(loop), [ret] "=&r"(ret),
+		  [ptr] "+Q"(*(u32 *)ptr)
+		: [val] "r" (val));
 		break;
 	case 8:
-		do {
-			asm ("//__percpu_xchg_8\n"
-			"ldxr %[ret], %[ptr]\n"
-			"stxr %w[loop], %[val], %[ptr]\n"
-			: [loop] "=&r"(loop), [ret] "=&r"(ret),
-			  [ptr] "+Q"(*(u64 *)ptr)
-			: [val] "r" (val));
-		} while (loop);
+		asm ("//__percpu_xchg_8\n"
+		"1:	ldxr %[ret], %[ptr]\n"
+		"	stxr %w[loop], %[val], %[ptr]\n"
+		"	cbnz %w[loop], 1b"
+		: [loop] "=&r"(loop), [ret] "=&r"(ret),
+		  [ptr] "+Q"(*(u64 *)ptr)
+		: [val] "r" (val));
 		break;
 	default:
 		BUILD_BUG();
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index df2e53d3a969..60e34824e18c 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -188,8 +188,8 @@ static inline void spin_lock_prefetch(const void *ptr)
 
 #endif
 
-void cpu_enable_pan(void *__unused);
-void cpu_enable_uao(void *__unused);
-void cpu_enable_cache_maint_trap(void *__unused);
+int cpu_enable_pan(void *__unused);
+int cpu_enable_uao(void *__unused);
+int cpu_enable_cache_maint_trap(void *__unused);
 
 #endif /* __ASM_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index e8d46e8e6079..6c80b3699cb8 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -286,7 +286,7 @@ asm(
 
 #define write_sysreg_s(v, r) do { \
 	u64 __val = (u64)v; \
-	asm volatile("msr_s " __stringify(r) ", %0" : : "rZ" (__val)); \
+	asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \
 } while (0)
 
 static inline void config_sctlr_el1(u32 clear, u32 set)
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index bcaf6fba1b65..55d0adbf6509 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -21,6 +21,7 @@
 /*
  * User space memory access functions
  */
+#include <linux/bitops.h>
 #include <linux/kasan-checks.h>
 #include <linux/string.h>
 #include <linux/thread_info.h>
@@ -102,6 +103,13 @@ static inline void set_fs(mm_segment_t fs)
 	flag; \
 })
 
+/*
+ * When dealing with data aborts or instruction traps we may end up with
+ * a tagged userland pointer. Clear the tag to get a sane pointer to pass
+ * on to access_ok(), for instance.
+ */
+#define untagged_addr(addr)		sign_extend64(addr, 55)
+
 #define access_ok(type, addr, size)	__range_ok(addr, size)
 #define user_addr_max			get_fs
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 42ffdb54e162..b0988bb1bf64 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -280,35 +280,43 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table)
 /*
  * Error-checking SWP macros implemented using ldxr{b}/stxr{b}
  */
-#define __user_swpX_asm(data, addr, res, temp, B) \
+
+/* Arbitrary constant to ensure forward-progress of the LL/SC loop */
+#define __SWP_LL_SC_LOOPS	4
+
+#define __user_swpX_asm(data, addr, res, temp, temp2, B) \
 	__asm__ __volatile__( \
+	"	mov %w3, %w7\n" \
 	ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
 		    CONFIG_ARM64_PAN) \
-	"0:	ldxr"B" %w2, [%3]\n" \
-	"1:	stxr"B" %w0, %w1, [%3]\n" \
+	"0:	ldxr"B" %w2, [%4]\n" \
+	"1:	stxr"B" %w0, %w1, [%4]\n" \
 	"	cbz %w0, 2f\n" \
-	"	mov %w0, %w4\n" \
+	"	sub %w3, %w3, #1\n" \
+	"	cbnz %w3, 0b\n" \
+	"	mov %w0, %w5\n" \
 	"	b 3f\n" \
 	"2:\n" \
 	"	mov %w1, %w2\n" \
 	"3:\n" \
 	"	.pushsection .fixup,\"ax\"\n" \
 	"	.align 2\n" \
-	"4:	mov %w0, %w5\n" \
+	"4:	mov %w0, %w6\n" \
 	"	b 3b\n" \
 	"	.popsection" \
 	_ASM_EXTABLE(0b, 4b) \
 	_ASM_EXTABLE(1b, 4b) \
 	ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
 		CONFIG_ARM64_PAN) \
-	: "=&r" (res), "+r" (data), "=&r" (temp) \
-	: "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \
+	: "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2) \
+	: "r" (addr), "i" (-EAGAIN), "i" (-EFAULT), \
+	  "i" (__SWP_LL_SC_LOOPS) \
 	: "memory")
 
-#define __user_swp_asm(data, addr, res, temp) \
-	__user_swpX_asm(data, addr, res, temp, "")
-#define __user_swpb_asm(data, addr, res, temp) \
-	__user_swpX_asm(data, addr, res, temp, "b")
+#define __user_swp_asm(data, addr, res, temp, temp2) \
+	__user_swpX_asm(data, addr, res, temp, temp2, "")
+#define __user_swpb_asm(data, addr, res, temp, temp2) \
+	__user_swpX_asm(data, addr, res, temp, temp2, "b")
 
 /*
  * Bit 22 of the instruction encoding distinguishes between
@@ -328,12 +336,12 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
 	}
 
 	while (1) {
-		unsigned long temp;
+		unsigned long temp, temp2;
 
 		if (type == TYPE_SWPB)
-			__user_swpb_asm(*data, address, res, temp);
+			__user_swpb_asm(*data, address, res, temp, temp2);
 		else
-			__user_swp_asm(*data, address, res, temp);
+			__user_swp_asm(*data, address, res, temp, temp2);
 
 		if (likely(res != -EAGAIN) || signal_pending(current))
 			break;
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 0150394f4cab..b75e917aac46 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -39,10 +39,11 @@ has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
 		(arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
 }
 
-static void cpu_enable_trap_ctr_access(void *__unused)
+static int cpu_enable_trap_ctr_access(void *__unused)
 {
 	/* Clear SCTLR_EL1.UCT */
 	config_sctlr_el1(SCTLR_EL1_UCT, 0);
+	return 0;
 }
 
 #define MIDR_RANGE(model, min, max) \
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index d577f263cc4a..c02504ea304b 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -19,7 +19,9 @@
 #define pr_fmt(fmt) "CPU features: " fmt
 
 #include <linux/bsearch.h>
+#include <linux/cpumask.h>
 #include <linux/sort.h>
+#include <linux/stop_machine.h>
 #include <linux/types.h>
 #include <asm/cpu.h>
 #include <asm/cpufeature.h>
@@ -941,7 +943,13 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
 {
 	for (; caps->matches; caps++)
 		if (caps->enable && cpus_have_cap(caps->capability))
-			on_each_cpu(caps->enable, NULL, true);
+			/*
+			 * Use stop_machine() as it schedules the work allowing
+			 * us to modify PSTATE, instead of on_each_cpu() which
+			 * uses an IPI, giving us a PSTATE that disappears when
+			 * we return.
+			 */
+			stop_machine(caps->enable, NULL, cpu_online_mask);
 }
 
 /*
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 427f6d3f084c..332e33193ccf 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -586,8 +586,9 @@ CPU_LE(	movk x0, #0x30d0, lsl #16 )	// Clear EE and E0E on LE systems
 	b.lt	4f				// Skip if no PMU present
 	mrs	x0, pmcr_el0			// Disable debug access traps
 	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
-	msr	mdcr_el2, x0			// all PMU counters from EL1
 4:
+	csel	x0, xzr, x0, lt			// all PMU counters from EL1
+	msr	mdcr_el2, x0			// (if they exist)
 
 	/* Stage-2 translation */
 	msr	vttbr_el2, xzr
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 27b2f1387df4..01753cd7d3f0 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -49,6 +49,7 @@
 #include <asm/alternative.h>
 #include <asm/compat.h>
 #include <asm/cacheflush.h>
+#include <asm/exec.h>
 #include <asm/fpsimd.h>
 #include <asm/mmu_context.h>
 #include <asm/processor.h>
@@ -186,10 +187,19 @@ void __show_regs(struct pt_regs *regs)
 	printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
 	       regs->pc, lr, regs->pstate);
 	printk("sp : %016llx\n", sp);
-	for (i = top_reg; i >= 0; i--) {
+
+	i = top_reg;
+
+	while (i >= 0) {
 		printk("x%-2d: %016llx ", i, regs->regs[i]);
-		if (i % 2 == 0)
-			printk("\n");
+		i--;
+
+		if (i % 2 == 0) {
+			pr_cont("x%-2d: %016llx ", i, regs->regs[i]);
+			i--;
+		}
+
+		pr_cont("\n");
 	}
 	printk("\n");
 }
@@ -301,7 +311,7 @@ static void tls_thread_switch(struct task_struct *next)
 }
 
 /* Restore the UAO state depending on next's addr_limit */
-static void uao_thread_switch(struct task_struct *next)
+void uao_thread_switch(struct task_struct *next)
 {
 	if (IS_ENABLED(CONFIG_ARM64_UAO)) {
 		if (task_thread_info(next)->addr_limit == KERNEL_DS)
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index d3f151cfd4a1..8507703dabe4 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -544,6 +544,7 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
 			return;
 		}
 		bootcpu_valid = true;
+		early_map_cpu_to_node(0, acpi_numa_get_nid(0, hwid));
 		return;
 	}
 
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index ad734142070d..bb0cd787a9d3 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -1,8 +1,11 @@
 #include <linux/ftrace.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
+#include <asm/alternative.h>
 #include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
 #include <asm/debug-monitors.h>
+#include <asm/exec.h>
 #include <asm/pgtable.h>
 #include <asm/memory.h>
 #include <asm/mmu_context.h>
@@ -50,6 +53,14 @@ void notrace __cpu_suspend_exit(void)
 	set_my_cpu_offset(per_cpu_offset(cpu));
 
 	/*
+	 * PSTATE was not saved over suspend/resume, re-enable any detected
+	 * features that might not have been set correctly.
+	 */
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
+			CONFIG_ARM64_PAN));
+	uao_thread_switch(current);
+
+	/*
 	 * Restore HW breakpoint registers to sane values
 	 * before debug exceptions are possibly reenabled
 	 * through local_dbg_restore.
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 5ff020f8fb7f..c9986b3e0a96 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -428,24 +428,28 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 	force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
 }
 
-void cpu_enable_cache_maint_trap(void *__unused)
+int cpu_enable_cache_maint_trap(void *__unused)
 {
 	config_sctlr_el1(SCTLR_EL1_UCI, 0);
+	return 0;
 }
 
 #define __user_cache_maint(insn, address, res) \
-	asm volatile ( \
-		"1:	" insn ", %1\n" \
-		"	mov %w0, #0\n" \
-		"2:\n" \
-		"	.pushsection .fixup,\"ax\"\n" \
-		"	.align 2\n" \
-		"3:	mov %w0, %w2\n" \
-		"	b 2b\n" \
-		"	.popsection\n" \
-		_ASM_EXTABLE(1b, 3b) \
-		: "=r" (res) \
-		: "r" (address), "i" (-EFAULT) )
+	if (untagged_addr(address) >= user_addr_max()) \
+		res = -EFAULT; \
+	else \
+		asm volatile ( \
+			"1:	" insn ", %1\n" \
+			"	mov %w0, #0\n" \
+			"2:\n" \
+			"	.pushsection .fixup,\"ax\"\n" \
+			"	.align 2\n" \
+			"3:	mov %w0, %w2\n" \
+			"	b 2b\n" \
+			"	.popsection\n" \
+			_ASM_EXTABLE(1b, 3b) \
+			: "=r" (res) \
+			: "r" (address), "i" (-EFAULT) )
 
 static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
 {
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 53d9159662fe..0f8788374815 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -29,7 +29,9 @@
 #include <linux/sched.h>
 #include <linux/highmem.h>
 #include <linux/perf_event.h>
+#include <linux/preempt.h>
 
+#include <asm/bug.h>
 #include <asm/cpufeature.h>
 #include <asm/exception.h>
 #include <asm/debug-monitors.h>
@@ -670,9 +672,17 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
 NOKPROBE_SYMBOL(do_debug_exception);
 
 #ifdef CONFIG_ARM64_PAN
-void cpu_enable_pan(void *__unused)
+int cpu_enable_pan(void *__unused)
 {
+	/*
+	 * We modify PSTATE. This won't work from irq context as the PSTATE
+	 * is discarded once we return from the exception.
+	 */
+	WARN_ON_ONCE(in_interrupt());
+
 	config_sctlr_el1(SCTLR_EL1_SPAN, 0);
+	asm(SET_PSTATE_PAN(1));
+	return 0;
 }
 #endif /* CONFIG_ARM64_PAN */
 
@@ -683,8 +693,9 @@ void cpu_enable_pan(void *__unused)
  * We need to enable the feature at runtime (instead of adding it to
  * PSR_MODE_EL1h) as the feature may not be implemented by the cpu.
  */
-void cpu_enable_uao(void *__unused)
+int cpu_enable_uao(void *__unused)
 {
 	asm(SET_PSTATE_UAO(1));
+	return 0;
 }
 #endif /* CONFIG_ARM64_UAO */
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 21c489bdeb4e..212c4d1e2f26 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -421,35 +421,35 @@ void __init mem_init(void)
 
 	pr_notice("Virtual kernel memory layout:\n");
 #ifdef CONFIG_KASAN
-	pr_cont("    kasan   : 0x%16lx - 0x%16lx   (%6ld GB)\n",
+	pr_notice("    kasan   : 0x%16lx - 0x%16lx   (%6ld GB)\n",
 		MLG(KASAN_SHADOW_START, KASAN_SHADOW_END));
 #endif
-	pr_cont("    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n",
+	pr_notice("    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n",
 		MLM(MODULES_VADDR, MODULES_END));
-	pr_cont("    vmalloc : 0x%16lx - 0x%16lx   (%6ld GB)\n",
+	pr_notice("    vmalloc : 0x%16lx - 0x%16lx   (%6ld GB)\n",
 		MLG(VMALLOC_START, VMALLOC_END));
-	pr_cont("      .text : 0x%p" " - 0x%p" "   (%6ld KB)\n",
+	pr_notice("      .text : 0x%p" " - 0x%p" "   (%6ld KB)\n",
 		MLK_ROUNDUP(_text, _etext));
-	pr_cont("    .rodata : 0x%p" " - 0x%p" "   (%6ld KB)\n",
+	pr_notice("    .rodata : 0x%p" " - 0x%p" "   (%6ld KB)\n",
 		MLK_ROUNDUP(__start_rodata, __init_begin));
-	pr_cont("      .init : 0x%p" " - 0x%p" "   (%6ld KB)\n",
+	pr_notice("      .init : 0x%p" " - 0x%p" "   (%6ld KB)\n",
 		MLK_ROUNDUP(__init_begin, __init_end));
-	pr_cont("      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n",
+	pr_notice("      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n",
 		MLK_ROUNDUP(_sdata, _edata));
-	pr_cont("       .bss : 0x%p" " - 0x%p" "   (%6ld KB)\n",
+	pr_notice("       .bss : 0x%p" " - 0x%p" "   (%6ld KB)\n",
 		MLK_ROUNDUP(__bss_start, __bss_stop));
-	pr_cont("    fixed   : 0x%16lx - 0x%16lx   (%6ld KB)\n",
+	pr_notice("    fixed   : 0x%16lx - 0x%16lx   (%6ld KB)\n",
 		MLK(FIXADDR_START, FIXADDR_TOP));
-	pr_cont("    PCI I/O : 0x%16lx - 0x%16lx   (%6ld MB)\n",
+	pr_notice("    PCI I/O : 0x%16lx - 0x%16lx   (%6ld MB)\n",
 		MLM(PCI_IO_START, PCI_IO_END));
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-	pr_cont("    vmemmap : 0x%16lx - 0x%16lx   (%6ld GB maximum)\n",
+	pr_notice("    vmemmap : 0x%16lx - 0x%16lx   (%6ld GB maximum)\n",
 		MLG(VMEMMAP_START, VMEMMAP_START + VMEMMAP_SIZE));
-	pr_cont("              0x%16lx - 0x%16lx   (%6ld MB actual)\n",
+	pr_notice("              0x%16lx - 0x%16lx   (%6ld MB actual)\n",
 		MLM((unsigned long)phys_to_page(memblock_start_of_DRAM()),
 		    (unsigned long)virt_to_page(high_memory)));
 #endif
-	pr_cont("    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n",
+	pr_notice("    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n",
 		MLM(__phys_to_virt(memblock_start_of_DRAM()),
 		    (unsigned long)high_memory));
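
A note on the cpufeature change above: the enable() callbacks now return int and run through stop_machine() rather than the on_each_cpu() IPI path, because PSTATE bits such as PAN and UAO written from IPI (exception) context are discarded on exception return. A condensed sketch of the resulting call path, reusing the identifiers from the diff (error handling elided):

	#include <linux/stop_machine.h>

	static void enable_cpu_capabilities_sketch(const struct arm64_cpu_capabilities *caps)
	{
		for (; caps->matches; caps++)
			if (caps->enable && cpus_have_cap(caps->capability))
				/* enable() returns int, as stop_machine() requires */
				stop_machine(caps->enable, NULL, cpu_online_mask);
	}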