From c396fe7f0c2efdf6c02d723f7bd492c58725c822 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Mon, 3 Jul 2017 14:37:46 +0100
Subject: arm64: uaccess: Remove redundant __force from addr cast in __range_ok

Casting a pointer to an integral type doesn't require a __force
attribute, because you'll need to cast back to a pointer in order to
dereference the thing anyway.

This patch removes the redundant __force cast from __range_ok.

Reported-by: Luc Van Oostenryck
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/uaccess.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 8f0a1de11e4a..fab46a0ea223 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -69,7 +69,7 @@ static inline void set_fs(mm_segment_t fs)
  */
 #define __range_ok(addr, size)                                         \
 ({                                                                     \
-        unsigned long __addr = (unsigned long __force)(addr);          \
+        unsigned long __addr = (unsigned long)(addr);                  \
         unsigned long flag, roksum;                                    \
         __chk_user_ptr(addr);                                          \
         asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls"        \
--
cgit v1.2.3


From 32fb5d73c98b079e7c815b62e9d88a39ff8ce509 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 7 Jul 2017 17:01:50 +0100
Subject: arm64: atomics: Remove '&' from '+&' asm constraint in lse atomics

The lse implementation of atomic64_dec_if_positive uses the '+&'
constraint, but the '&' is redundant and confusing in this case, since
early clobber on a read/write operand is a strange concept.

Replace the constraint with '+'.

Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/atomic_lse.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 99fa69c9c3cf..9ef0797380cb 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -435,7 +435,7 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 	"	sub	x30, x30, %[ret]\n"
 	"	cbnz	x30, 1b\n"
 	"2:")
-	: [ret] "+&r" (x0), [v] "+Q" (v->counter)
+	: [ret] "+r" (x0), [v] "+Q" (v->counter)
 	:
 	: __LL_SC_CLOBBERS, "cc", "memory");
--
cgit v1.2.3

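For readers unfamiliar with the constraint modifiers involved, here is a small
stand-alone AArch64 sketch (not taken from the patches above) contrasting a
plain read/write operand, where '+' alone is enough, with a genuinely
early-clobbered output, which is where '&' earns its keep:

#include <stdio.h>

/* "+r": the operand is read and written in place; no '&' is needed. */
static inline long add_one(long v)
{
        asm("add %0, %0, #1" : "+r" (v));
        return v;
}

/*
 * "=&r" (early clobber) on 'a': %0 is written by the first instruction
 * while the input %2 is still needed by the second, so the compiler
 * must not assign them the same register.
 */
static inline long load_two(const long *p)
{
        long a, b;

        asm("ldr %0, [%2]\n\t"
            "ldr %1, [%2, #8]"
            : "=&r" (a), "=r" (b)
            : "r" (p)
            : "memory");
        return a + b;
}

int main(void)
{
        long vals[2] = { 1, 2 };

        printf("%ld %ld\n", add_one(41), load_two(vals));      /* 42 3 */
        return 0;
}
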
From 6f44a0bacb79a03972c83759711832b382b1b8ac Mon Sep 17 00:00:00 2001
From: Qiao Zhou
Date: Fri, 7 Jul 2017 17:29:34 +0800
Subject: arm64: traps: disable irq in die()

In the current die(), interrupts are disabled only around __die()
itself, not around the subsequent panic() handling. Since the logging
in __die() can take several hundred milliseconds, a new interrupt may
arrive and preempt the current die(). If the process calling die()
holds a critical resource that another process scheduled later also
needs, the two deadlock and the pending panic() is never executed.

Disable interrupts across the whole flow of die() instead.

Signed-off-by: Qiao Zhou
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/traps.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index c7c7088097be..d48f47080213 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -274,10 +274,12 @@ static DEFINE_RAW_SPINLOCK(die_lock);
 void die(const char *str, struct pt_regs *regs, int err)
 {
 	int ret;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&die_lock, flags);
 
 	oops_enter();
 
-	raw_spin_lock_irq(&die_lock);
 	console_verbose();
 	bust_spinlocks(1);
 	ret = __die(str, err, regs);
@@ -287,13 +289,15 @@ void die(const char *str, struct pt_regs *regs, int err)
 
 	bust_spinlocks(0);
 	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
-	raw_spin_unlock_irq(&die_lock);
 	oops_exit();
 
 	if (in_interrupt())
 		panic("Fatal exception in interrupt");
 	if (panic_on_oops)
 		panic("Fatal exception");
+
+	raw_spin_unlock_irqrestore(&die_lock, flags);
+
 	if (ret != NOTIFY_STOP)
 		do_exit(SIGSEGV);
 }
--
cgit v1.2.3

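The effect of the change above is that die_lock is now taken with interrupts
disabled for the whole of die(), and the _irqsave/_irqrestore pair preserves
whatever interrupt state the caller already had. A minimal kernel-style sketch
of that locking pattern (illustrative only, not the actual traps.c code):

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);

static void example_critical_section(void)
{
        unsigned long flags;

        /*
         * Disable local interrupts and remember whether they were
         * already disabled, so the unlock below restores the caller's
         * original state instead of unconditionally re-enabling IRQs.
         */
        raw_spin_lock_irqsave(&example_lock, flags);

        /* ... work that must not be interrupted or deadlock-prone ... */

        raw_spin_unlock_irqrestore(&example_lock, flags);
}
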
From a270f32735a20affe325c351c359f13603537d05 Mon Sep 17 00:00:00 2001
From: Rob Herring
Date: Tue, 18 Jul 2017 16:42:42 -0500
Subject: arm64: Convert to using %pOF instead of full_name

Now that we have a custom printf format specifier, convert users of
full_name to use %pOF instead. This is preparation to remove storing
of the full path string for each node.

Signed-off-by: Rob Herring
Cc: Catalin Marinas
Cc: Will Deacon
Cc: linux-arm-kernel@lists.infradead.org
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/cpu_ops.c  |  4 ++--
 arch/arm64/kernel/smp.c      | 12 ++++++------
 arch/arm64/kernel/topology.c | 22 +++++++++++-----------
 3 files changed, 19 insertions(+), 19 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
index e137ceaf5016..d16978213c5b 100644
--- a/arch/arm64/kernel/cpu_ops.c
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -82,8 +82,8 @@ static const char *__init cpu_read_enable_method(int cpu)
 			 * Don't warn spuriously.
 			 */
 			if (cpu != 0)
-				pr_err("%s: missing enable-method property\n",
-					dn->full_name);
+				pr_err("%pOF: missing enable-method property\n",
+					dn);
 		}
 	} else {
 		enable_method = acpi_get_enable_method(cpu);
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 321119881abf..dc66e6ec3a99 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -469,7 +469,7 @@ static u64 __init of_get_cpu_mpidr(struct device_node *dn)
 	 */
 	cell = of_get_property(dn, "reg", NULL);
 	if (!cell) {
-		pr_err("%s: missing reg property\n", dn->full_name);
+		pr_err("%pOF: missing reg property\n", dn);
 		return INVALID_HWID;
 	}
 
@@ -478,7 +478,7 @@ static u64 __init of_get_cpu_mpidr(struct device_node *dn)
 	 * Non affinity bits must be set to 0 in the DT
 	 */
 	if (hwid & ~MPIDR_HWID_BITMASK) {
-		pr_err("%s: invalid reg property\n", dn->full_name);
+		pr_err("%pOF: invalid reg property\n", dn);
 		return INVALID_HWID;
 	}
 	return hwid;
@@ -627,8 +627,8 @@ static void __init of_parse_and_init_cpus(void)
 			goto next;
 
 		if (is_mpidr_duplicate(cpu_count, hwid)) {
-			pr_err("%s: duplicate cpu reg properties in the DT\n",
-				dn->full_name);
+			pr_err("%pOF: duplicate cpu reg properties in the DT\n",
+				dn);
 			goto next;
 		}
 
@@ -640,8 +640,8 @@ static void __init of_parse_and_init_cpus(void)
 		 */
 		if (hwid == cpu_logical_map(0)) {
 			if (bootcpu_valid) {
-				pr_err("%s: duplicate boot cpu reg property in DT\n",
-					dn->full_name);
+				pr_err("%pOF: duplicate boot cpu reg property in DT\n",
+					dn);
 				goto next;
 			}
 
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 79244c75eaec..8d48b233e6ce 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -45,7 +45,7 @@ static int __init get_cpu_for_node(struct device_node *node)
 		}
 	}
 
-	pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);
+	pr_crit("Unable to find CPU node for %pOF\n", cpu_node);
 
 	of_node_put(cpu_node);
 	return -1;
@@ -71,8 +71,8 @@ static int __init parse_core(struct device_node *core, int cluster_id,
 				cpu_topology[cpu].core_id = core_id;
 				cpu_topology[cpu].thread_id = i;
 			} else {
-				pr_err("%s: Can't get CPU for thread\n",
-					t->full_name);
+				pr_err("%pOF: Can't get CPU for thread\n",
+					t);
 				of_node_put(t);
 				return -EINVAL;
 			}
@@ -84,15 +84,15 @@ static int __init parse_core(struct device_node *core, int cluster_id,
 	cpu = get_cpu_for_node(core);
 	if (cpu >= 0) {
 		if (!leaf) {
-			pr_err("%s: Core has both threads and CPU\n",
-				core->full_name);
+			pr_err("%pOF: Core has both threads and CPU\n",
+				core);
 			return -EINVAL;
 		}
 
 		cpu_topology[cpu].cluster_id = cluster_id;
 		cpu_topology[cpu].core_id = core_id;
 	} else if (leaf) {
-		pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
+		pr_err("%pOF: Can't get CPU for leaf core\n", core);
 		return -EINVAL;
 	}
 
@@ -137,8 +137,8 @@ static int __init parse_cluster(struct device_node *cluster, int depth)
 			has_cores = true;
 
 			if (depth == 0) {
-				pr_err("%s: cpu-map children should be clusters\n",
-					c->full_name);
+				pr_err("%pOF: cpu-map children should be clusters\n",
+					c);
 				of_node_put(c);
 				return -EINVAL;
 			}
@@ -146,8 +146,8 @@ static int __init parse_cluster(struct device_node *cluster, int depth)
 			if (leaf) {
 				ret = parse_core(c, cluster_id, core_id++);
 			} else {
-				pr_err("%s: Non-leaf cluster with core %s\n",
-					cluster->full_name, name);
+				pr_err("%pOF: Non-leaf cluster with core %s\n",
+					cluster, name);
 				ret = -EINVAL;
 			}
 
@@ -159,7 +159,7 @@ static int __init parse_cluster(struct device_node *cluster, int depth)
 	} while (c);
 
 	if (leaf && !has_cores)
-		pr_warn("%s: empty cluster\n", cluster->full_name);
+		pr_warn("%pOF: empty cluster\n", cluster);
 
 	if (leaf)
 		cluster_id++;
--
cgit v1.2.3

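For context, %pOF is the printf extension that prints a device_node by its
full path, which is what makes dropping ->full_name possible. A small,
hypothetical kernel-style sketch of how a caller might use it (the node path
below is made up for illustration):

#include <linux/of.h>
#include <linux/printk.h>

static void report_cpu_node(void)
{
        /* "/cpus/cpu@0" is an assumed path, purely for illustration. */
        struct device_node *np = of_find_node_by_path("/cpus/cpu@0");

        if (!np)
                return;

        /* %pOF prints the node's full path without touching ->full_name. */
        pr_info("found %pOF\n", np);

        of_node_put(np);
}
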
pr_warn("%pOF: empty cluster\n", cluster); if (leaf) cluster_id++; -- cgit v1.2.3 From ece4b206be9934b3bb32adb1261545ead831c993 Mon Sep 17 00:00:00 2001 From: Punit Agrawal Date: Thu, 20 Jul 2017 12:03:59 +0100 Subject: arm64/numa: Drop duplicate message When booting linux on a system without CONFIG_NUMA enabled, the following messages are printed during boot - NUMA: Faking a node at [mem 0x0000000000000000-0x00000083ffffffff] NUMA: Adding memblock [0x8000000000 - 0x8000e7ffff] on node 0 NUMA: Adding memblock [0x8000e80000 - 0x83f65cffff] on node 0 NUMA: Adding memblock [0x83f65d0000 - 0x83f665ffff] on node 0 NUMA: Adding memblock [0x83f6660000 - 0x83f676ffff] on node 0 NUMA: Adding memblock [0x83f6770000 - 0x83f678ffff] on node 0 NUMA: Adding memblock [0x83f6790000 - 0x83fb82ffff] on node 0 NUMA: Adding memblock [0x83fb830000 - 0x83fbc0ffff] on node 0 NUMA: Adding memblock [0x83fbc10000 - 0x83fbdfffff] on node 0 NUMA: Adding memblock [0x83fbe00000 - 0x83fbffffff] on node 0 NUMA: Adding memblock [0x83fc000000 - 0x83fffbffff] on node 0 NUMA: Adding memblock [0x83fffc0000 - 0x83fffdffff] on node 0 NUMA: Adding memblock [0x83fffe0000 - 0x83ffffffff] on node 0 NUMA: Initmem setup node 0 [mem 0x8000000000-0x83ffffffff] NUMA: NODE_DATA [mem 0x83fffec500-0x83fffedfff] The information is then duplicated by core kernel messages right after the above output. Early memory node ranges node 0: [mem 0x0000008000000000-0x0000008000e7ffff] node 0: [mem 0x0000008000e80000-0x00000083f65cffff] node 0: [mem 0x00000083f65d0000-0x00000083f665ffff] node 0: [mem 0x00000083f6660000-0x00000083f676ffff] node 0: [mem 0x00000083f6770000-0x00000083f678ffff] node 0: [mem 0x00000083f6790000-0x00000083fb82ffff] node 0: [mem 0x00000083fb830000-0x00000083fbc0ffff] node 0: [mem 0x00000083fbc10000-0x00000083fbdfffff] node 0: [mem 0x00000083fbe00000-0x00000083fbffffff] node 0: [mem 0x00000083fc000000-0x00000083fffbffff] node 0: [mem 0x00000083fffc0000-0x00000083fffdffff] node 0: [mem 0x00000083fffe0000-0x00000083ffffffff] Initmem setup node 0 [mem 0x0000008000000000-0x00000083ffffffff] Remove the duplication of memblock layout information printed during boot by dropping the messages from arm64 numa initialisation. 
Signed-off-by: Punit Agrawal
Cc: Catalin Marinas
Cc: Will Deacon
Signed-off-by: Will Deacon
---
 arch/arm64/mm/numa.c | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index b388a99fea7b..dad128ba98bf 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -208,8 +208,6 @@ int __init numa_add_memblk(int nid, u64 start, u64 end)
 	}
 
 	node_set(nid, numa_nodes_parsed);
-	pr_info("Adding memblock [0x%llx - 0x%llx] on node %d\n",
-		start, (end - 1), nid);
 	return ret;
 }
 
@@ -223,10 +221,7 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
 	void *nd;
 	int tnid;
 
-	if (start_pfn < end_pfn)
-		pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n", nid,
-			start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
-	else
+	if (start_pfn >= end_pfn)
 		pr_info("Initmem setup node %d []\n", nid);
 
 	nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
--
cgit v1.2.3


From 288be97cc74e31b7871c75eb11a8dd768dcb535d Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Wed, 12 Jul 2017 15:44:14 +0100
Subject: arm64/lib: copy_page: use consistent prefetch stride

The optional prefetch instructions in the copy_page() routine are
inconsistent: at the start of the function, two cachelines are
prefetched beyond the one being loaded in the first iteration, but in
the loop, the prefetch is one more line ahead. This appears to be
unintentional, so let's fix it.

While at it, fix the comment style and white space.

Signed-off-by: Ard Biesheuvel
Signed-off-by: Will Deacon
---
 arch/arm64/lib/copy_page.S | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/lib/copy_page.S b/arch/arm64/lib/copy_page.S
index c3cd65e31814..076c43715e64 100644
--- a/arch/arm64/lib/copy_page.S
+++ b/arch/arm64/lib/copy_page.S
@@ -30,9 +30,10 @@
  */
 ENTRY(copy_page)
 alternative_if ARM64_HAS_NO_HW_PREFETCH
-	# Prefetch two cache lines ahead.
-	prfm	pldl1strm, [x1, #128]
-	prfm	pldl1strm, [x1, #256]
+	// Prefetch three cache lines ahead.
+	prfm	pldl1strm, [x1, #128]
+	prfm	pldl1strm, [x1, #256]
+	prfm	pldl1strm, [x1, #384]
 alternative_else_nop_endif
 
 	ldp	x2, x3, [x1]
@@ -50,7 +51,7 @@ alternative_else_nop_endif
 	subs	x18, x18, #128
 
 alternative_if ARM64_HAS_NO_HW_PREFETCH
-	prfm    pldl1strm, [x1, #384]
+	prfm	pldl1strm, [x1, #384]
 alternative_else_nop_endif
 
 	stnp	x2, x3, [x0]
--
cgit v1.2.3


From d0153c7ff9226535a51e6a81f61656c9500957f4 Mon Sep 17 00:00:00 2001
From: Dave Martin
Date: Tue, 25 Jul 2017 12:52:41 +0100
Subject: arm64: sysreg: Fix unprotected macro argument in write_sysreg

write_sysreg() may misparse the value argument because it is used
without parentheses to protect it.

This patch adds the ( ) in order to avoid any surprises.

Acked-by: Mark Rutland
Signed-off-by: Dave Martin
[will: same change to write_sysreg_s]
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/sysreg.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 16e44fa9b3b6..248339e4aaf5 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -492,7 +492,7 @@ asm(
  * the "%x0" template means XZR.
  */
 #define write_sysreg(v, r) do {					\
-	u64 __val = (u64)v;					\
+	u64 __val = (u64)(v);					\
 	asm volatile("msr " __stringify(r) ", %x0"		\
 		     : : "rZ" (__val));				\
 } while (0)
@@ -508,7 +508,7 @@ asm(
 })
 
 #define write_sysreg_s(v, r) do {					\
-	u64 __val = (u64)v;						\
+	u64 __val = (u64)(v);						\
 	asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val));	\
 } while (0)
 
--
cgit v1.2.3

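The hazard being fixed is ordinary C macro-argument precedence rather than
anything sysreg-specific. A stand-alone user-space illustration (not kernel
code) of how an unprotected cast misparses an expression argument:

#include <stdio.h>

#define CAST_UNPROTECTED(v) ((unsigned long)v)    /* same shape as the old macro */
#define CAST_PROTECTED(v)   ((unsigned long)(v))  /* same shape as the fixed one */

int main(void)
{
        int arr[2] = { 0, 0 };
        int *p = arr;

        /*
         * A cast binds tighter than '+', so the unprotected form casts
         * only 'p' and then adds 1 byte, while the protected form casts
         * the whole expression and so advances by sizeof(int) bytes.
         */
        printf("unprotected: +%lu byte(s)\n",
               CAST_UNPROTECTED(p + 1) - (unsigned long)p);   /* 1 */
        printf("protected:   +%lu byte(s)\n",
               CAST_PROTECTED(p + 1) - (unsigned long)p);     /* sizeof(int), typically 4 */
        return 0;
}
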
From 92bbd16e500c85bc210ba48caecbfbdb721bb5b4 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Mon, 24 Jul 2017 11:46:09 +0100
Subject: arm64: mmu: Place guard page after mapping of kernel image

The vast majority of virtual allocations in the vmalloc region are
followed by a guard page, which can help to avoid overrunning from one
VMA into another, which may map a read-sensitive device.

This patch adds a guard page to the end of the kernel image mapping
(i.e. following the data/bss segments).

Cc: Mark Rutland
Reviewed-by: Ard Biesheuvel
Signed-off-by: Will Deacon
---
 arch/arm64/mm/mmu.c | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 23c2d89a362e..f1eb15e0e864 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -496,7 +496,7 @@ void mark_rodata_ro(void)
 
 static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
 				      pgprot_t prot, struct vm_struct *vma,
-				      int flags)
+				      int flags, unsigned long vm_flags)
 {
 	phys_addr_t pa_start = __pa_symbol(va_start);
 	unsigned long size = va_end - va_start;
@@ -507,10 +507,13 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
 	__create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
 			     early_pgtable_alloc, flags);
 
+	if (!(vm_flags & VM_NO_GUARD))
+		size += PAGE_SIZE;
+
 	vma->addr	= va_start;
 	vma->phys_addr	= pa_start;
 	vma->size	= size;
-	vma->flags	= VM_MAP;
+	vma->flags	= VM_MAP | vm_flags;
 	vma->caller	= __builtin_return_address(0);
 
 	vm_area_add_early(vma);
@@ -541,14 +544,15 @@ static void __init map_kernel(pgd_t *pgd)
 	 * Only rodata will be remapped with different permissions later on,
 	 * all other segments are allowed to use contiguous mappings.
 	 */
-	map_kernel_segment(pgd, _text, _etext, text_prot, &vmlinux_text, 0);
+	map_kernel_segment(pgd, _text, _etext, text_prot, &vmlinux_text, 0,
+			   VM_NO_GUARD);
 	map_kernel_segment(pgd, __start_rodata, __inittext_begin, PAGE_KERNEL,
-			   &vmlinux_rodata, NO_CONT_MAPPINGS);
+			   &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
 	map_kernel_segment(pgd, __inittext_begin, __inittext_end, text_prot,
-			   &vmlinux_inittext, 0);
+			   &vmlinux_inittext, 0, VM_NO_GUARD);
 	map_kernel_segment(pgd, __initdata_begin, __initdata_end, PAGE_KERNEL,
-			   &vmlinux_initdata, 0);
-	map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data, 0);
+			   &vmlinux_initdata, 0, VM_NO_GUARD);
+	map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);
 
 	if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
 		/*
--
cgit v1.2.3
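As a rough user-space analogy of what the guard page buys (purely
illustrative, unrelated to the kernel mapping code above): reserving one extra
PROT_NONE page after a buffer makes a sequential overrun fault immediately
instead of silently spilling into whatever happens to be mapped next.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        size_t len = 4 * page;

        /* Reserve the buffer plus one trailing guard page. */
        char *buf = mmap(NULL, len + page, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED)
                return 1;

        /* Revoke all permissions on the last page: the guard. */
        mprotect(buf + len, page, PROT_NONE);

        memset(buf, 0, len);          /* fine: stays inside the buffer   */
        /* buf[len] = 0; */           /* would SIGSEGV on the guard page */

        printf("buffer at %p, guard at %p\n", (void *)buf, (void *)(buf + len));
        munmap(buf, len + page);
        return 0;
}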