32 files changed, 402 insertions, 177 deletions
diff --git a/Documentation/features/time/irq-time-acct/arch-support.txt b/Documentation/features/time/irq-time-acct/arch-support.txt index d9082b91f10e..6fc03deb1c38 100644 --- a/Documentation/features/time/irq-time-acct/arch-support.txt +++ b/Documentation/features/time/irq-time-acct/arch-support.txt @@ -23,7 +23,7 @@ | openrisc: | TODO | | parisc: | .. | | powerpc: | ok | - | riscv: | TODO | + | riscv: | ok | | s390: | .. | | sh: | TODO | | sparc: | .. | diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index ba937d85cb6c..b8ade91281bc 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -5,7 +5,6 @@ config ARM select ARCH_32BIT_OFF_T select ARCH_HAS_BINFMT_FLAT select ARCH_HAS_DEBUG_VIRTUAL if MMU - select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_DMA_WRITE_COMBINE if !ARM_DMA_MEM_BUFFERABLE select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_FORTIFY_SOURCE @@ -57,6 +56,7 @@ config ARM select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW select GENERIC_IRQ_SHOW_LEVEL + select GENERIC_LIB_DEVMEM_IS_ALLOWED select GENERIC_PCI_IOMAP select GENERIC_SCHED_CLOCK select GENERIC_SMP_IDLE_THREAD diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index ab2b654084fa..fc748122f1e0 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -441,7 +441,6 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr); #define ARCH_HAS_VALID_PHYS_ADDR_RANGE extern int valid_phys_addr_range(phys_addr_t addr, size_t size); extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); -extern int devmem_is_allowed(unsigned long pfn); #endif /* diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index b8d912ac9e61..a0f8a0ca0788 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c @@ -165,25 +165,3 @@ int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) { return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT)); } - -#ifdef CONFIG_STRICT_DEVMEM - -#include <linux/ioport.h> - -/* - * devmem_is_allowed() checks to see if /dev/mem access to a certain - * address is valid. The argument is a physical page number. - * We mimic x86 here by disallowing access to system RAM as well as - * device-exclusive MMIO regions. This effectively disable read()/write() - * on /dev/mem. 
- */ -int devmem_is_allowed(unsigned long pfn) -{ - if (iomem_is_exclusive(pfn << PAGE_SHIFT)) - return 0; - if (!page_is_ram(pfn)) - return 1; - return 0; -} - -#endif diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 9f0139ba8a1d..81463eb537bb 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -13,7 +13,6 @@ config ARM64 select ARCH_BINFMT_ELF_STATE select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEBUG_VM_PGTABLE - select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_DMA_PREP_COHERENT select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI select ARCH_HAS_FAST_MULTIPLIER @@ -113,6 +112,7 @@ config ARM64 select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW select GENERIC_IRQ_SHOW_LEVEL + select GENERIC_LIB_DEVMEM_IS_ALLOWED select GENERIC_PCI_IOMAP select GENERIC_PTDUMP select GENERIC_SCHED_CLOCK diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index fd172c41df90..5ea8656a2030 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -201,6 +201,4 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size); extern int valid_phys_addr_range(phys_addr_t addr, size_t size); extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); -extern int devmem_is_allowed(unsigned long pfn); - #endif /* __ASM_IO_H */ diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c index 3028bacbc4e9..07937b49cb88 100644 --- a/arch/arm64/mm/mmap.c +++ b/arch/arm64/mm/mmap.c @@ -47,24 +47,3 @@ int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) { return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK); } - -#ifdef CONFIG_STRICT_DEVMEM - -#include <linux/ioport.h> - -/* - * devmem_is_allowed() checks to see if /dev/mem access to a certain address - * is valid. The argument is a physical page number. We mimic x86 here by - * disallowing access to system RAM as well as device-exclusive MMIO regions. - * This effectively disable read()/write() on /dev/mem. 
- */ -int devmem_is_allowed(unsigned long pfn) -{ - if (iomem_is_exclusive(pfn << PAGE_SHIFT)) - return 0; - if (!page_is_ram(pfn)) - return 1; - return 0; -} - -#endif diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 880c2b3b65d0..81b76d44725d 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -15,6 +15,7 @@ config RISCV select ARCH_CLOCKSOURCE_INIT select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_SUPPORTS_DEBUG_PAGEALLOC if MMU + select ARCH_STACKWALK select ARCH_HAS_BINFMT_FLAT select ARCH_HAS_DEBUG_VM_PGTABLE select ARCH_HAS_DEBUG_VIRTUAL if MMU @@ -43,6 +44,7 @@ config RISCV select GENERIC_IOREMAP select GENERIC_IRQ_MULTI_HANDLER select GENERIC_IRQ_SHOW + select GENERIC_LIB_DEVMEM_IS_ALLOWED select GENERIC_PCI_IOMAP select GENERIC_PTDUMP if MMU select GENERIC_SCHED_CLOCK @@ -68,6 +70,7 @@ config RISCV select HAVE_FUTEX_CMPXCHG if FUTEX select HAVE_GCC_PLUGINS select HAVE_GENERIC_VDSO if MMU && 64BIT + select HAVE_IRQ_TIME_ACCOUNTING select HAVE_PCI select HAVE_PERF_EVENTS select HAVE_PERF_REGS diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index 0289a97325d1..8c29e553ef7f 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -96,5 +96,11 @@ $(BOOT_TARGETS): vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ @$(kecho) ' Kernel: $(boot)/$@ is ready' +Image.%: Image + $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ + zinstall install: $(Q)$(MAKE) $(build)=$(boot) $@ + +archclean: + $(Q)$(MAKE) $(clean)=$(boot) diff --git a/arch/riscv/boot/.gitignore b/arch/riscv/boot/.gitignore index 574c10f8ff68..90e66adb7de5 100644 --- a/arch/riscv/boot/.gitignore +++ b/arch/riscv/boot/.gitignore @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only Image -Image.gz +Image.* loader loader.lds +loader.bin diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile index c59fca695f9d..03404c84f971 100644 --- a/arch/riscv/boot/Makefile +++ b/arch/riscv/boot/Makefile @@ -18,7 +18,7 @@ KCOV_INSTRUMENT := n OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S -targets := Image loader +targets := Image Image.* loader loader.o loader.lds loader.bin $(obj)/Image: vmlinux FORCE $(call if_changed,objcopy) diff --git a/arch/riscv/include/asm/sections.h b/arch/riscv/include/asm/sections.h index 3a9971b1210f..1595c5b60cfd 100644 --- a/arch/riscv/include/asm/sections.h +++ b/arch/riscv/include/asm/sections.h @@ -9,5 +9,7 @@ extern char _start[]; extern char _start_kernel[]; +extern char __init_data_begin[], __init_data_end[]; +extern char __init_text_begin[], __init_text_end[]; #endif /* __ASM_SECTIONS_H */ diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h index d690b08dff2a..211eb8244a45 100644 --- a/arch/riscv/include/asm/set_memory.h +++ b/arch/riscv/include/asm/set_memory.h @@ -15,11 +15,15 @@ int set_memory_ro(unsigned long addr, int numpages); int set_memory_rw(unsigned long addr, int numpages); int set_memory_x(unsigned long addr, int numpages); int set_memory_nx(unsigned long addr, int numpages); +int set_memory_rw_nx(unsigned long addr, int numpages); +void protect_kernel_text_data(void); #else static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; } static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; } static inline int set_memory_x(unsigned long addr, int numpages) { return 0; } static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; } +static inline void protect_kernel_text_data(void) {}; +static inline int set_memory_rw_nx(unsigned long addr, int 
numpages) { return 0; } #endif int set_direct_map_invalid_noflush(struct page *page); diff --git a/arch/riscv/include/asm/stacktrace.h b/arch/riscv/include/asm/stacktrace.h new file mode 100644 index 000000000000..470a65c4ccdc --- /dev/null +++ b/arch/riscv/include/asm/stacktrace.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_RISCV_STACKTRACE_H +#define _ASM_RISCV_STACKTRACE_H + +#include <linux/sched.h> +#include <asm/ptrace.h> + +struct stackframe { + unsigned long fp; + unsigned long ra; +}; + +extern void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, + bool (*fn)(void *, unsigned long), void *arg); + +#endif /* _ASM_RISCV_STACKTRACE_H */ diff --git a/arch/riscv/include/asm/string.h b/arch/riscv/include/asm/string.h index 924af13f8555..5477e7ecb6e1 100644 --- a/arch/riscv/include/asm/string.h +++ b/arch/riscv/include/asm/string.h @@ -12,16 +12,16 @@ #define __HAVE_ARCH_MEMSET extern asmlinkage void *memset(void *, int, size_t); extern asmlinkage void *__memset(void *, int, size_t); - #define __HAVE_ARCH_MEMCPY extern asmlinkage void *memcpy(void *, const void *, size_t); extern asmlinkage void *__memcpy(void *, const void *, size_t); - +#define __HAVE_ARCH_MEMMOVE +extern asmlinkage void *memmove(void *, const void *, size_t); +extern asmlinkage void *__memmove(void *, const void *, size_t); /* For those files which don't want to check by kasan. */ #if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) - #define memcpy(dst, src, len) __memcpy(dst, src, len) #define memset(s, c, n) __memset(s, c, n) - +#define memmove(dst, src, len) __memmove(dst, src, len) #endif #endif /* _ASM_RISCV_STRING_H */ diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index fa896c5f7ccb..f6caf4d9ca15 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -56,5 +56,3 @@ obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o obj-$(CONFIG_EFI) += efi.o - -clean: diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c index db203442c08f..b79ffa3561fd 100644 --- a/arch/riscv/kernel/asm-offsets.c +++ b/arch/riscv/kernel/asm-offsets.c @@ -11,6 +11,8 @@ #include <asm/thread_info.h> #include <asm/ptrace.h> +void asm_offsets(void); + void asm_offsets(void) { OFFSET(TASK_THREAD_RA, task_struct, thread.ra); diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index 7e849797c9c3..16e9941900c4 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -182,7 +182,6 @@ setup_trap_vector: END(_start) - __INIT ENTRY(_start_kernel) /* Mask all interrupts */ csrw CSR_IE, zero diff --git a/arch/riscv/kernel/perf_callchain.c b/arch/riscv/kernel/perf_callchain.c index cf190197a22f..0bb1854dce83 100644 --- a/arch/riscv/kernel/perf_callchain.c +++ b/arch/riscv/kernel/perf_callchain.c @@ -4,11 +4,7 @@ #include <linux/perf_event.h> #include <linux/uaccess.h> -/* Kernel callchain */ -struct stackframe { - unsigned long fp; - unsigned long ra; -}; +#include <asm/stacktrace.h> /* * Get the return address for a single stackframe and return a pointer to the @@ -74,13 +70,11 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry, fp = user_backtrace(entry, fp, 0); } -bool fill_callchain(unsigned long pc, void *entry) +static bool fill_callchain(void *entry, unsigned long pc) { return perf_callchain_store(entry, pc); } -void notrace walk_stackframe(struct task_struct *task, - struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg); void 
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { diff --git a/arch/riscv/kernel/riscv_ksyms.c b/arch/riscv/kernel/riscv_ksyms.c index 450492e1cb4e..5ab1c7e1a6ed 100644 --- a/arch/riscv/kernel/riscv_ksyms.c +++ b/arch/riscv/kernel/riscv_ksyms.c @@ -11,5 +11,7 @@ */ EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memcpy); +EXPORT_SYMBOL(memmove); EXPORT_SYMBOL(__memset); EXPORT_SYMBOL(__memcpy); +EXPORT_SYMBOL(__memmove); diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index 117f3212a8e4..1d85e9bf783c 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -4,6 +4,8 @@ * Chen Liqin <liqin.chen@sunplusct.com> * Lennox Wu <lennox.wu@sunplusct.com> * Copyright (C) 2012 Regents of the University of California + * Copyright (C) 2020 FORTH-ICS/CARV + * Nick Kossifidis <mick@ics.forth.gr> */ #include <linux/init.h> @@ -22,6 +24,7 @@ #include <asm/cpu_ops.h> #include <asm/early_ioremap.h> #include <asm/setup.h> +#include <asm/set_memory.h> #include <asm/sections.h> #include <asm/sbi.h> #include <asm/tlbflush.h> @@ -51,6 +54,163 @@ atomic_t hart_lottery __section(".sdata"); unsigned long boot_cpu_hartid; static DEFINE_PER_CPU(struct cpu, cpu_devices); +/* + * Place kernel memory regions on the resource tree so that + * kexec-tools can retrieve them from /proc/iomem. While there + * also add "System RAM" regions for compatibility with other + * archs, and the rest of the known regions for completeness. + */ +static struct resource code_res = { .name = "Kernel code", }; +static struct resource data_res = { .name = "Kernel data", }; +static struct resource rodata_res = { .name = "Kernel rodata", }; +static struct resource bss_res = { .name = "Kernel bss", }; + +static int __init add_resource(struct resource *parent, + struct resource *res) +{ + int ret = 0; + + ret = insert_resource(parent, res); + if (ret < 0) { + pr_err("Failed to add a %s resource at %llx\n", + res->name, (unsigned long long) res->start); + return ret; + } + + return 1; +} + +static int __init add_kernel_resources(struct resource *res) +{ + int ret = 0; + + /* + * The memory region of the kernel image is continuous and + * was reserved on setup_bootmem, find it here and register + * it as a resource, then register the various segments of + * the image as child nodes + */ + if (!(res->start <= code_res.start && res->end >= data_res.end)) + return 0; + + res->name = "Kernel image"; + res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + + /* + * We removed a part of this region on setup_bootmem so + * we need to expand the resource for the bss to fit in. 
+ */ + res->end = bss_res.end; + + ret = add_resource(&iomem_resource, res); + if (ret < 0) + return ret; + + ret = add_resource(res, &code_res); + if (ret < 0) + return ret; + + ret = add_resource(res, &rodata_res); + if (ret < 0) + return ret; + + ret = add_resource(res, &data_res); + if (ret < 0) + return ret; + + ret = add_resource(res, &bss_res); + + return ret; +} + +static void __init init_resources(void) +{ + struct memblock_region *region = NULL; + struct resource *res = NULL; + int ret = 0; + + code_res.start = __pa_symbol(_text); + code_res.end = __pa_symbol(_etext) - 1; + code_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + + rodata_res.start = __pa_symbol(__start_rodata); + rodata_res.end = __pa_symbol(__end_rodata) - 1; + rodata_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + + data_res.start = __pa_symbol(_data); + data_res.end = __pa_symbol(_edata) - 1; + data_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + + bss_res.start = __pa_symbol(__bss_start); + bss_res.end = __pa_symbol(__bss_stop) - 1; + bss_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + + /* + * Start by adding the reserved regions, if they overlap + * with /memory regions, insert_resource later on will take + * care of it. + */ + for_each_reserved_mem_region(region) { + res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); + if (!res) + panic("%s: Failed to allocate %zu bytes\n", __func__, + sizeof(struct resource)); + + res->name = "Reserved"; + res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; + res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region)); + res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1; + + ret = add_kernel_resources(res); + if (ret < 0) + goto error; + else if (ret) + continue; + + /* + * Ignore any other reserved regions within + * system memory. 
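[Editorial note] The init_resources()/add_kernel_resources() code above registers the kernel image and its code/rodata/data/bss segments on the resource tree, next to the "Reserved" and "System RAM" entries, so kexec-tools can read them back from /proc/iomem as on other architectures. As a rough illustration only (the addresses below are invented, not taken from the patch), the resulting /proc/iomem on a RISC-V board could look something like:

    80000000-bfffffff : System RAM
      80200000-80a1ffff : Kernel image
        80200000-805fffff : Kernel code
        80600000-806fffff : Kernel rodata
        80700000-809fffff : Kernel data
        80a00000-80a1ffff : Kernel bss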
+ */ + if (memblock_is_memory(res->start)) + continue; + + ret = add_resource(&iomem_resource, res); + if (ret < 0) + goto error; + } + + /* Add /memory regions to the resource tree */ + for_each_mem_region(region) { + res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); + if (!res) + panic("%s: Failed to allocate %zu bytes\n", __func__, + sizeof(struct resource)); + + if (unlikely(memblock_is_nomap(region))) { + res->name = "Reserved"; + res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; + } else { + res->name = "System RAM"; + res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + } + + res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region)); + res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1; + + ret = add_resource(&iomem_resource, res); + if (ret < 0) + goto error; + } + + return; + + error: + memblock_free((phys_addr_t) res, sizeof(struct resource)); + /* Better an empty resource tree than an inconsistent one */ + release_child_resources(&iomem_resource); +} + + static void __init parse_dtb(void) { /* Early scan of device tree from init memory */ @@ -81,6 +241,7 @@ void __init setup_arch(char **cmdline_p) efi_init(); setup_bootmem(); paging_init(); + init_resources(); #if IS_ENABLED(CONFIG_BUILTIN_DTB) unflatten_and_copy_device_tree(); #else @@ -90,6 +251,11 @@ void __init setup_arch(char **cmdline_p) pr_err("No DTB found in kernel mappings\n"); #endif + if (IS_ENABLED(CONFIG_RISCV_SBI)) + sbi_init(); + + if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) + protect_kernel_text_data(); #ifdef CONFIG_SWIOTLB swiotlb_init(1); #endif @@ -98,10 +264,6 @@ void __init setup_arch(char **cmdline_p) kasan_init(); #endif -#if IS_ENABLED(CONFIG_RISCV_SBI) - sbi_init(); -#endif - #ifdef CONFIG_SMP setup_smp(); #endif @@ -123,3 +285,12 @@ static int __init topology_init(void) return 0; } subsys_initcall(topology_init); + +void free_initmem(void) +{ + unsigned long init_begin = (unsigned long)__init_begin; + unsigned long init_end = (unsigned long)__init_end; + + set_memory_rw_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT); + free_initmem_default(POISON_FREE_INITMEM); +} diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c index 595342910c3f..48b870a685b3 100644 --- a/arch/riscv/kernel/stacktrace.c +++ b/arch/riscv/kernel/stacktrace.c @@ -12,17 +12,14 @@ #include <linux/stacktrace.h> #include <linux/ftrace.h> +#include <asm/stacktrace.h> + register unsigned long sp_in_global __asm__("sp"); #ifdef CONFIG_FRAME_POINTER -struct stackframe { - unsigned long fp; - unsigned long ra; -}; - void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, - bool (*fn)(unsigned long, void *), void *arg) + bool (*fn)(void *, unsigned long), void *arg) { unsigned long fp, sp, pc; @@ -46,7 +43,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, unsigned long low, high; struct stackframe *frame; - if (unlikely(!__kernel_text_address(pc) || fn(pc, arg))) + if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc))) break; /* Validate frame pointer */ @@ -66,7 +63,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, #else /* !CONFIG_FRAME_POINTER */ void notrace walk_stackframe(struct task_struct *task, - struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg) + struct pt_regs *regs, bool (*fn)(void *, unsigned long), void *arg) { unsigned long sp, pc; unsigned long *ksp; @@ -88,7 +85,7 @@ void notrace walk_stackframe(struct task_struct *task, ksp = (unsigned long *)sp; 
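[Editorial note] The walk_stackframe() callback convention is inverted in this patch to match the generic stack_trace_consume_fn: the cookie comes first and the callback returns true to keep walking, false to stop. With ARCH_STACKWALK selected, arch_stack_walk() (defined just below in stacktrace.c) is a thin wrapper around walk_stackframe(), so a consumer looks like this minimal sketch. trace_buf, collect_entry and dump_current_stack are invented names for illustration; in practice most callers simply use stack_trace_save().

    #include <linux/kernel.h>
    #include <linux/sched.h>
    #include <linux/stacktrace.h>

    struct trace_buf {                      /* hypothetical helper, not part of the patch */
            unsigned long entries[16];
            unsigned int nr;
    };

    /* New convention: cookie first, return true to continue the walk. */
    static bool collect_entry(void *cookie, unsigned long pc)
    {
            struct trace_buf *buf = cookie;

            buf->entries[buf->nr++] = pc;
            return buf->nr < ARRAY_SIZE(buf->entries);
    }

    static void dump_current_stack(void)
    {
            struct trace_buf buf = { .nr = 0 };
            unsigned int i;

            arch_stack_walk(collect_entry, &buf, current, NULL);
            for (i = 0; i < buf.nr; i++)
                    pr_info("  %pS\n", (void *)buf.entries[i]);
    }

The generic stack_trace_save() does essentially this on top of arch_stack_walk(), which is why the arch-local save_stack_trace()/save_stack_trace_tsk() implementations can be removed below.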
while (!kstack_end(ksp)) { - if (__kernel_text_address(pc) && unlikely(fn(pc, arg))) + if (__kernel_text_address(pc) && unlikely(!fn(arg, pc))) break; pc = (*ksp++) - 0x4; } @@ -96,13 +93,12 @@ void notrace walk_stackframe(struct task_struct *task, #endif /* CONFIG_FRAME_POINTER */ - -static bool print_trace_address(unsigned long pc, void *arg) +static bool print_trace_address(void *arg, unsigned long pc) { const char *loglvl = arg; print_ip_sym(loglvl, pc); - return false; + return true; } void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) @@ -111,14 +107,14 @@ void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) walk_stackframe(task, NULL, print_trace_address, (void *)loglvl); } -static bool save_wchan(unsigned long pc, void *arg) +static bool save_wchan(void *arg, unsigned long pc) { if (!in_sched_functions(pc)) { unsigned long *p = arg; *p = pc; - return true; + return false; } - return false; + return true; } unsigned long get_wchan(struct task_struct *task) @@ -130,42 +126,12 @@ unsigned long get_wchan(struct task_struct *task) return pc; } - #ifdef CONFIG_STACKTRACE -static bool __save_trace(unsigned long pc, void *arg, bool nosched) -{ - struct stack_trace *trace = arg; - - if (unlikely(nosched && in_sched_functions(pc))) - return false; - if (unlikely(trace->skip > 0)) { - trace->skip--; - return false; - } - - trace->entries[trace->nr_entries++] = pc; - return (trace->nr_entries >= trace->max_entries); -} - -static bool save_trace(unsigned long pc, void *arg) -{ - return __save_trace(pc, arg, false); -} - -/* - * Save stack-backtrace addresses into a stack_trace buffer. - */ -void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) -{ - walk_stackframe(tsk, NULL, save_trace, trace); -} -EXPORT_SYMBOL_GPL(save_stack_trace_tsk); - -void save_stack_trace(struct stack_trace *trace) +void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, + struct task_struct *task, struct pt_regs *regs) { - save_stack_trace_tsk(NULL, trace); + walk_stackframe(task, regs, consume_entry, cookie); } -EXPORT_SYMBOL_GPL(save_stack_trace); #endif /* CONFIG_STACKTRACE */ diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S index 3ffbd6cbdb86..de03cb22d0e9 100644 --- a/arch/riscv/kernel/vmlinux.lds.S +++ b/arch/riscv/kernel/vmlinux.lds.S @@ -29,8 +29,30 @@ SECTIONS HEAD_TEXT_SECTION . = ALIGN(PAGE_SIZE); + .text : { + _text = .; + _stext = .; + TEXT_TEXT + SCHED_TEXT + CPUIDLE_TEXT + LOCK_TEXT + KPROBES_TEXT + ENTRY_TEXT + IRQENTRY_TEXT + SOFTIRQENTRY_TEXT + *(.fixup) + _etext = .; + } + + . = ALIGN(SECTION_ALIGN); __init_begin = .; - INIT_TEXT_SECTION(PAGE_SIZE) + __init_text_begin = .; + .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) ALIGN(SECTION_ALIGN) { \ + _sinittext = .; \ + INIT_TEXT \ + _einittext = .; \ + } + . = ALIGN(8); __soc_early_init_table : { __soc_early_init_table_start = .; @@ -47,35 +69,28 @@ SECTIONS { EXIT_TEXT } + + __init_text_end = .; + . = ALIGN(SECTION_ALIGN); +#ifdef CONFIG_EFI + . = ALIGN(PECOFF_SECTION_ALIGNMENT); + __pecoff_text_end = .; +#endif + /* Start of init data section */ + __init_data_begin = .; + INIT_DATA_SECTION(16) .exit.data : { EXIT_DATA } PERCPU_SECTION(L1_CACHE_BYTES) - __init_end = .; - . 
= ALIGN(SECTION_ALIGN); - .text : { - _text = .; - _stext = .; - TEXT_TEXT - SCHED_TEXT - CPUIDLE_TEXT - LOCK_TEXT - KPROBES_TEXT - ENTRY_TEXT - IRQENTRY_TEXT - SOFTIRQENTRY_TEXT - *(.fixup) - _etext = .; + .rel.dyn : { + *(.rel.dyn*) } -#ifdef CONFIG_EFI - . = ALIGN(PECOFF_SECTION_ALIGNMENT); - __pecoff_text_end = .; -#endif - - INIT_DATA_SECTION(16) + __init_data_end = .; + __init_end = .; /* Start of data section */ _sdata = .; @@ -105,10 +120,6 @@ SECTIONS BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0) - .rel.dyn : { - *(.rel.dyn*) - } - #ifdef CONFIG_EFI . = ALIGN(PECOFF_SECTION_ALIGNMENT); __pecoff_data_virt_size = ABSOLUTE(. - __pecoff_text_end); diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile index 47e7a8204460..ac6171e9c19e 100644 --- a/arch/riscv/lib/Makefile +++ b/arch/riscv/lib/Makefile @@ -2,5 +2,6 @@ lib-y += delay.o lib-y += memcpy.o lib-y += memset.o +lib-y += memmove.o lib-$(CONFIG_MMU) += uaccess.o lib-$(CONFIG_64BIT) += tishift.o diff --git a/arch/riscv/lib/memmove.S b/arch/riscv/lib/memmove.S new file mode 100644 index 000000000000..07d1d2152ba5 --- /dev/null +++ b/arch/riscv/lib/memmove.S @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include <linux/linkage.h> +#include <asm/asm.h> + +ENTRY(__memmove) +WEAK(memmove) + move t0, a0 + move t1, a1 + + beq a0, a1, exit_memcpy + beqz a2, exit_memcpy + srli t2, a2, 0x2 + + slt t3, a0, a1 + beqz t3, do_reverse + + andi a2, a2, 0x3 + li t4, 1 + beqz t2, byte_copy + +word_copy: + lw t3, 0(a1) + addi t2, t2, -1 + addi a1, a1, 4 + sw t3, 0(a0) + addi a0, a0, 4 + bnez t2, word_copy + beqz a2, exit_memcpy + j byte_copy + +do_reverse: + add a0, a0, a2 + add a1, a1, a2 + andi a2, a2, 0x3 + li t4, -1 + beqz t2, reverse_byte_copy + +reverse_word_copy: + addi a1, a1, -4 + addi t2, t2, -1 + lw t3, 0(a1) + addi a0, a0, -4 + sw t3, 0(a0) + bnez t2, reverse_word_copy + beqz a2, exit_memcpy + +reverse_byte_copy: + addi a0, a0, -1 + addi a1, a1, -1 + +byte_copy: + lb t3, 0(a1) + addi a2, a2, -1 + sb t3, 0(a0) + add a1, a1, t4 + add a0, a0, t4 + bnez a2, byte_copy + +exit_memcpy: + move a0, t0 + move a1, t1 + ret +END(__memmove) diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 8e577f14f120..13ba533f462b 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -13,6 +13,7 @@ #include <linux/of_fdt.h> #include <linux/libfdt.h> #include <linux/set_memory.h> +#include <linux/dma-map-ops.h> #include <asm/fixmap.h> #include <asm/tlbflush.h> @@ -41,13 +42,14 @@ struct pt_alloc_ops { #endif }; +static phys_addr_t dma32_phys_limit __ro_after_init; + static void __init zone_sizes_init(void) { unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, }; #ifdef CONFIG_ZONE_DMA32 - max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, - (unsigned long) PFN_PHYS(max_low_pfn))); + max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit); #endif max_zone_pfns[ZONE_NORMAL] = max_low_pfn; @@ -181,6 +183,7 @@ void __init setup_bootmem(void) max_pfn = PFN_DOWN(memblock_end_of_DRAM()); max_low_pfn = max_pfn; + dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn)); set_max_mapnr(max_low_pfn); #ifdef CONFIG_BLK_DEV_INITRD @@ -194,6 +197,7 @@ void __init setup_bootmem(void) memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va)); early_init_fdt_scan_reserved_mem(); + dma_contiguous_reserve(dma32_phys_limit); memblock_allow_resize(); memblock_dump_all(); } @@ -618,48 +622,33 @@ static inline void setup_vm_final(void) #endif /* CONFIG_MMU */ #ifdef CONFIG_STRICT_KERNEL_RWX -void mark_rodata_ro(void) +void 
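[Editorial note] The new memmove.S above copies len/4 words and then the remaining bytes, walking forward when the destination is below the source and backward otherwise, so overlapping buffers are handled correctly. A rough C rendering of the same control flow, as a sketch of the algorithm rather than the kernel's code (memmove_sketch is an invented name; like the assembly, it makes no attempt to align the word accesses):

    #include <linux/types.h>

    void *memmove_sketch(void *dst, const void *src, size_t len)
    {
            unsigned char *d = dst;
            const unsigned char *s = src;
            size_t words = len >> 2;        /* t2 in the assembly      */
            size_t bytes = len & 0x3;       /* a2 after the andi       */

            if (d == s || !len)
                    return dst;

            if (d < s) {
                    /* word_copy / byte_copy: forward, low addresses first */
                    for (; words; words--, d += 4, s += 4)
                            *(u32 *)d = *(const u32 *)s;
                    while (bytes--)
                            *d++ = *s++;
            } else {
                    /* do_reverse / reverse_word_copy: start from the end */
                    d += len;
                    s += len;
                    for (; words; words--) {
                            d -= 4;
                            s -= 4;
                            *(u32 *)d = *(const u32 *)s;
                    }
                    while (bytes--)
                            *--d = *--s;
            }
            return dst;
    }

Having a real memmove also lets KASAN builds redirect memmove() to __memmove() via the new define in asm/string.h.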
protect_kernel_text_data(void) { - unsigned long text_start = (unsigned long)_text; - unsigned long text_end = (unsigned long)_etext; + unsigned long text_start = (unsigned long)_start; + unsigned long init_text_start = (unsigned long)__init_text_begin; + unsigned long init_data_start = (unsigned long)__init_data_begin; unsigned long rodata_start = (unsigned long)__start_rodata; unsigned long data_start = (unsigned long)_data; unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn))); - set_memory_ro(text_start, (text_end - text_start) >> PAGE_SHIFT); - set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT); + set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT); + set_memory_ro(init_text_start, (init_data_start - init_text_start) >> PAGE_SHIFT); + set_memory_nx(init_data_start, (rodata_start - init_data_start) >> PAGE_SHIFT); + /* rodata section is marked readonly in mark_rodata_ro */ set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT); set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT); - - debug_checkwx(); } -#endif -static void __init resource_init(void) +void mark_rodata_ro(void) { - struct memblock_region *region; - - for_each_mem_region(region) { - struct resource *res; - - res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); - if (!res) - panic("%s: Failed to allocate %zu bytes\n", __func__, - sizeof(struct resource)); + unsigned long rodata_start = (unsigned long)__start_rodata; + unsigned long data_start = (unsigned long)_data; - if (memblock_is_nomap(region)) { - res->name = "reserved"; - res->flags = IORESOURCE_MEM; - } else { - res->name = "System RAM"; - res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; - } - res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region)); - res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1; + set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT); - request_resource(&iomem_resource, res); - } + debug_checkwx(); } +#endif void __init paging_init(void) { @@ -667,7 +656,6 @@ void __init paging_init(void) sparse_init(); setup_zero_page(); zone_sizes_init(); - resource_init(); } #ifdef CONFIG_SPARSEMEM_VMEMMAP diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c index 87ba5a68bbb8..5e49e4b4a4cc 100644 --- a/arch/riscv/mm/pageattr.c +++ b/arch/riscv/mm/pageattr.c @@ -128,6 +128,12 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask, return ret; } +int set_memory_rw_nx(unsigned long addr, int numpages) +{ + return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE), + __pgprot(_PAGE_EXEC)); +} + int set_memory_ro(unsigned long addr, int numpages) { return __set_memory(addr, numpages, __pgprot(_PAGE_READ), diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index 9ea83d80eb6f..c6af40ce03be 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -1137,6 +1137,10 @@ static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer, } #endif +#ifndef CONFIG_GENERIC_DEVMEM_IS_ALLOWED +extern int devmem_is_allowed(unsigned long pfn); +#endif + #endif /* __KERNEL__ */ #endif /* __ASM_GENERIC_IO_H */ diff --git a/lib/Kconfig b/lib/Kconfig index b46a9fd122c8..46806332a8cc 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -686,6 +686,9 @@ config GENERIC_LIB_CMPDI2 config GENERIC_LIB_UCMPDI2 bool +config GENERIC_LIB_DEVMEM_IS_ALLOWED + bool + config PLDMFW bool default n diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 
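[Editorial note] Taken together with the vmlinux.lds.S reordering (text first, then init text, then init data, each aligned to SECTION_ALIGN), the split between protect_kernel_text_data() and mark_rodata_ro() above gives roughly the following effective protections. protect_kernel_text_data() runs early from setup_arch(), mark_rodata_ro() is still invoked later in boot by the generic code, and free_initmem() flips the whole init region to RW+NX via set_memory_rw_nx() before releasing it:

    _start            .. __init_text_begin : kernel text   RO + X   (protect_kernel_text_data)
    __init_text_begin .. __init_data_begin : init text     RO + X, later RW + NX and freed (free_initmem)
    __init_data_begin .. __start_rodata    : init data     NX, freed with the init region
    __start_rodata    .. _data             : rodata        NX now, RO later (mark_rodata_ro)
    _data             .. end of linear map : data/bss      NX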
7d7097c5dc58..e6e58b26e888 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1676,7 +1676,7 @@ config ARCH_HAS_DEVMEM_IS_ALLOWED config STRICT_DEVMEM bool "Filter access to /dev/mem" depends on MMU && DEVMEM - depends on ARCH_HAS_DEVMEM_IS_ALLOWED + depends on ARCH_HAS_DEVMEM_IS_ALLOWED || GENERIC_LIB_DEVMEM_IS_ALLOWED default y if PPC || X86 || ARM64 help If this option is disabled, you allow userspace (root) access to all diff --git a/lib/Makefile b/lib/Makefile index 8598e8796edf..afeff05fa8c5 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -354,3 +354,5 @@ obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o obj-$(CONFIG_BITS_TEST) += test_bits.o obj-$(CONFIG_CMDLINE_KUNIT_TEST) += cmdline_kunit.o + +obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o diff --git a/lib/devmem_is_allowed.c b/lib/devmem_is_allowed.c new file mode 100644 index 000000000000..c0d67c541849 --- /dev/null +++ b/lib/devmem_is_allowed.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * A generic version of devmem_is_allowed. + * + * Based on arch/arm64/mm/mmap.c + * + * Copyright (C) 2020 Google, Inc. + * Copyright (C) 2012 ARM Ltd. + */ + +#include <linux/mm.h> +#include <linux/ioport.h> + +/* + * devmem_is_allowed() checks to see if /dev/mem access to a certain address + * is valid. The argument is a physical page number. We mimic x86 here by + * disallowing access to system RAM as well as device-exclusive MMIO regions. + * This effectively disable read()/write() on /dev/mem. + */ +int devmem_is_allowed(unsigned long pfn) +{ + if (iomem_is_exclusive(pfn << PAGE_SHIFT)) + return 0; + if (!page_is_ram(pfn)) + return 1; + return 0; +} |
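[Editorial note] The new lib/devmem_is_allowed.c is only compiled when an architecture selects GENERIC_LIB_DEVMEM_IS_ALLOWED, and the /dev/mem driver keeps calling devmem_is_allowed() as before: with STRICT_DEVMEM enabled it rejects system RAM and device-exclusive MMIO pages. A simplified sketch of how a range check on the read()/write() path consults it (paraphrasing drivers/char/mem.c, not the literal code there; range_is_allowed_sketch is an invented name):

    #include <linux/types.h>
    #include <linux/io.h>

    /* Walk a pfn range and reject it if any page is off limits for /dev/mem. */
    static bool range_is_allowed_sketch(unsigned long pfn, unsigned long nr_pages)
    {
            while (nr_pages--) {
                    if (!devmem_is_allowed(pfn))    /* 0 means "deny" */
                            return false;
                    pfn++;
            }
            return true;
    }

    /* A read()/write() spanning [phys, phys + count) fails with -EPERM when this says false. */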