author     Linus Torvalds  2019-09-16 15:29:34 -0700
committer  Linus Torvalds  2019-09-16 15:29:34 -0700
commit     58d4fafd0b4c36838077a5d7b17df537b7226f1c
tree       1b6b824c5ca4d1a5ff72219c18ee16dd23f90c4d  /arch/riscv
parent     dbcda58ad98936079c48728c12c27a2f333fb484
parent     9ce06497c2722a0f9109e4cc3ce35b7a69617886
Merge tag 'riscv/for-v5.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux
Pull RISC-V updates from Paul Walmsley:
"Add the following new features:
- Generic CPU topology description support for DT-based platforms,
including ARM64, ARM and RISC-V.
- Sparsemem support
- Perf callchain support
- SiFive PLIC irqchip modifications, in preparation for M-mode Linux

  and clean up the code base:

- Clean up control and status register (CSR) manipulation code, IPIs, TLB
  flushing, and the RISC-V CPU-local timer code
- Kbuild cleanup from one of the Kbuild maintainers"
[ The CPU topology parts came in through the arm64 tree with a shared
branch - Linus ]
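
The timer cleanup called out above ("don't use the rdtime(h) pseudo-instructions" in the list below) replaces hand-written rdtime/rdtimeh inline assembly with reads of the time CSRs. A condensed sketch of the resulting pattern from the arch/riscv/include/asm/timex.h hunk in this merge (not a drop-in copy of the file):

    #include <linux/types.h>
    #include <asm/csr.h>

    typedef unsigned long cycles_t;

    static inline cycles_t get_cycles(void)
    {
            return csr_read(CSR_TIME);      /* replaces the old "rdtime %0" asm */
    }

    #ifndef CONFIG_64BIT
    static inline u64 get_cycles64(void)
    {
            u32 hi, lo;

            /* Re-read the high half until it is stable across the low read. */
            do {
                    hi = csr_read(CSR_TIMEH);
                    lo = csr_read(CSR_TIME);
            } while (hi != csr_read(CSR_TIMEH));

            return ((u64)hi << 32) | lo;
    }
    #endif

On 64-bit, get_cycles64() simply returns get_cycles(); the retry loop is only needed where the 64-bit counter is exposed as two 32-bit CSRs.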
* tag 'riscv/for-v5.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
irqchip/sifive-plic: set max threshold for ignored handlers
riscv: move the TLB flush logic out of line
riscv: don't use the rdtime(h) pseudo-instructions
riscv: cleanup riscv_cpuid_to_hartid_mask
riscv: optimize send_ipi_single
riscv: cleanup send_ipi_mask
riscv: refactor the IPI code
riscv: Add support for libdw
riscv: Add support for perf registers sampling
riscv: Add perf callchain support
riscv: add arch/riscv/Kbuild
RISC-V: Implement sparsemem
riscv: Using CSR numbers to access CSRs
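
The three IPI patches above ("refactor the IPI code", "cleanup send_ipi_mask", "optimize send_ipi_single") replace the old send_ipi_message() helper with separate mask and single-CPU paths. Condensed from the arch/riscv/kernel/smp.c hunks in this merge (a sketch of the new helpers, not the full file):

    static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
    {
            struct cpumask hartid_mask;
            int cpu;

            /* Mark the operation pending on every target CPU... */
            smp_mb__before_atomic();
            for_each_cpu(cpu, mask)
                    set_bit(op, &ipi_data[cpu].bits);
            smp_mb__after_atomic();

            /* ...then translate logical CPU ids to hart ids and issue one SBI IPI. */
            riscv_cpuid_to_hartid_mask(mask, &hartid_mask);
            sbi_send_ipi(cpumask_bits(&hartid_mask));
    }

    static void send_ipi_single(int cpu, enum ipi_message_type op)
    {
            int hartid = cpuid_to_hartid_map(cpu);

            smp_mb__before_atomic();
            set_bit(op, &ipi_data[cpu].bits);
            smp_mb__after_atomic();

            /* The single-CPU path avoids building a full hartid cpumask. */
            sbi_send_ipi(cpumask_bits(cpumask_of(hartid)));
    }

The related "cleanup riscv_cpuid_to_hartid_mask" change makes that helper clear its output mask itself, so callers such as flush_icache_mm() and the new tlbflush.c no longer need their own cpumask_clear().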
Diffstat (limited to 'arch/riscv')
-rw-r--r--  arch/riscv/Kbuild                        |  3
-rw-r--r--  arch/riscv/Kconfig                       | 23
-rw-r--r--  arch/riscv/Makefile                      |  5
-rw-r--r--  arch/riscv/include/asm/page.h            |  2
-rw-r--r--  arch/riscv/include/asm/pgtable.h         | 13
-rw-r--r--  arch/riscv/include/asm/smp.h             |  6
-rw-r--r--  arch/riscv/include/asm/sparsemem.h       | 11
-rw-r--r--  arch/riscv/include/asm/timex.h           | 44
-rw-r--r--  arch/riscv/include/asm/tlbflush.h        | 38
-rw-r--r--  arch/riscv/include/uapi/asm/perf_regs.h  | 42
-rw-r--r--  arch/riscv/kernel/Makefile               |  4
-rw-r--r--  arch/riscv/kernel/entry.S                |  6
-rw-r--r--  arch/riscv/kernel/fpu.S                  |  8
-rw-r--r--  arch/riscv/kernel/head.S                 |  2
-rw-r--r--  arch/riscv/kernel/perf_callchain.c       | 94
-rw-r--r--  arch/riscv/kernel/perf_regs.c            | 44
-rw-r--r--  arch/riscv/kernel/smp.c                  | 60
-rw-r--r--  arch/riscv/kernel/stacktrace.c           |  4
-rw-r--r--  arch/riscv/lib/uaccess.S                 | 12
-rw-r--r--  arch/riscv/mm/Makefile                   |  3
-rw-r--r--  arch/riscv/mm/cacheflush.c               |  1
-rw-r--r--  arch/riscv/mm/context.c                  |  7
-rw-r--r--  arch/riscv/mm/init.c                     | 12
-rw-r--r--  arch/riscv/mm/tlbflush.c                 | 35
24 files changed, 369 insertions, 110 deletions
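
Of the files listed above, arch/riscv/mm/tlbflush.c is new: "move the TLB flush logic out of line" turns the SMP flush macros that previously lived in tlbflush.h into ordinary functions that are only built for CONFIG_SMP (and only with an MMU, per the mm/Makefile hunk). The core of the new file, condensed from the diff below:

    #include <linux/mm.h>
    #include <linux/smp.h>
    #include <asm/sbi.h>

    void flush_tlb_all(void)
    {
            sbi_remote_sfence_vma(NULL, 0, -1);
    }

    static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start,
                                      unsigned long size)
    {
            struct cpumask hmask;

            /* Convert logical CPUs to hart ids, then ask the SBI for a remote sfence.vma. */
            riscv_cpuid_to_hartid_mask(cmask, &hmask);
            sbi_remote_sfence_vma(hmask.bits, start, size);
    }

    void flush_tlb_mm(struct mm_struct *mm)
    {
            __sbi_tlb_flush_range(mm_cpumask(mm), 0, -1);
    }

    void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
    {
            __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE);
    }

flush_tlb_range() follows the same pattern, passing (end - start) as the size.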
diff --git a/arch/riscv/Kbuild b/arch/riscv/Kbuild new file mode 100644 index 000000000000..d1d0aa70fdf1 --- /dev/null +++ b/arch/riscv/Kbuild @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-y += kernel/ mm/ net/ diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 86ee362a1375..1211543c330c 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -35,6 +35,8 @@ config RISCV select HAVE_DMA_CONTIGUOUS select HAVE_FUTEX_CMPXCHG if FUTEX select HAVE_PERF_EVENTS + select HAVE_PERF_REGS + select HAVE_PERF_USER_STACK_DUMP select HAVE_SYSCALL_TRACEPOINTS select IRQ_DOMAIN select SPARSE_IRQ @@ -55,6 +57,7 @@ config RISCV select EDAC_SUPPORT select ARCH_HAS_GIGANTIC_PAGE select ARCH_WANT_HUGE_PMD_SHARE if 64BIT + select SPARSEMEM_STATIC if 32BIT config MMU def_bool y @@ -63,12 +66,32 @@ config ZONE_DMA32 bool default y if 64BIT +config VA_BITS + int + default 32 if 32BIT + default 39 if 64BIT + +config PA_BITS + int + default 34 if 32BIT + default 56 if 64BIT + config PAGE_OFFSET hex default 0xC0000000 if 32BIT && MAXPHYSMEM_2GB default 0xffffffff80000000 if 64BIT && MAXPHYSMEM_2GB default 0xffffffe000000000 if 64BIT && MAXPHYSMEM_128GB +config ARCH_FLATMEM_ENABLE + def_bool y + +config ARCH_SPARSEMEM_ENABLE + def_bool y + select SPARSEMEM_VMEMMAP_ENABLE + +config ARCH_SELECT_MEMORY_MODEL + def_bool ARCH_SPARSEMEM_ENABLE + config ARCH_WANT_GENERAL_HUGETLB def_bool y diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index 7a117be8297c..4f0a3d2018d2 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -54,6 +54,9 @@ endif ifeq ($(CONFIG_MODULE_SECTIONS),y) KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/riscv/kernel/module.lds endif +ifeq ($(CONFIG_PERF_EVENTS),y) + KBUILD_CFLAGS += -fno-omit-frame-pointer +endif KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax) @@ -72,7 +75,7 @@ KBUILD_IMAGE := $(boot)/Image.gz head-y := arch/riscv/kernel/head.o -core-y += arch/riscv/kernel/ arch/riscv/mm/ arch/riscv/net/ +core-y += arch/riscv/ libs-y += arch/riscv/lib/ diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h index 707e00a8430b..3db261c4810f 100644 --- a/arch/riscv/include/asm/page.h +++ b/arch/riscv/include/asm/page.h @@ -110,8 +110,10 @@ extern unsigned long min_low_pfn; #define page_to_bus(page) (page_to_phys(page)) #define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr))) +#ifdef CONFIG_FLATMEM #define pfn_valid(pfn) \ (((pfn) >= pfn_base) && (((pfn)-pfn_base) < max_mapnr)) +#endif #define ARCH_PFN_OFFSET (pfn_base) diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index c24a083b3e12..80905b27ee98 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -84,6 +84,19 @@ extern pgd_t swapper_pg_dir[]; #define __S111 PAGE_SHARED_EXEC /* + * Roughly size the vmemmap space to be large enough to fit enough + * struct pages to map half the virtual address space. Then + * position vmemmap directly below the VMALLOC region. + */ +#define VMEMMAP_SHIFT \ + (CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT) +#define VMEMMAP_SIZE BIT(VMEMMAP_SHIFT) +#define VMEMMAP_END (VMALLOC_START - 1) +#define VMEMMAP_START (VMALLOC_START - VMEMMAP_SIZE) + +#define vmemmap ((struct page *)VMEMMAP_START) + +/* * ZERO_PAGE is a global shared page that is always zero, * used for zero-mapped memory areas, etc. 
*/ diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h index c6ed4d691def..a83451d73a4e 100644 --- a/arch/riscv/include/asm/smp.h +++ b/arch/riscv/include/asm/smp.h @@ -61,11 +61,5 @@ static inline unsigned long cpuid_to_hartid_map(int cpu) return boot_cpu_hartid; } -static inline void riscv_cpuid_to_hartid_mask(const struct cpumask *in, - struct cpumask *out) -{ - cpumask_set_cpu(cpuid_to_hartid_map(0), out); -} - #endif /* CONFIG_SMP */ #endif /* _ASM_RISCV_SMP_H */ diff --git a/arch/riscv/include/asm/sparsemem.h b/arch/riscv/include/asm/sparsemem.h new file mode 100644 index 000000000000..b58ba2d9ed6e --- /dev/null +++ b/arch/riscv/include/asm/sparsemem.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_SPARSEMEM_H +#define __ASM_SPARSEMEM_H + +#ifdef CONFIG_SPARSEMEM +#define MAX_PHYSMEM_BITS CONFIG_PA_BITS +#define SECTION_SIZE_BITS 27 +#endif /* CONFIG_SPARSEMEM */ + +#endif /* __ASM_SPARSEMEM_H */ diff --git a/arch/riscv/include/asm/timex.h b/arch/riscv/include/asm/timex.h index 6a703ec9d796..c7ef131b9e4c 100644 --- a/arch/riscv/include/asm/timex.h +++ b/arch/riscv/include/asm/timex.h @@ -6,43 +6,41 @@ #ifndef _ASM_RISCV_TIMEX_H #define _ASM_RISCV_TIMEX_H -#include <asm/param.h> +#include <asm/csr.h> typedef unsigned long cycles_t; -static inline cycles_t get_cycles_inline(void) +static inline cycles_t get_cycles(void) { - cycles_t n; - - __asm__ __volatile__ ( - "rdtime %0" - : "=r" (n)); - return n; + return csr_read(CSR_TIME); } -#define get_cycles get_cycles_inline +#define get_cycles get_cycles #ifdef CONFIG_64BIT -static inline uint64_t get_cycles64(void) +static inline u64 get_cycles64(void) +{ + return get_cycles(); +} +#else /* CONFIG_64BIT */ +static inline u32 get_cycles_hi(void) { - return get_cycles(); + return csr_read(CSR_TIMEH); } -#else -static inline uint64_t get_cycles64(void) + +static inline u64 get_cycles64(void) { - u32 lo, hi, tmp; - __asm__ __volatile__ ( - "1:\n" - "rdtimeh %0\n" - "rdtime %1\n" - "rdtimeh %2\n" - "bne %0, %2, 1b" - : "=&r" (hi), "=&r" (lo), "=&r" (tmp)); + u32 hi, lo; + + do { + hi = get_cycles_hi(); + lo = get_cycles(); + } while (hi != get_cycles_hi()); + return ((u64)hi << 32) | lo; } -#endif +#endif /* CONFIG_64BIT */ #define ARCH_HAS_READ_CURRENT_TIMER - static inline int read_current_timer(unsigned long *timer_val) { *timer_val = get_cycles(); diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h index 4d9bbe8438bf..37ae4e367ad2 100644 --- a/arch/riscv/include/asm/tlbflush.h +++ b/arch/riscv/include/asm/tlbflush.h @@ -25,8 +25,13 @@ static inline void local_flush_tlb_page(unsigned long addr) __asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"); } -#ifndef CONFIG_SMP - +#ifdef CONFIG_SMP +void flush_tlb_all(void); +void flush_tlb_mm(struct mm_struct *mm); +void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr); +void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end); +#else /* CONFIG_SMP */ #define flush_tlb_all() local_flush_tlb_all() #define flush_tlb_page(vma, addr) local_flush_tlb_page(addr) @@ -37,35 +42,6 @@ static inline void flush_tlb_range(struct vm_area_struct *vma, } #define flush_tlb_mm(mm) flush_tlb_all() - -#else /* CONFIG_SMP */ - -#include <asm/sbi.h> - -static inline void remote_sfence_vma(struct cpumask *cmask, unsigned long start, - unsigned long size) -{ - struct cpumask hmask; - - cpumask_clear(&hmask); - riscv_cpuid_to_hartid_mask(cmask, &hmask); - 
sbi_remote_sfence_vma(hmask.bits, start, size); -} - -#define flush_tlb_all() sbi_remote_sfence_vma(NULL, 0, -1) - -#define flush_tlb_range(vma, start, end) \ - remote_sfence_vma(mm_cpumask((vma)->vm_mm), start, (end) - (start)) - -static inline void flush_tlb_page(struct vm_area_struct *vma, - unsigned long addr) -{ - flush_tlb_range(vma, addr, addr + PAGE_SIZE); -} - -#define flush_tlb_mm(mm) \ - remote_sfence_vma(mm_cpumask(mm), 0, -1) - #endif /* CONFIG_SMP */ /* Flush a range of kernel pages */ diff --git a/arch/riscv/include/uapi/asm/perf_regs.h b/arch/riscv/include/uapi/asm/perf_regs.h new file mode 100644 index 000000000000..196f964bfcb4 --- /dev/null +++ b/arch/riscv/include/uapi/asm/perf_regs.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. */ + +#ifndef _ASM_RISCV_PERF_REGS_H +#define _ASM_RISCV_PERF_REGS_H + +enum perf_event_riscv_regs { + PERF_REG_RISCV_PC, + PERF_REG_RISCV_RA, + PERF_REG_RISCV_SP, + PERF_REG_RISCV_GP, + PERF_REG_RISCV_TP, + PERF_REG_RISCV_T0, + PERF_REG_RISCV_T1, + PERF_REG_RISCV_T2, + PERF_REG_RISCV_S0, + PERF_REG_RISCV_S1, + PERF_REG_RISCV_A0, + PERF_REG_RISCV_A1, + PERF_REG_RISCV_A2, + PERF_REG_RISCV_A3, + PERF_REG_RISCV_A4, + PERF_REG_RISCV_A5, + PERF_REG_RISCV_A6, + PERF_REG_RISCV_A7, + PERF_REG_RISCV_S2, + PERF_REG_RISCV_S3, + PERF_REG_RISCV_S4, + PERF_REG_RISCV_S5, + PERF_REG_RISCV_S6, + PERF_REG_RISCV_S7, + PERF_REG_RISCV_S8, + PERF_REG_RISCV_S9, + PERF_REG_RISCV_S10, + PERF_REG_RISCV_S11, + PERF_REG_RISCV_T3, + PERF_REG_RISCV_T4, + PERF_REG_RISCV_T5, + PERF_REG_RISCV_T6, + PERF_REG_RISCV_MAX, +}; +#endif /* _ASM_RISCV_PERF_REGS_H */ diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index 2420d37d96de..696020ff72db 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -38,6 +38,8 @@ obj-$(CONFIG_MODULE_SECTIONS) += module-sections.o obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o -obj-$(CONFIG_PERF_EVENTS) += perf_event.o +obj-$(CONFIG_PERF_EVENTS) += perf_event.o +obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o +obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o clean: diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index bc7a56e1ca6f..74ccfd464071 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -167,7 +167,7 @@ ENTRY(handle_exception) tail do_IRQ 1: /* Exceptions run with interrupts enabled */ - csrs sstatus, SR_SIE + csrs CSR_SSTATUS, SR_SIE /* Handle syscalls */ li t0, EXC_SYSCALL @@ -222,7 +222,7 @@ ret_from_syscall: ret_from_exception: REG_L s0, PT_SSTATUS(sp) - csrc sstatus, SR_SIE + csrc CSR_SSTATUS, SR_SIE andi s0, s0, SR_SPP bnez s0, resume_kernel @@ -265,7 +265,7 @@ work_pending: bnez s1, work_resched work_notifysig: /* Handle pending signals and notify-resume requests */ - csrs sstatus, SR_SIE /* Enable interrupts for do_notify_resume() */ + csrs CSR_SSTATUS, SR_SIE /* Enable interrupts for do_notify_resume() */ move a0, sp /* pt_regs */ move a1, s0 /* current_thread_info->flags */ tail do_notify_resume diff --git a/arch/riscv/kernel/fpu.S b/arch/riscv/kernel/fpu.S index 1defb0618aff..631d31540660 100644 --- a/arch/riscv/kernel/fpu.S +++ b/arch/riscv/kernel/fpu.S @@ -23,7 +23,7 @@ ENTRY(__fstate_save) li a2, TASK_THREAD_F0 add a0, a0, a2 li t1, SR_FS - csrs sstatus, t1 + csrs CSR_SSTATUS, t1 frcsr t0 fsd f0, TASK_THREAD_F0_F0(a0) fsd f1, TASK_THREAD_F1_F0(a0) @@ -58,7 +58,7 @@ ENTRY(__fstate_save) fsd f30, 
TASK_THREAD_F30_F0(a0) fsd f31, TASK_THREAD_F31_F0(a0) sw t0, TASK_THREAD_FCSR_F0(a0) - csrc sstatus, t1 + csrc CSR_SSTATUS, t1 ret ENDPROC(__fstate_save) @@ -67,7 +67,7 @@ ENTRY(__fstate_restore) add a0, a0, a2 li t1, SR_FS lw t0, TASK_THREAD_FCSR_F0(a0) - csrs sstatus, t1 + csrs CSR_SSTATUS, t1 fld f0, TASK_THREAD_F0_F0(a0) fld f1, TASK_THREAD_F1_F0(a0) fld f2, TASK_THREAD_F2_F0(a0) @@ -101,6 +101,6 @@ ENTRY(__fstate_restore) fld f30, TASK_THREAD_F30_F0(a0) fld f31, TASK_THREAD_F31_F0(a0) fscsr t0 - csrc sstatus, t1 + csrc CSR_SSTATUS, t1 ret ENDPROC(__fstate_restore) diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index 52eec0c1bf30..15a9189f91ad 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -61,7 +61,7 @@ _start_kernel: * floating point in kernel space */ li t0, SR_FS - csrc sstatus, t0 + csrc CSR_SSTATUS, t0 /* Pick one hart to run the main boot sequence */ la a3, hart_lottery diff --git a/arch/riscv/kernel/perf_callchain.c b/arch/riscv/kernel/perf_callchain.c new file mode 100644 index 000000000000..8d2804f05cf9 --- /dev/null +++ b/arch/riscv/kernel/perf_callchain.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. */ + +#include <linux/perf_event.h> +#include <linux/uaccess.h> + +/* Kernel callchain */ +struct stackframe { + unsigned long fp; + unsigned long ra; +}; + +/* + * Get the return address for a single stackframe and return a pointer to the + * next frame tail. + */ +static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry, + unsigned long fp, unsigned long reg_ra) +{ + struct stackframe buftail; + unsigned long ra = 0; + unsigned long *user_frame_tail = + (unsigned long *)(fp - sizeof(struct stackframe)); + + /* Check accessibility of one struct frame_tail beyond */ + if (!access_ok(user_frame_tail, sizeof(buftail))) + return 0; + if (__copy_from_user_inatomic(&buftail, user_frame_tail, + sizeof(buftail))) + return 0; + + if (reg_ra != 0) + ra = reg_ra; + else + ra = buftail.ra; + + fp = buftail.fp; + if (ra != 0) + perf_callchain_store(entry, ra); + else + return 0; + + return fp; +} + +/* + * This will be called when the target is in user mode + * This function will only be called when we use + * "PERF_SAMPLE_CALLCHAIN" in + * kernel/events/core.c:perf_prepare_sample() + * + * How to trigger perf_callchain_[user/kernel] : + * $ perf record -e cpu-clock --call-graph fp ./program + * $ perf report --call-graph + * + * On RISC-V platform, the program being sampled and the C library + * need to be compiled with -fno-omit-frame-pointer, otherwise + * the user stack will not contain function frame. + */ +void perf_callchain_user(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + unsigned long fp = 0; + + /* RISC-V does not support perf in guest mode. */ + if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) + return; + + fp = regs->s0; + perf_callchain_store(entry, regs->sepc); + + fp = user_backtrace(entry, fp, regs->ra); + while (fp && !(fp & 0x3) && entry->nr < entry->max_stack) + fp = user_backtrace(entry, fp, 0); +} + +bool fill_callchain(unsigned long pc, void *entry) +{ + return perf_callchain_store(entry, pc); +} + +void notrace walk_stackframe(struct task_struct *task, + struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg); +void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + /* RISC-V does not support perf in guest mode. 
*/ + if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { + pr_warn("RISC-V does not support perf in guest mode!"); + return; + } + + walk_stackframe(NULL, regs, fill_callchain, entry); +} diff --git a/arch/riscv/kernel/perf_regs.c b/arch/riscv/kernel/perf_regs.c new file mode 100644 index 000000000000..04a38fbeb9c7 --- /dev/null +++ b/arch/riscv/kernel/perf_regs.c @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. */ + +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/perf_event.h> +#include <linux/bug.h> +#include <asm/perf_regs.h> +#include <asm/ptrace.h> + +u64 perf_reg_value(struct pt_regs *regs, int idx) +{ + if (WARN_ON_ONCE((u32)idx >= PERF_REG_RISCV_MAX)) + return 0; + + return ((unsigned long *)regs)[idx]; +} + +#define REG_RESERVED (~((1ULL << PERF_REG_RISCV_MAX) - 1)) + +int perf_reg_validate(u64 mask) +{ + if (!mask || mask & REG_RESERVED) + return -EINVAL; + + return 0; +} + +u64 perf_reg_abi(struct task_struct *task) +{ +#if __riscv_xlen == 64 + return PERF_SAMPLE_REGS_ABI_64; +#else + return PERF_SAMPLE_REGS_ABI_32; +#endif +} + +void perf_get_regs_user(struct perf_regs *regs_user, + struct pt_regs *regs, + struct pt_regs *regs_user_copy) +{ + regs_user->regs = task_pt_regs(current); + regs_user->abi = perf_reg_abi(current); +} diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c index 5a9834503a2f..3836760d7aaf 100644 --- a/arch/riscv/kernel/smp.c +++ b/arch/riscv/kernel/smp.c @@ -56,6 +56,7 @@ void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out) { int cpu; + cpumask_clear(out); for_each_cpu(cpu, in) cpumask_set_cpu(cpuid_to_hartid_map(cpu), out); } @@ -78,13 +79,42 @@ static void ipi_stop(void) wait_for_interrupt(); } +static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op) +{ + struct cpumask hartid_mask; + int cpu; + + smp_mb__before_atomic(); + for_each_cpu(cpu, mask) + set_bit(op, &ipi_data[cpu].bits); + smp_mb__after_atomic(); + + riscv_cpuid_to_hartid_mask(mask, &hartid_mask); + sbi_send_ipi(cpumask_bits(&hartid_mask)); +} + +static void send_ipi_single(int cpu, enum ipi_message_type op) +{ + int hartid = cpuid_to_hartid_map(cpu); + + smp_mb__before_atomic(); + set_bit(op, &ipi_data[cpu].bits); + smp_mb__after_atomic(); + + sbi_send_ipi(cpumask_bits(cpumask_of(hartid))); +} + +static inline void clear_ipi(void) +{ + csr_clear(CSR_SIP, SIE_SSIE); +} + void riscv_software_interrupt(void) { unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits; unsigned long *stats = ipi_data[smp_processor_id()].stats; - /* Clear pending IPI */ - csr_clear(CSR_SIP, SIE_SSIE); + clear_ipi(); while (true) { unsigned long ops; @@ -118,23 +148,6 @@ void riscv_software_interrupt(void) } } -static void -send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation) -{ - int cpuid, hartid; - struct cpumask hartid_mask; - - cpumask_clear(&hartid_mask); - mb(); - for_each_cpu(cpuid, to_whom) { - set_bit(operation, &ipi_data[cpuid].bits); - hartid = cpuid_to_hartid_map(cpuid); - cpumask_set_cpu(hartid, &hartid_mask); - } - mb(); - sbi_send_ipi(cpumask_bits(&hartid_mask)); -} - static const char * const ipi_names[] = { [IPI_RESCHEDULE] = "Rescheduling interrupts", [IPI_CALL_FUNC] = "Function call interrupts", @@ -156,12 +169,12 @@ void show_ipi_stats(struct seq_file *p, int prec) void arch_send_call_function_ipi_mask(struct cpumask *mask) { - send_ipi_message(mask, IPI_CALL_FUNC); + send_ipi_mask(mask, IPI_CALL_FUNC); } void 
arch_send_call_function_single_ipi(int cpu) { - send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC); + send_ipi_single(cpu, IPI_CALL_FUNC); } void smp_send_stop(void) @@ -176,7 +189,7 @@ void smp_send_stop(void) if (system_state <= SYSTEM_RUNNING) pr_crit("SMP: stopping secondary CPUs\n"); - send_ipi_message(&mask, IPI_CPU_STOP); + send_ipi_mask(&mask, IPI_CPU_STOP); } /* Wait up to one second for other CPUs to stop */ @@ -191,6 +204,5 @@ void smp_send_stop(void) void smp_send_reschedule(int cpu) { - send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE); + send_ipi_single(cpu, IPI_RESCHEDULE); } - diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c index f15642715d1a..0940681d2f68 100644 --- a/arch/riscv/kernel/stacktrace.c +++ b/arch/riscv/kernel/stacktrace.c @@ -19,8 +19,8 @@ struct stackframe { unsigned long ra; }; -static void notrace walk_stackframe(struct task_struct *task, - struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg) +void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, + bool (*fn)(unsigned long, void *), void *arg) { unsigned long fp, sp, pc; diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S index 399e6f0c2d98..ed2696c0143d 100644 --- a/arch/riscv/lib/uaccess.S +++ b/arch/riscv/lib/uaccess.S @@ -18,7 +18,7 @@ ENTRY(__asm_copy_from_user) /* Enable access to user memory */ li t6, SR_SUM - csrs sstatus, t6 + csrs CSR_SSTATUS, t6 add a3, a1, a2 /* Use word-oriented copy only if low-order bits match */ @@ -47,7 +47,7 @@ ENTRY(__asm_copy_from_user) 3: /* Disable access to user memory */ - csrc sstatus, t6 + csrc CSR_SSTATUS, t6 li a0, 0 ret 4: /* Edge case: unalignment */ @@ -72,7 +72,7 @@ ENTRY(__clear_user) /* Enable access to user memory */ li t6, SR_SUM - csrs sstatus, t6 + csrs CSR_SSTATUS, t6 add a3, a0, a1 addi t0, a0, SZREG-1 @@ -94,7 +94,7 @@ ENTRY(__clear_user) 3: /* Disable access to user memory */ - csrc sstatus, t6 + csrc CSR_SSTATUS, t6 li a0, 0 ret 4: /* Edge case: unalignment */ @@ -114,11 +114,11 @@ ENDPROC(__clear_user) /* Fixup code for __copy_user(10) and __clear_user(11) */ 10: /* Disable access to user memory */ - csrs sstatus, t6 + csrs CSR_SSTATUS, t6 mv a0, a2 ret 11: - csrs sstatus, t6 + csrs CSR_SSTATUS, t6 mv a0, a1 ret .previous diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile index 74055e1d6f21..9d9a17335686 100644 --- a/arch/riscv/mm/Makefile +++ b/arch/riscv/mm/Makefile @@ -13,4 +13,7 @@ obj-y += cacheflush.o obj-y += context.o obj-y += sifive_l2_cache.o +ifeq ($(CONFIG_MMU),y) +obj-$(CONFIG_SMP) += tlbflush.o +endif obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c index 9ebcff8ba263..3f15938dec89 100644 --- a/arch/riscv/mm/cacheflush.c +++ b/arch/riscv/mm/cacheflush.c @@ -47,7 +47,6 @@ void flush_icache_mm(struct mm_struct *mm, bool local) cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu)); local |= cpumask_empty(&others); if (mm != current->active_mm || !local) { - cpumask_clear(&hmask); riscv_cpuid_to_hartid_mask(&others, &hmask); sbi_remote_fence_i(hmask.bits); } else { diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c index 89ceb3cbe218..beeb5d7f92ea 100644 --- a/arch/riscv/mm/context.c +++ b/arch/riscv/mm/context.c @@ -57,12 +57,7 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next, cpumask_clear_cpu(cpu, mm_cpumask(prev)); cpumask_set_cpu(cpu, mm_cpumask(next)); - /* - * Use the old spbtr name instead of using the current satp - * name to support binutils 2.29 
which doesn't know about the - * privileged ISA 1.10 yet. - */ - csr_write(sptbr, virt_to_pfn(next->pgd) | SATP_MODE); + csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE); local_flush_tlb_all(); flush_icache_deferred(next); diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 42bf939693d3..f0ba71304b6e 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -435,13 +435,23 @@ static void __init setup_vm_final(void) clear_fixmap(FIX_PMD); /* Move to swapper page table */ - csr_write(sptbr, PFN_DOWN(__pa(swapper_pg_dir)) | SATP_MODE); + csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | SATP_MODE); local_flush_tlb_all(); } void __init paging_init(void) { setup_vm_final(); + memblocks_present(); + sparse_init(); setup_zero_page(); zone_sizes_init(); } + +#ifdef CONFIG_SPARSEMEM +int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, + struct vmem_altmap *altmap) +{ + return vmemmap_populate_basepages(start, end, node); +} +#endif diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c new file mode 100644 index 000000000000..24cd33d2c48f --- /dev/null +++ b/arch/riscv/mm/tlbflush.c @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/mm.h> +#include <linux/smp.h> +#include <asm/sbi.h> + +void flush_tlb_all(void) +{ + sbi_remote_sfence_vma(NULL, 0, -1); +} + +static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start, + unsigned long size) +{ + struct cpumask hmask; + + riscv_cpuid_to_hartid_mask(cmask, &hmask); + sbi_remote_sfence_vma(hmask.bits, start, size); +} + +void flush_tlb_mm(struct mm_struct *mm) +{ + __sbi_tlb_flush_range(mm_cpumask(mm), 0, -1); +} + +void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) +{ + __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE); +} + +void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end) +{ + __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start); +} |
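
Usage note for the new perf callchain support, taken from the comment added in arch/riscv/kernel/perf_callchain.c above: unwinding is frame-pointer based, so the sampled program and the C library need to be built with -fno-omit-frame-pointer (the arch/riscv/Makefile hunk adds that flag to KBUILD_CFLAGS whenever CONFIG_PERF_EVENTS=y). Sampling is then triggered with, for example:

    $ perf record -e cpu-clock --call-graph fp ./program
    $ perf report --call-graph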