Diffstat (limited to 'arch')
50 files changed, 756 insertions, 595 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index c4207cf9bb17..ef3b5cb40d16 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -150,6 +150,8 @@ config ARM64 select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN select HAVE_ARCH_KASAN_SW_TAGS if HAVE_ARCH_KASAN select HAVE_ARCH_KASAN_HW_TAGS if (HAVE_ARCH_KASAN && ARM64_MTE) + # Some instrumentation may be unsound, hence EXPERT + select HAVE_ARCH_KCSAN if EXPERT select HAVE_ARCH_KFENCE select HAVE_ARCH_KGDB select HAVE_ARCH_MMAP_RND_BITS @@ -1545,6 +1547,12 @@ endmenu menu "ARMv8.2 architectural features" +config AS_HAS_ARMV8_2 + def_bool $(cc-option,-Wa$(comma)-march=armv8.2-a) + +config AS_HAS_SHA3 + def_bool $(as-instr,.arch armv8.2-a+sha3) + config ARM64_PMEM bool "Enable support for persistent memory" select ARCH_HAS_PMEM_API diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index e8cfc5868aa8..2f1de88651e6 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -58,6 +58,11 @@ stack_protector_prepare: prepare0 include/generated/asm-offsets.h)) endif +ifeq ($(CONFIG_AS_HAS_ARMV8_2), y) +# make sure to pass the newest target architecture to -march. +asm-arch := armv8.2-a +endif + # Ensure that if the compiler supports branch protection we default it # off, this will be overridden if we are using branch protection. branch-prot-flags-y += $(call cc-option,-mbranch-protection=none) diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S index b495de22bb38..ff01f0167ba2 100644 --- a/arch/arm64/crypto/aes-modes.S +++ b/arch/arm64/crypto/aes-modes.S @@ -363,15 +363,15 @@ ST5( mov v4.16b, vctr.16b ) adr x16, 1f sub x16, x16, x12, lsl #3 br x16 - hint 34 // bti c + bti c mov v0.d[0], vctr.d[0] - hint 34 // bti c + bti c mov v1.d[0], vctr.d[0] - hint 34 // bti c + bti c mov v2.d[0], vctr.d[0] - hint 34 // bti c + bti c mov v3.d[0], vctr.d[0] -ST5( hint 34 ) +ST5( bti c ) ST5( mov v4.d[0], vctr.d[0] ) 1: b 2f .previous diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 136d13f3d6e9..e8bd0af0141c 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -791,6 +791,16 @@ alternative_endif .endm /* + * Branch Target Identifier (BTI) + */ + .macro bti, targets + .equ .L__bti_targets_c, 34 + .equ .L__bti_targets_j, 36 + .equ .L__bti_targets_jc,38 + hint #.L__bti_targets_\targets + .endm + +/* * This macro emits a program property note section identifying * architecture features which require special handling, mainly for * use in assembly files included in the VDSO. 
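A note on the new "bti" assembler macro added to assembler.h above: the BTI instructions sit in the ARMv8 HINT space ("bti c" is HINT #34, "bti j" is #36, "bti jc" is #38), so they assemble without -march=armv8.5-a and execute as NOPs on cores without BTI. That is what lets the aes-modes.S hunk replace the opaque "hint 34 // bti c" spelling with a readable "bti c". A minimal C-level illustration of the same encoding; this is a sketch, not part of the patch, and the function name is made up:

	/* Emit a BTI c landing pad using the raw HINT encoding,
	 * which even pre-ARMv8.5 assemblers accept. */
	static inline void emit_bti_c(void)
	{
		asm volatile("hint #34");	/* equivalent to "bti c" */
	}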
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h index 13869b76b58c..fe0db8d416fb 100644 --- a/arch/arm64/include/asm/atomic_ll_sc.h +++ b/arch/arm64/include/asm/atomic_ll_sc.h @@ -44,11 +44,11 @@ __ll_sc_atomic_##op(int i, atomic_t *v) \ \ asm volatile("// atomic_" #op "\n" \ __LL_SC_FALLBACK( \ -" prfm pstl1strm, %2\n" \ -"1: ldxr %w0, %2\n" \ -" " #asm_op " %w0, %w0, %w3\n" \ -" stxr %w1, %w0, %2\n" \ -" cbnz %w1, 1b\n") \ + " prfm pstl1strm, %2\n" \ + "1: ldxr %w0, %2\n" \ + " " #asm_op " %w0, %w0, %w3\n" \ + " stxr %w1, %w0, %2\n" \ + " cbnz %w1, 1b\n") \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ : __stringify(constraint) "r" (i)); \ } @@ -62,12 +62,12 @@ __ll_sc_atomic_##op##_return##name(int i, atomic_t *v) \ \ asm volatile("// atomic_" #op "_return" #name "\n" \ __LL_SC_FALLBACK( \ -" prfm pstl1strm, %2\n" \ -"1: ld" #acq "xr %w0, %2\n" \ -" " #asm_op " %w0, %w0, %w3\n" \ -" st" #rel "xr %w1, %w0, %2\n" \ -" cbnz %w1, 1b\n" \ -" " #mb ) \ + " prfm pstl1strm, %2\n" \ + "1: ld" #acq "xr %w0, %2\n" \ + " " #asm_op " %w0, %w0, %w3\n" \ + " st" #rel "xr %w1, %w0, %2\n" \ + " cbnz %w1, 1b\n" \ + " " #mb ) \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ : __stringify(constraint) "r" (i) \ : cl); \ @@ -84,12 +84,12 @@ __ll_sc_atomic_fetch_##op##name(int i, atomic_t *v) \ \ asm volatile("// atomic_fetch_" #op #name "\n" \ __LL_SC_FALLBACK( \ -" prfm pstl1strm, %3\n" \ -"1: ld" #acq "xr %w0, %3\n" \ -" " #asm_op " %w1, %w0, %w4\n" \ -" st" #rel "xr %w2, %w1, %3\n" \ -" cbnz %w2, 1b\n" \ -" " #mb ) \ + " prfm pstl1strm, %3\n" \ + "1: ld" #acq "xr %w0, %3\n" \ + " " #asm_op " %w1, %w0, %w4\n" \ + " st" #rel "xr %w2, %w1, %3\n" \ + " cbnz %w2, 1b\n" \ + " " #mb ) \ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \ : __stringify(constraint) "r" (i) \ : cl); \ @@ -143,11 +143,11 @@ __ll_sc_atomic64_##op(s64 i, atomic64_t *v) \ \ asm volatile("// atomic64_" #op "\n" \ __LL_SC_FALLBACK( \ -" prfm pstl1strm, %2\n" \ -"1: ldxr %0, %2\n" \ -" " #asm_op " %0, %0, %3\n" \ -" stxr %w1, %0, %2\n" \ -" cbnz %w1, 1b") \ + " prfm pstl1strm, %2\n" \ + "1: ldxr %0, %2\n" \ + " " #asm_op " %0, %0, %3\n" \ + " stxr %w1, %0, %2\n" \ + " cbnz %w1, 1b") \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ : __stringify(constraint) "r" (i)); \ } @@ -161,12 +161,12 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \ \ asm volatile("// atomic64_" #op "_return" #name "\n" \ __LL_SC_FALLBACK( \ -" prfm pstl1strm, %2\n" \ -"1: ld" #acq "xr %0, %2\n" \ -" " #asm_op " %0, %0, %3\n" \ -" st" #rel "xr %w1, %0, %2\n" \ -" cbnz %w1, 1b\n" \ -" " #mb ) \ + " prfm pstl1strm, %2\n" \ + "1: ld" #acq "xr %0, %2\n" \ + " " #asm_op " %0, %0, %3\n" \ + " st" #rel "xr %w1, %0, %2\n" \ + " cbnz %w1, 1b\n" \ + " " #mb ) \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ : __stringify(constraint) "r" (i) \ : cl); \ @@ -176,19 +176,19 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \ #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\ static inline long \ -__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \ +__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \ { \ s64 result, val; \ unsigned long tmp; \ \ asm volatile("// atomic64_fetch_" #op #name "\n" \ __LL_SC_FALLBACK( \ -" prfm pstl1strm, %3\n" \ -"1: ld" #acq "xr %0, %3\n" \ -" " #asm_op " %1, %0, %4\n" \ -" st" #rel "xr %w2, %1, %3\n" \ -" cbnz %w2, 1b\n" \ -" " #mb ) \ + " prfm pstl1strm, %3\n" \ + "1: ld" #acq "xr %0, %3\n" \ + " " #asm_op " %1, %0, 
%4\n" \ + " st" #rel "xr %w2, %1, %3\n" \ + " cbnz %w2, 1b\n" \ + " " #mb ) \ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \ : __stringify(constraint) "r" (i) \ : cl); \ @@ -241,14 +241,14 @@ __ll_sc_atomic64_dec_if_positive(atomic64_t *v) asm volatile("// atomic64_dec_if_positive\n" __LL_SC_FALLBACK( -" prfm pstl1strm, %2\n" -"1: ldxr %0, %2\n" -" subs %0, %0, #1\n" -" b.lt 2f\n" -" stlxr %w1, %0, %2\n" -" cbnz %w1, 1b\n" -" dmb ish\n" -"2:") + " prfm pstl1strm, %2\n" + "1: ldxr %0, %2\n" + " subs %0, %0, #1\n" + " b.lt 2f\n" + " stlxr %w1, %0, %2\n" + " cbnz %w1, 1b\n" + " dmb ish\n" + "2:") : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) : : "cc", "memory"); diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h index da3280f639cd..d955ade5df7c 100644 --- a/arch/arm64/include/asm/atomic_lse.h +++ b/arch/arm64/include/asm/atomic_lse.h @@ -11,13 +11,13 @@ #define __ASM_ATOMIC_LSE_H #define ATOMIC_OP(op, asm_op) \ -static inline void __lse_atomic_##op(int i, atomic_t *v) \ +static inline void __lse_atomic_##op(int i, atomic_t *v) \ { \ asm volatile( \ __LSE_PREAMBLE \ -" " #asm_op " %w[i], %[v]\n" \ - : [i] "+r" (i), [v] "+Q" (v->counter) \ - : "r" (v)); \ + " " #asm_op " %w[i], %[v]\n" \ + : [v] "+Q" (v->counter) \ + : [i] "r" (i)); \ } ATOMIC_OP(andnot, stclr) @@ -25,19 +25,27 @@ ATOMIC_OP(or, stset) ATOMIC_OP(xor, steor) ATOMIC_OP(add, stadd) +static inline void __lse_atomic_sub(int i, atomic_t *v) +{ + __lse_atomic_add(-i, v); +} + #undef ATOMIC_OP #define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \ static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v) \ { \ + int old; \ + \ asm volatile( \ __LSE_PREAMBLE \ -" " #asm_op #mb " %w[i], %w[i], %[v]" \ - : [i] "+r" (i), [v] "+Q" (v->counter) \ - : "r" (v) \ + " " #asm_op #mb " %w[i], %w[old], %[v]" \ + : [v] "+Q" (v->counter), \ + [old] "=r" (old) \ + : [i] "r" (i) \ : cl); \ \ - return i; \ + return old; \ } #define ATOMIC_FETCH_OPS(op, asm_op) \ @@ -54,51 +62,46 @@ ATOMIC_FETCH_OPS(add, ldadd) #undef ATOMIC_FETCH_OP #undef ATOMIC_FETCH_OPS -#define ATOMIC_OP_ADD_RETURN(name, mb, cl...) 
\ +#define ATOMIC_FETCH_OP_SUB(name) \ +static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v) \ +{ \ + return __lse_atomic_fetch_add##name(-i, v); \ +} + +ATOMIC_FETCH_OP_SUB(_relaxed) +ATOMIC_FETCH_OP_SUB(_acquire) +ATOMIC_FETCH_OP_SUB(_release) +ATOMIC_FETCH_OP_SUB( ) + +#undef ATOMIC_FETCH_OP_SUB + +#define ATOMIC_OP_ADD_SUB_RETURN(name) \ static inline int __lse_atomic_add_return##name(int i, atomic_t *v) \ { \ - u32 tmp; \ - \ - asm volatile( \ - __LSE_PREAMBLE \ - " ldadd" #mb " %w[i], %w[tmp], %[v]\n" \ - " add %w[i], %w[i], %w[tmp]" \ - : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \ - : "r" (v) \ - : cl); \ + return __lse_atomic_fetch_add##name(i, v) + i; \ +} \ \ - return i; \ +static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \ +{ \ + return __lse_atomic_fetch_sub(i, v) - i; \ } -ATOMIC_OP_ADD_RETURN(_relaxed, ) -ATOMIC_OP_ADD_RETURN(_acquire, a, "memory") -ATOMIC_OP_ADD_RETURN(_release, l, "memory") -ATOMIC_OP_ADD_RETURN( , al, "memory") +ATOMIC_OP_ADD_SUB_RETURN(_relaxed) +ATOMIC_OP_ADD_SUB_RETURN(_acquire) +ATOMIC_OP_ADD_SUB_RETURN(_release) +ATOMIC_OP_ADD_SUB_RETURN( ) -#undef ATOMIC_OP_ADD_RETURN +#undef ATOMIC_OP_ADD_SUB_RETURN static inline void __lse_atomic_and(int i, atomic_t *v) { - asm volatile( - __LSE_PREAMBLE - " mvn %w[i], %w[i]\n" - " stclr %w[i], %[v]" - : [i] "+&r" (i), [v] "+Q" (v->counter) - : "r" (v)); + return __lse_atomic_andnot(~i, v); } #define ATOMIC_FETCH_OP_AND(name, mb, cl...) \ static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v) \ { \ - asm volatile( \ - __LSE_PREAMBLE \ - " mvn %w[i], %w[i]\n" \ - " ldclr" #mb " %w[i], %w[i], %[v]" \ - : [i] "+&r" (i), [v] "+Q" (v->counter) \ - : "r" (v) \ - : cl); \ - \ - return i; \ + return __lse_atomic_fetch_andnot##name(~i, v); \ } ATOMIC_FETCH_OP_AND(_relaxed, ) @@ -108,69 +111,14 @@ ATOMIC_FETCH_OP_AND( , al, "memory") #undef ATOMIC_FETCH_OP_AND -static inline void __lse_atomic_sub(int i, atomic_t *v) -{ - asm volatile( - __LSE_PREAMBLE - " neg %w[i], %w[i]\n" - " stadd %w[i], %[v]" - : [i] "+&r" (i), [v] "+Q" (v->counter) - : "r" (v)); -} - -#define ATOMIC_OP_SUB_RETURN(name, mb, cl...) \ -static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \ -{ \ - u32 tmp; \ - \ - asm volatile( \ - __LSE_PREAMBLE \ - " neg %w[i], %w[i]\n" \ - " ldadd" #mb " %w[i], %w[tmp], %[v]\n" \ - " add %w[i], %w[i], %w[tmp]" \ - : [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \ - : "r" (v) \ - : cl); \ - \ - return i; \ -} - -ATOMIC_OP_SUB_RETURN(_relaxed, ) -ATOMIC_OP_SUB_RETURN(_acquire, a, "memory") -ATOMIC_OP_SUB_RETURN(_release, l, "memory") -ATOMIC_OP_SUB_RETURN( , al, "memory") - -#undef ATOMIC_OP_SUB_RETURN - -#define ATOMIC_FETCH_OP_SUB(name, mb, cl...) 
\ -static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v) \ -{ \ - asm volatile( \ - __LSE_PREAMBLE \ - " neg %w[i], %w[i]\n" \ - " ldadd" #mb " %w[i], %w[i], %[v]" \ - : [i] "+&r" (i), [v] "+Q" (v->counter) \ - : "r" (v) \ - : cl); \ - \ - return i; \ -} - -ATOMIC_FETCH_OP_SUB(_relaxed, ) -ATOMIC_FETCH_OP_SUB(_acquire, a, "memory") -ATOMIC_FETCH_OP_SUB(_release, l, "memory") -ATOMIC_FETCH_OP_SUB( , al, "memory") - -#undef ATOMIC_FETCH_OP_SUB - #define ATOMIC64_OP(op, asm_op) \ static inline void __lse_atomic64_##op(s64 i, atomic64_t *v) \ { \ asm volatile( \ __LSE_PREAMBLE \ -" " #asm_op " %[i], %[v]\n" \ - : [i] "+r" (i), [v] "+Q" (v->counter) \ - : "r" (v)); \ + " " #asm_op " %[i], %[v]\n" \ + : [v] "+Q" (v->counter) \ + : [i] "r" (i)); \ } ATOMIC64_OP(andnot, stclr) @@ -178,19 +126,27 @@ ATOMIC64_OP(or, stset) ATOMIC64_OP(xor, steor) ATOMIC64_OP(add, stadd) +static inline void __lse_atomic64_sub(s64 i, atomic64_t *v) +{ + __lse_atomic64_add(-i, v); +} + #undef ATOMIC64_OP #define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \ static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\ { \ + s64 old; \ + \ asm volatile( \ __LSE_PREAMBLE \ -" " #asm_op #mb " %[i], %[i], %[v]" \ - : [i] "+r" (i), [v] "+Q" (v->counter) \ - : "r" (v) \ + " " #asm_op #mb " %[i], %[old], %[v]" \ + : [v] "+Q" (v->counter), \ + [old] "=r" (old) \ + : [i] "r" (i) \ : cl); \ \ - return i; \ + return old; \ } #define ATOMIC64_FETCH_OPS(op, asm_op) \ @@ -207,51 +163,46 @@ ATOMIC64_FETCH_OPS(add, ldadd) #undef ATOMIC64_FETCH_OP #undef ATOMIC64_FETCH_OPS -#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \ +#define ATOMIC64_FETCH_OP_SUB(name) \ +static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \ +{ \ + return __lse_atomic64_fetch_add##name(-i, v); \ +} + +ATOMIC64_FETCH_OP_SUB(_relaxed) +ATOMIC64_FETCH_OP_SUB(_acquire) +ATOMIC64_FETCH_OP_SUB(_release) +ATOMIC64_FETCH_OP_SUB( ) + +#undef ATOMIC64_FETCH_OP_SUB + +#define ATOMIC64_OP_ADD_SUB_RETURN(name) \ static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\ { \ - unsigned long tmp; \ - \ - asm volatile( \ - __LSE_PREAMBLE \ - " ldadd" #mb " %[i], %x[tmp], %[v]\n" \ - " add %[i], %[i], %x[tmp]" \ - : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \ - : "r" (v) \ - : cl); \ + return __lse_atomic64_fetch_add##name(i, v) + i; \ +} \ \ - return i; \ +static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)\ +{ \ + return __lse_atomic64_fetch_sub##name(i, v) - i; \ } -ATOMIC64_OP_ADD_RETURN(_relaxed, ) -ATOMIC64_OP_ADD_RETURN(_acquire, a, "memory") -ATOMIC64_OP_ADD_RETURN(_release, l, "memory") -ATOMIC64_OP_ADD_RETURN( , al, "memory") +ATOMIC64_OP_ADD_SUB_RETURN(_relaxed) +ATOMIC64_OP_ADD_SUB_RETURN(_acquire) +ATOMIC64_OP_ADD_SUB_RETURN(_release) +ATOMIC64_OP_ADD_SUB_RETURN( ) -#undef ATOMIC64_OP_ADD_RETURN +#undef ATOMIC64_OP_ADD_SUB_RETURN static inline void __lse_atomic64_and(s64 i, atomic64_t *v) { - asm volatile( - __LSE_PREAMBLE - " mvn %[i], %[i]\n" - " stclr %[i], %[v]" - : [i] "+&r" (i), [v] "+Q" (v->counter) - : "r" (v)); + return __lse_atomic64_andnot(~i, v); } #define ATOMIC64_FETCH_OP_AND(name, mb, cl...) 
\ static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \ { \ - asm volatile( \ - __LSE_PREAMBLE \ - " mvn %[i], %[i]\n" \ - " ldclr" #mb " %[i], %[i], %[v]" \ - : [i] "+&r" (i), [v] "+Q" (v->counter) \ - : "r" (v) \ - : cl); \ - \ - return i; \ + return __lse_atomic64_fetch_andnot##name(~i, v); \ } ATOMIC64_FETCH_OP_AND(_relaxed, ) @@ -261,61 +212,6 @@ ATOMIC64_FETCH_OP_AND( , al, "memory") #undef ATOMIC64_FETCH_OP_AND -static inline void __lse_atomic64_sub(s64 i, atomic64_t *v) -{ - asm volatile( - __LSE_PREAMBLE - " neg %[i], %[i]\n" - " stadd %[i], %[v]" - : [i] "+&r" (i), [v] "+Q" (v->counter) - : "r" (v)); -} - -#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) \ -static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v) \ -{ \ - unsigned long tmp; \ - \ - asm volatile( \ - __LSE_PREAMBLE \ - " neg %[i], %[i]\n" \ - " ldadd" #mb " %[i], %x[tmp], %[v]\n" \ - " add %[i], %[i], %x[tmp]" \ - : [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \ - : "r" (v) \ - : cl); \ - \ - return i; \ -} - -ATOMIC64_OP_SUB_RETURN(_relaxed, ) -ATOMIC64_OP_SUB_RETURN(_acquire, a, "memory") -ATOMIC64_OP_SUB_RETURN(_release, l, "memory") -ATOMIC64_OP_SUB_RETURN( , al, "memory") - -#undef ATOMIC64_OP_SUB_RETURN - -#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \ -static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \ -{ \ - asm volatile( \ - __LSE_PREAMBLE \ - " neg %[i], %[i]\n" \ - " ldadd" #mb " %[i], %[i], %[v]" \ - : [i] "+&r" (i), [v] "+Q" (v->counter) \ - : "r" (v) \ - : cl); \ - \ - return i; \ -} - -ATOMIC64_FETCH_OP_SUB(_relaxed, ) -ATOMIC64_FETCH_OP_SUB(_acquire, a, "memory") -ATOMIC64_FETCH_OP_SUB(_release, l, "memory") -ATOMIC64_FETCH_OP_SUB( , al, "memory") - -#undef ATOMIC64_FETCH_OP_SUB - static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v) { unsigned long tmp; diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 1c5a00598458..62217be36217 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -26,6 +26,14 @@ #define __tsb_csync() asm volatile("hint #18" : : : "memory") #define csdb() asm volatile("hint #20" : : : "memory") +/* + * Data Gathering Hint: + * This instruction prevents merging memory accesses with Normal-NC or + * Device-GRE attributes before the hint instruction with any memory accesses + * appearing after the hint instruction. 
+ */ +#define dgh() asm volatile("hint #6" : : : "memory") + #ifdef CONFIG_ARM64_PSEUDO_NMI #define pmr_sync() \ do { \ @@ -46,6 +54,7 @@ #define dma_rmb() dmb(oshld) #define dma_wmb() dmb(oshst) +#define io_stop_wc() dgh() #define tsb_csync() \ do { \ diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h index 0f6d16faa540..a58e366f0b07 100644 --- a/arch/arm64/include/asm/cpu.h +++ b/arch/arm64/include/asm/cpu.h @@ -51,6 +51,7 @@ struct cpuinfo_arm64 { u64 reg_id_aa64dfr1; u64 reg_id_aa64isar0; u64 reg_id_aa64isar1; + u64 reg_id_aa64isar2; u64 reg_id_aa64mmfr0; u64 reg_id_aa64mmfr1; u64 reg_id_aa64mmfr2; diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index dbb4b30a5648..cb24385e3632 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -51,8 +51,8 @@ extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state, extern void fpsimd_flush_task_state(struct task_struct *target); extern void fpsimd_save_and_flush_cpu_state(void); -/* Maximum VL that SVE VL-agnostic software can transparently support */ -#define SVE_VL_ARCH_MAX 0x100 +/* Maximum VL that SVE/SME VL-agnostic software can transparently support */ +#define VL_ARCH_MAX 0x100 /* Offset of FFR in the SVE register dump */ static inline size_t sve_ffr_offset(int vl) @@ -122,7 +122,7 @@ extern void fpsimd_sync_to_sve(struct task_struct *task); extern void sve_sync_to_fpsimd(struct task_struct *task); extern void sve_sync_from_fpsimd_zeropad(struct task_struct *task); -extern int sve_set_vector_length(struct task_struct *task, +extern int vec_set_vector_length(struct task_struct *task, enum vec_type type, unsigned long vl, unsigned long flags); extern int sve_set_current_vl(unsigned long arg); diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h index b100e0055eab..f68fbb207473 100644 --- a/arch/arm64/include/asm/hwcap.h +++ b/arch/arm64/include/asm/hwcap.h @@ -106,6 +106,8 @@ #define KERNEL_HWCAP_BTI __khwcap2_feature(BTI) #define KERNEL_HWCAP_MTE __khwcap2_feature(MTE) #define KERNEL_HWCAP_ECV __khwcap2_feature(ECV) +#define KERNEL_HWCAP_AFP __khwcap2_feature(AFP) +#define KERNEL_HWCAP_RPRES __khwcap2_feature(RPRES) /* * This yields a mask that user programs can use to figure out what diff --git a/arch/arm64/include/asm/linkage.h b/arch/arm64/include/asm/linkage.h index 9906541a6861..b77e9b3f5371 100644 --- a/arch/arm64/include/asm/linkage.h +++ b/arch/arm64/include/asm/linkage.h @@ -1,48 +1,43 @@ #ifndef __ASM_LINKAGE_H #define __ASM_LINKAGE_H +#ifdef __ASSEMBLY__ +#include <asm/assembler.h> +#endif + #define __ALIGN .align 2 #define __ALIGN_STR ".align 2" -#if defined(CONFIG_ARM64_BTI_KERNEL) && defined(__aarch64__) - -/* - * Since current versions of gas reject the BTI instruction unless we - * set the architecture version to v8.5 we use the hint instruction - * instead. - */ -#define BTI_C hint 34 ; - /* - * When using in-kernel BTI we need to ensure that PCS-conformant assembly - * functions have suitable annotations. Override SYM_FUNC_START to insert - * a BTI landing pad at the start of everything. + * When using in-kernel BTI we need to ensure that PCS-conformant + * assembly functions have suitable annotations. Override + * SYM_FUNC_START to insert a BTI landing pad at the start of + * everything, the override is done unconditionally so we're more + * likely to notice any drift from the overridden definitions. 
*/ #define SYM_FUNC_START(name) \ SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) \ - BTI_C + bti c ; #define SYM_FUNC_START_NOALIGN(name) \ SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE) \ - BTI_C + bti c ; #define SYM_FUNC_START_LOCAL(name) \ SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN) \ - BTI_C + bti c ; #define SYM_FUNC_START_LOCAL_NOALIGN(name) \ SYM_START(name, SYM_L_LOCAL, SYM_A_NONE) \ - BTI_C + bti c ; #define SYM_FUNC_START_WEAK(name) \ SYM_START(name, SYM_L_WEAK, SYM_A_ALIGN) \ - BTI_C + bti c ; #define SYM_FUNC_START_WEAK_NOALIGN(name) \ SYM_START(name, SYM_L_WEAK, SYM_A_NONE) \ - BTI_C - -#endif + bti c ; /* * Annotate a function as position independent, i.e., safe to be called before diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h index 478b9bcf69ad..e4704a403237 100644 --- a/arch/arm64/include/asm/mte-kasan.h +++ b/arch/arm64/include/asm/mte-kasan.h @@ -84,10 +84,12 @@ static inline void __dc_gzva(u64 p) static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag, bool init) { - u64 curr, mask, dczid_bs, end1, end2, end3; + u64 curr, mask, dczid, dczid_bs, dczid_dzp, end1, end2, end3; /* Read DC G(Z)VA block size from the system register. */ - dczid_bs = 4ul << (read_cpuid(DCZID_EL0) & 0xf); + dczid = read_cpuid(DCZID_EL0); + dczid_bs = 4ul << (dczid & 0xf); + dczid_dzp = (dczid >> 4) & 1; curr = (u64)__tag_set(addr, tag); mask = dczid_bs - 1; @@ -106,7 +108,7 @@ static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag, */ #define SET_MEMTAG_RANGE(stg_post, dc_gva) \ do { \ - if (size >= 2 * dczid_bs) { \ + if (!dczid_dzp && size >= 2 * dczid_bs) {\ do { \ curr = stg_post(curr); \ } while (curr < end1); \ diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h index 6564a01cc085..e77cdef9ca29 100644 --- a/arch/arm64/include/asm/stacktrace.h +++ b/arch/arm64/include/asm/stacktrace.h @@ -47,6 +47,10 @@ struct stack_info { * @prev_type: The type of stack this frame record was on, or a synthetic * value of STACK_TYPE_UNKNOWN. This is used to detect a * transition from one stack to another. + * + * @kr_cur: When KRETPROBES is selected, holds the kretprobe instance + * associated with the most recently encountered replacement lr + * value. 
*/ struct stackframe { unsigned long fp; @@ -59,9 +63,6 @@ struct stackframe { #endif }; -extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame); -extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame, - bool (*fn)(void *, unsigned long), void *data); extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk, const char *loglvl); @@ -146,7 +147,4 @@ static inline bool on_accessible_stack(const struct task_struct *tsk, return false; } -void start_backtrace(struct stackframe *frame, unsigned long fp, - unsigned long pc); - #endif /* __ASM_STACKTRACE_H */ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 16b3f1a1d468..4704f5893458 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -182,6 +182,7 @@ #define SYS_ID_AA64ISAR0_EL1 sys_reg(3, 0, 0, 6, 0) #define SYS_ID_AA64ISAR1_EL1 sys_reg(3, 0, 0, 6, 1) +#define SYS_ID_AA64ISAR2_EL1 sys_reg(3, 0, 0, 6, 2) #define SYS_ID_AA64MMFR0_EL1 sys_reg(3, 0, 0, 7, 0) #define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1) @@ -771,6 +772,20 @@ #define ID_AA64ISAR1_GPI_NI 0x0 #define ID_AA64ISAR1_GPI_IMP_DEF 0x1 +/* id_aa64isar2 */ +#define ID_AA64ISAR2_RPRES_SHIFT 4 +#define ID_AA64ISAR2_WFXT_SHIFT 0 + +#define ID_AA64ISAR2_RPRES_8BIT 0x0 +#define ID_AA64ISAR2_RPRES_12BIT 0x1 +/* + * Value 0x1 has been removed from the architecture, and is + * reserved, but has not yet been removed from the ARM ARM + * as of ARM DDI 0487G.b. + */ +#define ID_AA64ISAR2_WFXT_NI 0x0 +#define ID_AA64ISAR2_WFXT_SUPPORTED 0x2 + /* id_aa64pfr0 */ #define ID_AA64PFR0_CSV3_SHIFT 60 #define ID_AA64PFR0_CSV2_SHIFT 56 @@ -889,6 +904,7 @@ #endif /* id_aa64mmfr1 */ +#define ID_AA64MMFR1_AFP_SHIFT 44 #define ID_AA64MMFR1_ETS_SHIFT 36 #define ID_AA64MMFR1_TWED_SHIFT 32 #define ID_AA64MMFR1_XNX_SHIFT 28 diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h index 7b23b16f21ce..f03731847d9d 100644 --- a/arch/arm64/include/uapi/asm/hwcap.h +++ b/arch/arm64/include/uapi/asm/hwcap.h @@ -76,5 +76,7 @@ #define HWCAP2_BTI (1 << 17) #define HWCAP2_MTE (1 << 18) #define HWCAP2_ECV (1 << 19) +#define HWCAP2_AFP (1 << 20) +#define HWCAP2_RPRES (1 << 21) #endif /* _UAPI__ASM_HWCAP_H */ diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c index f3851724fe35..e4dea8db6924 100644 --- a/arch/arm64/kernel/acpi.c +++ b/arch/arm64/kernel/acpi.c @@ -22,6 +22,7 @@ #include <linux/irq_work.h> #include <linux/memblock.h> #include <linux/of_fdt.h> +#include <linux/libfdt.h> #include <linux/smp.h> #include <linux/serial_core.h> #include <linux/pgtable.h> @@ -62,29 +63,22 @@ static int __init parse_acpi(char *arg) } early_param("acpi", parse_acpi); -static int __init dt_scan_depth1_nodes(unsigned long node, - const char *uname, int depth, - void *data) +static bool __init dt_is_stub(void) { - /* - * Ignore anything not directly under the root node; we'll - * catch its parent instead. 
- */ - if (depth != 1) - return 0; + int node; - if (strcmp(uname, "chosen") == 0) - return 0; + fdt_for_each_subnode(node, initial_boot_params, 0) { + const char *name = fdt_get_name(initial_boot_params, node, NULL); + if (strcmp(name, "chosen") == 0) + continue; + if (strcmp(name, "hypervisor") == 0 && + of_flat_dt_is_compatible(node, "xen,xen")) + continue; - if (strcmp(uname, "hypervisor") == 0 && - of_flat_dt_is_compatible(node, "xen,xen")) - return 0; + return false; + } - /* - * This node at depth 1 is neither a chosen node nor a xen node, - * which we do not expect. - */ - return 1; + return true; } /* @@ -205,8 +199,7 @@ void __init acpi_boot_table_init(void) * and ACPI has not been [force] enabled (acpi=on|force) */ if (param_acpi_off || - (!param_acpi_on && !param_acpi_force && - of_scan_flat_dt(dt_scan_depth1_nodes, NULL))) + (!param_acpi_on && !param_acpi_force && !dt_is_stub())) goto done; /* diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 6f3e677d88f1..a46ab3b1c4d5 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -225,6 +225,11 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = { ARM64_FTR_END, }; +static const struct arm64_ftr_bits ftr_id_aa64isar2[] = { + ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_RPRES_SHIFT, 4, 0), + ARM64_FTR_END, +}; + static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0), @@ -325,6 +330,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { }; static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = { + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_AFP_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_ETS_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_TWED_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_XNX_SHIFT, 4, 0), @@ -637,6 +643,7 @@ static const struct __ftr_reg_entry { ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0), ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1, &id_aa64isar1_override), + ARM64_FTR_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2), /* Op1 = 0, CRn = 0, CRm = 7 */ ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0), @@ -933,6 +940,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info) init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1); init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0); init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1); + init_cpu_ftr_reg(SYS_ID_AA64ISAR2_EL1, info->reg_id_aa64isar2); init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0); init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1); init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2); @@ -1151,6 +1159,8 @@ void update_cpu_features(int cpu, info->reg_id_aa64isar0, boot->reg_id_aa64isar0); taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu, info->reg_id_aa64isar1, boot->reg_id_aa64isar1); + taint |= check_update_ftr_reg(SYS_ID_AA64ISAR2_EL1, cpu, + info->reg_id_aa64isar2, boot->reg_id_aa64isar2); /* * Differing PARange support is fine as long as all peripherals and @@ -1272,6 +1282,7 @@ u64 __read_sysreg_by_encoding(u32 sys_id) read_sysreg_case(SYS_ID_AA64MMFR2_EL1); read_sysreg_case(SYS_ID_AA64ISAR0_EL1); read_sysreg_case(SYS_ID_AA64ISAR1_EL1); + 
read_sysreg_case(SYS_ID_AA64ISAR2_EL1); read_sysreg_case(SYS_CNTFRQ_EL0); read_sysreg_case(SYS_CTR_EL0); @@ -2476,6 +2487,8 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE), #endif /* CONFIG_ARM64_MTE */ HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_ECV_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV), + HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_AFP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP), + HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_RPRES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES), {}, }; diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 6e27b759056a..591c18a889a5 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -95,6 +95,8 @@ static const char *const hwcap_str[] = { [KERNEL_HWCAP_BTI] = "bti", [KERNEL_HWCAP_MTE] = "mte", [KERNEL_HWCAP_ECV] = "ecv", + [KERNEL_HWCAP_AFP] = "afp", + [KERNEL_HWCAP_RPRES] = "rpres", }; #ifdef CONFIG_COMPAT @@ -391,6 +393,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1); info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1); info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1); + info->reg_id_aa64isar2 = read_cpuid(ID_AA64ISAR2_EL1); info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1); info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1); info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1); diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S index 8cf970d219f5..e535480a4069 100644 --- a/arch/arm64/kernel/entry-ftrace.S +++ b/arch/arm64/kernel/entry-ftrace.S @@ -77,17 +77,13 @@ .endm SYM_CODE_START(ftrace_regs_caller) -#ifdef BTI_C - BTI_C -#endif + bti c ftrace_regs_entry 1 b ftrace_common SYM_CODE_END(ftrace_regs_caller) SYM_CODE_START(ftrace_caller) -#ifdef BTI_C - BTI_C -#endif + bti c ftrace_regs_entry 0 b ftrace_common SYM_CODE_END(ftrace_caller) diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 2f69ae43941d..772ec2ecf488 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -966,8 +966,10 @@ SYM_CODE_START(__sdei_asm_handler) mov sp, x1 mov x1, x0 // address to complete_and_resume - /* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */ - cmp x0, #1 + /* x0 = (x0 <= SDEI_EV_FAILED) ? 
+ * EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME + */ + cmp x0, #SDEI_EV_FAILED mov_q x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE mov_q x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME csel x0, x2, x3, ls diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index fa244c426f61..f2307d6631eb 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -15,6 +15,7 @@ #include <linux/compiler.h> #include <linux/cpu.h> #include <linux/cpu_pm.h> +#include <linux/ctype.h> #include <linux/kernel.h> #include <linux/linkage.h> #include <linux/irqflags.h> @@ -406,12 +407,13 @@ static unsigned int find_supported_vector_length(enum vec_type type, #if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL) -static int sve_proc_do_default_vl(struct ctl_table *table, int write, +static int vec_proc_do_default_vl(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { - struct vl_info *info = &vl_info[ARM64_VEC_SVE]; + struct vl_info *info = table->extra1; + enum vec_type type = info->type; int ret; - int vl = get_sve_default_vl(); + int vl = get_default_vl(type); struct ctl_table tmp_table = { .data = &vl, .maxlen = sizeof(vl), @@ -428,7 +430,7 @@ static int sve_proc_do_default_vl(struct ctl_table *table, int write, if (!sve_vl_valid(vl)) return -EINVAL; - set_sve_default_vl(find_supported_vector_length(ARM64_VEC_SVE, vl)); + set_default_vl(type, find_supported_vector_length(type, vl)); return 0; } @@ -436,7 +438,8 @@ static struct ctl_table sve_default_vl_table[] = { { .procname = "sve_default_vector_length", .mode = 0644, - .proc_handler = sve_proc_do_default_vl, + .proc_handler = vec_proc_do_default_vl, + .extra1 = &vl_info[ARM64_VEC_SVE], }, { } }; @@ -629,7 +632,7 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task) __fpsimd_to_sve(sst, fst, vq); } -int sve_set_vector_length(struct task_struct *task, +int vec_set_vector_length(struct task_struct *task, enum vec_type type, unsigned long vl, unsigned long flags) { if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT | @@ -640,33 +643,35 @@ int sve_set_vector_length(struct task_struct *task, return -EINVAL; /* - * Clamp to the maximum vector length that VL-agnostic SVE code can - * work with. A flag may be assigned in the future to allow setting - * of larger vector lengths without confusing older software. + * Clamp to the maximum vector length that VL-agnostic code + * can work with. A flag may be assigned in the future to + * allow setting of larger vector lengths without confusing + * older software. */ - if (vl > SVE_VL_ARCH_MAX) - vl = SVE_VL_ARCH_MAX; + if (vl > VL_ARCH_MAX) + vl = VL_ARCH_MAX; - vl = find_supported_vector_length(ARM64_VEC_SVE, vl); + vl = find_supported_vector_length(type, vl); if (flags & (PR_SVE_VL_INHERIT | PR_SVE_SET_VL_ONEXEC)) - task_set_sve_vl_onexec(task, vl); + task_set_vl_onexec(task, type, vl); else /* Reset VL to system default on next exec: */ - task_set_sve_vl_onexec(task, 0); + task_set_vl_onexec(task, type, 0); /* Only actually set the VL if not deferred: */ if (flags & PR_SVE_SET_VL_ONEXEC) goto out; - if (vl == task_get_sve_vl(task)) + if (vl == task_get_vl(task, type)) goto out; /* * To ensure the FPSIMD bits of the SVE vector registers are preserved, * write any live register state back to task_struct, and convert to a - * non-SVE thread. + * regular FPSIMD thread. Since the vector length can only be changed + * with a syscall we can't be in streaming mode while reconfiguring. 
*/ if (task == current) { get_cpu_fpsimd_context(); @@ -687,10 +692,10 @@ int sve_set_vector_length(struct task_struct *task, */ sve_free(task); - task_set_sve_vl(task, vl); + task_set_vl(task, type, vl); out: - update_tsk_thread_flag(task, TIF_SVE_VL_INHERIT, + update_tsk_thread_flag(task, vec_vl_inherit_flag(type), flags & PR_SVE_VL_INHERIT); return 0; @@ -698,20 +703,21 @@ out: /* * Encode the current vector length and flags for return. - * This is only required for prctl(): ptrace has separate fields + * This is only required for prctl(): ptrace has separate fields. + * SVE and SME use the same bits for _ONEXEC and _INHERIT. * - * flags are as for sve_set_vector_length(). + * flags are as for vec_set_vector_length(). */ -static int sve_prctl_status(unsigned long flags) +static int vec_prctl_status(enum vec_type type, unsigned long flags) { int ret; if (flags & PR_SVE_SET_VL_ONEXEC) - ret = task_get_sve_vl_onexec(current); + ret = task_get_vl_onexec(current, type); else - ret = task_get_sve_vl(current); + ret = task_get_vl(current, type); - if (test_thread_flag(TIF_SVE_VL_INHERIT)) + if (test_thread_flag(vec_vl_inherit_flag(type))) ret |= PR_SVE_VL_INHERIT; return ret; @@ -729,11 +735,11 @@ int sve_set_current_vl(unsigned long arg) if (!system_supports_sve() || is_compat_task()) return -EINVAL; - ret = sve_set_vector_length(current, vl, flags); + ret = vec_set_vector_length(current, ARM64_VEC_SVE, vl, flags); if (ret) return ret; - return sve_prctl_status(flags); + return vec_prctl_status(ARM64_VEC_SVE, flags); } /* PR_SVE_GET_VL */ @@ -742,7 +748,7 @@ int sve_get_current_vl(void) if (!system_supports_sve() || is_compat_task()) return -EINVAL; - return sve_prctl_status(0); + return vec_prctl_status(ARM64_VEC_SVE, 0); } static void vec_probe_vqs(struct vl_info *info, @@ -1107,7 +1113,7 @@ static void fpsimd_flush_thread_vl(enum vec_type type) vl = get_default_vl(type); if (WARN_ON(!sve_vl_valid(vl))) - vl = SVE_VL_MIN; + vl = vl_info[type].min_vl; supported_vl = find_supported_vector_length(type, vl); if (WARN_ON(supported_vl != vl)) @@ -1213,7 +1219,8 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state, /* * Load the userland FPSIMD state of 'current' from memory, but only if the * FPSIMD state already held in the registers is /not/ the most recent FPSIMD - * state of 'current' + * state of 'current'. This is called when we are preparing to return to + * userspace to ensure that userspace sees a good register state. */ void fpsimd_restore_current_state(void) { @@ -1244,7 +1251,9 @@ void fpsimd_restore_current_state(void) /* * Load an updated userland FPSIMD state for 'current' from memory and set the * flag that indicates that the FPSIMD register contents are the most recent - * FPSIMD state of 'current' + * FPSIMD state of 'current'. This is used by the signal code to restore the + * register state when returning from a signal handler in FPSIMD only cases, + * any SVE context will be discarded. */ void fpsimd_update_current_state(struct user_fpsimd_state const *state) { diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index 2758f75d6809..6328308be272 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -7,10 +7,6 @@ * Ubuntu project, hibernation support for mach-dove * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu) * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.) 
- * https://lkml.org/lkml/2010/6/18/4 - * https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html - * https://patchwork.kernel.org/patch/96442/ - * * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> */ #define pr_fmt(x) "hibernate: " x diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index 6fb31c117ebe..e16b248699d5 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -104,13 +104,15 @@ static void *kexec_page_alloc(void *arg) { struct kimage *kimage = (struct kimage *)arg; struct page *page = kimage_alloc_control_pages(kimage, 0); + void *vaddr = NULL; if (!page) return NULL; - memset(page_address(page), 0, PAGE_SIZE); + vaddr = page_address(page); + memset(vaddr, 0, PAGE_SIZE); - return page_address(page); + return vaddr; } int machine_kexec_post_load(struct kimage *kimage) diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c index 4a72c2727309..e9b7d99f4e3a 100644 --- a/arch/arm64/kernel/perf_callchain.c +++ b/arch/arm64/kernel/perf_callchain.c @@ -5,10 +5,10 @@ * Copyright (C) 2015 ARM Limited */ #include <linux/perf_event.h> +#include <linux/stacktrace.h> #include <linux/uaccess.h> #include <asm/pointer_auth.h> -#include <asm/stacktrace.h> struct frame_tail { struct frame_tail __user *fp; @@ -132,30 +132,21 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry, } } -/* - * Gets called by walk_stackframe() for every stackframe. This will be called - * whist unwinding the stackframe and is like a subroutine return so we use - * the PC. - */ static bool callchain_trace(void *data, unsigned long pc) { struct perf_callchain_entry_ctx *entry = data; - perf_callchain_store(entry, pc); - return true; + return perf_callchain_store(entry, pc) == 0; } void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { - struct stackframe frame; - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { /* We don't support guest os callchain now */ return; } - start_backtrace(&frame, regs->regs[29], regs->pc); - walk_stackframe(current, &frame, callchain_trace, entry); + arch_stack_walk(callchain_trace, entry, current, regs); } unsigned long perf_instruction_pointer(struct pt_regs *regs) diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index b4044469527e..cab678ed6618 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -285,15 +285,24 @@ static const struct attribute_group armv8_pmuv3_events_attr_group = { PMU_FORMAT_ATTR(event, "config:0-15"); PMU_FORMAT_ATTR(long, "config1:0"); +PMU_FORMAT_ATTR(rdpmc, "config1:1"); + +static int sysctl_perf_user_access __read_mostly; static inline bool armv8pmu_event_is_64bit(struct perf_event *event) { return event->attr.config1 & 0x1; } +static inline bool armv8pmu_event_want_user_access(struct perf_event *event) +{ + return event->attr.config1 & 0x2; +} + static struct attribute *armv8_pmuv3_format_attrs[] = { &format_attr_event.attr, &format_attr_long.attr, + &format_attr_rdpmc.attr, NULL, }; @@ -362,7 +371,7 @@ static const struct attribute_group armv8_pmuv3_caps_attr_group = { */ #define ARMV8_IDX_CYCLE_COUNTER 0 #define ARMV8_IDX_COUNTER0 1 - +#define ARMV8_IDX_CYCLE_COUNTER_USER 32 /* * We unconditionally enable ARMv8.5-PMU long event counter support @@ -374,18 +383,22 @@ static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu) return (cpu_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_5); } +static inline bool 
armv8pmu_event_has_user_read(struct perf_event *event) +{ + return event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT; +} + /* * We must chain two programmable counters for 64 bit events, * except when we have allocated the 64bit cycle counter (for CPU - * cycles event). This must be called only when the event has - * a counter allocated. + * cycles event) or when user space counter access is enabled. */ static inline bool armv8pmu_event_is_chained(struct perf_event *event) { int idx = event->hw.idx; struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); - return !WARN_ON(idx < 0) && + return !armv8pmu_event_has_user_read(event) && armv8pmu_event_is_64bit(event) && !armv8pmu_has_long_event(cpu_pmu) && (idx != ARMV8_IDX_CYCLE_COUNTER); @@ -718,6 +731,28 @@ static inline u32 armv8pmu_getreset_flags(void) return value; } +static void armv8pmu_disable_user_access(void) +{ + write_sysreg(0, pmuserenr_el0); +} + +static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu) +{ + int i; + struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events); + + /* Clear any unused counters to avoid leaking their contents */ + for_each_clear_bit(i, cpuc->used_mask, cpu_pmu->num_events) { + if (i == ARMV8_IDX_CYCLE_COUNTER) + write_sysreg(0, pmccntr_el0); + else + armv8pmu_write_evcntr(i, 0); + } + + write_sysreg(0, pmuserenr_el0); + write_sysreg(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR, pmuserenr_el0); +} + static void armv8pmu_enable_event(struct perf_event *event) { /* @@ -761,6 +796,14 @@ static void armv8pmu_disable_event(struct perf_event *event) static void armv8pmu_start(struct arm_pmu *cpu_pmu) { + struct perf_event_context *task_ctx = + this_cpu_ptr(cpu_pmu->pmu.pmu_cpu_context)->task_ctx; + + if (sysctl_perf_user_access && task_ctx && task_ctx->nr_user) + armv8pmu_enable_user_access(cpu_pmu); + else + armv8pmu_disable_user_access(); + /* Enable all counters */ armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E); } @@ -878,13 +921,16 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc, if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) { if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask)) return ARMV8_IDX_CYCLE_COUNTER; + else if (armv8pmu_event_is_64bit(event) && + armv8pmu_event_want_user_access(event) && + !armv8pmu_has_long_event(cpu_pmu)) + return -EAGAIN; } /* * Otherwise use events counters */ - if (armv8pmu_event_is_64bit(event) && - !armv8pmu_has_long_event(cpu_pmu)) + if (armv8pmu_event_is_chained(event)) return armv8pmu_get_chain_idx(cpuc, cpu_pmu); else return armv8pmu_get_single_idx(cpuc, cpu_pmu); @@ -900,6 +946,22 @@ static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc, clear_bit(idx - 1, cpuc->used_mask); } +static int armv8pmu_user_event_idx(struct perf_event *event) +{ + if (!sysctl_perf_user_access || !armv8pmu_event_has_user_read(event)) + return 0; + + /* + * We remap the cycle counter index to 32 to + * match the offset applied to the rest of + * the counter indices. + */ + if (event->hw.idx == ARMV8_IDX_CYCLE_COUNTER) + return ARMV8_IDX_CYCLE_COUNTER_USER; + + return event->hw.idx; +} + /* * Add an event filter to a given event. */ @@ -996,6 +1058,25 @@ static int __armv8_pmuv3_map_event(struct perf_event *event, if (armv8pmu_event_is_64bit(event)) event->hw.flags |= ARMPMU_EVT_64BIT; + /* + * User events must be allocated into a single counter, and so + * must not be chained. + * + * Most 64-bit events require long counter support, but 64-bit + * CPU_CYCLES events can be placed into the dedicated cycle + * counter when this is free. 
+ */ + if (armv8pmu_event_want_user_access(event)) { + if (!(event->attach_state & PERF_ATTACH_TASK)) + return -EINVAL; + if (armv8pmu_event_is_64bit(event) && + (hw_event_id != ARMV8_PMUV3_PERFCTR_CPU_CYCLES) && + !armv8pmu_has_long_event(armpmu)) + return -EOPNOTSUPP; + + event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT; + } + /* Only expose micro/arch events supported by this PMU */ if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS) && test_bit(hw_event_id, armpmu->pmceid_bitmap)) { @@ -1104,6 +1185,43 @@ static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu) return probe.present ? 0 : -ENODEV; } +static void armv8pmu_disable_user_access_ipi(void *unused) +{ + armv8pmu_disable_user_access(); +} + +static int armv8pmu_proc_user_access_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos) +{ + int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); + if (ret || !write || sysctl_perf_user_access) + return ret; + + on_each_cpu(armv8pmu_disable_user_access_ipi, NULL, 1); + return 0; +} + +static struct ctl_table armv8_pmu_sysctl_table[] = { + { + .procname = "perf_user_access", + .data = &sysctl_perf_user_access, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = armv8pmu_proc_user_access_handler, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, + { } +}; + +static void armv8_pmu_register_sysctl_table(void) +{ + static u32 tbl_registered = 0; + + if (!cmpxchg_relaxed(&tbl_registered, 0, 1)) + register_sysctl("kernel", armv8_pmu_sysctl_table); +} + static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name, int (*map_event)(struct perf_event *event), const struct attribute_group *events, @@ -1127,6 +1245,8 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name, cpu_pmu->set_event_filter = armv8pmu_set_event_filter; cpu_pmu->filter_match = armv8pmu_filter_match; + cpu_pmu->pmu.event_idx = armv8pmu_user_event_idx; + cpu_pmu->name = name; cpu_pmu->map_event = map_event; cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = events ? @@ -1136,6 +1256,7 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name, cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_CAPS] = caps ? 
caps : &armv8_pmuv3_caps_attr_group; + armv8_pmu_register_sysctl_table(); return 0; } @@ -1145,17 +1266,32 @@ static int armv8_pmu_init_nogroups(struct arm_pmu *cpu_pmu, char *name, return armv8_pmu_init(cpu_pmu, name, map_event, NULL, NULL, NULL); } -static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu) -{ - return armv8_pmu_init_nogroups(cpu_pmu, "armv8_pmuv3", - armv8_pmuv3_map_event); +#define PMUV3_INIT_SIMPLE(name) \ +static int name##_pmu_init(struct arm_pmu *cpu_pmu) \ +{ \ + return armv8_pmu_init_nogroups(cpu_pmu, #name, armv8_pmuv3_map_event);\ } -static int armv8_a34_pmu_init(struct arm_pmu *cpu_pmu) -{ - return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a34", - armv8_pmuv3_map_event); -} +PMUV3_INIT_SIMPLE(armv8_pmuv3) + +PMUV3_INIT_SIMPLE(armv8_cortex_a34) +PMUV3_INIT_SIMPLE(armv8_cortex_a55) +PMUV3_INIT_SIMPLE(armv8_cortex_a65) +PMUV3_INIT_SIMPLE(armv8_cortex_a75) +PMUV3_INIT_SIMPLE(armv8_cortex_a76) +PMUV3_INIT_SIMPLE(armv8_cortex_a77) +PMUV3_INIT_SIMPLE(armv8_cortex_a78) +PMUV3_INIT_SIMPLE(armv9_cortex_a510) +PMUV3_INIT_SIMPLE(armv9_cortex_a710) +PMUV3_INIT_SIMPLE(armv8_cortex_x1) +PMUV3_INIT_SIMPLE(armv9_cortex_x2) +PMUV3_INIT_SIMPLE(armv8_neoverse_e1) +PMUV3_INIT_SIMPLE(armv8_neoverse_n1) +PMUV3_INIT_SIMPLE(armv9_neoverse_n2) +PMUV3_INIT_SIMPLE(armv8_neoverse_v1) + +PMUV3_INIT_SIMPLE(armv8_nvidia_carmel) +PMUV3_INIT_SIMPLE(armv8_nvidia_denver) static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu) { @@ -1169,24 +1305,12 @@ static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu) armv8_a53_map_event); } -static int armv8_a55_pmu_init(struct arm_pmu *cpu_pmu) -{ - return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a55", - armv8_pmuv3_map_event); -} - static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu) { return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a57", armv8_a57_map_event); } -static int armv8_a65_pmu_init(struct arm_pmu *cpu_pmu) -{ - return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a65", - armv8_pmuv3_map_event); -} - static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu) { return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a72", @@ -1199,42 +1323,6 @@ static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu) armv8_a73_map_event); } -static int armv8_a75_pmu_init(struct arm_pmu *cpu_pmu) -{ - return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a75", - armv8_pmuv3_map_event); -} - -static int armv8_a76_pmu_init(struct arm_pmu *cpu_pmu) -{ - return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a76", - armv8_pmuv3_map_event); -} - -static int armv8_a77_pmu_init(struct arm_pmu *cpu_pmu) -{ - return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a77", - armv8_pmuv3_map_event); -} - -static int armv8_a78_pmu_init(struct arm_pmu *cpu_pmu) -{ - return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a78", - armv8_pmuv3_map_event); -} - -static int armv8_e1_pmu_init(struct arm_pmu *cpu_pmu) -{ - return armv8_pmu_init_nogroups(cpu_pmu, "armv8_neoverse_e1", - armv8_pmuv3_map_event); -} - -static int armv8_n1_pmu_init(struct arm_pmu *cpu_pmu) -{ - return armv8_pmu_init_nogroups(cpu_pmu, "armv8_neoverse_n1", - armv8_pmuv3_map_event); -} - static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu) { return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cavium_thunder", @@ -1248,23 +1336,31 @@ static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu) } static const struct of_device_id armv8_pmu_of_device_ids[] = { - {.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_init}, - {.compatible = "arm,cortex-a34-pmu", .data = armv8_a34_pmu_init}, + {.compatible = 
"arm,armv8-pmuv3", .data = armv8_pmuv3_pmu_init}, + {.compatible = "arm,cortex-a34-pmu", .data = armv8_cortex_a34_pmu_init}, {.compatible = "arm,cortex-a35-pmu", .data = armv8_a35_pmu_init}, {.compatible = "arm,cortex-a53-pmu", .data = armv8_a53_pmu_init}, - {.compatible = "arm,cortex-a55-pmu", .data = armv8_a55_pmu_init}, + {.compatible = "arm,cortex-a55-pmu", .data = armv8_cortex_a55_pmu_init}, {.compatible = "arm,cortex-a57-pmu", .data = armv8_a57_pmu_init}, - {.compatible = "arm,cortex-a65-pmu", .data = armv8_a65_pmu_init}, + {.compatible = "arm,cortex-a65-pmu", .data = armv8_cortex_a65_pmu_init}, {.compatible = "arm,cortex-a72-pmu", .data = armv8_a72_pmu_init}, {.compatible = "arm,cortex-a73-pmu", .data = armv8_a73_pmu_init}, - {.compatible = "arm,cortex-a75-pmu", .data = armv8_a75_pmu_init}, - {.compatible = "arm,cortex-a76-pmu", .data = armv8_a76_pmu_init}, - {.compatible = "arm,cortex-a77-pmu", .data = armv8_a77_pmu_init}, - {.compatible = "arm,cortex-a78-pmu", .data = armv8_a78_pmu_init}, - {.compatible = "arm,neoverse-e1-pmu", .data = armv8_e1_pmu_init}, - {.compatible = "arm,neoverse-n1-pmu", .data = armv8_n1_pmu_init}, + {.compatible = "arm,cortex-a75-pmu", .data = armv8_cortex_a75_pmu_init}, + {.compatible = "arm,cortex-a76-pmu", .data = armv8_cortex_a76_pmu_init}, + {.compatible = "arm,cortex-a77-pmu", .data = armv8_cortex_a77_pmu_init}, + {.compatible = "arm,cortex-a78-pmu", .data = armv8_cortex_a78_pmu_init}, + {.compatible = "arm,cortex-a510-pmu", .data = armv9_cortex_a510_pmu_init}, + {.compatible = "arm,cortex-a710-pmu", .data = armv9_cortex_a710_pmu_init}, + {.compatible = "arm,cortex-x1-pmu", .data = armv8_cortex_x1_pmu_init}, + {.compatible = "arm,cortex-x2-pmu", .data = armv9_cortex_x2_pmu_init}, + {.compatible = "arm,neoverse-e1-pmu", .data = armv8_neoverse_e1_pmu_init}, + {.compatible = "arm,neoverse-n1-pmu", .data = armv8_neoverse_n1_pmu_init}, + {.compatible = "arm,neoverse-n2-pmu", .data = armv9_neoverse_n2_pmu_init}, + {.compatible = "arm,neoverse-v1-pmu", .data = armv8_neoverse_v1_pmu_init}, {.compatible = "cavium,thunder-pmu", .data = armv8_thunder_pmu_init}, {.compatible = "brcm,vulcan-pmu", .data = armv8_vulcan_pmu_init}, + {.compatible = "nvidia,carmel-pmu", .data = armv8_nvidia_carmel_pmu_init}, + {.compatible = "nvidia,denver-pmu", .data = armv8_nvidia_denver_pmu_init}, {}, }; @@ -1287,7 +1383,7 @@ static int __init armv8_pmu_driver_init(void) if (acpi_disabled) return platform_driver_register(&armv8_pmu_driver); else - return arm_pmu_acpi_probe(armv8_pmuv3_init); + return arm_pmu_acpi_probe(armv8_pmuv3_pmu_init); } device_initcall(armv8_pmu_driver_init) @@ -1301,6 +1397,14 @@ void arch_perf_update_userpage(struct perf_event *event, userpg->cap_user_time = 0; userpg->cap_user_time_zero = 0; userpg->cap_user_time_short = 0; + userpg->cap_user_rdpmc = armv8pmu_event_has_user_read(event); + + if (userpg->cap_user_rdpmc) { + if (event->hw.flags & ARMPMU_EVT_64BIT) + userpg->pmc_width = 64; + else + userpg->pmc_width = 32; + } do { rd = sched_clock_read_begin(&seq); diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index aacf2f5559a8..5369e649fa79 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -40,6 +40,7 @@ #include <linux/percpu.h> #include <linux/thread_info.h> #include <linux/prctl.h> +#include <linux/stacktrace.h> #include <asm/alternative.h> #include <asm/compat.h> @@ -439,34 +440,26 @@ static void entry_task_switch(struct task_struct *next) /* * ARM erratum 1418040 handling, affecting the 32bit view 
of CNTVCT. - * Assuming the virtual counter is enabled at the beginning of times: - * - * - disable access when switching from a 64bit task to a 32bit task - * - enable access when switching from a 32bit task to a 64bit task + * Ensure access is disabled when switching to a 32bit task, ensure + * access is enabled when switching to a 64bit task. */ -static void erratum_1418040_thread_switch(struct task_struct *prev, - struct task_struct *next) +static void erratum_1418040_thread_switch(struct task_struct *next) { - bool prev32, next32; - u64 val; - - if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040)) - return; - - prev32 = is_compat_thread(task_thread_info(prev)); - next32 = is_compat_thread(task_thread_info(next)); - - if (prev32 == next32 || !this_cpu_has_cap(ARM64_WORKAROUND_1418040)) + if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) || + !this_cpu_has_cap(ARM64_WORKAROUND_1418040)) return; - val = read_sysreg(cntkctl_el1); - - if (!next32) - val |= ARCH_TIMER_USR_VCT_ACCESS_EN; + if (is_compat_thread(task_thread_info(next))) + sysreg_clear_set(cntkctl_el1, ARCH_TIMER_USR_VCT_ACCESS_EN, 0); else - val &= ~ARCH_TIMER_USR_VCT_ACCESS_EN; + sysreg_clear_set(cntkctl_el1, 0, ARCH_TIMER_USR_VCT_ACCESS_EN); +} - write_sysreg(val, cntkctl_el1); +static void erratum_1418040_new_exec(void) +{ + preempt_disable(); + erratum_1418040_thread_switch(current); + preempt_enable(); } /* @@ -490,7 +483,8 @@ void update_sctlr_el1(u64 sctlr) /* * Thread switching. */ -__notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev, +__notrace_funcgraph __sched +struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next) { struct task_struct *last; @@ -501,7 +495,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev, contextidr_thread_switch(next); entry_task_switch(next); ssbs_thread_switch(next); - erratum_1418040_thread_switch(prev, next); + erratum_1418040_thread_switch(next); ptrauth_thread_switch_user(next); /* @@ -528,30 +522,37 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev, return last; } +struct wchan_info { + unsigned long pc; + int count; +}; + +static bool get_wchan_cb(void *arg, unsigned long pc) +{ + struct wchan_info *wchan_info = arg; + + if (!in_sched_functions(pc)) { + wchan_info->pc = pc; + return false; + } + return wchan_info->count++ < 16; +} + unsigned long __get_wchan(struct task_struct *p) { - struct stackframe frame; - unsigned long stack_page, ret = 0; - int count = 0; + struct wchan_info wchan_info = { + .pc = 0, + .count = 0, + }; - stack_page = (unsigned long)try_get_task_stack(p); - if (!stack_page) + if (!try_get_task_stack(p)) return 0; - start_backtrace(&frame, thread_saved_fp(p), thread_saved_pc(p)); - - do { - if (unwind_frame(p, &frame)) - goto out; - if (!in_sched_functions(frame.pc)) { - ret = frame.pc; - goto out; - } - } while (count++ < 16); + arch_stack_walk(get_wchan_cb, &wchan_info, p, NULL); -out: put_task_stack(p); - return ret; + + return wchan_info.pc; } unsigned long arch_align_stack(unsigned long sp) @@ -611,6 +612,7 @@ void arch_setup_new_exec(void) current->mm->context.flags = mmflags; ptrauth_thread_init_user(); mte_thread_init_user(); + erratum_1418040_new_exec(); if (task_spec_ssb_noexec(current)) { arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS, diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 88a9034fb9b5..716dde289446 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -812,9 +812,9 @@ static int 
sve_set(struct task_struct *target, /* * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by - * sve_set_vector_length(), which will also validate them for us: + * vec_set_vector_length(), which will also validate them for us: */ - ret = sve_set_vector_length(target, header.vl, + ret = vec_set_vector_length(target, ARM64_VEC_SVE, header.vl, ((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16); if (ret) goto out; diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c index a6d18755652f..68330017d04f 100644 --- a/arch/arm64/kernel/return_address.c +++ b/arch/arm64/kernel/return_address.c @@ -9,9 +9,9 @@ #include <linux/export.h> #include <linux/ftrace.h> #include <linux/kprobes.h> +#include <linux/stacktrace.h> #include <asm/stack_pointer.h> -#include <asm/stacktrace.h> struct return_address_data { unsigned int level; @@ -35,15 +35,11 @@ NOKPROBE_SYMBOL(save_return_addr); void *return_address(unsigned int level) { struct return_address_data data; - struct stackframe frame; data.level = level + 2; data.addr = NULL; - start_backtrace(&frame, - (unsigned long)__builtin_frame_address(0), - (unsigned long)return_address); - walk_stackframe(current, &frame, save_return_addr, &data); + arch_stack_walk(save_return_addr, &data, current, NULL); if (!data.level) return data.addr; diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index be5f85b0a24d..f70573928f1b 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -189,11 +189,16 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys) if (!dt_virt || !early_init_dt_scan(dt_virt)) { pr_crit("\n" - "Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n" + "Error: invalid device tree blob at physical address %pa (virtual address 0x%px)\n" "The dtb must be 8-byte aligned and must not exceed 2 MB in size\n" "\nPlease check your bootloader.", &dt_phys, dt_virt); + /* + * Note that in this _really_ early stage we cannot even BUG() + * or oops, so the least terrible thing to do is cpu_relax(), + * or else we could end-up printing non-initialized data, etc. + */ while (true) cpu_relax(); } @@ -232,12 +237,14 @@ static void __init request_standard_resources(void) if (memblock_is_nomap(region)) { res->name = "reserved"; res->flags = IORESOURCE_MEM; + res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region)); + res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1; } else { res->name = "System RAM"; res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region)); + res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1; } - res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region)); - res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1; request_resource(&iomem_resource, res); diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 94f83cd44e50..0fb58fed54cb 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -33,8 +33,8 @@ */ -void start_backtrace(struct stackframe *frame, unsigned long fp, - unsigned long pc) +static void start_backtrace(struct stackframe *frame, unsigned long fp, + unsigned long pc) { frame->fp = fp; frame->pc = pc; @@ -63,7 +63,8 @@ void start_backtrace(struct stackframe *frame, unsigned long fp, * records (e.g. a cycle), determined based on the location and fp value of A * and the location (but not the fp value) of B. 
*/ -int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) +static int notrace unwind_frame(struct task_struct *tsk, + struct stackframe *frame) { unsigned long fp = frame->fp; struct stack_info info; @@ -141,8 +142,9 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) } NOKPROBE_SYMBOL(unwind_frame); -void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame, - bool (*fn)(void *, unsigned long), void *data) +static void notrace walk_stackframe(struct task_struct *tsk, + struct stackframe *frame, + bool (*fn)(void *, unsigned long), void *data) { while (1) { int ret; @@ -156,24 +158,20 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame, } NOKPROBE_SYMBOL(walk_stackframe); -static void dump_backtrace_entry(unsigned long where, const char *loglvl) +static bool dump_backtrace_entry(void *arg, unsigned long where) { + char *loglvl = arg; printk("%s %pSb\n", loglvl, (void *)where); + return true; } void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk, const char *loglvl) { - struct stackframe frame; - int skip = 0; - pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk); - if (regs) { - if (user_mode(regs)) - return; - skip = 1; - } + if (regs && user_mode(regs)) + return; if (!tsk) tsk = current; @@ -181,36 +179,8 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk, if (!try_get_task_stack(tsk)) return; - if (tsk == current) { - start_backtrace(&frame, - (unsigned long)__builtin_frame_address(0), - (unsigned long)dump_backtrace); - } else { - /* - * task blocked in __switch_to - */ - start_backtrace(&frame, - thread_saved_fp(tsk), - thread_saved_pc(tsk)); - } - printk("%sCall trace:\n", loglvl); - do { - /* skip until specified stack frame */ - if (!skip) { - dump_backtrace_entry(frame.pc, loglvl); - } else if (frame.fp == regs->regs[29]) { - skip = 0; - /* - * Mostly, this is the case where this function is - * called in panic/abort. As exception handler's - * stack frame does not contain the corresponding pc - * at which an exception has taken place, use regs->pc - * instead. 
- */ - dump_backtrace_entry(regs->pc, loglvl); - } - } while (!unwind_frame(tsk, &frame)); + arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs); put_task_stack(tsk); } @@ -221,8 +191,6 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl) barrier(); } -#ifdef CONFIG_STACKTRACE - noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, struct task_struct *task, struct pt_regs *regs) @@ -241,5 +209,3 @@ noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry, walk_stackframe(task, &frame, consume_entry, cookie); } - -#endif diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c index eebbc8d7123e..b5855eb7435d 100644 --- a/arch/arm64/kernel/time.c +++ b/arch/arm64/kernel/time.c @@ -18,6 +18,7 @@ #include <linux/timex.h> #include <linux/errno.h> #include <linux/profile.h> +#include <linux/stacktrace.h> #include <linux/syscore_ops.h> #include <linux/timer.h> #include <linux/irq.h> @@ -29,25 +30,25 @@ #include <clocksource/arm_arch_timer.h> #include <asm/thread_info.h> -#include <asm/stacktrace.h> #include <asm/paravirt.h> -unsigned long profile_pc(struct pt_regs *regs) +static bool profile_pc_cb(void *arg, unsigned long pc) { - struct stackframe frame; + unsigned long *prof_pc = arg; - if (!in_lock_functions(regs->pc)) - return regs->pc; + if (in_lock_functions(pc)) + return true; + *prof_pc = pc; + return false; +} - start_backtrace(&frame, regs->regs[29], regs->pc); +unsigned long profile_pc(struct pt_regs *regs) +{ + unsigned long prof_pc = 0; - do { - int ret = unwind_frame(NULL, &frame); - if (ret < 0) - return 0; - } while (in_lock_functions(frame.pc)); + arch_stack_walk(profile_pc_cb, &prof_pc, current, regs); - return frame.pc; + return prof_pc; } EXPORT_SYMBOL(profile_pc); diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile index 700767dfd221..60813497a381 100644 --- a/arch/arm64/kernel/vdso/Makefile +++ b/arch/arm64/kernel/vdso/Makefile @@ -32,6 +32,7 @@ ccflags-y += -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) $(GCC_PLUGINS_CFLAGS) \ $(CC_FLAGS_LTO) KASAN_SANITIZE := n +KCSAN_SANITIZE := n UBSAN_SANITIZE := n OBJECT_FILES_NON_STANDARD := y KCOV_INSTRUMENT := n diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 275a27368a04..5abe0617f2af 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -140,9 +140,12 @@ static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu) return 1; } +/* + * Guest access to SVE registers should be routed to this handler only + * when the system doesn't support SVE. + */ static int handle_sve(struct kvm_vcpu *vcpu) { - /* Until SVE is supported for guests: */ kvm_inject_undefined(vcpu); return 1; } diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile index c3c11974fa3b..24b2c2425b38 100644 --- a/arch/arm64/kvm/hyp/nvhe/Makefile +++ b/arch/arm64/kvm/hyp/nvhe/Makefile @@ -89,6 +89,7 @@ KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS) $(CC_FLAGS_CFI) # cause crashes. Just disable it. 
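
Editor's note on the hunks above: several files in this series (dump_backtrace, profile_pc, __get_wchan, return_address) converge on the same arch_stack_walk() convention, where all traversal state lives in a caller-owned struct passed through a void *cookie and the consume callback returns false to stop the walk. The following is a minimal standalone sketch of that idiom in plain C; it is not kernel code, the walker is simulated, and the names fake_stack_walk, first_pc_info and first_interesting_pc are invented for illustration.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Mirrors the stack_trace_consume_fn shape: return false to stop. */
typedef bool (*consume_fn)(void *cookie, unsigned long pc);

/* Stand-in for arch_stack_walk(): feeds fake frame PCs to the callback. */
static void fake_stack_walk(consume_fn fn, void *cookie)
{
	static const unsigned long pcs[] = { 0x1000, 0x2000, 0x3000, 0x4000 };

	for (size_t i = 0; i < sizeof(pcs) / sizeof(pcs[0]); i++)
		if (!fn(cookie, pcs[i]))
			break;
}

/* Caller-owned state, threaded through the void *cookie. */
struct first_pc_info {
	unsigned long pc;
	int count;
};

static bool first_interesting_pc(void *cookie, unsigned long pc)
{
	struct first_pc_info *info = cookie;

	if (pc != 0x1000 && pc != 0x2000) {	/* "not a sched function" */
		info->pc = pc;
		return false;			/* found it: stop walking */
	}
	return info->count++ < 16;		/* bound the search depth */
}

int main(void)
{
	struct first_pc_info info = { .pc = 0, .count = 0 };

	fake_stack_walk(first_interesting_pc, &info);
	printf("first interesting pc: %#lx\n", info.pc);
	return 0;
}

This is the same shape as get_wchan_cb() and profile_pc_cb() in the hunks above: terminate early by returning false, bound the walk with a counter, and report the result through the cookie rather than a global.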
GCOV_PROFILE := n KASAN_SANITIZE := n +KCSAN_SANITIZE := n UBSAN_SANITIZE := n KCOV_INSTRUMENT := n diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 426bd7fbc3fd..27386f0d81e4 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -52,10 +52,10 @@ int kvm_arm_init_sve(void) * The get_sve_reg()/set_sve_reg() ioctl interface will need * to be extended with multiple register slice support in * order to support vector lengths greater than - * SVE_VL_ARCH_MAX: + * VL_ARCH_MAX: */ - if (WARN_ON(kvm_sve_max_vl > SVE_VL_ARCH_MAX)) - kvm_sve_max_vl = SVE_VL_ARCH_MAX; + if (WARN_ON(kvm_sve_max_vl > VL_ARCH_MAX)) + kvm_sve_max_vl = VL_ARCH_MAX; /* * Don't even try to make use of vector lengths that @@ -103,7 +103,7 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu) * set_sve_vls(). Double-check here just to be sure: */ if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl() || - vl > SVE_VL_ARCH_MAX)) + vl > VL_ARCH_MAX)) return -EIO; buf = kzalloc(SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl)), GFP_KERNEL_ACCOUNT); diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index e3ec1a44f94d..4dc2fba316ff 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1525,7 +1525,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { /* CRm=6 */ ID_SANITISED(ID_AA64ISAR0_EL1), ID_SANITISED(ID_AA64ISAR1_EL1), - ID_UNALLOCATED(6,2), + ID_SANITISED(ID_AA64ISAR2_EL1), ID_UNALLOCATED(6,3), ID_UNALLOCATED(6,4), ID_UNALLOCATED(6,5), diff --git a/arch/arm64/lib/clear_page.S b/arch/arm64/lib/clear_page.S index b84b179edba3..1fd5d790ab80 100644 --- a/arch/arm64/lib/clear_page.S +++ b/arch/arm64/lib/clear_page.S @@ -16,6 +16,7 @@ */ SYM_FUNC_START_PI(clear_page) mrs x1, dczid_el0 + tbnz x1, #4, 2f /* Branch if DC ZVA is prohibited */ and w1, w1, #0xf mov x2, #4 lsl x1, x2, x1 @@ -25,5 +26,14 @@ SYM_FUNC_START_PI(clear_page) tst x0, #(PAGE_SIZE - 1) b.ne 1b ret + +2: stnp xzr, xzr, [x0] + stnp xzr, xzr, [x0, #16] + stnp xzr, xzr, [x0, #32] + stnp xzr, xzr, [x0, #48] + add x0, x0, #64 + tst x0, #(PAGE_SIZE - 1) + b.ne 2b + ret SYM_FUNC_END_PI(clear_page) EXPORT_SYMBOL(clear_page) diff --git a/arch/arm64/lib/kasan_sw_tags.S b/arch/arm64/lib/kasan_sw_tags.S index 5b04464c045e..20784ce75def 100644 --- a/arch/arm64/lib/kasan_sw_tags.S +++ b/arch/arm64/lib/kasan_sw_tags.S @@ -38,9 +38,7 @@ * incremented by 256 prior to return). 
*/ SYM_CODE_START(__hwasan_tag_mismatch) -#ifdef BTI_C - BTI_C -#endif + bti c add x29, sp, #232 stp x2, x3, [sp, #8 * 2] stp x4, x5, [sp, #8 * 4] diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S index e83643b3995f..f531dcb95174 100644 --- a/arch/arm64/lib/mte.S +++ b/arch/arm64/lib/mte.S @@ -43,17 +43,23 @@ SYM_FUNC_END(mte_clear_page_tags) * x0 - address to the beginning of the page */ SYM_FUNC_START(mte_zero_clear_page_tags) + and x0, x0, #(1 << MTE_TAG_SHIFT) - 1 // clear the tag mrs x1, dczid_el0 + tbnz x1, #4, 2f // Branch if DC GZVA is prohibited and w1, w1, #0xf mov x2, #4 lsl x1, x2, x1 - and x0, x0, #(1 << MTE_TAG_SHIFT) - 1 // clear the tag 1: dc gzva, x0 add x0, x0, x1 tst x0, #(PAGE_SIZE - 1) b.ne 1b ret + +2: stz2g x0, [x0], #(MTE_GRANULE_SIZE * 2) + tst x0, #(PAGE_SIZE - 1) + b.ne 2b + ret SYM_FUNC_END(mte_zero_clear_page_tags) /* diff --git a/arch/arm64/lib/xor-neon.c b/arch/arm64/lib/xor-neon.c index 11bf4f8aca68..d189cf4e70ea 100644 --- a/arch/arm64/lib/xor-neon.c +++ b/arch/arm64/lib/xor-neon.c @@ -167,7 +167,7 @@ void xor_arm64_neon_5(unsigned long bytes, unsigned long *p1, } while (--lines > 0); } -struct xor_block_template const xor_block_inner_neon = { +struct xor_block_template xor_block_inner_neon __ro_after_init = { .name = "__inner_neon__", .do_2 = xor_arm64_neon_2, .do_3 = xor_arm64_neon_3, @@ -176,6 +176,151 @@ struct xor_block_template const xor_block_inner_neon = { }; EXPORT_SYMBOL(xor_block_inner_neon); +static inline uint64x2_t eor3(uint64x2_t p, uint64x2_t q, uint64x2_t r) +{ + uint64x2_t res; + + asm(ARM64_ASM_PREAMBLE ".arch_extension sha3\n" + "eor3 %0.16b, %1.16b, %2.16b, %3.16b" + : "=w"(res) : "w"(p), "w"(q), "w"(r)); + return res; +} + +static void xor_arm64_eor3_3(unsigned long bytes, unsigned long *p1, + unsigned long *p2, unsigned long *p3) +{ + uint64_t *dp1 = (uint64_t *)p1; + uint64_t *dp2 = (uint64_t *)p2; + uint64_t *dp3 = (uint64_t *)p3; + + register uint64x2_t v0, v1, v2, v3; + long lines = bytes / (sizeof(uint64x2_t) * 4); + + do { + /* p1 ^= p2 ^ p3 */ + v0 = eor3(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0), + vld1q_u64(dp3 + 0)); + v1 = eor3(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2), + vld1q_u64(dp3 + 2)); + v2 = eor3(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4), + vld1q_u64(dp3 + 4)); + v3 = eor3(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6), + vld1q_u64(dp3 + 6)); + + /* store */ + vst1q_u64(dp1 + 0, v0); + vst1q_u64(dp1 + 2, v1); + vst1q_u64(dp1 + 4, v2); + vst1q_u64(dp1 + 6, v3); + + dp1 += 8; + dp2 += 8; + dp3 += 8; + } while (--lines > 0); +} + +static void xor_arm64_eor3_4(unsigned long bytes, unsigned long *p1, + unsigned long *p2, unsigned long *p3, + unsigned long *p4) +{ + uint64_t *dp1 = (uint64_t *)p1; + uint64_t *dp2 = (uint64_t *)p2; + uint64_t *dp3 = (uint64_t *)p3; + uint64_t *dp4 = (uint64_t *)p4; + + register uint64x2_t v0, v1, v2, v3; + long lines = bytes / (sizeof(uint64x2_t) * 4); + + do { + /* p1 ^= p2 ^ p3 */ + v0 = eor3(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0), + vld1q_u64(dp3 + 0)); + v1 = eor3(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2), + vld1q_u64(dp3 + 2)); + v2 = eor3(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4), + vld1q_u64(dp3 + 4)); + v3 = eor3(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6), + vld1q_u64(dp3 + 6)); + + /* p1 ^= p4 */ + v0 = veorq_u64(v0, vld1q_u64(dp4 + 0)); + v1 = veorq_u64(v1, vld1q_u64(dp4 + 2)); + v2 = veorq_u64(v2, vld1q_u64(dp4 + 4)); + v3 = veorq_u64(v3, vld1q_u64(dp4 + 6)); + + /* store */ + vst1q_u64(dp1 + 0, v0); + vst1q_u64(dp1 + 2, v1); + vst1q_u64(dp1 + 4, v2); + vst1q_u64(dp1 + 6, v3); + + dp1 += 8; + dp2 
+= 8; + dp3 += 8; + dp4 += 8; + } while (--lines > 0); +} + +static void xor_arm64_eor3_5(unsigned long bytes, unsigned long *p1, + unsigned long *p2, unsigned long *p3, + unsigned long *p4, unsigned long *p5) +{ + uint64_t *dp1 = (uint64_t *)p1; + uint64_t *dp2 = (uint64_t *)p2; + uint64_t *dp3 = (uint64_t *)p3; + uint64_t *dp4 = (uint64_t *)p4; + uint64_t *dp5 = (uint64_t *)p5; + + register uint64x2_t v0, v1, v2, v3; + long lines = bytes / (sizeof(uint64x2_t) * 4); + + do { + /* p1 ^= p2 ^ p3 */ + v0 = eor3(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0), + vld1q_u64(dp3 + 0)); + v1 = eor3(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2), + vld1q_u64(dp3 + 2)); + v2 = eor3(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4), + vld1q_u64(dp3 + 4)); + v3 = eor3(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6), + vld1q_u64(dp3 + 6)); + + /* p1 ^= p4 ^ p5 */ + v0 = eor3(v0, vld1q_u64(dp4 + 0), vld1q_u64(dp5 + 0)); + v1 = eor3(v1, vld1q_u64(dp4 + 2), vld1q_u64(dp5 + 2)); + v2 = eor3(v2, vld1q_u64(dp4 + 4), vld1q_u64(dp5 + 4)); + v3 = eor3(v3, vld1q_u64(dp4 + 6), vld1q_u64(dp5 + 6)); + + /* store */ + vst1q_u64(dp1 + 0, v0); + vst1q_u64(dp1 + 2, v1); + vst1q_u64(dp1 + 4, v2); + vst1q_u64(dp1 + 6, v3); + + dp1 += 8; + dp2 += 8; + dp3 += 8; + dp4 += 8; + dp5 += 8; + } while (--lines > 0); +} + +static int __init xor_neon_init(void) +{ + if (IS_ENABLED(CONFIG_AS_HAS_SHA3) && cpu_have_named_feature(SHA3)) { + xor_block_inner_neon.do_3 = xor_arm64_eor3_3; + xor_block_inner_neon.do_4 = xor_arm64_eor3_4; + xor_block_inner_neon.do_5 = xor_arm64_eor3_5; + } + return 0; +} +module_init(xor_neon_init); + +static void __exit xor_neon_exit(void) +{ +} +module_exit(xor_neon_exit); + MODULE_AUTHOR("Jackie Liu <liuyun01@kylinos.cn>"); MODULE_DESCRIPTION("ARMv8 XOR Extensions"); MODULE_LICENSE("GPL"); diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index 5051b3c1a4f1..7d0563db4201 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -140,15 +140,7 @@ SYM_FUNC_END(dcache_clean_pou) * - start - kernel start address of region * - end - kernel end address of region */ -SYM_FUNC_START_LOCAL(__dma_inv_area) SYM_FUNC_START_PI(dcache_inval_poc) - /* FALLTHROUGH */ - -/* - * __dma_inv_area(start, end) - * - start - virtual start address of region - * - end - virtual end address of region - */ dcache_line_size x2, x3 sub x3, x2, #1 tst x1, x3 // end cache line aligned? 
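
Editor's note on the xor-neon hunks above: EOR3 computes a three-way exclusive OR in a single instruction, so two chained EOR3 operations cover five source buffers where the generic templates need four dependent XORs. The standalone C sketch below (eor3_model is an invented scalar stand-in, not the NEON intrinsic wrapper) demonstrates the identity the kernel's xor_arm64_eor3_5() relies on.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Scalar model of the SHA3 EOR3 instruction: one op, three inputs. */
static inline uint64_t eor3_model(uint64_t p, uint64_t q, uint64_t r)
{
	return p ^ q ^ r;
}

int main(void)
{
	uint64_t p1 = 0x0123456789abcdefULL;
	uint64_t p2 = 0xfedcba9876543210ULL;
	uint64_t p3 = 0x00ff00ff00ff00ffULL;
	uint64_t p4 = 0x0f0f0f0f0f0f0f0fULL;
	uint64_t p5 = 0x5555aaaa5555aaaaULL;

	/* Two chained EOR3 ops cover five sources, as in xor_arm64_eor3_5(). */
	uint64_t fused = eor3_model(eor3_model(p1, p2, p3), p4, p5);

	/* Reference: four dependent XORs, as the generic templates do it. */
	uint64_t plain = p1 ^ p2 ^ p3 ^ p4 ^ p5;

	assert(fused == plain);
	printf("eor3 chain matches plain xor: %#llx\n",
	       (unsigned long long)fused);
	return 0;
}

Halving the XOR instruction count on these hot RAID paths is why xor_neon_init() swaps in the EOR3 variants only when both the assembler (CONFIG_AS_HAS_SHA3) and the CPU (cpu_have_named_feature(SHA3)) support the extension.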
@@ -167,7 +159,6 @@ SYM_FUNC_START_PI(dcache_inval_poc) dsb sy ret SYM_FUNC_END_PI(dcache_inval_poc) -SYM_FUNC_END(__dma_inv_area) /* * dcache_clean_poc(start, end) @@ -178,19 +169,10 @@ SYM_FUNC_END(__dma_inv_area) * - start - virtual start address of region * - end - virtual end address of region */ -SYM_FUNC_START_LOCAL(__dma_clean_area) SYM_FUNC_START_PI(dcache_clean_poc) - /* FALLTHROUGH */ - -/* - * __dma_clean_area(start, end) - * - start - virtual start address of region - * - end - virtual end address of region - */ dcache_by_line_op cvac, sy, x0, x1, x2, x3 ret SYM_FUNC_END_PI(dcache_clean_poc) -SYM_FUNC_END(__dma_clean_area) /* * dcache_clean_pop(start, end) @@ -232,8 +214,8 @@ SYM_FUNC_END_PI(__dma_flush_area) SYM_FUNC_START_PI(__dma_map_area) add x1, x0, x1 cmp w2, #DMA_FROM_DEVICE - b.eq __dma_inv_area - b __dma_clean_area + b.eq __pi_dcache_inval_poc + b __pi_dcache_clean_poc SYM_FUNC_END_PI(__dma_map_area) /* @@ -245,6 +227,6 @@ SYM_FUNC_END_PI(__dma_map_area) SYM_FUNC_START_PI(__dma_unmap_area) add x1, x0, x1 cmp w2, #DMA_TO_DEVICE - b.ne __dma_inv_area + b.ne __pi_dcache_inval_poc ret SYM_FUNC_END_PI(__dma_unmap_area) diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index cd72576ae2b7..b8b4cf0bcf39 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -35,8 +35,8 @@ static unsigned long *pinned_asid_map; #define ASID_FIRST_VERSION (1UL << asid_bits) #define NUM_USER_ASIDS ASID_FIRST_VERSION -#define asid2idx(asid) ((asid) & ~ASID_MASK) -#define idx2asid(idx) asid2idx(idx) +#define ctxid2asid(asid) ((asid) & ~ASID_MASK) +#define asid2ctxid(asid, genid) ((asid) | (genid)) /* Get the ASIDBits supported by the current CPU */ static u32 get_cpu_asid_bits(void) @@ -50,10 +50,10 @@ static u32 get_cpu_asid_bits(void) pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n", smp_processor_id(), fld); fallthrough; - case 0: + case ID_AA64MMFR0_ASID_8: asid = 8; break; - case 2: + case ID_AA64MMFR0_ASID_16: asid = 16; } @@ -120,7 +120,7 @@ static void flush_context(void) */ if (asid == 0) asid = per_cpu(reserved_asids, i); - __set_bit(asid2idx(asid), asid_map); + __set_bit(ctxid2asid(asid), asid_map); per_cpu(reserved_asids, i) = asid; } @@ -162,7 +162,7 @@ static u64 new_context(struct mm_struct *mm) u64 generation = atomic64_read(&asid_generation); if (asid != 0) { - u64 newasid = generation | (asid & ~ASID_MASK); + u64 newasid = asid2ctxid(ctxid2asid(asid), generation); /* * If our current ASID was active during a rollover, we @@ -183,7 +183,7 @@ static u64 new_context(struct mm_struct *mm) * We had a valid ASID in a previous life, so try to re-use * it if possible. 
*/ - if (!__test_and_set_bit(asid2idx(asid), asid_map)) + if (!__test_and_set_bit(ctxid2asid(asid), asid_map)) return newasid; } @@ -209,7 +209,7 @@ static u64 new_context(struct mm_struct *mm) set_asid: __set_bit(asid, asid_map); cur_idx = asid; - return idx2asid(asid) | generation; + return asid2ctxid(asid, generation); } void check_and_switch_context(struct mm_struct *mm) @@ -300,13 +300,13 @@ unsigned long arm64_mm_context_get(struct mm_struct *mm) } nr_pinned_asids++; - __set_bit(asid2idx(asid), pinned_asid_map); + __set_bit(ctxid2asid(asid), pinned_asid_map); refcount_set(&mm->context.pinned, 1); out_unlock: raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); - asid &= ~ASID_MASK; + asid = ctxid2asid(asid); /* Set the equivalent of USER_ASID_BIT */ if (asid && arm64_kernel_unmapped_at_el0()) @@ -327,7 +327,7 @@ void arm64_mm_context_put(struct mm_struct *mm) raw_spin_lock_irqsave(&cpu_asid_lock, flags); if (refcount_dec_and_test(&mm->context.pinned)) { - __clear_bit(asid2idx(asid), pinned_asid_map); + __clear_bit(ctxid2asid(asid), pinned_asid_map); nr_pinned_asids--; } diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c index c3d53811a15e..c0181e60cc98 100644 --- a/arch/arm64/mm/extable.c +++ b/arch/arm64/mm/extable.c @@ -10,9 +10,6 @@ #include <asm/asm-extable.h> #include <asm/ptrace.h> -typedef bool (*ex_handler_t)(const struct exception_table_entry *, - struct pt_regs *); - static inline unsigned long get_ex_fixup(const struct exception_table_entry *ex) { diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 9ae24e3b72be..9a9e7675b187 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -297,6 +297,8 @@ static void die_kernel_fault(const char *msg, unsigned long addr, pr_alert("Unable to handle kernel %s at virtual address %016lx\n", msg, addr); + kasan_non_canonical_hook(addr); + mem_abort_decode(esr); show_pte(addr); @@ -813,11 +815,8 @@ void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs) if (!inf->fn(far, esr, regs)) return; - if (!user_mode(regs)) { - pr_alert("Unhandled fault at 0x%016lx\n", addr); - mem_abort_decode(esr); - show_pte(addr); - } + if (!user_mode(regs)) + die_kernel_fault(inf->name, addr, esr, regs); /* * At this point we have an unrecognized fault type whose tag bits may diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 5fa68c2ef1f8..b039877c743d 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -47,7 +47,7 @@ obj-y := cputable.o syscalls.o \ udbg.o misc.o io.o misc_$(BITS).o \ of_platform.o prom_parse.o firmware.o \ hw_breakpoint_constraints.o interrupt.o \ - kdebugfs.o + kdebugfs.o stacktrace.o obj-y += ptrace/ obj-$(CONFIG_PPC64) += setup_64.o \ paca.o nvram_64.o note.o @@ -116,7 +116,6 @@ obj-$(CONFIG_OPTPROBES) += optprobes.o optprobes_head.o obj-$(CONFIG_KPROBES_ON_FTRACE) += kprobes-ftrace.o obj-$(CONFIG_UPROBES) += uprobes.o obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o -obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o obj-$(CONFIG_ARCH_HAS_DMA_SET_MASK) += dma-mask.o diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c index 0fcdc0233fac..201ee206fb57 100644 --- a/arch/riscv/kernel/stacktrace.c +++ b/arch/riscv/kernel/stacktrace.c @@ -139,12 +139,8 @@ unsigned long __get_wchan(struct task_struct *task) return pc; } -#ifdef CONFIG_STACKTRACE - noinline void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, struct task_struct *task, struct pt_regs 
*regs) { walk_stackframe(task, regs, consume_entry, cookie); } - -#endif /* CONFIG_STACKTRACE */ diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 80f500ffb55c..be8007f367aa 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -40,7 +40,7 @@ obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o obj-y += nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o unwind_bc.o -obj-y += smp.o text_amode31.o +obj-y += smp.o text_amode31.o stacktrace.o extra-y += head64.o vmlinux.lds @@ -55,7 +55,6 @@ compat-obj-$(CONFIG_AUDIT) += compat_audit.o obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o obj-$(CONFIG_COMPAT) += $(compat-obj-y) obj-$(CONFIG_EARLY_PRINTK) += early_printk.o -obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_KPROBES) += kprobes_insn_page.o obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 38b2c779146f..68dea7ce6a22 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -2476,7 +2476,7 @@ static int x86_pmu_event_init(struct perf_event *event) if (READ_ONCE(x86_pmu.attr_rdpmc) && !(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS)) - event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED; + event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT; return err; } @@ -2510,7 +2510,7 @@ void perf_clear_dirty_counters(void) static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm) { - if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) + if (!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT)) return; /* @@ -2531,7 +2531,7 @@ static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm) static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm) { - if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) + if (!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT)) return; if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed)) @@ -2542,7 +2542,7 @@ static int x86_pmu_event_idx(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; - if (!(hwc->flags & PERF_X86_EVENT_RDPMC_ALLOWED)) + if (!(hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT)) return 0; if (is_metric_idx(hwc->idx)) @@ -2725,7 +2725,7 @@ void arch_perf_update_userpage(struct perf_event *event, userpg->cap_user_time = 0; userpg->cap_user_time_zero = 0; userpg->cap_user_rdpmc = - !!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED); + !!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT); userpg->pmc_width = x86_pmu.cntval_bits; if (!using_native_sched_clock() || !sched_clock_stable()) diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 5480db242083..9d376e528dfc 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -74,7 +74,7 @@ static inline bool constraint_match(struct event_constraint *c, u64 ecode) #define PERF_X86_EVENT_PEBS_NA_HSW 0x0010 /* haswell style datala, unknown */ #define PERF_X86_EVENT_EXCL 0x0020 /* HT exclusivity on counter */ #define PERF_X86_EVENT_DYNAMIC 0x0040 /* dynamic alloc'd constraint */ -#define PERF_X86_EVENT_RDPMC_ALLOWED 0x0080 /* grant rdpmc permission */ + #define PERF_X86_EVENT_EXCL_ACCT 0x0100 /* accounted EXCL event */ #define PERF_X86_EVENT_AUTO_RELOAD 0x0200 /* use PEBS auto-reload */ #define PERF_X86_EVENT_LARGE_PEBS 0x0400 /* use large PEBS */ diff --git a/arch/x86/kernel/Makefile 
b/arch/x86/kernel/Makefile index 2ff3e600f426..6aef9ee28a39 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -84,7 +84,7 @@ obj-$(CONFIG_IA32_EMULATION) += tls.o obj-y += step.o obj-$(CONFIG_INTEL_TXT) += tboot.o obj-$(CONFIG_ISA_DMA_API) += i8237.o -obj-$(CONFIG_STACKTRACE) += stacktrace.o +obj-y += stacktrace.o obj-y += cpu/ obj-y += acpi/ obj-y += reboot.o
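
Editor's note on the perf userpage hunks in this patch: arch_perf_update_userpage() must publish pmc_width because a self-monitoring reader of the perf mmap page has to sign-extend a raw counter narrower than 64 bits before adding perf_event_mmap_page::offset. The sketch below illustrates only that fixup; cap_user_rdpmc and pmc_width are real fields of struct perf_event_mmap_page in linux/perf_event.h, but extend_counter is an invented helper, the actual counter read and seqlock retry loop are elided, and the code assumes the usual two's-complement arithmetic right shift.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Sign-extend a raw hardware counter value of 'width' bits to 64 bits,
 * mirroring what userspace does with perf_event_mmap_page::pmc_width
 * before adding perf_event_mmap_page::offset.
 */
static int64_t extend_counter(uint64_t raw, unsigned int width)
{
	int shift = 64 - width;

	return ((int64_t)(raw << shift)) >> shift;
}

int main(void)
{
	/* A 32-bit counter that has counted back past zero. */
	uint64_t raw32 = 0xfffffff0u;

	printf("width 32: %" PRId64 "\n", extend_counter(raw32, 32)); /* -16 */
	printf("width 64: %" PRId64 "\n",
	       extend_counter(0xfffffffffffffff0ull, 64));            /* -16 */
	return 0;
}

This matches the 32/64 split the arm64 hunk installs: events flagged ARMPMU_EVT_64BIT report pmc_width = 64, everything else 32, and userspace only trusts the counter at all when cap_user_rdpmc is set.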