author		Catalin Marinas		2022-05-20 18:50:57 +0100
committer	Catalin Marinas		2022-05-20 18:50:57 +0100
commit		e003d5335c3877bcbee8d0c347d3c3ee36cdd8b7 (patch)
tree		a73180a13d875674f1bc57ec0484b5d118d3e1ef /arch/arm64/kernel
parent		201729d53a34924cfcd011f71e49de9bb2902bd8 (diff)
parent		dffdeade18432d257e0c1845dc4e694f414a9721 (diff)
Merge branch 'for-next/sysreg-gen' into for-next/core
* for-next/sysreg-gen: (32 commits)
  Automatic system register definition generation.
arm64/sysreg: Generate definitions for FAR_ELx
arm64/sysreg: Generate definitions for DACR32_EL2
arm64/sysreg: Generate definitions for CSSELR_EL1
arm64/sysreg: Generate definitions for CPACR_ELx
arm64/sysreg: Generate definitions for CONTEXTIDR_ELx
arm64/sysreg: Generate definitions for CLIDR_EL1
arm64/sve: Generate ZCR definitions
arm64/sme: Generate definitions for SVCR
arm64/sme: Generate SMPRI_EL1 definitions
arm64/sme: Automatically generate SMPRIMAP_EL2 definitions
arm64/sme: Automatically generate SMIDR_EL1 defines
arm64/sme: Automatically generate defines for SMCR
arm64/sysreg: Support generation of RAZ fields
arm64/sme: Remove _EL0 from name of SVCR - FIXME sysreg.h
arm64/sme: Standardise bitfield names for SVCR
arm64/sme: Drop SYS_ from SMIDR_EL1 defines
arm64/fp: Rename SVE and SME LEN field name to _WIDTH
arm64/fp: Make SVE and SME length register definition match architecture
arm64/sysreg: fix odd line spacing
arm64/sysreg: improve comment for regs without fields
...
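For reference, the series replaces hand-maintained defines in sysreg.h with ones generated from per-register description files. A minimal sketch of the resulting naming scheme; the names follow the convention visible in the diff below, and the values are recalled from the ID_AA64ISAR0_EL1 layout in the Arm ARM rather than copied from the generated header:

#include <linux/bits.h>

/*
 * Sketch: for a register REG with a field FIELD, the generator emits
 * REG_FIELD_SHIFT/_MASK/_WIDTH plus one define per named enum value.
 * E.g. for ID_AA64ISAR0_EL1.RNDR (bits 63:60):
 */
#define ID_AA64ISAR0_EL1_RNDR_SHIFT	60
#define ID_AA64ISAR0_EL1_RNDR_MASK	GENMASK_ULL(63, 60)
#define ID_AA64ISAR0_EL1_RNDR_WIDTH	4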
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--	arch/arm64/kernel/cpufeature.c	74
-rw-r--r--	arch/arm64/kernel/fpsimd.c	26
-rw-r--r--	arch/arm64/kernel/mte.c		15
-rw-r--r--	arch/arm64/kernel/ptrace.c	8
-rw-r--r--	arch/arm64/kernel/signal.c	14
-rw-r--r--	arch/arm64/kernel/syscall.c	4
6 files changed, 71 insertions(+), 70 deletions(-)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 619324b8bcef..665ad380c07f 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -191,20 +191,20 @@ static bool __system_matches_cap(unsigned int n);
  * sync with the documentation of the CPU feature register ABI.
  */
 static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RNDR_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TLB_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA3_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_RNDR_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_TLB_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_TS_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_FHM_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_DP_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SM4_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SM3_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SHA3_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_RDM_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_ATOMIC_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_CRC32_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SHA2_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SHA1_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_AES_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };
@@ -577,13 +577,13 @@ static const struct arm64_ftr_bits ftr_id_dfr1[] = {
 static const struct arm64_ftr_bits ftr_zcr[] = {
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
-		ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_SIZE, 0),	/* LEN */
+		ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_WIDTH, 0),	/* LEN */
 	ARM64_FTR_END,
 };

 static const struct arm64_ftr_bits ftr_smcr[] = {
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
-		SMCR_ELx_LEN_SHIFT, SMCR_ELx_LEN_SIZE, 0),	/* LEN */
+		SMCR_ELx_LEN_SHIFT, SMCR_ELx_LEN_WIDTH, 0),	/* LEN */
 	ARM64_FTR_END,
 };
@@ -2062,7 +2062,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 		.matches = has_cpuid_feature,
 		.sys_reg = SYS_ID_AA64ISAR0_EL1,
-		.field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
+		.field_pos = ID_AA64ISAR0_EL1_ATOMIC_SHIFT,
 		.field_width = 4,
 		.sign = FTR_UNSIGNED,
 		.min_field_value = 2,
@@ -2244,10 +2244,10 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 		.matches = has_cpuid_feature,
 		.sys_reg = SYS_ID_AA64ISAR0_EL1,
-		.field_pos = ID_AA64ISAR0_TLB_SHIFT,
+		.field_pos = ID_AA64ISAR0_EL1_TLB_SHIFT,
 		.field_width = 4,
 		.sign = FTR_UNSIGNED,
-		.min_field_value = ID_AA64ISAR0_TLB_RANGE,
+		.min_field_value = ID_AA64ISAR0_EL1_TLB_RANGE,
 	},
 #ifdef CONFIG_ARM64_HW_AFDBM
 	{
@@ -2276,7 +2276,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 		.matches = has_cpuid_feature,
 		.sys_reg = SYS_ID_AA64ISAR0_EL1,
-		.field_pos = ID_AA64ISAR0_CRC32_SHIFT,
+		.field_pos = ID_AA64ISAR0_EL1_CRC32_SHIFT,
 		.field_width = 4,
 		.min_field_value = 1,
 	},
@@ -2431,7 +2431,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 		.matches = has_cpuid_feature,
 		.sys_reg = SYS_ID_AA64ISAR0_EL1,
-		.field_pos = ID_AA64ISAR0_RNDR_SHIFT,
+		.field_pos = ID_AA64ISAR0_EL1_RNDR_SHIFT,
 		.field_width = 4,
 		.sign = FTR_UNSIGNED,
 		.min_field_value = 1,
@@ -2590,22 +2590,22 @@ static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = {
 #endif

 static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
-	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_PMULL),
-	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AES),
-	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA1),
-	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA2),
-	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_SHA512),
-	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_CRC32),
-	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ATOMICS),
-	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM),
-	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA3),
-	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM3),
-	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM4),
-	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDDP),
-	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM),
-	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM),
-	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2),
-	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RNDR_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG),
+	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_PMULL),
+	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AES),
+	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA1_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA1),
+	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA2_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA2),
+	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA2_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_SHA512),
+	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_CRC32_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_CRC32),
+	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_ATOMIC_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ATOMICS),
+	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_RDM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM),
+	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA3_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA3),
+	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SM3_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM3),
+	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SM4_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM4),
+	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_DP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDDP),
+	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_FHM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM),
+	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM),
+	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2),
+	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_RNDR_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG),
 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP),
 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP),
 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
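The renamed shifts above feed straight into the cpufeature framework. A hedged sketch of how such a field is read back at runtime, using helpers from arch/arm64/include/asm/cpufeature.h with signatures recalled from memory (the function name here is hypothetical):

#include <asm/cpufeature.h>
#include <asm/sysreg.h>

/* Sketch: check that ID_AA64ISAR0_EL1.ATOMIC (renamed from ATOMICS)
 * reports LSE atomics, i.e. a field value of at least 2. */
static bool cpu_has_lse_atomics_sketch(void)
{
	u64 isar0 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1);

	return cpuid_feature_extract_unsigned_field(isar0,
				ID_AA64ISAR0_EL1_ATOMIC_SHIFT) >= 2;
}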
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index a6eee3fa3448..c5677aa2e9e6 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -393,7 +393,7 @@ static void task_fpsimd_load(void)
 		if (test_thread_flag(TIF_SME))
 			sme_set_vq(sve_vq_from_vl(sme_vl) - 1);

-		write_sysreg_s(current->thread.svcr, SYS_SVCR_EL0);
+		write_sysreg_s(current->thread.svcr, SYS_SVCR);

 		if (thread_za_enabled(&current->thread))
 			za_load_state(current->thread.za_state);
@@ -445,15 +445,15 @@ static void fpsimd_save(void)
 	if (system_supports_sme()) {
 		u64 *svcr = last->svcr;
-		*svcr = read_sysreg_s(SYS_SVCR_EL0);
+		*svcr = read_sysreg_s(SYS_SVCR);

-		*svcr = read_sysreg_s(SYS_SVCR_EL0);
+		*svcr = read_sysreg_s(SYS_SVCR);

-		if (*svcr & SYS_SVCR_EL0_ZA_MASK)
+		if (*svcr & SVCR_ZA_MASK)
 			za_save_state(last->za_state);

 		/* If we are in streaming mode override regular SVE. */
-		if (*svcr & SYS_SVCR_EL0_SM_MASK) {
+		if (*svcr & SVCR_SM_MASK) {
 			save_sve_regs = true;
 			save_ffr = system_supports_fa64();
 			vl = last->sme_vl;
@@ -851,8 +851,8 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type,
 		sve_to_fpsimd(task);

 	if (system_supports_sme() && type == ARM64_VEC_SME) {
-		task->thread.svcr &= ~(SYS_SVCR_EL0_SM_MASK |
-				       SYS_SVCR_EL0_ZA_MASK);
+		task->thread.svcr &= ~(SVCR_SM_MASK |
+				       SVCR_ZA_MASK);
 		clear_thread_flag(TIF_SME);
 	}
@@ -1914,10 +1914,10 @@ void __efi_fpsimd_begin(void)
 			__this_cpu_write(efi_sve_state_used, true);

 			if (system_supports_sme()) {
-				svcr = read_sysreg_s(SYS_SVCR_EL0);
+				svcr = read_sysreg_s(SYS_SVCR);

 				if (!system_supports_fa64())
-					ffr = svcr & SYS_SVCR_EL0_SM_MASK;
+					ffr = svcr & SVCR_SM_MASK;

 				__this_cpu_write(efi_sm_state, ffr);
 			}
@@ -1927,8 +1927,8 @@ void __efi_fpsimd_begin(void)
 				       ffr);

 			if (system_supports_sme())
-				sysreg_clear_set_s(SYS_SVCR_EL0,
-						   SYS_SVCR_EL0_SM_MASK, 0);
+				sysreg_clear_set_s(SYS_SVCR,
+						   SVCR_SM_MASK, 0);

 		} else {
 			fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state));
@@ -1961,9 +1961,9 @@ void __efi_fpsimd_end(void)
 		 */
 		if (system_supports_sme()) {
 			if (__this_cpu_read(efi_sm_state)) {
-				sysreg_clear_set_s(SYS_SVCR_EL0,
+				sysreg_clear_set_s(SYS_SVCR,
 						   0,
-						   SYS_SVCR_EL0_SM_MASK);
+						   SVCR_SM_MASK);

 				if (!system_supports_fa64())
 					ffr = efi_sm_state;
 			}
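The SVCR rename drops both the SYS_ prefix and the misleading _EL0 suffix from the bitfield names. Per the SME architecture, SVCR.SM is bit 0 and SVCR.ZA is bit 1, so the new masks amount to the following (an illustrative sketch, not quoted from the generated header):

#include <linux/bits.h>

#define SVCR_SM_MASK	BIT(0)	/* streaming SVE mode active */
#define SVCR_ZA_MASK	BIT(1)	/* ZA array storage enabled */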
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 35697a09926f..bf526bd67b75 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -107,7 +107,8 @@ int memcmp_pages(struct page *page1, struct page *page2)
 static inline void __mte_enable_kernel(const char *mode, unsigned long tcf)
 {
 	/* Enable MTE Sync Mode for EL1. */
-	sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, tcf);
+	sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF_MASK,
+			 SYS_FIELD_PREP(SCTLR_EL1, TCF, tcf));
 	isb();

 	pr_info_once("MTE: enabled in %s mode at EL1\n", mode);
@@ -123,12 +124,12 @@ void mte_enable_kernel_sync(void)
 	WARN_ONCE(system_uses_mte_async_or_asymm_mode(),
 			"MTE async mode enabled system wide!");

-	__mte_enable_kernel("synchronous", SCTLR_ELx_TCF_SYNC);
+	__mte_enable_kernel("synchronous", SCTLR_EL1_TCF_SYNC);
 }

 void mte_enable_kernel_async(void)
 {
-	__mte_enable_kernel("asynchronous", SCTLR_ELx_TCF_ASYNC);
+	__mte_enable_kernel("asynchronous", SCTLR_EL1_TCF_ASYNC);

 	/*
 	 * MTE async mode is set system wide by the first PE that
@@ -145,7 +146,7 @@ void mte_enable_kernel_async(void)
 void mte_enable_kernel_asymm(void)
 {
 	if (cpus_have_cap(ARM64_MTE_ASYMM)) {
-		__mte_enable_kernel("asymmetric", SCTLR_ELx_TCF_ASYMM);
+		__mte_enable_kernel("asymmetric", SCTLR_EL1_TCF_ASYMM);

 		/*
 		 * MTE asymm mode behaves as async mode for store
@@ -217,11 +218,11 @@ static void mte_update_sctlr_user(struct task_struct *task)
 	 * default order.
 	 */
 	if (resolved_mte_tcf & MTE_CTRL_TCF_ASYMM)
-		sctlr |= SCTLR_EL1_TCF0_ASYMM;
+		sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, ASYMM);
 	else if (resolved_mte_tcf & MTE_CTRL_TCF_ASYNC)
-		sctlr |= SCTLR_EL1_TCF0_ASYNC;
+		sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, ASYNC);
 	else if (resolved_mte_tcf & MTE_CTRL_TCF_SYNC)
-		sctlr |= SCTLR_EL1_TCF0_SYNC;
+		sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, SYNC);
 	task->thread.sctlr_user = sctlr;
 }
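SYS_FIELD_PREP() and SYS_FIELD_PREP_ENUM() are the new helpers that pair with the generated masks. Paraphrased from memory (the real definitions live in sysreg.h in this series), they are thin wrappers around FIELD_PREP():

#include <linux/bitfield.h>

/* Paraphrased sketch of the helpers used in mte.c above: */
#define SYS_FIELD_PREP(reg, field, val)					\
	FIELD_PREP(reg##_##field##_MASK, val)
#define SYS_FIELD_PREP_ENUM(reg, field, val)				\
	FIELD_PREP(reg##_##field##_MASK, reg##_##field##_##val)

/* So SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, ASYNC) expands to
 * FIELD_PREP(SCTLR_EL1_TCF0_MASK, SCTLR_EL1_TCF0_ASYNC). */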
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 60ebc3060cf1..21da83187a60 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -867,10 +867,10 @@ static int sve_set_common(struct task_struct *target,
 		switch (type) {
 		case ARM64_VEC_SVE:
-			target->thread.svcr &= ~SYS_SVCR_EL0_SM_MASK;
+			target->thread.svcr &= ~SVCR_SM_MASK;
 			break;
 		case ARM64_VEC_SME:
-			target->thread.svcr |= SYS_SVCR_EL0_SM_MASK;
+			target->thread.svcr |= SVCR_SM_MASK;
 			break;
 		default:
 			WARN_ON_ONCE(1);
@@ -1100,7 +1100,7 @@ static int za_set(struct task_struct *target,
 	/* If there is no data then disable ZA */
 	if (!count) {
-		target->thread.svcr &= ~SYS_SVCR_EL0_ZA_MASK;
+		target->thread.svcr &= ~SVCR_ZA_MASK;
 		goto out;
 	}
@@ -1125,7 +1125,7 @@ static int za_set(struct task_struct *target,
 	/* Mark ZA as active and let userspace use it */
 	set_tsk_thread_flag(target, TIF_SME);
-	target->thread.svcr |= SYS_SVCR_EL0_ZA_MASK;
+	target->thread.svcr |= SVCR_ZA_MASK;

 out:
 	fpsimd_flush_task_state(target);
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 2295948d97fd..18bf590dc1c7 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -288,7 +288,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
 	if (sve.head.size <= sizeof(*user->sve)) {
 		clear_thread_flag(TIF_SVE);
-		current->thread.svcr &= ~SYS_SVCR_EL0_SM_MASK;
+		current->thread.svcr &= ~SVCR_SM_MASK;
 		goto fpsimd_only;
 	}
@@ -321,7 +321,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
 		return -EFAULT;

 	if (sve.flags & SVE_SIG_FLAG_SM)
-		current->thread.svcr |= SYS_SVCR_EL0_SM_MASK;
+		current->thread.svcr |= SVCR_SM_MASK;
 	else
 		set_thread_flag(TIF_SVE);
@@ -398,7 +398,7 @@ static int restore_za_context(struct user_ctxs __user *user)
 		return -EINVAL;

 	if (za.head.size <= sizeof(*user->za)) {
-		current->thread.svcr &= ~SYS_SVCR_EL0_ZA_MASK;
+		current->thread.svcr &= ~SVCR_ZA_MASK;
 		return 0;
 	}
@@ -419,7 +419,7 @@ static int restore_za_context(struct user_ctxs __user *user)
 	sme_alloc(current);
 	if (!current->thread.za_state) {
-		current->thread.svcr &= ~SYS_SVCR_EL0_ZA_MASK;
+		current->thread.svcr &= ~SVCR_ZA_MASK;
 		clear_thread_flag(TIF_SME);
 		return -ENOMEM;
 	}
@@ -432,7 +432,7 @@ static int restore_za_context(struct user_ctxs __user *user)
 		return -EFAULT;

 	set_thread_flag(TIF_SME);
-	current->thread.svcr |= SYS_SVCR_EL0_ZA_MASK;
+	current->thread.svcr |= SVCR_ZA_MASK;

 	return 0;
 }
@@ -922,8 +922,8 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
 	/* Signal handlers are invoked with ZA and streaming mode disabled */
 	if (system_supports_sme()) {
-		current->thread.svcr &= ~(SYS_SVCR_EL0_ZA_MASK |
-					  SYS_SVCR_EL0_SM_MASK);
+		current->thread.svcr &= ~(SVCR_ZA_MASK |
+					  SVCR_SM_MASK);
 		sme_smstop();
 	}
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index 92c69e5ac269..733451fe7e41 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -174,9 +174,9 @@ static inline void fp_user_discard(void)
 	 * need updating.
 	 */
 	if (system_supports_sme() && test_thread_flag(TIF_SME)) {
-		u64 svcr = read_sysreg_s(SYS_SVCR_EL0);
+		u64 svcr = read_sysreg_s(SYS_SVCR);

-		if (svcr & SYS_SVCR_EL0_SM_MASK)
+		if (svcr & SVCR_SM_MASK)
 			sme_smstop_sm();
 	}
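sysreg_clear_set_s(), used in the EFI paths in fpsimd.c above for registers addressed by encoding rather than by name, is roughly a read-modify-write that skips the write when nothing changes. A paraphrased sketch under that assumption (the macro name here carries a _sketch suffix to mark it as hypothetical, not the exact kernel macro):

/* Hypothetical illustration of sysreg_clear_set_s() semantics: */
#define sysreg_clear_set_s_sketch(sysreg, clear, set) do {		\
	u64 __v = read_sysreg_s(sysreg);				\
	u64 __n = (__v & ~(u64)(clear)) | (set);			\
	if (__n != __v)							\
		write_sysreg_s(__n, sysreg);				\
} while (0)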