author     Mark Brown                      2022-05-03 18:02:24 +0100
committer  Catalin Marinas                 2022-05-04 15:30:27 +0100
commit     bc249e37b9334826de29111c5350c3e7a08a3969 (patch)
tree       c5352e4a12f38ef68c15bab0d39d663f5c9d5217 /arch
parent     96f101a9eab479dbfbdf7713ba966f9031c9c045 (diff)
arm64/mte: Make TCF field values and naming more standard
In preparation for automatic generation of the defines for system registers,
make the values used for the enumeration in SCTLR_ELx.TCF suitable for use
with the newly defined SYS_FIELD_PREP_ENUM helper: remove the shift from the
defines and use the helper to apply it at each point of use instead. Since we
only ever interact with this field at EL1, and in preparation for generation
of the defines, also rename the constants from SCTLR_ELx to SCTLR_EL1.
SCTLR_EL2 is not quite the same as SCTLR_EL1, so the conversion does not
share the field definitions.
There should be no functional change from this patch.
Signed-off-by: Mark Brown <broonie@kernel.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Link: https://lore.kernel.org/r/20220503170233.507788-4-broonie@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
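
For illustration only, the self-contained userspace sketch below uses simplified stand-ins for the kernel's GENMASK()/FIELD_PREP() and for the SYS_FIELD_PREP()/SYS_FIELD_PREP_ENUM() helpers referenced above; the OLD_TCF_* names and the macro bodies are assumptions made for this sketch, not kernel code. It shows why keeping the shift only in the mask and applying it at the point of use yields the same register encoding as the old shifted defines.

/*
 * Illustrative userspace sketch only -- NOT kernel code.  GENMASK(),
 * FIELD_PREP() and the SYS_FIELD_PREP{,_ENUM}() helpers below are
 * simplified stand-ins (64-bit unsigned long and GCC/Clang builtins
 * assumed); the OLD_TCF_* names exist only for comparison.
 */
#include <assert.h>
#include <stdio.h>

#define UL(x)			(x##UL)
#define GENMASK(h, l)		((~UL(0) >> (63 - (h))) & (~UL(0) << (l)))
#define FIELD_PREP(mask, val)	(((unsigned long)(val) << __builtin_ctzl(mask)) & (mask))

/* Old style: the shift baked into every enumeration define. */
#define OLD_TCF_SHIFT		40
#define OLD_TCF_SYNC		(UL(0x1) << OLD_TCF_SHIFT)
#define OLD_TCF_ASYNC		(UL(0x2) << OLD_TCF_SHIFT)

/* New style: raw field values; only the mask carries the position. */
#define SCTLR_EL1_TCF_SYNC	UL(0x1)
#define SCTLR_EL1_TCF_ASYNC	UL(0x2)
#define SCTLR_EL1_TCF_MASK	GENMASK(41, 40)

/* Token-pasting helpers in the style of SYS_FIELD_PREP{,_ENUM}(). */
#define SYS_FIELD_PREP(reg, field, val) \
	FIELD_PREP(reg##_##field##_MASK, val)
#define SYS_FIELD_PREP_ENUM(reg, field, val) \
	FIELD_PREP(reg##_##field##_MASK, reg##_##field##_##val)

int main(void)
{
	/* The helpers re-create exactly the old shifted encodings. */
	assert(SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF, SYNC) == OLD_TCF_SYNC);
	assert(SYS_FIELD_PREP(SCTLR_EL1, TCF, SCTLR_EL1_TCF_ASYNC) == OLD_TCF_ASYNC);

	printf("TCF SYNC -> 0x%lx, TCF ASYNC -> 0x%lx\n",
	       SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF, SYNC),
	       SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF, ASYNC));
	return 0;
}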
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm64/include/asm/sysreg.h | 14
-rw-r--r--  arch/arm64/kernel/mte.c         |  9
-rw-r--r--  arch/arm64/mm/fault.c           |  3

3 files changed, 14 insertions, 12 deletions
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 6dc840be0268..732d84111d9f 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -631,13 +631,6 @@
 #define SCTLR_ELx_DSSBS	(BIT(44))
 #define SCTLR_ELx_ATA	(BIT(43))
 
-#define SCTLR_ELx_TCF_SHIFT	40
-#define SCTLR_ELx_TCF_NONE	(UL(0x0) << SCTLR_ELx_TCF_SHIFT)
-#define SCTLR_ELx_TCF_SYNC	(UL(0x1) << SCTLR_ELx_TCF_SHIFT)
-#define SCTLR_ELx_TCF_ASYNC	(UL(0x2) << SCTLR_ELx_TCF_SHIFT)
-#define SCTLR_ELx_TCF_ASYMM	(UL(0x3) << SCTLR_ELx_TCF_SHIFT)
-#define SCTLR_ELx_TCF_MASK	(UL(0x3) << SCTLR_ELx_TCF_SHIFT)
-
 #define SCTLR_ELx_ENIA_SHIFT	31
 
 #define SCTLR_ELx_ITFSB	(BIT(37))
@@ -677,6 +670,13 @@
 #define SCTLR_EL1_EPAN		(BIT(57))
 #define SCTLR_EL1_ATA0		(BIT(42))
 
+#define SCTLR_EL1_TCF_SHIFT	40
+#define SCTLR_EL1_TCF_NONE	(UL(0x0))
+#define SCTLR_EL1_TCF_SYNC	(UL(0x1))
+#define SCTLR_EL1_TCF_ASYNC	(UL(0x2))
+#define SCTLR_EL1_TCF_ASYMM	(UL(0x3))
+#define SCTLR_EL1_TCF_MASK	(UL(0x3) << SCTLR_EL1_TCF_SHIFT)
+
 #define SCTLR_EL1_TCF0_SHIFT	38
 #define SCTLR_EL1_TCF0_NONE	(UL(0x0))
 #define SCTLR_EL1_TCF0_SYNC	(UL(0x1))
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 41469b69a48e..98f5e1e13c36 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -106,7 +106,8 @@ int memcmp_pages(struct page *page1, struct page *page2)
 static inline void __mte_enable_kernel(const char *mode, unsigned long tcf)
 {
 	/* Enable MTE Sync Mode for EL1. */
-	sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, tcf);
+	sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF_MASK,
+			 SYS_FIELD_PREP(SCTLR_EL1, TCF, tcf));
 	isb();
 
 	pr_info_once("MTE: enabled in %s mode at EL1\n", mode);
@@ -122,12 +123,12 @@ void mte_enable_kernel_sync(void)
 	WARN_ONCE(system_uses_mte_async_or_asymm_mode(),
 			"MTE async mode enabled system wide!");
 
-	__mte_enable_kernel("synchronous", SCTLR_ELx_TCF_SYNC);
+	__mte_enable_kernel("synchronous", SCTLR_EL1_TCF_SYNC);
 }
 
 void mte_enable_kernel_async(void)
 {
-	__mte_enable_kernel("asynchronous", SCTLR_ELx_TCF_ASYNC);
+	__mte_enable_kernel("asynchronous", SCTLR_EL1_TCF_ASYNC);
 
 	/*
 	 * MTE async mode is set system wide by the first PE that
@@ -144,7 +145,7 @@ void mte_enable_kernel_async(void)
 void mte_enable_kernel_asymm(void)
 {
 	if (cpus_have_cap(ARM64_MTE_ASYMM)) {
-		__mte_enable_kernel("asymmetric", SCTLR_ELx_TCF_ASYMM);
+		__mte_enable_kernel("asymmetric", SCTLR_EL1_TCF_ASYMM);
 
 		/*
 		 * MTE asymm mode behaves as async mode for store
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 77341b160aca..5e280cc566ca 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -335,7 +335,8 @@ static void do_tag_recovery(unsigned long addr, unsigned int esr,
 	 * It will be done lazily on the other CPUs when they will hit a
 	 * tag fault.
 	 */
-	sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, SCTLR_ELx_TCF_NONE);
+	sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF_MASK,
+			 SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF, NONE));
 	isb();
 }
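
As a design note, the new SCTLR_EL1_TCF_* values follow the same unshifted convention already used by the neighbouring SCTLR_EL1_TCF0_* defines visible in the context above; only SCTLR_EL1_TCF_MASK keeps an explicit shift, and the shift is reapplied at each use site via SYS_FIELD_PREP() in mte.c and SYS_FIELD_PREP_ENUM() in fault.c.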