diff options
author | Tom Rini | 2021-09-03 10:40:28 -0400 |
---|---|---|
committer | Tom Rini | 2021-09-03 10:42:15 -0400 |
commit | 00179319714fd2076cf81f49de357ee699672f31 (patch) | |
tree | 995c59d4b9b004e506b6e644b866c73f71b502ff /arch/arm | |
parent | b35be5ed42c8453ac95432b6fbc0d42b1e91c758 (diff) |
Revert most of the series for adding vexpress_aemv8r support
Per a request from Andre Przywara, agreed to by Peter Hoyes, the
vexpress aemv8r support wasn't quite ready to be merged, but the
discussion had moved off list. We should keep the first patch in the
series for now, but revert the rest. This reverts the following
commits:
e0bd6f31ce41 doc: Add documentation for the Arm vexpress board configs
30e5a449e8c7 arm: Use armv8_switch_to_el1 env to switch to EL1
b53bbca63bf4 vexpress64: Add BASER_FVP vexpress board variant
2f5b7b74903f armv8: Add ARMv8 MPU configuration logic
37a757e227cc armv8: Ensure EL1&0 VMSA is enabled
Signed-off-by: Tom Rini <trini@konsulko.com>
Diffstat (limited to 'arch/arm')
-rw-r--r-- | arch/arm/Kconfig | 7 | ||||
-rw-r--r-- | arch/arm/cpu/armv8/cache_v8.c | 96 | ||||
-rw-r--r-- | arch/arm/include/asm/armv8/mpu.h | 61 | ||||
-rw-r--r-- | arch/arm/include/asm/macro.h | 17 | ||||
-rw-r--r-- | arch/arm/include/asm/system.h | 24 | ||||
-rw-r--r-- | arch/arm/lib/bootm.c | 40 |
6 files changed, 18 insertions(+), 227 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 47f094514b0..50efb5e2e2f 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1185,13 +1185,6 @@ config TARGET_VEXPRESS64_BASE_FVP select PL01X_SERIAL select SEMIHOSTING -config TARGET_VEXPRESS64_BASER_FVP - bool "Support Versatile Express ARMv8r64 FVP BASE model" - select ARM64 - select DM - select DM_SERIAL - select PL01X_SERIAL - config TARGET_VEXPRESS64_JUNO bool "Support Versatile Express Juno Development Platform" select ARM64 diff --git a/arch/arm/cpu/armv8/cache_v8.c b/arch/arm/cpu/armv8/cache_v8.c index 46625675bdd..3de18c7675b 100644 --- a/arch/arm/cpu/armv8/cache_v8.c +++ b/arch/arm/cpu/armv8/cache_v8.c @@ -15,7 +15,6 @@ #include <asm/global_data.h> #include <asm/system.h> #include <asm/armv8/mmu.h> -#include <asm/armv8/mpu.h> DECLARE_GLOBAL_DATA_PTR; @@ -366,86 +365,6 @@ __weak u64 get_page_table_size(void) return size; } -static void mpu_clear_regions(void) -{ - int i; - - for (i = 0; mpu_mem_map[i].end || mpu_mem_map[i].attrs; i++) { - setup_el2_mpu_region(i, 0, 0); - } -} - -static struct mpu_region default_mpu_mem_map[] = {{0,}}; -__weak struct mpu_region *mpu_mem_map = default_mpu_mem_map; - -static void mpu_setup(void) -{ - int i; - - if (current_el() != 2) { - panic("MPU configuration is only supported at EL2"); - } - - set_sctlr(get_sctlr() & ~(CR_M | CR_WXN)); - - asm volatile("msr MAIR_EL2, %0" : : "r" MEMORY_ATTRIBUTES); - - for (i = 0; mpu_mem_map[i].end || mpu_mem_map[i].attrs; i++) { - setup_el2_mpu_region(i, - PRBAR_ADDRESS(mpu_mem_map[i].start) - | PRBAR_OUTER_SH | PRBAR_AP_RW_ANY, - PRLAR_ADDRESS(mpu_mem_map[i].end) - | mpu_mem_map[i].attrs | PRLAR_EN_BIT - ); - } - - set_sctlr(get_sctlr() | CR_M); -} - -static bool el_has_mmu(void) -{ - uint64_t id_aa64mmfr0; - asm volatile("mrs %0, id_aa64mmfr0_el1" - : "=r" (id_aa64mmfr0) : : "cc"); - uint64_t msa = id_aa64mmfr0 & ID_AA64MMFR0_EL1_MSA_MASK; - uint64_t msa_frac = id_aa64mmfr0 & ID_AA64MMFR0_EL1_MSA_FRAC_MASK; - - switch (msa) 
{ - case ID_AA64MMFR0_EL1_MSA_VMSA: - /* - * VMSA supported in all translation regimes. - * No support for PMSA. - */ - return true; - case ID_AA64MMFR0_EL1_MSA_USE_FRAC: - /* See MSA_frac for the supported MSAs. */ - switch (msa_frac) { - case ID_AA64MMFR0_EL1_MSA_FRAC_NO_PMSA: - /* - * PMSA not supported in any translation - * regime. - */ - return true; - case ID_AA64MMFR0_EL1_MSA_FRAC_VMSA: - /* - * PMSA supported in all translation - * regimes. No support for VMSA. - */ - case ID_AA64MMFR0_EL1_MSA_FRAC_PMSA: - /* - * PMSA supported in all translation - * regimes. - */ - return false; - default: - panic("Unsupported id_aa64mmfr0_el1 " \ - "MSA_frac value"); - } - default: - panic("Unsupported id_aa64mmfr0_el1 MSA value"); - } -} - void setup_pgtables(void) { int i; @@ -560,13 +479,8 @@ void dcache_enable(void) /* The data cache is not active unless the mmu is enabled */ if (!(get_sctlr() & CR_M)) { invalidate_dcache_all(); - - if (el_has_mmu()) { - __asm_invalidate_tlb_all(); - mmu_setup(); - } else { - mpu_setup(); - } + __asm_invalidate_tlb_all(); + mmu_setup(); } set_sctlr(get_sctlr() | CR_C); @@ -585,11 +499,7 @@ void dcache_disable(void) set_sctlr(sctlr & ~(CR_C|CR_M)); flush_dcache_all(); - - if (el_has_mmu()) - __asm_invalidate_tlb_all(); - else - mpu_clear_regions(); + __asm_invalidate_tlb_all(); } int dcache_status(void) diff --git a/arch/arm/include/asm/armv8/mpu.h b/arch/arm/include/asm/armv8/mpu.h deleted file mode 100644 index c6c8828325e..00000000000 --- a/arch/arm/include/asm/armv8/mpu.h +++ /dev/null @@ -1,61 +0,0 @@ -/* - * SPDX-License-Identifier: GPL-2.0+ - * - * (C) Copyright 2021 Arm Limited - */ - -#ifndef _ASM_ARMV8_MPU_H_ -#define _ASM_ARMV8_MPU_H_ - -#include <asm/armv8/mmu.h> -#include <asm/barriers.h> -#include <linux/stringify.h> - -#define PRSELR_EL2 S3_4_c6_c2_1 -#define PRBAR_EL2 S3_4_c6_c8_0 -#define PRLAR_EL2 S3_4_c6_c8_1 -#define MPUIR_EL2 S3_4_c0_c0_4 - -#define PRBAR_ADDRESS(addr) ((addr) & ~(0x3fULL)) - -/* Access 
permissions */ -#define PRBAR_AP(val) (((val) & 0x3) << 2) -#define PRBAR_AP_RW_HYP PRBAR_AP(0x0) -#define PRBAR_AP_RW_ANY PRBAR_AP(0x1) -#define PRBAR_AP_RO_HYP PRBAR_AP(0x2) -#define PRBAR_AP_RO_ANY PRBAR_AP(0x3) - -/* Shareability */ -#define PRBAR_SH(val) (((val) & 0x3) << 4) -#define PRBAR_NON_SH PRBAR_SH(0x0) -#define PRBAR_OUTER_SH PRBAR_SH(0x2) -#define PRBAR_INNER_SH PRBAR_SH(0x3) - -/* Memory attribute (MAIR idx) */ -#define PRLAR_ATTRIDX(val) (((val) & 0x7) << 1) -#define PRLAR_EN_BIT (0x1) -#define PRLAR_ADDRESS(addr) ((addr) & ~(0x3fULL)) - -#ifndef __ASSEMBLY__ - -static inline void setup_el2_mpu_region(uint8_t region, uint64_t base, uint64_t limit) -{ - asm volatile("msr " __stringify(PRSELR_EL2) ", %0" : : "r" (region)); - isb(); - asm volatile("msr " __stringify(PRBAR_EL2) ", %0" : : "r" (base)); - asm volatile("msr " __stringify(PRLAR_EL2) ", %0" : : "r" (limit)); - dsb(); - isb(); -} - -#endif - -struct mpu_region { - u64 start; - u64 end; - u64 attrs; -}; - -extern struct mpu_region *mpu_mem_map; - -#endif /* _ASM_ARMV8_MPU_H_ */ diff --git a/arch/arm/include/asm/macro.h b/arch/arm/include/asm/macro.h index ecd8221c0dd..e1eefc283f9 100644 --- a/arch/arm/include/asm/macro.h +++ b/arch/arm/include/asm/macro.h @@ -316,23 +316,6 @@ lr .req x30 csel \tmp, \tmp2, \tmp, eq msr hcr_el2, \tmp - /* - * Detect whether the system has a configurable memory system - * architecture at EL1&0 - */ - mrs \tmp, id_aa64mmfr0_el1 - lsr \tmp, \tmp, #48 - and \tmp, \tmp, #((ID_AA64MMFR0_EL1_MSA_MASK | \ - ID_AA64MMFR0_EL1_MSA_FRAC_MASK) >> 48) - cmp \tmp, #((ID_AA64MMFR0_EL1_MSA_USE_FRAC | \ - ID_AA64MMFR0_EL1_MSA_FRAC_VMSA) >> 48) - bne 2f - - /* Ensure the EL1&0 VMSA is enabled */ - mov \tmp, #(VTCR_EL2_MSA) - msr vtcr_el2, \tmp -2: - /* Return to the EL1_SP1 mode from EL2 */ ldr \tmp, =(SPSR_EL_DEBUG_MASK | SPSR_EL_SERR_MASK |\ SPSR_EL_IRQ_MASK | SPSR_EL_FIQ_MASK |\ diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index 
1ec6237320b..f75eea16b36 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -84,30 +84,6 @@ #define HCR_EL2_HCD_DIS (1 << 29) /* Hypervisor Call disabled */ /* - * VTCR_EL2 bits definitions - */ -#define VTCR_EL2_MSA (1 << 31) /* EL1&0 memory architecture */ - -/* - * ID_AA64MMFR0_EL1 bits definitions - */ -#define ID_AA64MMFR0_EL1_MSA_FRAC_MASK (0xFUL << 52) /* Memory system - architecture - frac */ -#define ID_AA64MMFR0_EL1_MSA_FRAC_VMSA (0x2UL << 52) /* EL1&0 supports - VMSA */ -#define ID_AA64MMFR0_EL1_MSA_FRAC_PMSA (0x1UL << 52) /* EL1&0 only - supports PMSA*/ -#define ID_AA64MMFR0_EL1_MSA_FRAC_NO_PMSA (0x0UL << 52) /* No PMSA - support */ -#define ID_AA64MMFR0_EL1_MSA_MASK (0xFUL << 48) /* Memory system - architecture */ -#define ID_AA64MMFR0_EL1_MSA_USE_FRAC (0xFUL << 48) /* Use MSA_FRAC */ -#define ID_AA64MMFR0_EL1_MSA_VMSA (0x0UL << 48) /* Memory system - architecture - is VMSA */ - -/* * ID_AA64ISAR1_EL1 bits definitions */ #define ID_AA64ISAR1_EL1_GPI (0xF << 28) /* Implementation-defined generic diff --git a/arch/arm/lib/bootm.c b/arch/arm/lib/bootm.c index ea9bfe7570f..f60ee3a7e6a 100644 --- a/arch/arm/lib/bootm.c +++ b/arch/arm/lib/bootm.c @@ -317,6 +317,7 @@ __weak void update_os_arch_secondary_cores(uint8_t os_arch) { } +#ifdef CONFIG_ARMV8_SWITCH_TO_EL1 static void switch_to_el1(void) { if ((IH_ARCH_DEFAULT == IH_ARCH_ARM64) && @@ -331,6 +332,7 @@ static void switch_to_el1(void) ES_TO_AARCH64); } #endif +#endif /* Subcommand: GO */ static void boot_jump_linux(bootm_headers_t *images, int flag) @@ -357,33 +359,21 @@ static void boot_jump_linux(bootm_headers_t *images, int flag) update_os_arch_secondary_cores(images->os.arch); -#ifdef CONFIG_ARMV8_MULTIENTRY - int armv8_switch_to_el1 = -1; -#else - int armv8_switch_to_el1 = env_get_yesno("armv8_switch_to_el1"); -#endif #ifdef CONFIG_ARMV8_SWITCH_TO_EL1 - if (armv8_switch_to_el1 == -1) { - armv8_switch_to_el1 = 1; - } -#endif - if (armv8_switch_to_el1 == 1) { + 
armv8_switch_to_el2((u64)images->ft_addr, 0, 0, 0, + (u64)switch_to_el1, ES_TO_AARCH64); +#else + if ((IH_ARCH_DEFAULT == IH_ARCH_ARM64) && + (images->os.arch == IH_ARCH_ARM)) + armv8_switch_to_el2(0, (u64)gd->bd->bi_arch_number, + (u64)images->ft_addr, 0, + (u64)images->ep, + ES_TO_AARCH32); + else armv8_switch_to_el2((u64)images->ft_addr, 0, 0, 0, - (u64)switch_to_el1, ES_TO_AARCH64); - } else { - if ((IH_ARCH_DEFAULT == IH_ARCH_ARM64) && - (images->os.arch == IH_ARCH_ARM)) - armv8_switch_to_el2(0, - (u64)gd->bd->bi_arch_number, - (u64)images->ft_addr, 0, - (u64)images->ep, - ES_TO_AARCH32); - else - armv8_switch_to_el2((u64)images->ft_addr, - 0, 0, 0, - images->ep, - ES_TO_AARCH64); - } + images->ep, + ES_TO_AARCH64); +#endif } #else unsigned long machid = gd->bd->bi_arch_number; |