author     Catalin Marinas   2022-09-30 09:18:22 +0100
committer  Catalin Marinas   2022-09-30 09:18:22 +0100
commit     c704cf27a1adc5fa40fb8e40b5617bdca889a419
tree       47d4dc5c6a938ca90e370dc45767ce3971d49616 /arch/arm64
parent     c397623262fada5df2f92a861000957c36f330c1
parent     ba00c2a04fa5431d204a4183062b30372c062aa7
Merge branch 'for-next/alternatives' into for-next/core
* for-next/alternatives:
  : Alternatives (code patching) improvements
  arm64: fix the build with binutils 2.27
  arm64: avoid BUILD_BUG_ON() in alternative-macros
  arm64: alternatives: add shared NOP callback
  arm64: alternatives: add alternative_has_feature_*()
  arm64: alternatives: have callbacks take a cap
  arm64: alternatives: make alt_region const
  arm64: alternatives: hoist print out of __apply_alternatives()
  arm64: alternatives: proton-pack: prepare for cap changes
  arm64: alternatives: kvm: prepare for cap changes
  arm64: cpufeature: make cpus_have_cap() noinstr-safe
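
The common thread in this series is a new encoding for alternatives. The old scheme reserved a pseudo-capability, ARM64_CB_PATCH (equal to ARM64_NCAPS), to mark callback-patched sequences, so a callback could not also be gated on a real capability. Now every .altinstructions entry carries a genuine cap index in bits 14:0 of its 16-bit cpufeature field, and bit 15 (ARM64_CB_BIT) flags that the replacement is generated by a callback; the new always-detected ARM64_ALWAYS_SYSTEM cap reproduces the old unconditional behaviour. A minimal standalone sketch of the encoding, modelled on the ALT_CAP()/ALT_HAS_CB() helpers this merge adds to kernel/alternative.c (the kernel versions take a struct alt_instr pointer; these take the raw field, and the cap value 42 is a made-up stand-in for a real cpucaps index):

    #include <stdio.h>

    #define ARM64_CB_SHIFT  15
    #define ARM64_CB_BIT    (1u << ARM64_CB_SHIFT)

    #define ALT_CAP(feat)     ((feat) & ~ARM64_CB_BIT)  /* cap index in bits 14:0 */
    #define ALT_HAS_CB(feat)  ((feat) & ARM64_CB_BIT)   /* callback flag in bit 15 */

    int main(void)
    {
            unsigned int cap = 42;                        /* hypothetical cap index */
            unsigned int cpufeature = ARM64_CB_BIT | cap; /* as ALTERNATIVE_CB() now emits */

            printf("cap=%u callback=%s\n",
                   ALT_CAP(cpufeature), ALT_HAS_CB(cpufeature) ? "yes" : "no");
            return 0;
    }

Since caps now share the field with the callback flag, the series also adds a compile-time check that ARM64_NCAPS stays below ARM64_CB_BIT.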
Diffstat (limited to 'arch/arm64')
 arch/arm64/include/asm/alternative-macros.h | 66
 arch/arm64/include/asm/assembler.h          | 10
 arch/arm64/include/asm/cpufeature.h         | 15
 arch/arm64/include/asm/kvm_mmu.h            |  5
 arch/arm64/include/asm/lse.h                |  5
 arch/arm64/kernel/alternative.c             | 70
 arch/arm64/kernel/cpufeature.c              | 44
 arch/arm64/kernel/entry.S                   |  8
 arch/arm64/kernel/image-vars.h              |  5
 arch/arm64/kernel/proton-pack.c             |  2
 arch/arm64/kvm/hyp/hyp-entry.S              |  4
 arch/arm64/kvm/va_layout.c                  |  5
 arch/arm64/tools/cpucaps                    |  2
 13 files changed, 146 insertions(+), 95 deletions(-)
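
For orientation when reading the hunks below: each alternative is a small record emitted into the .altinstructions section by the altinstruction_entry macro (two self-relative 32-bit offsets, the 16-bit cpufeature word, and two length bytes). A C sketch of that record and of the offset decoding, with field names following the kernel's struct alt_instr; the decode helpers mirror what the kernel's __ALT_PTR() does and are illustrative, not copied from the patch:

    #include <stdint.h>

    struct alt_instr {
            int32_t  orig_offset;   /* .word  \orig_offset - .              */
            int32_t  alt_offset;    /* .word  \alt_offset - .               */
            uint16_t cpufeature;    /* .hword (\feature); bit 15 = callback */
            uint8_t  orig_len;      /* .byte  \orig_len                     */
            uint8_t  alt_len;       /* .byte  \alt_len (0 for callbacks)    */
    };

    /* Offsets are relative to their own location, so resolving one means
     * adding it to the address of the field it was stored in. */
    static inline void *alt_orig_ptr(struct alt_instr *a)
    {
            return (char *)&a->orig_offset + a->orig_offset;
    }

    static inline void *alt_repl_ptr(struct alt_instr *a)
    {
            return (char *)&a->alt_offset + a->alt_offset;
    }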
diff --git a/arch/arm64/include/asm/alternative-macros.h b/arch/arm64/include/asm/alternative-macros.h
index 7e157ab6cd50..b5ac0b85c9bb 100644
--- a/arch/arm64/include/asm/alternative-macros.h
+++ b/arch/arm64/include/asm/alternative-macros.h
@@ -2,10 +2,22 @@
 #ifndef __ASM_ALTERNATIVE_MACROS_H
 #define __ASM_ALTERNATIVE_MACROS_H
 
+#include <linux/bits.h>
+#include <linux/const.h>
+
 #include <asm/cpucaps.h>
 #include <asm/insn-def.h>
 
-#define ARM64_CB_PATCH ARM64_NCAPS
+/*
+ * Binutils 2.27.0 can't handle a 'UL' suffix on constants, so for the assembly
+ * macros below we must use `(1 << ARM64_CB_SHIFT)`.
+ */
+#define ARM64_CB_SHIFT  15
+#define ARM64_CB_BIT    BIT(ARM64_CB_SHIFT)
+
+#if ARM64_NCAPS >= ARM64_CB_BIT
+#error "cpucaps have overflown ARM64_CB_BIT"
+#endif
 
 #ifndef __ASSEMBLY__
 
@@ -73,8 +85,8 @@
 #define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \
         __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
 
-#define ALTERNATIVE_CB(oldinstr, cb) \
-        __ALTERNATIVE_CFG_CB(oldinstr, ARM64_CB_PATCH, 1, cb)
+#define ALTERNATIVE_CB(oldinstr, feature, cb) \
+        __ALTERNATIVE_CFG_CB(oldinstr, (1 << ARM64_CB_SHIFT) | (feature), 1, cb)
 #else
 
 #include <asm/assembler.h>
@@ -82,7 +94,7 @@
 .macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
         .word \orig_offset - .
         .word \alt_offset - .
-        .hword \feature
+        .hword (\feature)
         .byte \orig_len
         .byte \alt_len
 .endm
@@ -141,10 +153,10 @@
 661:
 .endm
 
-.macro alternative_cb cb
+.macro alternative_cb cap, cb
         .set .Lasm_alt_mode, 0
         .pushsection .altinstructions, "a"
-        altinstruction_entry 661f, \cb, ARM64_CB_PATCH, 662f-661f, 0
+        altinstruction_entry 661f, \cb, (1 << ARM64_CB_SHIFT) | \cap, 662f-661f, 0
         .popsection
 661:
 .endm
@@ -207,4 +219,46 @@ alternative_endif
 #define ALTERNATIVE(oldinstr, newinstr, ...) \
         _ALTERNATIVE_CFG(oldinstr, newinstr, __VA_ARGS__, 1)
 
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+static __always_inline bool
+alternative_has_feature_likely(unsigned long feature)
+{
+        compiletime_assert(feature < ARM64_NCAPS,
+                           "feature must be < ARM64_NCAPS");
+
+        asm_volatile_goto(
+        ALTERNATIVE_CB("b %l[l_no]", %[feature], alt_cb_patch_nops)
+        :
+        : [feature] "i" (feature)
+        :
+        : l_no);
+
+        return true;
+l_no:
+        return false;
+}
+
+static __always_inline bool
+alternative_has_feature_unlikely(unsigned long feature)
+{
+        compiletime_assert(feature < ARM64_NCAPS,
+                           "feature must be < ARM64_NCAPS");
+
+        asm_volatile_goto(
+        ALTERNATIVE("nop", "b %l[l_yes]", %[feature])
+        :
+        : [feature] "i" (feature)
+        :
+        : l_yes);
+
+        return false;
+l_yes:
+        return true;
+}
+
+#endif /* __ASSEMBLY__ */
+
 #endif /* __ASM_ALTERNATIVE_MACROS_H */
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index cf8e72e733de..e5957a53be39 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -293,7 +293,7 @@ alternative_endif
 alternative_if_not ARM64_KVM_PROTECTED_MODE
         ASM_BUG()
 alternative_else_nop_endif
-alternative_cb kvm_compute_final_ctr_el0
+alternative_cb ARM64_ALWAYS_SYSTEM, kvm_compute_final_ctr_el0
         movz    \reg, #0
         movk    \reg, #0, lsl #16
         movk    \reg, #0, lsl #32
@@ -877,7 +877,7 @@ alternative_endif
 
         .macro __mitigate_spectre_bhb_loop      tmp
 #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
-alternative_cb  spectre_bhb_patch_loop_iter
+alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_loop_iter
         mov     \tmp, #32               // Patched to correct the immediate
 alternative_cb_end
 .Lspectre_bhb_loop\@:
@@ -890,7 +890,7 @@ alternative_cb_end
 
         .macro mitigate_spectre_bhb_loop        tmp
 #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
-alternative_cb  spectre_bhb_patch_loop_mitigation_enable
+alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_loop_mitigation_enable
         b       .L_spectre_bhb_loop_done\@      // Patched to NOP
 alternative_cb_end
         __mitigate_spectre_bhb_loop     \tmp
@@ -904,7 +904,7 @@ alternative_cb_end
         stp     x0, x1, [sp, #-16]!
         stp     x2, x3, [sp, #-16]!
         mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_3
-alternative_cb  smccc_patch_fw_mitigation_conduit
+alternative_cb ARM64_ALWAYS_SYSTEM, smccc_patch_fw_mitigation_conduit
         nop                                     // Patched to SMC/HVC #0
 alternative_cb_end
         ldp     x2, x3, [sp], #16
@@ -914,7 +914,7 @@ alternative_cb_end
 
         .macro mitigate_spectre_bhb_clear_insn
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
-alternative_cb  spectre_bhb_patch_clearbhb
+alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_clearbhb
         /* Patched to NOP when not supported */
         clearbhb
         isb
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index a08672286b4c..f73f11b55042 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -6,6 +6,7 @@
 #ifndef __ASM_CPUFEATURE_H
 #define __ASM_CPUFEATURE_H
 
+#include <asm/alternative-macros.h>
 #include <asm/cpucaps.h>
 #include <asm/cputype.h>
 #include <asm/hwcap.h>
@@ -419,12 +420,8 @@ static __always_inline bool is_hyp_code(void)
 }
 
 extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
-extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
-extern struct static_key_false arm64_const_caps_ready;
-/* ARM64 CAPS + alternative_cb */
-#define ARM64_NPATCHABLE (ARM64_NCAPS + 1)
-extern DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
+extern DECLARE_BITMAP(boot_capabilities, ARM64_NCAPS);
 
 #define for_each_available_cap(cap) \
         for_each_set_bit(cap, cpu_hwcaps, ARM64_NCAPS)
 
@@ -440,7 +437,7 @@ unsigned long cpu_get_elf_hwcap2(void);
 
 static __always_inline bool system_capabilities_finalized(void)
 {
-        return static_branch_likely(&arm64_const_caps_ready);
+        return alternative_has_feature_likely(ARM64_ALWAYS_SYSTEM);
 }
 
 /*
@@ -448,11 +445,11 @@ static __always_inline bool system_capabilities_finalized(void)
  *
  * Before the capability is detected, this returns false.
  */
-static inline bool cpus_have_cap(unsigned int num)
+static __always_inline bool cpus_have_cap(unsigned int num)
 {
         if (num >= ARM64_NCAPS)
                 return false;
-        return test_bit(num, cpu_hwcaps);
+        return arch_test_bit(num, cpu_hwcaps);
 }
 
 /*
@@ -467,7 +464,7 @@ static __always_inline bool __cpus_have_const_cap(int num)
 {
         if (num >= ARM64_NCAPS)
                 return false;
-        return static_branch_unlikely(&cpu_hwcap_keys[num]);
+        return alternative_has_feature_unlikely(num);
 }
 
 /*
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index b208da3bebec..7784081088e7 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -63,7 +63,7 @@
  * specific registers encoded in the instructions).
  */
 .macro kern_hyp_va      reg
-alternative_cb kvm_update_va_mask
+alternative_cb ARM64_ALWAYS_SYSTEM, kvm_update_va_mask
         and     \reg, \reg, #1          /* mask with va_mask */
         ror     \reg, \reg, #1          /* rotate to the first tag bit */
         add     \reg, \reg, #0          /* insert the low 12 bits of the tag */
@@ -97,7 +97,7 @@ alternative_cb_end
         hyp_pa  \reg, \tmp
 
         /* Load kimage_voffset. */
-alternative_cb kvm_get_kimage_voffset
+alternative_cb ARM64_ALWAYS_SYSTEM, kvm_get_kimage_voffset
         movz    \tmp, #0
         movk    \tmp, #0, lsl #16
         movk    \tmp, #0, lsl #32
@@ -131,6 +131,7 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
                                     "add %0, %0, #0\n"
                                     "add %0, %0, #0, lsl 12\n"
                                     "ror %0, %0, #63\n",
+                                    ARM64_ALWAYS_SYSTEM,
                                     kvm_update_va_mask)
                      : "+r" (v));
         return v;
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
index 29c85810ae69..c503db8e73b0 100644
--- a/arch/arm64/include/asm/lse.h
+++ b/arch/arm64/include/asm/lse.h
@@ -13,14 +13,13 @@
 #include <linux/jump_label.h>
 #include <linux/stringify.h>
 #include <asm/alternative.h>
+#include <asm/alternative-macros.h>
 #include <asm/atomic_lse.h>
 #include <asm/cpucaps.h>
 
-extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
-
 static __always_inline bool system_uses_lse_atomics(void)
 {
-        return static_branch_likely(&cpu_hwcap_keys[ARM64_HAS_LSE_ATOMICS]);
+        return alternative_has_feature_likely(ARM64_HAS_LSE_ATOMICS);
 }
 
 #define __lse_ll_sc_body(op, ...) \
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index a97775963f35..64045e3ef03a 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -24,6 +24,9 @@
 #define ALT_ORIG_PTR(a)         __ALT_PTR(a, orig_offset)
 #define ALT_REPL_PTR(a)         __ALT_PTR(a, alt_offset)
 
+#define ALT_CAP(a)              ((a)->cpufeature & ~ARM64_CB_BIT)
+#define ALT_HAS_CB(a)           ((a)->cpufeature & ARM64_CB_BIT)
+
 /* Volatile, as we may be patching the guts of READ_ONCE() */
 static volatile int all_alternatives_applied;
 
@@ -136,8 +139,9 @@ static void clean_dcache_range_nopatch(u64 start, u64 end)
         } while (cur += d_size, cur < end);
 }
 
-static void __nocfi __apply_alternatives(struct alt_region *region, bool is_module,
-                                         unsigned long *feature_mask)
+static void __nocfi __apply_alternatives(const struct alt_region *region,
+                                         bool is_module,
+                                         unsigned long *feature_mask)
 {
         struct alt_instr *alt;
         __le32 *origptr, *updptr;
@@ -145,30 +149,27 @@ static void __nocfi __apply_alternatives(struct alt_region *region, bool is_modu
 
         for (alt = region->begin; alt < region->end; alt++) {
                 int nr_inst;
+                int cap = ALT_CAP(alt);
 
-                if (!test_bit(alt->cpufeature, feature_mask))
+                if (!test_bit(cap, feature_mask))
                         continue;
 
-                /* Use ARM64_CB_PATCH as an unconditional patch */
-                if (alt->cpufeature < ARM64_CB_PATCH &&
-                    !cpus_have_cap(alt->cpufeature))
+                if (!cpus_have_cap(cap))
                         continue;
 
-                if (alt->cpufeature == ARM64_CB_PATCH)
+                if (ALT_HAS_CB(alt))
                         BUG_ON(alt->alt_len != 0);
                 else
                         BUG_ON(alt->alt_len != alt->orig_len);
 
-                pr_info_once("patching kernel code\n");
-
                 origptr = ALT_ORIG_PTR(alt);
                 updptr = is_module ? origptr : lm_alias(origptr);
                 nr_inst = alt->orig_len / AARCH64_INSN_SIZE;
 
-                if (alt->cpufeature < ARM64_CB_PATCH)
-                        alt_cb = patch_alternative;
-                else
+                if (ALT_HAS_CB(alt))
                         alt_cb  = ALT_REPL_PTR(alt);
+                else
+                        alt_cb = patch_alternative;
 
                 alt_cb(alt, origptr, updptr, nr_inst);
 
@@ -201,9 +202,9 @@ void apply_alternatives_vdso(void)
         const struct elf64_hdr *hdr;
         const struct elf64_shdr *shdr;
         const struct elf64_shdr *alt;
-        DECLARE_BITMAP(all_capabilities, ARM64_NPATCHABLE);
+        DECLARE_BITMAP(all_capabilities, ARM64_NCAPS);
 
-        bitmap_fill(all_capabilities, ARM64_NPATCHABLE);
+        bitmap_fill(all_capabilities, ARM64_NCAPS);
 
         hdr = (struct elf64_hdr *)vdso_start;
         shdr = (void *)hdr + hdr->e_shoff;
@@ -219,30 +220,31 @@ void apply_alternatives_vdso(void)
         __apply_alternatives(&region, false, &all_capabilities[0]);
 }
 
+static const struct alt_region kernel_alternatives = {
+        .begin  = (struct alt_instr *)__alt_instructions,
+        .end    = (struct alt_instr *)__alt_instructions_end,
+};
+
 /*
  * We might be patching the stop_machine state machine, so implement a
  * really simple polling protocol here.
  */
 static int __apply_alternatives_multi_stop(void *unused)
 {
-        struct alt_region region = {
-                .begin  = (struct alt_instr *)__alt_instructions,
-                .end    = (struct alt_instr *)__alt_instructions_end,
-        };
-
         /* We always have a CPU 0 at this point (__init) */
         if (smp_processor_id()) {
                 while (!all_alternatives_applied)
                         cpu_relax();
                 isb();
         } else {
-                DECLARE_BITMAP(remaining_capabilities, ARM64_NPATCHABLE);
+                DECLARE_BITMAP(remaining_capabilities, ARM64_NCAPS);
 
                 bitmap_complement(remaining_capabilities, boot_capabilities,
-                                  ARM64_NPATCHABLE);
+                                  ARM64_NCAPS);
 
                 BUG_ON(all_alternatives_applied);
-                __apply_alternatives(&region, false, remaining_capabilities);
+                __apply_alternatives(&kernel_alternatives, false,
+                                     remaining_capabilities);
                 /* Barriers provided by the cache flushing */
                 all_alternatives_applied = 1;
         }
@@ -252,6 +254,8 @@ static int __apply_alternatives_multi_stop(void *unused)
 
 void __init apply_alternatives_all(void)
 {
+        pr_info("applying system-wide alternatives\n");
+
         apply_alternatives_vdso();
         /* better not try code patching on a live SMP system */
         stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
@@ -264,15 +268,13 @@ void __init apply_alternatives_all(void)
  */
 void __init apply_boot_alternatives(void)
 {
-        struct alt_region region = {
-                .begin  = (struct alt_instr *)__alt_instructions,
-                .end    = (struct alt_instr *)__alt_instructions_end,
-        };
-
         /* If called on non-boot cpu things could go wrong */
         WARN_ON(smp_processor_id() != 0);
 
-        __apply_alternatives(&region, false, &boot_capabilities[0]);
+        pr_info("applying boot alternatives\n");
+
+        __apply_alternatives(&kernel_alternatives, false,
+                             &boot_capabilities[0]);
 }
 
 #ifdef CONFIG_MODULES
@@ -282,10 +284,18 @@ void apply_alternatives_module(void *start, size_t length)
                 .begin  = start,
                 .end    = start + length,
         };
-        DECLARE_BITMAP(all_capabilities, ARM64_NPATCHABLE);
+        DECLARE_BITMAP(all_capabilities, ARM64_NCAPS);
 
-        bitmap_fill(all_capabilities, ARM64_NPATCHABLE);
+        bitmap_fill(all_capabilities, ARM64_NCAPS);
 
         __apply_alternatives(&region, true, &all_capabilities[0]);
 }
 #endif
+
+noinstr void alt_cb_patch_nops(struct alt_instr *alt, __le32 *origptr,
+                               __le32 *updptr, int nr_inst)
+{
+        for (int i = 0; i < nr_inst; i++)
+                updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
+}
+EXPORT_SYMBOL(alt_cb_patch_nops);
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index bff9280e184f..5d0527ba0804 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -108,8 +108,7 @@ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 EXPORT_SYMBOL(cpu_hwcaps);
 static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM64_NCAPS];
 
-/* Need also bit for ARM64_CB_PATCH */
-DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
+DECLARE_BITMAP(boot_capabilities, ARM64_NCAPS);
 
 bool arm64_use_ng_mappings = false;
 EXPORT_SYMBOL(arm64_use_ng_mappings);
@@ -134,31 +133,12 @@ DEFINE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
  */
 static cpumask_var_t cpu_32bit_el0_mask __cpumask_var_read_mostly;
 
-/*
- * Flag to indicate if we have computed the system wide
- * capabilities based on the boot time active CPUs. This
- * will be used to determine if a new booting CPU should
- * go through the verification process to make sure that it
- * supports the system capabilities, without using a hotplug
- * notifier. This is also used to decide if we could use
- * the fast path for checking constant CPU caps.
- */
-DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
-EXPORT_SYMBOL(arm64_const_caps_ready);
-static inline void finalize_system_capabilities(void)
-{
-        static_branch_enable(&arm64_const_caps_ready);
-}
-
 void dump_cpu_features(void)
 {
         /* file-wide pr_fmt adds "CPU features: " prefix */
         pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps);
 }
 
-DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
-EXPORT_SYMBOL(cpu_hwcap_keys);
-
 #define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
         { \
                 .sign = SIGNED, \
@@ -1392,6 +1372,12 @@ u64 __read_sysreg_by_encoding(u32 sys_id)
 #include <linux/irqchip/arm-gic-v3.h>
 
 static bool
+has_always(const struct arm64_cpu_capabilities *entry, int scope)
+{
+        return true;
+}
+
+static bool
 feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
 {
         int val = cpuid_feature_extract_field_width(reg, entry->field_pos,
@@ -2111,6 +2097,16 @@ cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap)
 
 static const struct arm64_cpu_capabilities arm64_features[] = {
         {
+                .capability = ARM64_ALWAYS_BOOT,
+                .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
+                .matches = has_always,
+        },
+        {
+                .capability = ARM64_ALWAYS_SYSTEM,
+                .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+                .matches = has_always,
+        },
+        {
                 .desc = "GIC system register CPU interface",
                 .capability = ARM64_HAS_SYSREG_GIC_CPUIF,
                 .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
@@ -2953,9 +2949,6 @@ static void __init enable_cpu_capabilities(u16 scope_mask)
                 if (!cpus_have_cap(num))
                         continue;
 
-                /* Ensure cpus_have_const_cap(num) works */
-                static_branch_enable(&cpu_hwcap_keys[num]);
-
                 if (boot_scope && caps->cpu_enable)
                         /*
                          * Capabilities with SCOPE_BOOT_CPU scope are finalised
@@ -3277,9 +3270,6 @@ void __init setup_cpu_features(void)
         sme_setup();
         minsigstksz_setup();
 
-        /* Advertise that we have computed the system capabilities */
-        finalize_system_capabilities();
-
         /*
          * Check for sane CTR_EL0.CWG value.
          */
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 2d73b3e793b2..e28137d64b76 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -114,7 +114,7 @@
          * them if required.
          */
         .macro  apply_ssbd, state, tmp1, tmp2
-alternative_cb  spectre_v4_patch_fw_mitigation_enable
+alternative_cb  ARM64_ALWAYS_SYSTEM, spectre_v4_patch_fw_mitigation_enable
         b       .L__asm_ssbd_skip\@             // Patched to NOP
 alternative_cb_end
         ldr_this_cpu    \tmp2, arm64_ssbd_callback_required, \tmp1
@@ -123,7 +123,7 @@ alternative_cb_end
         tbnz    \tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
         mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_2
         mov     w1, #\state
-alternative_cb  smccc_patch_fw_mitigation_conduit
+alternative_cb  ARM64_ALWAYS_SYSTEM, smccc_patch_fw_mitigation_conduit
         nop                                     // Patched to SMC/HVC #0
 alternative_cb_end
 .L__asm_ssbd_skip\@:
@@ -175,7 +175,7 @@ alternative_else_nop_endif
 
         .macro mte_set_kernel_gcr, tmp, tmp2
 #ifdef CONFIG_KASAN_HW_TAGS
-alternative_cb  kasan_hw_tags_enable
+alternative_cb  ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable
         b       1f
 alternative_cb_end
         mov     \tmp, KERNEL_GCR_EL1
@@ -186,7 +186,7 @@ alternative_cb_end
 
         .macro mte_set_user_gcr, tsk, tmp, tmp2
 #ifdef CONFIG_KASAN_HW_TAGS
-alternative_cb  kasan_hw_tags_enable
+alternative_cb  ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable
         b       1f
 alternative_cb_end
         ldr     \tmp, [\tsk, #THREAD_MTE_CTRL]
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index afa69e04e75e..4aaa5f3d1f65 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -73,6 +73,7 @@ KVM_NVHE_ALIAS(spectre_bhb_patch_loop_iter);
 KVM_NVHE_ALIAS(spectre_bhb_patch_loop_mitigation_enable);
 KVM_NVHE_ALIAS(spectre_bhb_patch_wa3);
 KVM_NVHE_ALIAS(spectre_bhb_patch_clearbhb);
+KVM_NVHE_ALIAS(alt_cb_patch_nops);
 
 /* Global kernel state accessed by nVHE hyp code. */
 KVM_NVHE_ALIAS(kvm_vgic_global_state);
@@ -89,10 +90,6 @@ KVM_NVHE_ALIAS(__icache_flags);
 /* VMID bits set by the KVM VMID allocator */
 KVM_NVHE_ALIAS(kvm_arm_vmid_bits);
 
-/* Kernel symbols needed for cpus_have_final/const_caps checks. */
-KVM_NVHE_ALIAS(arm64_const_caps_ready);
-KVM_NVHE_ALIAS(cpu_hwcap_keys);
-
 /* Static keys which are set if a vGIC trap should be handled in hyp. */
 KVM_NVHE_ALIAS(vgic_v2_cpuif_trap);
 KVM_NVHE_ALIAS(vgic_v3_cpuif_trap);
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
index fe3bc4c1c5ac..ea659a382e24 100644
--- a/arch/arm64/kernel/proton-pack.c
+++ b/arch/arm64/kernel/proton-pack.c
@@ -586,7 +586,7 @@ void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
         if (spectre_v4_mitigations_off())
                 return;
 
-        if (cpus_have_final_cap(ARM64_SSBS))
+        if (cpus_have_cap(ARM64_SSBS))
                 return;
 
         if (spectre_v4_mitigations_dynamic())
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 7839d075729b..8f3f93fa119e 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -196,7 +196,7 @@ SYM_CODE_END(__kvm_hyp_vector)
         sub     sp, sp, #(8 * 4)
         stp     x2, x3, [sp, #(8 * 0)]
         stp     x0, x1, [sp, #(8 * 2)]
-        alternative_cb spectre_bhb_patch_wa3
+        alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_wa3
         /* Patched to mov WA3 when supported */
         mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_1
         alternative_cb_end
@@ -216,7 +216,7 @@ SYM_CODE_END(__kvm_hyp_vector)
         mitigate_spectre_bhb_clear_insn
         .endif
         .if \indirect != 0
-        alternative_cb  kvm_patch_vector_branch
+        alternative_cb  ARM64_ALWAYS_SYSTEM, kvm_patch_vector_branch
         /*
          * For ARM64_SPECTRE_V3A configurations, these NOPs get replaced with:
          *
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index acdb7b3cc97d..91b22a014610 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -169,7 +169,7 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
                  * dictates it and we don't have any spare bits in the
                  * address), NOP everything after masking the kernel VA.
                  */
-                if (has_vhe() || (!tag_val && i > 0)) {
+                if (cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN) || (!tag_val && i > 0)) {
                         updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
                         continue;
                 }
@@ -193,7 +193,8 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
 
         BUG_ON(nr_inst != 4);
 
-        if (!cpus_have_const_cap(ARM64_SPECTRE_V3A) || WARN_ON_ONCE(has_vhe()))
+        if (!cpus_have_cap(ARM64_SPECTRE_V3A) ||
+            WARN_ON_ONCE(cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN)))
                 return;
 
         /*
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index f553a7cb1b07..f1c0347ec31a 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -2,6 +2,8 @@
 #
 # Internal CPU capabilities constants, keep this list sorted
 
+ALWAYS_BOOT
+ALWAYS_SYSTEM
 BTI
 # Unreliable: use system_supports_32bit_el0() instead.
 HAS_32BIT_EL0_DO_NOT_USE
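
Taken together, the alternative.c hunks reduce __apply_alternatives() to a single gate: every entry, callback or not, is skipped unless its capability is set, and bit 15 merely selects how the replacement is produced. A condensed restatement of the patched per-entry logic (a kernel-context sketch, not a drop-in: the feature_mask filter, module handling, and cache maintenance are omitted):

    static void apply_one(struct alt_instr *alt, __le32 *origptr, __le32 *updptr)
    {
            int cap = ALT_CAP(alt);                 /* cpufeature & ~ARM64_CB_BIT */
            int nr_inst = alt->orig_len / AARCH64_INSN_SIZE;
            alternative_cb_t alt_cb;

            if (!cpus_have_cap(cap))                /* one gate, no special case */
                    return;

            if (ALT_HAS_CB(alt))                    /* callback generates the code */
                    alt_cb = ALT_REPL_PTR(alt);
            else                                    /* plain copy of the replacement */
                    alt_cb = patch_alternative;

            alt_cb(alt, origptr, updptr, nr_inst);
    }

Because ARM64_ALWAYS_SYSTEM is detected on every system, callbacks that previously used ARM64_CB_PATCH keep their unconditional behaviour while flowing through the same path.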
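
Finally, the point of alternative_has_feature_likely()/_unlikely(): each check compiles to a single patchable branch-or-NOP with no memory load, which is what lets cpus_have_cap() and system_capabilities_finalized() be used from noinstr code where the old static-key lookups were unsafe. The behaviour around patching is sketched below with assumed instruction sequences (illustrative; exact code generation is up to the compiler), together with a hypothetical caller in the style of the converted system_uses_lse_atomics():

    /*
     * alternative_has_feature_likely(cap):
     *   before patching:   b   l_no    // cap not yet finalised -> false
     *   once cap is set:   nop         // patched by alt_cb_patch_nops -> true
     *
     * alternative_has_feature_unlikely(cap):
     *   before patching:   nop         // falls through -> false
     *   once cap is set:   b   l_yes   // patched in -> true
     */
    static __always_inline bool my_fixup_needed(void)   /* hypothetical caller */
    {
            return alternative_has_feature_unlikely(ARM64_SPECTRE_V3A);
    }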