author    Paolo Bonzini  2019-05-15 23:41:43 +0200
committer Paolo Bonzini  2019-05-15 23:41:43 +0200
commit    dd53f6102c30a774e0db8e55d49017a38060f6f6 (patch)
tree      82ac5f5dcd56225c70516d82a1612439e8d73669 /arch
parent    59c5c58c5b93285753d5c1de34d2e00039c27bc0 (diff)
parent    9eecfc22e0bfc7a4c8ca007f083f0ae492d6e891 (diff)
Merge tag 'kvmarm-for-v5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm updates for 5.2
- guest SVE support
- guest Pointer Authentication support
- Better discrimination of perf counters between host and guests
Conflicts:
include/uapi/linux/kvm.h
Diffstat (limited to 'arch')
26 files changed, 1592 insertions, 171 deletions
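Before the diff itself, a note on usage: the new vcpu feature bits and the KVM_REG_ARM64_SVE_VLS pseudo-register added in arch/arm64/include/uapi/asm/kvm.h below imply a three-step userspace flow. The following is a minimal sketch, not taken from this merge; it assumes the KVM_ARM_VCPU_FINALIZE ioctl presumably added via the conflicted include/uapi/linux/kvm.h (not shown here), and the helper name is hypothetical. Error handling is elided.

/* Hypothetical helper: enable and configure SVE for a vcpu. */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static void vcpu_enable_sve(int vcpu_fd, struct kvm_vcpu_init *init)
{
	__u64 vls[KVM_ARM64_SVE_VLS_WORDS];
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM64_SVE_VLS,
		.addr = (__u64)vls,
	};
	int feature = KVM_ARM_VCPU_SVE;

	/* 1. Opt in to SVE at vcpu init time: */
	init->features[0] |= 1u << KVM_ARM_VCPU_SVE;
	ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, init);

	/* 2. Read the supported vector-length set; optionally shrink it
	 *    and write it back (set_sve_vls() below rejects this with
	 *    -EPERM once the vcpu is finalized):
	 */
	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);

	/* 3. Freeze the SVE configuration; the SVE state registers only
	 *    become accessible after this:
	 */
	ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature);
}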
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 8927cae7c966..efb0e2c0d84c 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -343,4 +343,6 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu, } } +static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) {} + #endif /* __ARM_KVM_EMULATE_H__ */ diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 770d73257ad9..075e1921fdd9 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -19,6 +19,7 @@ #ifndef __ARM_KVM_HOST_H__ #define __ARM_KVM_HOST_H__ +#include <linux/errno.h> #include <linux/types.h> #include <linux/kvm_types.h> #include <asm/cputype.h> @@ -53,6 +54,8 @@ DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); +static inline int kvm_arm_init_sve(void) { return 0; } + u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode); int __attribute_const__ kvm_target_cpu(void); int kvm_reset_vcpu(struct kvm_vcpu *vcpu); @@ -150,9 +153,13 @@ struct kvm_cpu_context { u32 cp15[NR_CP15_REGS]; }; -typedef struct kvm_cpu_context kvm_cpu_context_t; +struct kvm_host_data { + struct kvm_cpu_context host_ctxt; +}; + +typedef struct kvm_host_data kvm_host_data_t; -static inline void kvm_init_host_cpu_context(kvm_cpu_context_t *cpu_ctxt, +static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt, int cpu) { /* The host's MPIDR is immutable, so let's set it up at boot time */ @@ -182,7 +189,7 @@ struct kvm_vcpu_arch { struct kvm_vcpu_fault_info fault; /* Host FP context */ - kvm_cpu_context_t *host_cpu_context; + struct kvm_cpu_context *host_cpu_context; /* VGIC state */ struct vgic_cpu vgic_cpu; @@ -361,6 +368,9 @@ static inline void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {} +static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {} +static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {} + static inline void kvm_arm_vhe_guest_enter(void) {} static inline void kvm_arm_vhe_guest_exit(void) {} @@ -409,4 +419,14 @@ static inline int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type) return 0; } +static inline int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature) +{ + return -EINVAL; +} + +static inline bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu) +{ + return true; +} + #endif /* __ARM_KVM_HOST_H__ */ diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 7e34b9eba5de..39470784a50c 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1288,6 +1288,7 @@ menu "ARMv8.3 architectural features" config ARM64_PTR_AUTH bool "Enable support for pointer authentication" default y + depends on !KVM || ARM64_VHE help Pointer authentication (part of the ARMv8.3 Extensions) provides instructions for signing and authenticating pointers against secret @@ -1301,8 +1302,9 @@ config ARM64_PTR_AUTH context-switched along with the process. The feature is detected at runtime. If the feature is not present in - hardware it will not be advertised to userspace nor will it be - enabled. + hardware it will not be advertised to userspace/KVM guests nor will it + be enabled. However, KVM guests also require VHE mode, and hence + CONFIG_ARM64_VHE=y, to use this feature.
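The fpsimd.h hunk below adds __vq_to_bit()/__bit_to_vq() with a deliberately reversed bit numbering. A short sketch (not from the patch; assumes <linux/bitops.h>) of why that reversal is useful:

/*
 * Because bit index = SVE_VQ_MAX - vq, larger VQs map to smaller bit
 * indices. Scanning upward from __vq_to_bit(vq) with find_next_bit()
 * therefore yields the *largest* supported VQ not exceeding vq, which
 * is exactly how find_supported_vector_length() in fpsimd.c uses it.
 * Assumes at least one VQ <= vq is set in the map (SVE_VQ_MIN always is).
 */
static unsigned int max_supported_vq(const unsigned long *vq_map,
				     unsigned int vq)
{
	unsigned int bit = find_next_bit(vq_map, SVE_VQ_MAX,
					 SVE_VQ_MAX - vq); /* __vq_to_bit(vq) */

	return SVE_VQ_MAX - bit; /* __bit_to_vq(bit) */
}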
endmenu diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index dd1ad3950ef5..df62bbd33a9a 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -24,10 +24,13 @@ #ifndef __ASSEMBLY__ +#include <linux/bitmap.h> #include <linux/build_bug.h> +#include <linux/bug.h> #include <linux/cache.h> #include <linux/init.h> #include <linux/stddef.h> +#include <linux/types.h> #if defined(__KERNEL__) && defined(CONFIG_COMPAT) /* Masks for extracting the FPSR and FPCR from the FPSCR */ @@ -56,7 +59,8 @@ extern void fpsimd_restore_current_state(void); extern void fpsimd_update_current_state(struct user_fpsimd_state const *state); extern void fpsimd_bind_task_to_cpu(void); -extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state); +extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state, + void *sve_state, unsigned int sve_vl); extern void fpsimd_flush_task_state(struct task_struct *target); extern void fpsimd_flush_cpu_state(void); @@ -87,6 +91,29 @@ extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused); extern u64 read_zcr_features(void); extern int __ro_after_init sve_max_vl; +extern int __ro_after_init sve_max_virtualisable_vl; +extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX); + +/* + * Helpers to translate bit indices in sve_vq_map to VQ values (and + * vice versa). This allows find_next_bit() to be used to find the + * _maximum_ VQ not exceeding a certain value. + */ +static inline unsigned int __vq_to_bit(unsigned int vq) +{ + return SVE_VQ_MAX - vq; +} + +static inline unsigned int __bit_to_vq(unsigned int bit) +{ + return SVE_VQ_MAX - bit; +} + +/* Ensure vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX before calling this function */ +static inline bool sve_vq_available(unsigned int vq) +{ + return test_bit(__vq_to_bit(vq), sve_vq_map); +} #ifdef CONFIG_ARM64_SVE diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index f5b79e995f40..ff73f5462aca 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -108,7 +108,8 @@ extern u32 __kvm_get_mdcr_el2(void); .endm .macro get_host_ctxt reg, tmp - hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp + hyp_adr_this_cpu \reg, kvm_host_data, \tmp + add \reg, \reg, #HOST_DATA_CONTEXT .endm .macro get_vcpu_ptr vcpu, ctxt diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index d3842791e1c4..613427fafff9 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -98,6 +98,22 @@ static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu) vcpu->arch.hcr_el2 |= HCR_TWE; } +static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu) +{ + vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK); +} + +static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu) +{ + vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK); +} + +static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) +{ + if (vcpu_has_ptrauth(vcpu)) + vcpu_ptrauth_disable(vcpu); +} + static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu) { return vcpu->arch.vsesr_el2; diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index a01fe087e022..2a8d3f8ca22c 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -22,9 +22,13 @@ #ifndef __ARM64_KVM_HOST_H__ #define __ARM64_KVM_HOST_H__ +#include <linux/bitmap.h> #include <linux/types.h> +#include <linux/jump_label.h> #include <linux/kvm_types.h> +#include 
<linux/percpu.h> #include <asm/arch_gicv3.h> +#include <asm/barrier.h> #include <asm/cpufeature.h> #include <asm/daifflags.h> #include <asm/fpsimd.h> @@ -45,7 +49,7 @@ #define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS -#define KVM_VCPU_MAX_FEATURES 4 +#define KVM_VCPU_MAX_FEATURES 7 #define KVM_REQ_SLEEP \ KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) @@ -54,8 +58,12 @@ DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); +extern unsigned int kvm_sve_max_vl; +int kvm_arm_init_sve(void); + int __attribute_const__ kvm_target_cpu(void); int kvm_reset_vcpu(struct kvm_vcpu *vcpu); +void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu); int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext); void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start); @@ -117,6 +125,7 @@ enum vcpu_sysreg { SCTLR_EL1, /* System Control Register */ ACTLR_EL1, /* Auxiliary Control Register */ CPACR_EL1, /* Coprocessor Access Control */ + ZCR_EL1, /* SVE Control */ TTBR0_EL1, /* Translation Table Base Register 0 */ TTBR1_EL1, /* Translation Table Base Register 1 */ TCR_EL1, /* Translation Control Register */ @@ -152,6 +161,18 @@ enum vcpu_sysreg { PMSWINC_EL0, /* Software Increment Register */ PMUSERENR_EL0, /* User Enable Register */ + /* Pointer Authentication Registers in a strict increasing order. */ + APIAKEYLO_EL1, + APIAKEYHI_EL1, + APIBKEYLO_EL1, + APIBKEYHI_EL1, + APDAKEYLO_EL1, + APDAKEYHI_EL1, + APDBKEYLO_EL1, + APDBKEYHI_EL1, + APGAKEYLO_EL1, + APGAKEYHI_EL1, + /* 32bit specific registers. Keep them at the end of the range */ DACR32_EL2, /* Domain Access Control Register */ IFSR32_EL2, /* Instruction Fault Status Register */ @@ -212,7 +233,17 @@ struct kvm_cpu_context { struct kvm_vcpu *__hyp_running_vcpu; }; -typedef struct kvm_cpu_context kvm_cpu_context_t; +struct kvm_pmu_events { + u32 events_host; + u32 events_guest; +}; + +struct kvm_host_data { + struct kvm_cpu_context host_ctxt; + struct kvm_pmu_events pmu_events; +}; + +typedef struct kvm_host_data kvm_host_data_t; struct vcpu_reset_state { unsigned long pc; @@ -223,6 +254,8 @@ struct vcpu_reset_state { struct kvm_vcpu_arch { struct kvm_cpu_context ctxt; + void *sve_state; + unsigned int sve_max_vl; /* HYP configuration */ u64 hcr_el2; @@ -255,7 +288,7 @@ struct kvm_vcpu_arch { struct kvm_guest_debug_arch external_debug_state; /* Pointer to host CPU context */ - kvm_cpu_context_t *host_cpu_context; + struct kvm_cpu_context *host_cpu_context; struct thread_info *host_thread_info; /* hyp VA */ struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */ @@ -318,12 +351,40 @@ struct kvm_vcpu_arch { bool sysregs_loaded_on_cpu; }; +/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */ +#define vcpu_sve_pffr(vcpu) ((void *)((char *)((vcpu)->arch.sve_state) + \ + sve_ffr_offset((vcpu)->arch.sve_max_vl))) + +#define vcpu_sve_state_size(vcpu) ({ \ + size_t __size_ret; \ + unsigned int __vcpu_vq; \ + \ + if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) { \ + __size_ret = 0; \ + } else { \ + __vcpu_vq = sve_vq_from_vl((vcpu)->arch.sve_max_vl); \ + __size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq); \ + } \ + \ + __size_ret; \ +}) + /* vcpu_arch flags field values: */ #define KVM_ARM64_DEBUG_DIRTY (1 << 0) #define KVM_ARM64_FP_ENABLED (1 << 1) /* guest FP regs loaded */ #define KVM_ARM64_FP_HOST (1 << 2) /* host FP regs loaded */ #define KVM_ARM64_HOST_SVE_IN_USE (1 << 3) /* backup for host TIF_SVE */ #define KVM_ARM64_HOST_SVE_ENABLED (1 << 4) /* SVE enabled for EL0 */ +#define KVM_ARM64_GUEST_HAS_SVE (1 << 5) /* SVE 
exposed to guest */ +#define KVM_ARM64_VCPU_SVE_FINALIZED (1 << 6) /* SVE config completed */ +#define KVM_ARM64_GUEST_HAS_PTRAUTH (1 << 7) /* PTRAUTH exposed to guest */ + +#define vcpu_has_sve(vcpu) (system_supports_sve() && \ + ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE)) + +#define vcpu_has_ptrauth(vcpu) ((system_supports_address_auth() || \ + system_supports_generic_auth()) && \ + ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)) #define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs) @@ -432,9 +493,9 @@ void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome); struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr); -DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state); +DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data); -static inline void kvm_init_host_cpu_context(kvm_cpu_context_t *cpu_ctxt, +static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt, int cpu) { /* The host's MPIDR is immutable, so let's set it up at boot time */ @@ -452,8 +513,8 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr, * kernel's mapping to the linear mapping, and store it in tpidr_el2 * so that we can use adr_l to access per-cpu variables in EL2. */ - u64 tpidr_el2 = ((u64)this_cpu_ptr(&kvm_host_cpu_state) - - (u64)kvm_ksym_ref(kvm_host_cpu_state)); + u64 tpidr_el2 = ((u64)this_cpu_ptr(&kvm_host_data) - + (u64)kvm_ksym_ref(kvm_host_data)); /* * Call initialization code, and switch to the full blown HYP code. @@ -491,9 +552,10 @@ static inline bool kvm_arch_requires_vhe(void) return false; } +void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu); + static inline void kvm_arch_hardware_unsetup(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} -static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} @@ -516,11 +578,28 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu); +static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr) +{ + return (!has_vhe() && attr->exclude_host); +} + #ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */ static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) { return kvm_arch_vcpu_run_map_fp(vcpu); } + +void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr); +void kvm_clr_pmu_events(u32 clr); + +void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt); +bool __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt); + +void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu); +void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu); +#else +static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {} +static inline void kvm_clr_pmu_events(u32 clr) {} #endif static inline void kvm_arm_vhe_guest_enter(void) @@ -594,4 +673,10 @@ void kvm_arch_free_vm(struct kvm *kvm); int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type); +int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature); +bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu); + +#define kvm_arm_vcpu_sve_finalized(vcpu) \ + ((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED) + #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 4da765f2cca5..ef8b8394d3d1 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ 
-149,7 +149,6 @@ void __debug_switch_to_host(struct kvm_vcpu *vcpu); void __fpsimd_save_state(struct user_fpsimd_state *fp_regs); void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs); -bool __fpsimd_enabled(void); void activate_traps_vhe_load(struct kvm_vcpu *vcpu); void deactivate_traps_vhe_put(void); diff --git a/arch/arm64/include/asm/kvm_ptrauth.h b/arch/arm64/include/asm/kvm_ptrauth.h new file mode 100644 index 000000000000..6301813dcace --- /dev/null +++ b/arch/arm64/include/asm/kvm_ptrauth.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* arch/arm64/include/asm/kvm_ptrauth.h: Guest/host ptrauth save/restore + * Copyright 2019 Arm Limited + * Authors: Mark Rutland <mark.rutland@arm.com> + * Amit Daniel Kachhap <amit.kachhap@arm.com> + */ + +#ifndef __ASM_KVM_PTRAUTH_H +#define __ASM_KVM_PTRAUTH_H + +#ifdef __ASSEMBLY__ + +#include <asm/sysreg.h> + +#ifdef CONFIG_ARM64_PTR_AUTH + +#define PTRAUTH_REG_OFFSET(x) (x - CPU_APIAKEYLO_EL1) + +/* + * CPU_AP*_EL1 values exceed the immediate offset range (512) of the stp + * instruction, so the macros below take CPU_APIAKEYLO_EL1 as the base and + * calculate the offset of each key from it to avoid an extra add + * instruction. These macros assume the key offsets follow the order of + * the sysreg enum in kvm_host.h. + */ +.macro ptrauth_save_state base, reg1, reg2 + mrs_s \reg1, SYS_APIAKEYLO_EL1 + mrs_s \reg2, SYS_APIAKEYHI_EL1 + stp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIAKEYLO_EL1)] + mrs_s \reg1, SYS_APIBKEYLO_EL1 + mrs_s \reg2, SYS_APIBKEYHI_EL1 + stp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIBKEYLO_EL1)] + mrs_s \reg1, SYS_APDAKEYLO_EL1 + mrs_s \reg2, SYS_APDAKEYHI_EL1 + stp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDAKEYLO_EL1)] + mrs_s \reg1, SYS_APDBKEYLO_EL1 + mrs_s \reg2, SYS_APDBKEYHI_EL1 + stp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDBKEYLO_EL1)] + mrs_s \reg1, SYS_APGAKEYLO_EL1 + mrs_s \reg2, SYS_APGAKEYHI_EL1 + stp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APGAKEYLO_EL1)] +.endm + +.macro ptrauth_restore_state base, reg1, reg2 + ldp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIAKEYLO_EL1)] + msr_s SYS_APIAKEYLO_EL1, \reg1 + msr_s SYS_APIAKEYHI_EL1, \reg2 + ldp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIBKEYLO_EL1)] + msr_s SYS_APIBKEYLO_EL1, \reg1 + msr_s SYS_APIBKEYHI_EL1, \reg2 + ldp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDAKEYLO_EL1)] + msr_s SYS_APDAKEYLO_EL1, \reg1 + msr_s SYS_APDAKEYHI_EL1, \reg2 + ldp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDBKEYLO_EL1)] + msr_s SYS_APDBKEYLO_EL1, \reg1 + msr_s SYS_APDBKEYHI_EL1, \reg2 + ldp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APGAKEYLO_EL1)] + msr_s SYS_APGAKEYLO_EL1, \reg1 + msr_s SYS_APGAKEYHI_EL1, \reg2 +.endm + +/* + * Both the ptrauth_switch_to_guest and ptrauth_switch_to_host macros + * check for the presence of one of the cpufeature flags + * ARM64_HAS_ADDRESS_AUTH_ARCH or ARM64_HAS_ADDRESS_AUTH_IMP_DEF and + * only then proceed with the save/restore of the Pointer Authentication + * key registers.
+ */ +.macro ptrauth_switch_to_guest g_ctxt, reg1, reg2, reg3 +alternative_if ARM64_HAS_ADDRESS_AUTH_ARCH + b 1000f +alternative_else_nop_endif +alternative_if_not ARM64_HAS_ADDRESS_AUTH_IMP_DEF + b 1001f +alternative_else_nop_endif +1000: + ldr \reg1, [\g_ctxt, #(VCPU_HCR_EL2 - VCPU_CONTEXT)] + and \reg1, \reg1, #(HCR_API | HCR_APK) + cbz \reg1, 1001f + add \reg1, \g_ctxt, #CPU_APIAKEYLO_EL1 + ptrauth_restore_state \reg1, \reg2, \reg3 +1001: +.endm + +.macro ptrauth_switch_to_host g_ctxt, h_ctxt, reg1, reg2, reg3 +alternative_if ARM64_HAS_ADDRESS_AUTH_ARCH + b 2000f +alternative_else_nop_endif +alternative_if_not ARM64_HAS_ADDRESS_AUTH_IMP_DEF + b 2001f +alternative_else_nop_endif +2000: + ldr \reg1, [\g_ctxt, #(VCPU_HCR_EL2 - VCPU_CONTEXT)] + and \reg1, \reg1, #(HCR_API | HCR_APK) + cbz \reg1, 2001f + add \reg1, \g_ctxt, #CPU_APIAKEYLO_EL1 + ptrauth_save_state \reg1, \reg2, \reg3 + add \reg1, \h_ctxt, #CPU_APIAKEYLO_EL1 + ptrauth_restore_state \reg1, \reg2, \reg3 + isb +2001: +.endm + +#else /* !CONFIG_ARM64_PTR_AUTH */ +.macro ptrauth_switch_to_guest g_ctxt, reg1, reg2, reg3 +.endm +.macro ptrauth_switch_to_host g_ctxt, h_ctxt, reg1, reg2, reg3 +.endm +#endif /* CONFIG_ARM64_PTR_AUTH */ +#endif /* __ASSEMBLY__ */ +#endif /* __ASM_KVM_PTRAUTH_H */ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 5b267dec6194..4d6262df79bb 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -454,6 +454,9 @@ #define SYS_ICH_LR14_EL2 __SYS__LR8_EL2(6) #define SYS_ICH_LR15_EL2 __SYS__LR8_EL2(7) +/* VHE encodings for architectural EL0/1 system registers */ +#define SYS_ZCR_EL12 sys_reg(3, 5, 1, 2, 0) + /* Common SCTLR_ELx flags. */ #define SCTLR_ELx_DSSBS (_BITUL(44)) #define SCTLR_ELx_ENIA (_BITUL(31)) diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 97c3478ee6e7..7b7ac0f6cec9 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -35,6 +35,7 @@ #include <linux/psci.h> #include <linux/types.h> #include <asm/ptrace.h> +#include <asm/sve_context.h> #define __KVM_HAVE_GUEST_DEBUG #define __KVM_HAVE_IRQ_LINE @@ -102,6 +103,9 @@ struct kvm_regs { #define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */ #define KVM_ARM_VCPU_PSCI_0_2 2 /* CPU uses PSCI v0.2 */ #define KVM_ARM_VCPU_PMU_V3 3 /* Support guest PMUv3 */ +#define KVM_ARM_VCPU_SVE 4 /* enable SVE for this CPU */ +#define KVM_ARM_VCPU_PTRAUTH_ADDRESS 5 /* VCPU uses address authentication */ +#define KVM_ARM_VCPU_PTRAUTH_GENERIC 6 /* VCPU uses generic authentication */ struct kvm_vcpu_init { __u32 target; @@ -226,6 +230,45 @@ struct kvm_vcpu_events { KVM_REG_ARM_FW | ((r) & 0xffff)) #define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0) +/* SVE registers */ +#define KVM_REG_ARM64_SVE (0x15 << KVM_REG_ARM_COPROC_SHIFT) + +/* Z- and P-regs occupy blocks at the following offsets within this range: */ +#define KVM_REG_ARM64_SVE_ZREG_BASE 0 +#define KVM_REG_ARM64_SVE_PREG_BASE 0x400 +#define KVM_REG_ARM64_SVE_FFR_BASE 0x600 + +#define KVM_ARM64_SVE_NUM_ZREGS __SVE_NUM_ZREGS +#define KVM_ARM64_SVE_NUM_PREGS __SVE_NUM_PREGS + +#define KVM_ARM64_SVE_MAX_SLICES 32 + +#define KVM_REG_ARM64_SVE_ZREG(n, i) \ + (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | KVM_REG_ARM64_SVE_ZREG_BASE | \ + KVM_REG_SIZE_U2048 | \ + (((n) & (KVM_ARM64_SVE_NUM_ZREGS - 1)) << 5) | \ + ((i) & (KVM_ARM64_SVE_MAX_SLICES - 1))) + +#define KVM_REG_ARM64_SVE_PREG(n, i) \ + (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | KVM_REG_ARM64_SVE_PREG_BASE | \ + 
KVM_REG_SIZE_U256 | \ + (((n) & (KVM_ARM64_SVE_NUM_PREGS - 1)) << 5) | \ + ((i) & (KVM_ARM64_SVE_MAX_SLICES - 1))) + +#define KVM_REG_ARM64_SVE_FFR(i) \ + (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | KVM_REG_ARM64_SVE_FFR_BASE | \ + KVM_REG_SIZE_U256 | \ + ((i) & (KVM_ARM64_SVE_MAX_SLICES - 1))) + +#define KVM_ARM64_SVE_VQ_MIN __SVE_VQ_MIN +#define KVM_ARM64_SVE_VQ_MAX __SVE_VQ_MAX + +/* Vector lengths pseudo-register: */ +#define KVM_REG_ARM64_SVE_VLS (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | \ + KVM_REG_SIZE_U512 | 0xffff) +#define KVM_ARM64_SVE_VLS_WORDS \ + ((KVM_ARM64_SVE_VQ_MAX - KVM_ARM64_SVE_VQ_MIN) / 64 + 1) + /* Device Control API: ARM VGIC */ #define KVM_DEV_ARM_VGIC_GRP_ADDR 0 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 7f40dcbdd51d..768b23101ff0 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -125,9 +125,16 @@ int main(void) DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt)); DEFINE(VCPU_FAULT_DISR, offsetof(struct kvm_vcpu, arch.fault.disr_el1)); DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags)); + DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2)); DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs)); + DEFINE(CPU_APIAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIAKEYLO_EL1])); + DEFINE(CPU_APIBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIBKEYLO_EL1])); + DEFINE(CPU_APDAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDAKEYLO_EL1])); + DEFINE(CPU_APDBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDBKEYLO_EL1])); + DEFINE(CPU_APGAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APGAKEYLO_EL1])); DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs)); DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu)); + DEFINE(HOST_DATA_CONTEXT, offsetof(struct kvm_host_data, host_ctxt)); #endif #ifdef CONFIG_CPU_PM DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp)); diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 4061de10cea6..7f8cc51f0740 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1863,7 +1863,7 @@ static void verify_sve_features(void) unsigned int len = zcr & ZCR_ELx_LEN_MASK; if (len < safe_len || sve_verify_vq_map()) { - pr_crit("CPU%d: SVE: required vector length(s) missing\n", + pr_crit("CPU%d: SVE: vector length support mismatch\n", smp_processor_id()); cpu_die_early(); } diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 5ebe73b69961..56afa40263d9 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -18,6 +18,7 @@ */ #include <linux/bitmap.h> +#include <linux/bitops.h> #include <linux/bottom_half.h> #include <linux/bug.h> #include <linux/cache.h> @@ -48,6 +49,7 @@ #include <asm/sigcontext.h> #include <asm/sysreg.h> #include <asm/traps.h> +#include <asm/virt.h> #define FPEXC_IOF (1 << 0) #define FPEXC_DZF (1 << 1) @@ -119,6 +121,8 @@ */ struct fpsimd_last_state_struct { struct user_fpsimd_state *st; + void *sve_state; + unsigned int sve_vl; }; static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state); @@ -130,14 +134,23 @@ static int sve_default_vl = -1; /* Maximum supported vector length across all CPUs (initially poisoned) */ int __ro_after_init sve_max_vl = SVE_VL_MIN; -/* Set of available vector lengths, as vq_to_bit(vq): */ -static __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX); +int 
__ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN; + +/* + * Set of available vector lengths, + * where length vq encoded as bit __vq_to_bit(vq): + */ +__ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX); +/* Set of vector lengths present on at least one cpu: */ +static __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX); + static void __percpu *efi_sve_state; #else /* ! CONFIG_ARM64_SVE */ /* Dummy declaration for code that will be optimised out: */ extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX); +extern __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX); extern void __percpu *efi_sve_state; #endif /* ! CONFIG_ARM64_SVE */ @@ -235,14 +248,15 @@ static void task_fpsimd_load(void) */ void fpsimd_save(void) { - struct user_fpsimd_state *st = __this_cpu_read(fpsimd_last_state.st); + struct fpsimd_last_state_struct const *last = + this_cpu_ptr(&fpsimd_last_state); /* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */ WARN_ON(!in_softirq() && !irqs_disabled()); if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) { if (system_supports_sve() && test_thread_flag(TIF_SVE)) { - if (WARN_ON(sve_get_vl() != current->thread.sve_vl)) { + if (WARN_ON(sve_get_vl() != last->sve_vl)) { /* * Can't save the user regs, so current would * re-enter user with corrupt state. @@ -252,32 +266,15 @@ void fpsimd_save(void) return; } - sve_save_state(sve_pffr(¤t->thread), &st->fpsr); + sve_save_state((char *)last->sve_state + + sve_ffr_offset(last->sve_vl), + &last->st->fpsr); } else - fpsimd_save_state(st); + fpsimd_save_state(last->st); } } /* - * Helpers to translate bit indices in sve_vq_map to VQ values (and - * vice versa). This allows find_next_bit() to be used to find the - * _maximum_ VQ not exceeding a certain value. - */ - -static unsigned int vq_to_bit(unsigned int vq) -{ - return SVE_VQ_MAX - vq; -} - -static unsigned int bit_to_vq(unsigned int bit) -{ - if (WARN_ON(bit >= SVE_VQ_MAX)) - bit = SVE_VQ_MAX - 1; - - return SVE_VQ_MAX - bit; -} - -/* * All vector length selection from userspace comes through here. * We're on a slow path, so some sanity-checks are included. * If things go wrong there's a bug somewhere, but try to fall back to a @@ -298,8 +295,8 @@ static unsigned int find_supported_vector_length(unsigned int vl) vl = max_vl; bit = find_next_bit(sve_vq_map, SVE_VQ_MAX, - vq_to_bit(sve_vq_from_vl(vl))); - return sve_vl_from_vq(bit_to_vq(bit)); + __vq_to_bit(sve_vq_from_vl(vl))); + return sve_vl_from_vq(__bit_to_vq(bit)); } #ifdef CONFIG_SYSCTL @@ -550,7 +547,6 @@ int sve_set_vector_length(struct task_struct *task, local_bh_disable(); fpsimd_save(); - set_thread_flag(TIF_FOREIGN_FPSTATE); } fpsimd_flush_task_state(task); @@ -624,12 +620,6 @@ int sve_get_current_vl(void) return sve_prctl_status(0); } -/* - * Bitmap for temporary storage of the per-CPU set of supported vector lengths - * during secondary boot. - */ -static DECLARE_BITMAP(sve_secondary_vq_map, SVE_VQ_MAX); - static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX)) { unsigned int vq, vl; @@ -644,40 +634,82 @@ static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX)) write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1); /* self-syncing */ vl = sve_get_vl(); vq = sve_vq_from_vl(vl); /* skip intervening lengths */ - set_bit(vq_to_bit(vq), map); + set_bit(__vq_to_bit(vq), map); } } +/* + * Initialise the set of known supported VQs for the boot CPU. + * This is called during kernel boot, before secondary CPUs are brought up. 
+ */ void __init sve_init_vq_map(void) { sve_probe_vqs(sve_vq_map); + bitmap_copy(sve_vq_partial_map, sve_vq_map, SVE_VQ_MAX); } /* * If we haven't committed to the set of supported VQs yet, filter out * those not supported by the current CPU. + * This function is called during the bring-up of early secondary CPUs only. */ void sve_update_vq_map(void) { - sve_probe_vqs(sve_secondary_vq_map); - bitmap_and(sve_vq_map, sve_vq_map, sve_secondary_vq_map, SVE_VQ_MAX); + DECLARE_BITMAP(tmp_map, SVE_VQ_MAX); + + sve_probe_vqs(tmp_map); + bitmap_and(sve_vq_map, sve_vq_map, tmp_map, SVE_VQ_MAX); + bitmap_or(sve_vq_partial_map, sve_vq_partial_map, tmp_map, SVE_VQ_MAX); } -/* Check whether the current CPU supports all VQs in the committed set */ +/* + * Check whether the current CPU supports all VQs in the committed set. + * This function is called during the bring-up of late secondary CPUs only. + */ int sve_verify_vq_map(void) { - int ret = 0; + DECLARE_BITMAP(tmp_map, SVE_VQ_MAX); + unsigned long b; - sve_probe_vqs(sve_secondary_vq_map); - bitmap_andnot(sve_secondary_vq_map, sve_vq_map, sve_secondary_vq_map, - SVE_VQ_MAX); - if (!bitmap_empty(sve_secondary_vq_map, SVE_VQ_MAX)) { + sve_probe_vqs(tmp_map); + + bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX); + if (bitmap_intersects(tmp_map, sve_vq_map, SVE_VQ_MAX)) { pr_warn("SVE: cpu%d: Required vector length(s) missing\n", smp_processor_id()); - ret = -EINVAL; + return -EINVAL; } - return ret; + if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available()) + return 0; + + /* + * For KVM, it is necessary to ensure that this CPU doesn't + * support any vector length that guests may have probed as + * unsupported. + */ + + /* Recover the set of supported VQs: */ + bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX); + /* Find VQs supported that are not globally supported: */ + bitmap_andnot(tmp_map, tmp_map, sve_vq_map, SVE_VQ_MAX); + + /* Find the lowest such VQ, if any: */ + b = find_last_bit(tmp_map, SVE_VQ_MAX); + if (b >= SVE_VQ_MAX) + return 0; /* no mismatches */ + + /* + * Mismatches above sve_max_virtualisable_vl are fine, since + * no guest is allowed to configure ZCR_EL2.LEN to exceed this: + */ + if (sve_vl_from_vq(__bit_to_vq(b)) <= sve_max_virtualisable_vl) { + pr_warn("SVE: cpu%d: Unsupported vector length(s) present\n", + smp_processor_id()); + return -EINVAL; + } + + return 0; } static void __init sve_efi_setup(void) @@ -744,6 +776,8 @@ u64 read_zcr_features(void) void __init sve_setup(void) { u64 zcr; + DECLARE_BITMAP(tmp_map, SVE_VQ_MAX); + unsigned long b; if (!system_supports_sve()) return; @@ -753,8 +787,8 @@ void __init sve_setup(void) * so sve_vq_map must have at least SVE_VQ_MIN set. * If something went wrong, at least try to patch it up: */ - if (WARN_ON(!test_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map))) - set_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map); + if (WARN_ON(!test_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map))) + set_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map); zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1); sve_max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1); @@ -772,11 +806,31 @@ void __init sve_setup(void) */ sve_default_vl = find_supported_vector_length(64); + bitmap_andnot(tmp_map, sve_vq_partial_map, sve_vq_map, + SVE_VQ_MAX); + + b = find_last_bit(tmp_map, SVE_VQ_MAX); + if (b >= SVE_VQ_MAX) + /* No non-virtualisable VLs found */ + sve_max_virtualisable_vl = SVE_VQ_MAX; + else if (WARN_ON(b == SVE_VQ_MAX - 1)) + /* No virtualisable VLs? This is architecturally forbidden. 
*/ + sve_max_virtualisable_vl = SVE_VQ_MIN; + else /* b + 1 < SVE_VQ_MAX */ + sve_max_virtualisable_vl = sve_vl_from_vq(__bit_to_vq(b + 1)); + + if (sve_max_virtualisable_vl > sve_max_vl) + sve_max_virtualisable_vl = sve_max_vl; + pr_info("SVE: maximum available vector length %u bytes per vector\n", sve_max_vl); pr_info("SVE: default vector length %u bytes per vector\n", sve_default_vl); + /* KVM decides whether to support mismatched systems. Just warn here: */ + if (sve_max_virtualisable_vl < sve_max_vl) + pr_warn("SVE: unvirtualisable vector lengths present\n"); + sve_efi_setup(); } @@ -816,12 +870,11 @@ asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs) local_bh_disable(); fpsimd_save(); - fpsimd_to_sve(current); /* Force ret_to_user to reload the registers: */ fpsimd_flush_task_state(current); - set_thread_flag(TIF_FOREIGN_FPSTATE); + fpsimd_to_sve(current); if (test_and_set_thread_flag(TIF_SVE)) WARN_ON(1); /* SVE access shouldn't have trapped */ @@ -894,9 +947,9 @@ void fpsimd_flush_thread(void) local_bh_disable(); + fpsimd_flush_task_state(current); memset(¤t->thread.uw.fpsimd_state, 0, sizeof(current->thread.uw.fpsimd_state)); - fpsimd_flush_task_state(current); if (system_supports_sve()) { clear_thread_flag(TIF_SVE); @@ -933,8 +986,6 @@ void fpsimd_flush_thread(void) current->thread.sve_vl_onexec = 0; } - set_thread_flag(TIF_FOREIGN_FPSTATE); - local_bh_enable(); } @@ -974,6 +1025,8 @@ void fpsimd_bind_task_to_cpu(void) this_cpu_ptr(&fpsimd_last_state); last->st = ¤t->thread.uw.fpsimd_state; + last->sve_state = current->thread.sve_state; + last->sve_vl = current->thread.sve_vl; current->thread.fpsimd_cpu = smp_processor_id(); if (system_supports_sve()) { @@ -987,7 +1040,8 @@ void fpsimd_bind_task_to_cpu(void) } } -void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st) +void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state, + unsigned int sve_vl) { struct fpsimd_last_state_struct *last = this_cpu_ptr(&fpsimd_last_state); @@ -995,6 +1049,8 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st) WARN_ON(!in_softirq() && !irqs_disabled()); last->st = st; + last->sve_state = sve_state; + last->sve_vl = sve_vl; } /* @@ -1043,12 +1099,29 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state) /* * Invalidate live CPU copies of task t's FPSIMD state + * + * This function may be called with preemption enabled. The barrier() + * ensures that the assignment to fpsimd_cpu is visible to any + * preemption/softirq that could race with set_tsk_thread_flag(), so + * that TIF_FOREIGN_FPSTATE cannot be spuriously re-cleared. + * + * The final barrier ensures that TIF_FOREIGN_FPSTATE is seen set by any + * subsequent code. */ void fpsimd_flush_task_state(struct task_struct *t) { t->thread.fpsimd_cpu = NR_CPUS; + + barrier(); + set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE); + + barrier(); } +/* + * Invalidate any task's FPSIMD state that is present on this cpu. + * This function must be called with softirqs disabled. 
+ */ void fpsimd_flush_cpu_state(void) { __this_cpu_write(fpsimd_last_state.st, NULL); diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 4addb38bc250..314b1adedf06 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -26,6 +26,7 @@ #include <linux/acpi.h> #include <linux/clocksource.h> +#include <linux/kvm_host.h> #include <linux/of.h> #include <linux/perf/arm_pmu.h> #include <linux/platform_device.h> @@ -528,12 +529,21 @@ static inline int armv8pmu_enable_counter(int idx) static inline void armv8pmu_enable_event_counter(struct perf_event *event) { + struct perf_event_attr *attr = &event->attr; int idx = event->hw.idx; + u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx)); - armv8pmu_enable_counter(idx); if (armv8pmu_event_is_chained(event)) - armv8pmu_enable_counter(idx - 1); - isb(); + counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1)); + + kvm_set_pmu_events(counter_bits, attr); + + /* We rely on the hypervisor switch code to enable guest counters */ + if (!kvm_pmu_counter_deferred(attr)) { + armv8pmu_enable_counter(idx); + if (armv8pmu_event_is_chained(event)) + armv8pmu_enable_counter(idx - 1); + } } static inline int armv8pmu_disable_counter(int idx) @@ -546,11 +556,21 @@ static inline int armv8pmu_disable_counter(int idx) static inline void armv8pmu_disable_event_counter(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; + struct perf_event_attr *attr = &event->attr; int idx = hwc->idx; + u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx)); if (armv8pmu_event_is_chained(event)) - armv8pmu_disable_counter(idx - 1); - armv8pmu_disable_counter(idx); + counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1)); + + kvm_clr_pmu_events(counter_bits); + + /* We rely on the hypervisor switch code to disable guest counters */ + if (!kvm_pmu_counter_deferred(attr)) { + if (armv8pmu_event_is_chained(event)) + armv8pmu_disable_counter(idx - 1); + armv8pmu_disable_counter(idx); + } } static inline int armv8pmu_enable_intens(int idx) @@ -827,14 +847,23 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event, * with other architectures (x86 and Power). */ if (is_kernel_in_hyp_mode()) { - if (!attr->exclude_kernel) + if (!attr->exclude_kernel && !attr->exclude_host) config_base |= ARMV8_PMU_INCLUDE_EL2; - } else { - if (attr->exclude_kernel) + if (attr->exclude_guest) config_base |= ARMV8_PMU_EXCLUDE_EL1; - if (!attr->exclude_hv) + if (attr->exclude_host) + config_base |= ARMV8_PMU_EXCLUDE_EL0; + } else { + if (!attr->exclude_hv && !attr->exclude_host) config_base |= ARMV8_PMU_INCLUDE_EL2; } + + /* + * Filter out !VHE kernels and guest kernels + */ + if (attr->exclude_kernel) + config_base |= ARMV8_PMU_EXCLUDE_EL1; + if (attr->exclude_user) config_base |= ARMV8_PMU_EXCLUDE_EL0; @@ -864,6 +893,9 @@ static void armv8pmu_reset(void *info) armv8pmu_disable_intens(idx); } + /* Clear the counters we flip at guest entry/exit */ + kvm_clr_pmu_events(U32_MAX); + /* * Initialize & Reset PMNC. Request overflow interrupt for * 64 bit cycle counter but cheat in armv8pmu_write_counter(). 
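The perf_event.c changes above stop eagerly enabling a counter whose attributes exclude the host on a non-VHE system, and instead hand it to the hypervisor switch code via kvm_set_pmu_events(). A hypothetical attribute set illustrating the guest-only counting this enables:

#include <linux/perf_event.h>

/*
 * Hypothetical example values: a cycle counter that counts only while a
 * guest is running. On !VHE, kvm_pmu_counter_deferred() is true for this
 * event, so enable/disable is deferred to __pmu_switch_to_guest()/
 * __pmu_switch_to_host() at guest entry/exit.
 */
struct perf_event_attr guest_cycles = {
	.type          = PERF_TYPE_HARDWARE,
	.config        = PERF_COUNT_HW_CPU_CYCLES,
	.size          = sizeof(struct perf_event_attr),
	.exclude_host  = 1, /* do not count the host... */
	.exclude_guest = 0, /* ...only the guest */
};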
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 867a7cea70e5..a9b0485df074 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -296,11 +296,6 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user) */ fpsimd_flush_task_state(current); - barrier(); - /* From now, fpsimd_thread_switch() won't clear TIF_FOREIGN_FPSTATE */ - - set_thread_flag(TIF_FOREIGN_FPSTATE); - barrier(); /* From now, fpsimd_thread_switch() won't touch thread.sve_state */ sve_alloc(current); diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index 690e033a91c0..3ac1a64d2fb9 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -17,7 +17,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/psci.o $(KVM)/arm/perf.o kvm-$(CONFIG_KVM_ARM_HOST) += inject_fault.o regmap.o va_layout.o kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o -kvm-$(CONFIG_KVM_ARM_HOST) += vgic-sys-reg-v3.o fpsimd.o +kvm-$(CONFIG_KVM_ARM_HOST) += vgic-sys-reg-v3.o fpsimd.o pmu.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/aarch32.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic.o diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index aac7808ce216..6e3c9c8b2df9 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -9,6 +9,7 @@ #include <linux/sched.h> #include <linux/thread_info.h> #include <linux/kvm_host.h> +#include <asm/fpsimd.h> #include <asm/kvm_asm.h> #include <asm/kvm_host.h> #include <asm/kvm_mmu.h> @@ -85,9 +86,12 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) WARN_ON_ONCE(!irqs_disabled()); if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) { - fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.gp_regs.fp_regs); + fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.gp_regs.fp_regs, + vcpu->arch.sve_state, + vcpu->arch.sve_max_vl); + clear_thread_flag(TIF_FOREIGN_FPSTATE); - clear_thread_flag(TIF_SVE); + update_thread_flag(TIF_SVE, vcpu_has_sve(vcpu)); } } @@ -100,14 +104,21 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) { unsigned long flags; + bool host_has_sve = system_supports_sve(); + bool guest_has_sve = vcpu_has_sve(vcpu); local_irq_save(flags); if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) { + u64 *guest_zcr = &vcpu->arch.ctxt.sys_regs[ZCR_EL1]; + /* Clean guest FP state to memory and invalidate cpu view */ fpsimd_save(); fpsimd_flush_cpu_state(); - } else if (system_supports_sve()) { + + if (guest_has_sve) + *guest_zcr = read_sysreg_s(SYS_ZCR_EL12); + } else if (host_has_sve) { /* * The FPSIMD/SVE state in the CPU has not been touched, and we * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index dd436a50fce7..3ae2f82fca46 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -19,18 +19,25 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. 
*/ +#include <linux/bits.h> #include <linux/errno.h> #include <linux/err.h> +#include <linux/nospec.h> #include <linux/kvm_host.h> #include <linux/module.h> +#include <linux/stddef.h> +#include <linux/string.h> #include <linux/vmalloc.h> #include <linux/fs.h> #include <kvm/arm_psci.h> #include <asm/cputype.h> #include <linux/uaccess.h> +#include <asm/fpsimd.h> #include <asm/kvm.h> #include <asm/kvm_emulate.h> #include <asm/kvm_coproc.h> +#include <asm/kvm_host.h> +#include <asm/sigcontext.h> #include "trace.h" @@ -52,12 +59,19 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) return 0; } +static bool core_reg_offset_is_vreg(u64 off) +{ + return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) && + off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr); +} + static u64 core_reg_offset_from_id(u64 id) { return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE); } -static int validate_core_offset(const struct kvm_one_reg *reg) +static int validate_core_offset(const struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) { u64 off = core_reg_offset_from_id(reg->id); int size; @@ -89,11 +103,19 @@ static int validate_core_offset(const struct kvm_one_reg *reg) return -EINVAL; } - if (KVM_REG_SIZE(reg->id) == size && - IS_ALIGNED(off, size / sizeof(__u32))) - return 0; + if (KVM_REG_SIZE(reg->id) != size || + !IS_ALIGNED(off, size / sizeof(__u32))) + return -EINVAL; - return -EINVAL; + /* + * The KVM_REG_ARM64_SVE regs must be used instead of + * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on + * SVE-enabled vcpus: + */ + if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off)) + return -EINVAL; + + return 0; } static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) @@ -115,7 +137,7 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs) return -ENOENT; - if (validate_core_offset(reg)) + if (validate_core_offset(vcpu, reg)) return -EINVAL; if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id))) @@ -140,7 +162,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs) return -ENOENT; - if (validate_core_offset(reg)) + if (validate_core_offset(vcpu, reg)) return -EINVAL; if (KVM_REG_SIZE(reg->id) > sizeof(tmp)) @@ -183,6 +205,239 @@ out: return err; } +#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64) +#define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64) + +static bool vq_present( + const u64 (*const vqs)[KVM_ARM64_SVE_VLS_WORDS], + unsigned int vq) +{ + return (*vqs)[vq_word(vq)] & vq_mask(vq); +} + +static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) +{ + unsigned int max_vq, vq; + u64 vqs[KVM_ARM64_SVE_VLS_WORDS]; + + if (!vcpu_has_sve(vcpu)) + return -ENOENT; + + if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl))) + return -EINVAL; + + memset(vqs, 0, sizeof(vqs)); + + max_vq = sve_vq_from_vl(vcpu->arch.sve_max_vl); + for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq) + if (sve_vq_available(vq)) + vqs[vq_word(vq)] |= vq_mask(vq); + + if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs))) + return -EFAULT; + + return 0; +} + +static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) +{ + unsigned int max_vq, vq; + u64 vqs[KVM_ARM64_SVE_VLS_WORDS]; + + if (!vcpu_has_sve(vcpu)) + return -ENOENT; + + if (kvm_arm_vcpu_sve_finalized(vcpu)) + return -EPERM; /* too late! 
*/ + + if (WARN_ON(vcpu->arch.sve_state)) + return -EINVAL; + + if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs))) + return -EFAULT; + + max_vq = 0; + for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq) + if (vq_present(&vqs, vq)) + max_vq = vq; + + if (max_vq > sve_vq_from_vl(kvm_sve_max_vl)) + return -EINVAL; + + /* + * Vector lengths supported by the host can't currently be + * hidden from the guest individually: instead we can only set a + * maximum via ZCR_EL2.LEN. So, make sure the available vector + * lengths match the set requested exactly up to the requested + * maximum: + */ + for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq) + if (vq_present(&vqs, vq) != sve_vq_available(vq)) + return -EINVAL; + + /* Can't run with no vector lengths at all: */ + if (max_vq < SVE_VQ_MIN) + return -EINVAL; + + /* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */ + vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq); + + return 0; +} + +#define SVE_REG_SLICE_SHIFT 0 +#define SVE_REG_SLICE_BITS 5 +#define SVE_REG_ID_SHIFT (SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS) +#define SVE_REG_ID_BITS 5 + +#define SVE_REG_SLICE_MASK \ GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1, \ SVE_REG_SLICE_SHIFT) +#define SVE_REG_ID_MASK \ GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT) + +#define SVE_NUM_SLICES (1 << SVE_REG_SLICE_BITS) + +#define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0)) +#define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0)) + +/* + * Number of register slices required to cover each whole SVE register. + * NOTE: Only the first slice ever exists, for now. + * If you are tempted to modify this, you must also rework sve_reg_to_region() + * to match: + */ +#define vcpu_sve_slices(vcpu) 1 + +/* Bounds of a single SVE register slice within vcpu->arch.sve_state */ +struct sve_state_reg_region { + unsigned int koffset; /* offset into sve_state in kernel memory */ + unsigned int klen; /* length in kernel memory */ + unsigned int upad; /* extra trailing padding in user memory */ +}; + +/* + * Validate SVE register ID and get sanitised bounds for user/kernel SVE + * register copy + */ +static int sve_reg_to_region(struct sve_state_reg_region *region, + struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) +{ + /* reg ID ranges for Z- registers */ + const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0); + const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1, + SVE_NUM_SLICES - 1); + + /* reg ID ranges for P- registers and FFR (which are contiguous) */ + const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0); + const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1); + + unsigned int vq; + unsigned int reg_num; + + unsigned int reqoffset, reqlen; /* User-requested offset and length */ + unsigned int maxlen; /* Maximum permitted length */ + + size_t sve_state_size; + + const u64 last_preg_id = KVM_REG_ARM64_SVE_PREG(SVE_NUM_PREGS - 1, + SVE_NUM_SLICES - 1); + + /* Verify that the P-regs and FFR really do have contiguous IDs: */ + BUILD_BUG_ON(KVM_REG_ARM64_SVE_FFR(0) != last_preg_id + 1); + + /* Verify that we match the UAPI header: */ + BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES); + + reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT; + + if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) { + if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0) + return -ENOENT; + + vq = sve_vq_from_vl(vcpu->arch.sve_max_vl); + + reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) - +
SVE_SIG_REGS_OFFSET; + reqlen = KVM_SVE_ZREG_SIZE; + maxlen = SVE_SIG_ZREG_SIZE(vq); + } else if (reg->id >= preg_id_min && reg->id <= preg_id_max) { + if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0) + return -ENOENT; + + vq = sve_vq_from_vl(vcpu->arch.sve_max_vl); + + reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) - + SVE_SIG_REGS_OFFSET; + reqlen = KVM_SVE_PREG_SIZE; + maxlen = SVE_SIG_PREG_SIZE(vq); + } else { + return -EINVAL; + } + + sve_state_size = vcpu_sve_state_size(vcpu); + if (WARN_ON(!sve_state_size)) + return -EINVAL; + + region->koffset = array_index_nospec(reqoffset, sve_state_size); + region->klen = min(maxlen, reqlen); + region->upad = reqlen - region->klen; + + return 0; +} + +static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) +{ + int ret; + struct sve_state_reg_region region; + char __user *uptr = (char __user *)reg->addr; + + /* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */ + if (reg->id == KVM_REG_ARM64_SVE_VLS) + return get_sve_vls(vcpu, reg); + + /* Try to interpret reg ID as an architectural SVE register... */ + ret = sve_reg_to_region(®ion, vcpu, reg); + if (ret) + return ret; + + if (!kvm_arm_vcpu_sve_finalized(vcpu)) + return -EPERM; + + if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset, + region.klen) || + clear_user(uptr + region.klen, region.upad)) + return -EFAULT; + + return 0; +} + +static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) +{ + int ret; + struct sve_state_reg_region region; + const char __user *uptr = (const char __user *)reg->addr; + + /* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */ + if (reg->id == KVM_REG_ARM64_SVE_VLS) + return set_sve_vls(vcpu, reg); + + /* Try to interpret reg ID as an architectural SVE register... */ + ret = sve_reg_to_region(®ion, vcpu, reg); + if (ret) + return ret; + + if (!kvm_arm_vcpu_sve_finalized(vcpu)) + return -EPERM; + + if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr, + region.klen)) + return -EFAULT; + + return 0; +} + int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { return -EINVAL; @@ -193,9 +448,37 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) return -EINVAL; } -static unsigned long num_core_regs(void) +static int copy_core_reg_indices(const struct kvm_vcpu *vcpu, + u64 __user *uindices) +{ + unsigned int i; + int n = 0; + const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE; + + for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) { + /* + * The KVM_REG_ARM64_SVE regs must be used instead of + * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on + * SVE-enabled vcpus: + */ + if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(i)) + continue; + + if (uindices) { + if (put_user(core_reg | i, uindices)) + return -EFAULT; + uindices++; + } + + n++; + } + + return n; +} + +static unsigned long num_core_regs(const struct kvm_vcpu *vcpu) { - return sizeof(struct kvm_regs) / sizeof(__u32); + return copy_core_reg_indices(vcpu, NULL); } /** @@ -251,6 +534,67 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? 
-EFAULT : 0; } +static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu) +{ + const unsigned int slices = vcpu_sve_slices(vcpu); + + if (!vcpu_has_sve(vcpu)) + return 0; + + /* Policed by KVM_GET_REG_LIST: */ + WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu)); + + return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */) + + 1; /* KVM_REG_ARM64_SVE_VLS */ +} + +static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu, + u64 __user *uindices) +{ + const unsigned int slices = vcpu_sve_slices(vcpu); + u64 reg; + unsigned int i, n; + int num_regs = 0; + + if (!vcpu_has_sve(vcpu)) + return 0; + + /* Policed by KVM_GET_REG_LIST: */ + WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu)); + + /* + * Enumerate this first, so that userspace can save/restore in + * the order reported by KVM_GET_REG_LIST: + */ + reg = KVM_REG_ARM64_SVE_VLS; + if (put_user(reg, uindices++)) + return -EFAULT; + ++num_regs; + + for (i = 0; i < slices; i++) { + for (n = 0; n < SVE_NUM_ZREGS; n++) { + reg = KVM_REG_ARM64_SVE_ZREG(n, i); + if (put_user(reg, uindices++)) + return -EFAULT; + num_regs++; + } + + for (n = 0; n < SVE_NUM_PREGS; n++) { + reg = KVM_REG_ARM64_SVE_PREG(n, i); + if (put_user(reg, uindices++)) + return -EFAULT; + num_regs++; + } + + reg = KVM_REG_ARM64_SVE_FFR(i); + if (put_user(reg, uindices++)) + return -EFAULT; + num_regs++; + } + + return num_regs; +} + /** * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG * @@ -258,8 +602,15 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) */ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) { - return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu) - + kvm_arm_get_fw_num_regs(vcpu) + NUM_TIMER_REGS; + unsigned long res = 0; + + res += num_core_regs(vcpu); + res += num_sve_regs(vcpu); + res += kvm_arm_num_sys_reg_descs(vcpu); + res += kvm_arm_get_fw_num_regs(vcpu); + res += NUM_TIMER_REGS; + + return res; } /** @@ -269,23 +620,25 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) */ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) { - unsigned int i; - const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE; int ret; - for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) { - if (put_user(core_reg | i, uindices)) - return -EFAULT; - uindices++; - } + ret = copy_core_reg_indices(vcpu, uindices); + if (ret < 0) + return ret; + uindices += ret; + + ret = copy_sve_reg_indices(vcpu, uindices); + if (ret < 0) + return ret; + uindices += ret; ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices); - if (ret) + if (ret < 0) return ret; uindices += kvm_arm_get_fw_num_regs(vcpu); ret = copy_timer_indices(vcpu, uindices); - if (ret) + if (ret < 0) return ret; uindices += NUM_TIMER_REGS; @@ -298,12 +651,11 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32) return -EINVAL; - /* Register group 16 means we want a core register. 
*/ - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) - return get_core_reg(vcpu, reg); - - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW) - return kvm_arm_get_fw_reg(vcpu, reg); + switch (reg->id & KVM_REG_ARM_COPROC_MASK) { + case KVM_REG_ARM_CORE: return get_core_reg(vcpu, reg); + case KVM_REG_ARM_FW: return kvm_arm_get_fw_reg(vcpu, reg); + case KVM_REG_ARM64_SVE: return get_sve_reg(vcpu, reg); + } if (is_timer_reg(reg->id)) return get_timer_reg(vcpu, reg); @@ -317,12 +669,11 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32) return -EINVAL; - /* Register group 16 means we set a core register. */ - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) - return set_core_reg(vcpu, reg); - - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW) - return kvm_arm_set_fw_reg(vcpu, reg); + switch (reg->id & KVM_REG_ARM_COPROC_MASK) { + case KVM_REG_ARM_CORE: return set_core_reg(vcpu, reg); + case KVM_REG_ARM_FW: return kvm_arm_set_fw_reg(vcpu, reg); + case KVM_REG_ARM64_SVE: return set_sve_reg(vcpu, reg); + } if (is_timer_reg(reg->id)) return set_timer_reg(vcpu, reg); diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 0b7983442071..516aead3c2a9 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -173,20 +173,40 @@ static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run) return 1; } +#define __ptrauth_save_key(regs, key) \ +({ \ + regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \ + regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \ +}) + +/* + * Handle the guest trying to use a ptrauth instruction, or trying to access a + * ptrauth register. + */ +void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu) +{ + struct kvm_cpu_context *ctxt; + + if (vcpu_has_ptrauth(vcpu)) { + vcpu_ptrauth_enable(vcpu); + ctxt = vcpu->arch.host_cpu_context; + __ptrauth_save_key(ctxt->sys_regs, APIA); + __ptrauth_save_key(ctxt->sys_regs, APIB); + __ptrauth_save_key(ctxt->sys_regs, APDA); + __ptrauth_save_key(ctxt->sys_regs, APDB); + __ptrauth_save_key(ctxt->sys_regs, APGA); + } else { + kvm_inject_undefined(vcpu); + } +} + /* * Guest usage of a ptrauth instruction (which the guest EL1 did not turn into * a NOP). */ static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu, struct kvm_run *run) { - /* - * We don't currently support ptrauth in a guest, and we mask the ID - * registers to prevent well-behaved guests from trying to make use of - * it. - * - * Inject an UNDEF, as if the feature really isn't present. - */ - kvm_inject_undefined(vcpu); + kvm_arm_vcpu_ptrauth_trap(vcpu); return 1; } diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index 675fdc186e3b..93ba3d7ef027 100644 --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S @@ -24,6 +24,7 @@ #include <asm/kvm_arm.h> #include <asm/kvm_asm.h> #include <asm/kvm_mmu.h> +#include <asm/kvm_ptrauth.h> #define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x) #define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x) @@ -64,6 +65,13 @@ ENTRY(__guest_enter) add x18, x0, #VCPU_CONTEXT + // Macro ptrauth_switch_to_guest format: + // ptrauth_switch_to_guest(guest cxt, tmp1, tmp2, tmp3) + // The below macro to restore guest keys is not implemented in C code + // as it may cause Pointer Authentication key signing mismatch errors + // when this feature is enabled for kernel code. 
+ ptrauth_switch_to_guest x18, x0, x1, x2 + // Restore guest regs x0-x17 ldp x0, x1, [x18, #CPU_XREG_OFFSET(0)] ldp x2, x3, [x18, #CPU_XREG_OFFSET(2)] @@ -118,6 +126,13 @@ ENTRY(__guest_exit) get_host_ctxt x2, x3 + // Macro ptrauth_switch_to_host format: + // ptrauth_switch_to_host(guest cxt, host cxt, tmp1, tmp2, tmp3) + // The below macro to save/restore keys is not implemented in C code + // as it may cause Pointer Authentication key signing mismatch errors + // when this feature is enabled for kernel code. + ptrauth_switch_to_host x1, x2, x3, x4, x5 + // Now restore the host regs restore_callee_saved_regs x2 diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 3563fe655cd5..22b4c335e0b2 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -100,7 +100,10 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu) val = read_sysreg(cpacr_el1); val |= CPACR_EL1_TTA; val &= ~CPACR_EL1_ZEN; - if (!update_fp_enabled(vcpu)) { + if (update_fp_enabled(vcpu)) { + if (vcpu_has_sve(vcpu)) + val |= CPACR_EL1_ZEN; + } else { val &= ~CPACR_EL1_FPEN; __activate_traps_fpsimd32(vcpu); } @@ -317,16 +320,48 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu) return true; } -static bool __hyp_text __hyp_switch_fpsimd(struct kvm_vcpu *vcpu) +/* Check for an FPSIMD/SVE trap and handle as appropriate */ +static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu) { - struct user_fpsimd_state *host_fpsimd = vcpu->arch.host_fpsimd_state; + bool vhe, sve_guest, sve_host; + u8 hsr_ec; - if (has_vhe()) - write_sysreg(read_sysreg(cpacr_el1) | CPACR_EL1_FPEN, - cpacr_el1); - else + if (!system_supports_fpsimd()) + return false; + + if (system_supports_sve()) { + sve_guest = vcpu_has_sve(vcpu); + sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE; + vhe = true; + } else { + sve_guest = false; + sve_host = false; + vhe = has_vhe(); + } + + hsr_ec = kvm_vcpu_trap_get_class(vcpu); + if (hsr_ec != ESR_ELx_EC_FP_ASIMD && + hsr_ec != ESR_ELx_EC_SVE) + return false; + + /* Don't handle SVE traps for non-SVE vcpus here: */ + if (!sve_guest) + if (hsr_ec != ESR_ELx_EC_FP_ASIMD) + return false; + + /* Valid trap. Switch the context: */ + + if (vhe) { + u64 reg = read_sysreg(cpacr_el1) | CPACR_EL1_FPEN; + + if (sve_guest) + reg |= CPACR_EL1_ZEN; + + write_sysreg(reg, cpacr_el1); + } else { write_sysreg(read_sysreg(cptr_el2) & ~(u64)CPTR_EL2_TFP, cptr_el2); + } isb(); @@ -335,21 +370,28 @@ static bool __hyp_text __hyp_switch_fpsimd(struct kvm_vcpu *vcpu) * In the SVE case, VHE is assumed: it is enforced by * Kconfig and kvm_arch_init().
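The ptrauth_switch_to_guest/ptrauth_switch_to_host macros themselves live in the new asm/kvm_ptrauth.h header, which is outside this diffstat's hunks. For orientation only, the guest-entry side amounts to the C sketch below; __ptrauth_restore_key() is a hypothetical twin of the __ptrauth_save_key() macro from handle_exit.c, and, as the comments above explain, the real implementation must stay in assembly so the compiler cannot sign and authenticate return addresses against half-switched keys:

/* Hypothetical restore-side twin of __ptrauth_save_key() */
#define __ptrauth_restore_key(regs, key)                                   \
({                                                                         \
	write_sysreg_s(regs[key ## KEYLO_EL1], SYS_ ## key ## KEYLO_EL1);  \
	write_sysreg_s(regs[key ## KEYHI_EL1], SYS_ ## key ## KEYHI_EL1);  \
})

/* Roughly what ptrauth_switch_to_guest does; the asm additionally checks
 * that ptrauth is enabled for this vcpu before touching the keys. */
static void ptrauth_restore_guest_keys(struct kvm_cpu_context *guest_ctxt)
{
	__ptrauth_restore_key(guest_ctxt->sys_regs, APIA);
	__ptrauth_restore_key(guest_ctxt->sys_regs, APIB);
	__ptrauth_restore_key(guest_ctxt->sys_regs, APDA);
	__ptrauth_restore_key(guest_ctxt->sys_regs, APDB);
	__ptrauth_restore_key(guest_ctxt->sys_regs, APGA);
	isb();	/* ensure the new keys are in effect before guest entry */
}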
*/ - if (system_supports_sve() && - (vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE)) { + if (sve_host) { struct thread_struct *thread = container_of( - host_fpsimd, + vcpu->arch.host_fpsimd_state, struct thread_struct, uw.fpsimd_state); - sve_save_state(sve_pffr(thread), &host_fpsimd->fpsr); + sve_save_state(sve_pffr(thread), + &vcpu->arch.host_fpsimd_state->fpsr); } else { - __fpsimd_save_state(host_fpsimd); + __fpsimd_save_state(vcpu->arch.host_fpsimd_state); } vcpu->arch.flags &= ~KVM_ARM64_FP_HOST; } - __fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs); + if (sve_guest) { + sve_load_state(vcpu_sve_pffr(vcpu), + &vcpu->arch.ctxt.gp_regs.fp_regs.fpsr, + sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1); + write_sysreg_s(vcpu->arch.ctxt.sys_regs[ZCR_EL1], SYS_ZCR_EL12); + } else { + __fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs); + } /* Skip restoring fpexc32 for AArch64 guests */ if (!(read_sysreg(hcr_el2) & HCR_RW)) @@ -385,10 +427,10 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) * and restore the guest context lazily. * If FP/SIMD is not implemented, handle the trap and inject an * undefined instruction exception to the guest. + * Similarly for trapped SVE accesses. */ - if (system_supports_fpsimd() && - kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_FP_ASIMD) - return __hyp_switch_fpsimd(vcpu); + if (__hyp_handle_fpsimd(vcpu)) + return true; if (!__populate_fault_info(vcpu)) return true; @@ -524,6 +566,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) { struct kvm_cpu_context *host_ctxt; struct kvm_cpu_context *guest_ctxt; + bool pmu_switch_needed; u64 exit_code; /* @@ -543,6 +586,8 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) host_ctxt->__hyp_running_vcpu = vcpu; guest_ctxt = &vcpu->arch.ctxt; + pmu_switch_needed = __pmu_switch_to_guest(host_ctxt); + __sysreg_save_state_nvhe(host_ctxt); __activate_vm(kern_hyp_va(vcpu->kvm)); @@ -589,6 +634,9 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) */ __debug_switch_to_host(vcpu); + if (pmu_switch_needed) + __pmu_switch_to_host(host_ctxt); + /* Returning to host will clear PSR.I, remask PMR if needed */ if (system_uses_irq_prio_masking()) gic_write_pmr(GIC_PRIO_IRQOFF); diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c new file mode 100644 index 000000000000..3da94a5bb6b7 --- /dev/null +++ b/arch/arm64/kvm/pmu.c @@ -0,0 +1,239 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 Arm Limited + * Author: Andrew Murray <Andrew.Murray@arm.com> + */ +#include <linux/kvm_host.h> +#include <linux/perf_event.h> +#include <asm/kvm_hyp.h> + +/* + * Given the perf event attributes and system type, determine + * if we are going to need to switch counters at guest entry/exit. + */ +static bool kvm_pmu_switch_needed(struct perf_event_attr *attr) +{ + /** + * With VHE the guest kernel runs at EL1 and the host at EL2; + * if user (EL0) is excluded then we have no reason to switch + * counters. + */ + if (has_vhe() && attr->exclude_user) + return false; + + /* Only switch if attributes are different */ + return (attr->exclude_host != attr->exclude_guest); +} + +/* + * Add events to track that we may want to switch at guest entry/exit + * time.
+ */ +void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) +{ + struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data); + + if (!kvm_pmu_switch_needed(attr)) + return; + + if (!attr->exclude_host) + ctx->pmu_events.events_host |= set; + if (!attr->exclude_guest) + ctx->pmu_events.events_guest |= set; +} + +/* + * Stop tracking events + */ +void kvm_clr_pmu_events(u32 clr) +{ + struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data); + + ctx->pmu_events.events_host &= ~clr; + ctx->pmu_events.events_guest &= ~clr; +} + +/** + * Disable host events, enable guest events + */ +bool __hyp_text __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt) +{ + struct kvm_host_data *host; + struct kvm_pmu_events *pmu; + + host = container_of(host_ctxt, struct kvm_host_data, host_ctxt); + pmu = &host->pmu_events; + + if (pmu->events_host) + write_sysreg(pmu->events_host, pmcntenclr_el0); + + if (pmu->events_guest) + write_sysreg(pmu->events_guest, pmcntenset_el0); + + return (pmu->events_host || pmu->events_guest); +} + +/** + * Disable guest events, enable host events + */ +void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt) +{ + struct kvm_host_data *host; + struct kvm_pmu_events *pmu; + + host = container_of(host_ctxt, struct kvm_host_data, host_ctxt); + pmu = &host->pmu_events; + + if (pmu->events_guest) + write_sysreg(pmu->events_guest, pmcntenclr_el0); + + if (pmu->events_host) + write_sysreg(pmu->events_host, pmcntenset_el0); +} + +#define PMEVTYPER_READ_CASE(idx) \ + case idx: \ + return read_sysreg(pmevtyper##idx##_el0) + +#define PMEVTYPER_WRITE_CASE(idx) \ + case idx: \ + write_sysreg(val, pmevtyper##idx##_el0); \ + break + +#define PMEVTYPER_CASES(readwrite) \ + PMEVTYPER_##readwrite##_CASE(0); \ + PMEVTYPER_##readwrite##_CASE(1); \ + PMEVTYPER_##readwrite##_CASE(2); \ + PMEVTYPER_##readwrite##_CASE(3); \ + PMEVTYPER_##readwrite##_CASE(4); \ + PMEVTYPER_##readwrite##_CASE(5); \ + PMEVTYPER_##readwrite##_CASE(6); \ + PMEVTYPER_##readwrite##_CASE(7); \ + PMEVTYPER_##readwrite##_CASE(8); \ + PMEVTYPER_##readwrite##_CASE(9); \ + PMEVTYPER_##readwrite##_CASE(10); \ + PMEVTYPER_##readwrite##_CASE(11); \ + PMEVTYPER_##readwrite##_CASE(12); \ + PMEVTYPER_##readwrite##_CASE(13); \ + PMEVTYPER_##readwrite##_CASE(14); \ + PMEVTYPER_##readwrite##_CASE(15); \ + PMEVTYPER_##readwrite##_CASE(16); \ + PMEVTYPER_##readwrite##_CASE(17); \ + PMEVTYPER_##readwrite##_CASE(18); \ + PMEVTYPER_##readwrite##_CASE(19); \ + PMEVTYPER_##readwrite##_CASE(20); \ + PMEVTYPER_##readwrite##_CASE(21); \ + PMEVTYPER_##readwrite##_CASE(22); \ + PMEVTYPER_##readwrite##_CASE(23); \ + PMEVTYPER_##readwrite##_CASE(24); \ + PMEVTYPER_##readwrite##_CASE(25); \ + PMEVTYPER_##readwrite##_CASE(26); \ + PMEVTYPER_##readwrite##_CASE(27); \ + PMEVTYPER_##readwrite##_CASE(28); \ + PMEVTYPER_##readwrite##_CASE(29); \ + PMEVTYPER_##readwrite##_CASE(30) + +/* + * Read a value direct from PMEVTYPER<idx> where idx is 0-30 + * or PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31). + */ +static u64 kvm_vcpu_pmu_read_evtype_direct(int idx) +{ + switch (idx) { + PMEVTYPER_CASES(READ); + case ARMV8_PMU_CYCLE_IDX: + return read_sysreg(pmccfiltr_el0); + default: + WARN_ON(1); + } + + return 0; +} + +/* + * Write a value direct to PMEVTYPER<idx> where idx is 0-30 + * or PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31). 
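kvm_set_pmu_events() and kvm_clr_pmu_events() are meant to be driven by the host PMU driver (arch/arm64/kernel/perf_event.c, modified in a separate patch of this series) as events are programmed onto and removed from hardware counters. Schematically, with the surrounding driver code elided and the *_sketch names invented:

/* 'idx' is the hardware counter index backing the perf event. */
static void armv8pmu_enable_event_sketch(struct perf_event *event, int idx)
{
	/* Register the counter for entry/exit switching when the attr
	 * excludes exactly one of host/guest (kvm_pmu_switch_needed()). */
	kvm_set_pmu_events(BIT(idx), &event->attr);

	/* ... then program PMEVTYPER<idx> and PMCNTENSET as usual ... */
}

static void armv8pmu_disable_event_sketch(struct perf_event *event, int idx)
{
	kvm_clr_pmu_events(BIT(idx));

	/* ... then disable the counter as usual ... */
}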
+ */ +static void kvm_vcpu_pmu_write_evtype_direct(int idx, u32 val) +{ + switch (idx) { + PMEVTYPER_CASES(WRITE); + case ARMV8_PMU_CYCLE_IDX: + write_sysreg(val, pmccfiltr_el0); + break; + default: + WARN_ON(1); + } +} + +/* + * Modify ARMv8 PMU events to include EL0 counting + */ +static void kvm_vcpu_pmu_enable_el0(unsigned long events) +{ + u64 typer; + u32 counter; + + for_each_set_bit(counter, &events, 32) { + typer = kvm_vcpu_pmu_read_evtype_direct(counter); + typer &= ~ARMV8_PMU_EXCLUDE_EL0; + kvm_vcpu_pmu_write_evtype_direct(counter, typer); + } +} + +/* + * Modify ARMv8 PMU events to exclude EL0 counting + */ +static void kvm_vcpu_pmu_disable_el0(unsigned long events) +{ + u64 typer; + u32 counter; + + for_each_set_bit(counter, &events, 32) { + typer = kvm_vcpu_pmu_read_evtype_direct(counter); + typer |= ARMV8_PMU_EXCLUDE_EL0; + kvm_vcpu_pmu_write_evtype_direct(counter, typer); + } +} + +/* + * On VHE ensure that only guest events have EL0 counting enabled + */ +void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) +{ + struct kvm_cpu_context *host_ctxt; + struct kvm_host_data *host; + u32 events_guest, events_host; + + if (!has_vhe()) + return; + + host_ctxt = vcpu->arch.host_cpu_context; + host = container_of(host_ctxt, struct kvm_host_data, host_ctxt); + events_guest = host->pmu_events.events_guest; + events_host = host->pmu_events.events_host; + + kvm_vcpu_pmu_enable_el0(events_guest); + kvm_vcpu_pmu_disable_el0(events_host); +} + +/* + * On VHE ensure that only host events have EL0 counting enabled + */ +void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) +{ + struct kvm_cpu_context *host_ctxt; + struct kvm_host_data *host; + u32 events_guest, events_host; + + if (!has_vhe()) + return; + + host_ctxt = vcpu->arch.host_cpu_context; + host = container_of(host_ctxt, struct kvm_host_data, host_ctxt); + events_guest = host->pmu_events.events_guest; + events_host = host->pmu_events.events_host; + + kvm_vcpu_pmu_enable_el0(events_host); + kvm_vcpu_pmu_disable_el0(events_guest); +} diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index e2a0500cd7a2..1140b4485575 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -20,20 +20,26 @@ */ #include <linux/errno.h> +#include <linux/kernel.h> #include <linux/kvm_host.h> #include <linux/kvm.h> #include <linux/hw_breakpoint.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/types.h> #include <kvm/arm_arch_timer.h> #include <asm/cpufeature.h> #include <asm/cputype.h> +#include <asm/fpsimd.h> #include <asm/ptrace.h> #include <asm/kvm_arm.h> #include <asm/kvm_asm.h> #include <asm/kvm_coproc.h> #include <asm/kvm_emulate.h> #include <asm/kvm_mmu.h> +#include <asm/virt.h> /* Maximum phys_shift supported for any VM on this host */ static u32 kvm_ipa_limit; @@ -92,6 +98,14 @@ int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_ARM_VM_IPA_SIZE: r = kvm_ipa_limit; break; + case KVM_CAP_ARM_SVE: + r = system_supports_sve(); + break; + case KVM_CAP_ARM_PTRAUTH_ADDRESS: + case KVM_CAP_ARM_PTRAUTH_GENERIC: + r = has_vhe() && system_supports_address_auth() && + system_supports_generic_auth(); + break; default: r = 0; } @@ -99,13 +113,148 @@ int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext) return r; } +unsigned int kvm_sve_max_vl; + +int kvm_arm_init_sve(void) +{ + if (system_supports_sve()) { + kvm_sve_max_vl = sve_max_virtualisable_vl; + + /* + * The get_sve_reg()/set_sve_reg() ioctl interface will need + * to be extended with multiple register slice support in + * 
order to support vector lengths greater than + * SVE_VL_ARCH_MAX: + */ + if (WARN_ON(kvm_sve_max_vl > SVE_VL_ARCH_MAX)) + kvm_sve_max_vl = SVE_VL_ARCH_MAX; + + /* + * Don't even try to make use of vector lengths that + * aren't available on all CPUs, for now: + */ + if (kvm_sve_max_vl < sve_max_vl) + pr_warn("KVM: SVE vector length for guests limited to %u bytes\n", + kvm_sve_max_vl); + } + + return 0; +} + +static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu) +{ + if (!system_supports_sve()) + return -EINVAL; + + /* Verify that KVM startup enforced this when SVE was detected: */ + if (WARN_ON(!has_vhe())) + return -EINVAL; + + vcpu->arch.sve_max_vl = kvm_sve_max_vl; + + /* + * Userspace can still customize the vector lengths by writing + * KVM_REG_ARM64_SVE_VLS. Allocation is deferred until + * kvm_arm_vcpu_finalize(), which freezes the configuration. + */ + vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_SVE; + + return 0; +} + +/* + * Finalize vcpu's maximum SVE vector length, allocating + * vcpu->arch.sve_state as necessary. + */ +static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu) +{ + void *buf; + unsigned int vl; + + vl = vcpu->arch.sve_max_vl; + + /* + * Responsibility for these properties is shared between + * kvm_arm_init_sve(), kvm_vcpu_enable_sve() and + * set_sve_vls(). Double-check here just to be sure: + */ + if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl || + vl > SVE_VL_ARCH_MAX)) + return -EIO; + + buf = kzalloc(SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl)), GFP_KERNEL); + if (!buf) + return -ENOMEM; + + vcpu->arch.sve_state = buf; + vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED; + return 0; +} + +int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature) +{ + switch (feature) { + case KVM_ARM_VCPU_SVE: + if (!vcpu_has_sve(vcpu)) + return -EINVAL; + + if (kvm_arm_vcpu_sve_finalized(vcpu)) + return -EPERM; + + return kvm_vcpu_finalize_sve(vcpu); + } + + return -EINVAL; +} + +bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu) +{ + if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu)) + return false; + + return true; +} + +void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) +{ + kfree(vcpu->arch.sve_state); +} + +static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu) +{ + if (vcpu_has_sve(vcpu)) + memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu)); +} + +static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu) +{ + /* Support ptrauth only if the system supports these capabilities. */ + if (!has_vhe()) + return -EINVAL; + + if (!system_supports_address_auth() || + !system_supports_generic_auth()) + return -EINVAL; + /* + * For now make sure that both address/generic pointer authentication + * features are requested by the userspace together. + */ + if (!test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) || + !test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) + return -EINVAL; + + vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_PTRAUTH; + return 0; +} + /** * kvm_reset_vcpu - sets core registers and sys_regs to reset value * @vcpu: The VCPU pointer * * This function finds the right table above and sets the registers on * the virtual CPU struct to their architecturally defined reset - * values. + * values, except for registers whose reset is deferred until + * kvm_arm_vcpu_finalize().
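Seen from userspace, the resulting ioctl ordering is: request SVE at KVM_ARM_VCPU_INIT, optionally narrow the vector-length set via the KVM_REG_ARM64_SVE_VLS pseudo-register, then freeze the configuration with KVM_ARM_VCPU_FINALIZE before any other SVE register access. A minimal sketch, assuming vcpu_fd was created beforehand (headers as in the earlier sketch) and with error handling elided:

static int vcpu_setup_sve(int vcpu_fd)
{
	struct kvm_vcpu_init init = {
		.target      = KVM_ARM_TARGET_GENERIC_V8,
		.features[0] = 1 << KVM_ARM_VCPU_SVE,
	};
	__u64 vqs[KVM_ARM64_SVE_VLS_WORDS];
	struct kvm_one_reg vls = {
		.id   = KVM_REG_ARM64_SVE_VLS,
		.addr = (__u64)vqs,
	};
	int feature = KVM_ARM_VCPU_SVE;

	ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);    /* SVE requested */
	ioctl(vcpu_fd, KVM_GET_ONE_REG, &vls);       /* default VL set */
	/* ... optionally clear bits in vqs[] before writing it back ... */
	ioctl(vcpu_fd, KVM_SET_ONE_REG, &vls);
	return ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature);
}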
* * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT * ioctl or as part of handling a request issued by another VCPU in the PSCI @@ -131,6 +280,22 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) if (loaded) kvm_arch_vcpu_put(vcpu); + if (!kvm_arm_vcpu_sve_finalized(vcpu)) { + if (test_bit(KVM_ARM_VCPU_SVE, vcpu->arch.features)) { + ret = kvm_vcpu_enable_sve(vcpu); + if (ret) + goto out; + } + } else { + kvm_vcpu_reset_sve(vcpu); + } + + if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) || + test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) { + if (kvm_vcpu_enable_ptrauth(vcpu)) + goto out; + } + switch (vcpu->arch.target) { default: if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) { diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 539feecda5b8..857b226bcdde 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -695,6 +695,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, val |= p->regval & ARMV8_PMU_PMCR_MASK; __vcpu_sys_reg(vcpu, PMCR_EL0) = val; kvm_pmu_handle_pmcr(vcpu, val); + kvm_vcpu_pmu_restore_guest(vcpu); } else { /* PMCR.P & PMCR.C are RAZ */ val = __vcpu_sys_reg(vcpu, PMCR_EL0) @@ -850,6 +851,7 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p, if (p->is_write) { kvm_pmu_set_counter_event_type(vcpu, p->regval, idx); __vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK; + kvm_vcpu_pmu_restore_guest(vcpu); } else { p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK; } @@ -875,6 +877,7 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, /* accessing PMCNTENSET_EL0 */ __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val; kvm_pmu_enable_counter(vcpu, val); + kvm_vcpu_pmu_restore_guest(vcpu); } else { /* accessing PMCNTENCLR_EL0 */ __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val; @@ -1007,6 +1010,37 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, { SYS_DESC(SYS_PMEVTYPERn_EL0(n)), \ access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), } +static bool trap_ptrauth(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *rd) +{ + kvm_arm_vcpu_ptrauth_trap(vcpu); + + /* + * Return false for both cases as we never skip the trapped + * instruction: + * + * - Either we re-execute the same key register access instruction + * after enabling ptrauth. + * - Or an UNDEF is injected as ptrauth is not supported/enabled. + */ + return false; +} + +static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) +{ + return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN_USER | REG_HIDDEN_GUEST; +} + +#define __PTRAUTH_KEY(k) \ + { SYS_DESC(SYS_## k), trap_ptrauth, reset_unknown, k, \ + .visibility = ptrauth_visibility} + +#define PTRAUTH_KEY(k) \ + __PTRAUTH_KEY(k ## KEYLO_EL1), \ + __PTRAUTH_KEY(k ## KEYHI_EL1) + static bool access_arch_timer(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) @@ -1044,25 +1078,20 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu, } /* Read a sanitised cpufeature ID register by sys_reg_desc */ -static u64 read_id_reg(struct sys_reg_desc const *r, bool raz) +static u64 read_id_reg(const struct kvm_vcpu *vcpu, + struct sys_reg_desc const *r, bool raz) { u32 id = sys_reg((u32)r->Op0, (u32)r->Op1, (u32)r->CRn, (u32)r->CRm, (u32)r->Op2); u64 val = raz ? 
0 : read_sanitised_ftr_reg(id); - if (id == SYS_ID_AA64PFR0_EL1) { - if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT)) - kvm_debug("SVE unsupported for guests, suppressing\n"); - + if (id == SYS_ID_AA64PFR0_EL1 && !vcpu_has_sve(vcpu)) { val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT); - } else if (id == SYS_ID_AA64ISAR1_EL1) { - const u64 ptrauth_mask = (0xfUL << ID_AA64ISAR1_APA_SHIFT) | - (0xfUL << ID_AA64ISAR1_API_SHIFT) | - (0xfUL << ID_AA64ISAR1_GPA_SHIFT) | - (0xfUL << ID_AA64ISAR1_GPI_SHIFT); - if (val & ptrauth_mask) - kvm_debug("ptrauth unsupported for guests, suppressing\n"); - val &= ~ptrauth_mask; + } else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) { + val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) | + (0xfUL << ID_AA64ISAR1_API_SHIFT) | + (0xfUL << ID_AA64ISAR1_GPA_SHIFT) | + (0xfUL << ID_AA64ISAR1_GPI_SHIFT)); } return val; @@ -1078,7 +1107,7 @@ static bool __access_id_reg(struct kvm_vcpu *vcpu, if (p->is_write) return write_to_read_only(vcpu, p, r); - p->regval = read_id_reg(r, raz); + p->regval = read_id_reg(vcpu, r, raz); return true; } @@ -1100,6 +1129,81 @@ static int reg_from_user(u64 *val, const void __user *uaddr, u64 id); static int reg_to_user(void __user *uaddr, const u64 *val, u64 id); static u64 sys_reg_to_index(const struct sys_reg_desc *reg); +/* Visibility overrides for SVE-specific control registers */ +static unsigned int sve_visibility(const struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) +{ + if (vcpu_has_sve(vcpu)) + return 0; + + return REG_HIDDEN_USER | REG_HIDDEN_GUEST; +} + +/* Visibility overrides for SVE-specific ID registers */ +static unsigned int sve_id_visibility(const struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) +{ + if (vcpu_has_sve(vcpu)) + return 0; + + return REG_HIDDEN_USER; +} + +/* Generate the emulated ID_AA64ZFR0_EL1 value exposed to the guest */ +static u64 guest_id_aa64zfr0_el1(const struct kvm_vcpu *vcpu) +{ + if (!vcpu_has_sve(vcpu)) + return 0; + + return read_sanitised_ftr_reg(SYS_ID_AA64ZFR0_EL1); +} + +static bool access_id_aa64zfr0_el1(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *rd) +{ + if (p->is_write) + return write_to_read_only(vcpu, p, rd); + + p->regval = guest_id_aa64zfr0_el1(vcpu); + return true; +} + +static int get_id_aa64zfr0_el1(struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd, + const struct kvm_one_reg *reg, void __user *uaddr) +{ + u64 val; + + if (WARN_ON(!vcpu_has_sve(vcpu))) + return -ENOENT; + + val = guest_id_aa64zfr0_el1(vcpu); + return reg_to_user(uaddr, &val, reg->id); +} + +static int set_id_aa64zfr0_el1(struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd, + const struct kvm_one_reg *reg, void __user *uaddr) +{ + const u64 id = sys_reg_to_index(rd); + int err; + u64 val; + + if (WARN_ON(!vcpu_has_sve(vcpu))) + return -ENOENT; + + err = reg_from_user(&val, uaddr, id); + if (err) + return err; + + /* This is what we mean by invariant: you can't change it. */ + if (val != guest_id_aa64zfr0_el1(vcpu)) + return -EINVAL; + + return 0; +} + /* * cpufeature ID register user accessors * @@ -1107,16 +1211,18 @@ static u64 sys_reg_to_index(const struct sys_reg_desc *reg); * are stored, and for set_id_reg() we don't allow the effective value * to be changed. 
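The invariance rule above has a directly observable userspace effect: ID_AA64ZFR0_EL1 reads back the sanitised host value and accepts only that same value on write. A short sketch, with vcpu_fd assumed (the sysreg encoding op0=3, op1=0, CRn=0, CRm=4, op2=4 is ID_AA64ZFR0_EL1):

	__u64 val;
	struct kvm_one_reg r = {
		.id   = ARM64_SYS_REG(3, 0, 0, 4, 4),	/* ID_AA64ZFR0_EL1 */
		.addr = (__u64)&val,
	};

	ioctl(vcpu_fd, KVM_GET_ONE_REG, &r);	/* sanitised host value */
	ioctl(vcpu_fd, KVM_SET_ONE_REG, &r);	/* same value: succeeds */
	val ^= 1;
	ioctl(vcpu_fd, KVM_SET_ONE_REG, &r);	/* changed value: -EINVAL */

On a vcpu without SVE the register is hidden from userspace entirely, so both ioctls fail with ENOENT instead.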
*/ -static int __get_id_reg(const struct sys_reg_desc *rd, void __user *uaddr, +static int __get_id_reg(const struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd, void __user *uaddr, bool raz) { const u64 id = sys_reg_to_index(rd); - const u64 val = read_id_reg(rd, raz); + const u64 val = read_id_reg(vcpu, rd, raz); return reg_to_user(uaddr, &val, id); } -static int __set_id_reg(const struct sys_reg_desc *rd, void __user *uaddr, +static int __set_id_reg(const struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd, void __user *uaddr, bool raz) { const u64 id = sys_reg_to_index(rd); @@ -1128,7 +1234,7 @@ static int __set_id_reg(const struct sys_reg_desc *rd, void __user *uaddr, return err; /* This is what we mean by invariant: you can't change it. */ - if (val != read_id_reg(rd, raz)) + if (val != read_id_reg(vcpu, rd, raz)) return -EINVAL; return 0; @@ -1137,25 +1243,25 @@ static int __set_id_reg(const struct sys_reg_desc *rd, void __user *uaddr, static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { - return __get_id_reg(rd, uaddr, false); + return __get_id_reg(vcpu, rd, uaddr, false); } static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { - return __set_id_reg(rd, uaddr, false); + return __set_id_reg(vcpu, rd, uaddr, false); } static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { - return __get_id_reg(rd, uaddr, true); + return __get_id_reg(vcpu, rd, uaddr, true); } static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { - return __set_id_reg(rd, uaddr, true); + return __set_id_reg(vcpu, rd, uaddr, true); } static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, @@ -1343,7 +1449,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { ID_SANITISED(ID_AA64PFR1_EL1), ID_UNALLOCATED(4,2), ID_UNALLOCATED(4,3), - ID_UNALLOCATED(4,4), + { SYS_DESC(SYS_ID_AA64ZFR0_EL1), access_id_aa64zfr0_el1, .get_user = get_id_aa64zfr0_el1, .set_user = set_id_aa64zfr0_el1, .visibility = sve_id_visibility }, ID_UNALLOCATED(4,5), ID_UNALLOCATED(4,6), ID_UNALLOCATED(4,7), @@ -1380,10 +1486,17 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 }, { SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 }, + { SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility }, { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 }, { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 }, { SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 }, + PTRAUTH_KEY(APIA), + PTRAUTH_KEY(APIB), + PTRAUTH_KEY(APDA), + PTRAUTH_KEY(APDB), + PTRAUTH_KEY(APGA), + { SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 }, { SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 }, { SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 }, @@ -1924,6 +2037,12 @@ static void perform_access(struct kvm_vcpu *vcpu, { trace_kvm_sys_access(*vcpu_pc(vcpu), params, r); + /* Check for regs disabled by runtime config */ + if (sysreg_hidden_from_guest(vcpu, r)) { + kvm_inject_undefined(vcpu); + return; + } + /* * Not having an accessor means that we have configured a trap * that we don't know how to handle. 
This certainly qualifies @@ -2435,6 +2554,10 @@ int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg if (!r) return get_invariant_sys_reg(reg->id, uaddr); + /* Check for regs disabled by runtime config */ + if (sysreg_hidden_from_user(vcpu, r)) + return -ENOENT; + if (r->get_user) return (r->get_user)(vcpu, r, reg, uaddr); @@ -2456,6 +2579,10 @@ int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg if (!r) return set_invariant_sys_reg(reg->id, uaddr); + /* Check for regs disabled by runtime config */ + if (sysreg_hidden_from_user(vcpu, r)) + return -ENOENT; + if (r->set_user) return (r->set_user)(vcpu, r, reg, uaddr); @@ -2512,7 +2639,8 @@ static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind) return true; } -static int walk_one_sys_reg(const struct sys_reg_desc *rd, +static int walk_one_sys_reg(const struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd, u64 __user **uind, unsigned int *total) { @@ -2523,6 +2651,9 @@ static int walk_one_sys_reg(const struct sys_reg_desc *rd, if (!(rd->reg || rd->get_user)) return 0; + if (sysreg_hidden_from_user(vcpu, rd)) + return 0; + if (!copy_reg_to_user(rd, uind)) return -EFAULT; @@ -2551,9 +2682,9 @@ static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind) int cmp = cmp_sys_reg(i1, i2); /* target-specific overrides generic entry. */ if (cmp <= 0) - err = walk_one_sys_reg(i1, &uind, &total); + err = walk_one_sys_reg(vcpu, i1, &uind, &total); else - err = walk_one_sys_reg(i2, &uind, &total); + err = walk_one_sys_reg(vcpu, i2, &uind, &total); if (err) return err; diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h index 3b1bc7f01d0b..2be99508dcb9 100644 --- a/arch/arm64/kvm/sys_regs.h +++ b/arch/arm64/kvm/sys_regs.h @@ -64,8 +64,15 @@ struct sys_reg_desc { const struct kvm_one_reg *reg, void __user *uaddr); int (*set_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr); + + /* Return mask of REG_* runtime visibility overrides */ + unsigned int (*visibility)(const struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd); }; +#define REG_HIDDEN_USER (1 << 0) /* hidden from userspace ioctls */ +#define REG_HIDDEN_GUEST (1 << 1) /* hidden from guest */ + static inline void print_sys_reg_instr(const struct sys_reg_params *p) { /* Look, we even formatted it for you to paste into the table! */ @@ -102,6 +109,24 @@ static inline void reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r __vcpu_sys_reg(vcpu, r->reg) = r->val; } +static inline bool sysreg_hidden_from_guest(const struct kvm_vcpu *vcpu, + const struct sys_reg_desc *r) +{ + if (likely(!r->visibility)) + return false; + + return r->visibility(vcpu, r) & REG_HIDDEN_GUEST; +} + +static inline bool sysreg_hidden_from_user(const struct kvm_vcpu *vcpu, + const struct sys_reg_desc *r) +{ + if (likely(!r->visibility)) + return false; + + return r->visibility(vcpu, r) & REG_HIDDEN_USER; +} + static inline int cmp_sys_reg(const struct sys_reg_desc *i1, const struct sys_reg_desc *i2) {
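The visibility hook added here generalizes beyond SVE and ptrauth: any register whose existence depends on per-vcpu configuration can opt in with a callback in its sys_reg_descs[] entry. A minimal sketch for a hypothetical FOO_EL1 register (every foo/FOO name below is invented):

static unsigned int foo_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (vcpu_has_foo(vcpu))		/* hypothetical feature test */
		return 0;		/* fully visible */

	/* UNDEF in the guest, ENOENT from the register ioctls */
	return REG_HIDDEN_USER | REG_HIDDEN_GUEST;
}

/* ...and the matching sys_reg_descs[] entry: */
{ SYS_DESC(SYS_FOO_EL1), access_foo, reset_val, FOO_EL1, 0,
  .visibility = foo_visibility },

Returning only REG_HIDDEN_USER (as sve_id_visibility() does) keeps a register visible to the guest while removing it from the userspace view, which is the right shape for ID registers that must read-as-zero rather than UNDEF.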