author	Linus Torvalds	2019-06-20 13:50:37 -0700
committer	Linus Torvalds	2019-06-20 13:50:37 -0700
commit	b3e978337b25b042aa653652a029e3d798814c12 (patch)
tree	f03c4b0b6d9eadb4cef3daa8c1f9920415cdcefe /arch/arm64
parent	e929387449cf631e96840296a01922be1ef3c832 (diff)
parent	b21e31b253048b7f9768ca7cc270e67765fd6ba2 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm fixes from Paolo Bonzini:
"Fixes for ARM and x86, plus selftest patches and nicer structs for
nested state save/restore"
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
KVM: nVMX: reorganize initial steps of vmx_set_nested_state
KVM: arm/arm64: Fix emulated ptimer irq injection
tests: kvm: Check for a kernel warning
kvm: tests: Sort tests in the Makefile alphabetically
KVM: x86/mmu: Allocate PAE root array when using SVM's 32-bit NPT
KVM: x86: Modify struct kvm_nested_state to have explicit fields for data
KVM: fix typo in documentation
KVM: nVMX: use correct clean fields when copying from eVMCS
KVM: arm/arm64: vgic: Fix kvm_device leak in vgic_its_destroy
KVM: arm64: Filter out invalid core register IDs in KVM_GET_REG_LIST
KVM: arm64: Implement vq_present() as a macro
Diffstat (limited to 'arch/arm64')
-rw-r--r--	arch/arm64/kvm/guest.c	65
1 file changed, 43 insertions, 22 deletions
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 3ae2f82fca46..c8aa00179363 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -70,10 +70,8 @@ static u64 core_reg_offset_from_id(u64 id)
 	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
 }
 
-static int validate_core_offset(const struct kvm_vcpu *vcpu,
-				const struct kvm_one_reg *reg)
+static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
 {
-	u64 off = core_reg_offset_from_id(reg->id);
 	int size;
 
 	switch (off) {
@@ -103,8 +101,7 @@ static int validate_core_offset(const struct kvm_vcpu *vcpu,
 		return -EINVAL;
 	}
 
-	if (KVM_REG_SIZE(reg->id) != size ||
-	    !IS_ALIGNED(off, size / sizeof(__u32)))
+	if (!IS_ALIGNED(off, size / sizeof(__u32)))
 		return -EINVAL;
 
 	/*
@@ -115,6 +112,21 @@ static int validate_core_offset(const struct kvm_vcpu *vcpu,
 	if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
 		return -EINVAL;
 
+	return size;
+}
+
+static int validate_core_offset(const struct kvm_vcpu *vcpu,
+				const struct kvm_one_reg *reg)
+{
+	u64 off = core_reg_offset_from_id(reg->id);
+	int size = core_reg_size_from_offset(vcpu, off);
+
+	if (size < 0)
+		return -EINVAL;
+
+	if (KVM_REG_SIZE(reg->id) != size)
+		return -EINVAL;
+
 	return 0;
 }
 
@@ -207,13 +219,7 @@ out:
 
 #define vq_word(vq)	(((vq) - SVE_VQ_MIN) / 64)
 #define vq_mask(vq)	((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
-
-static bool vq_present(
-	const u64 (*const vqs)[KVM_ARM64_SVE_VLS_WORDS],
-	unsigned int vq)
-{
-	return (*vqs)[vq_word(vq)] & vq_mask(vq);
-}
+#define vq_present(vqs, vq) ((vqs)[vq_word(vq)] & vq_mask(vq))
 
 static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
@@ -258,7 +264,7 @@ static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 
 	max_vq = 0;
 	for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
-		if (vq_present(&vqs, vq))
+		if (vq_present(vqs, vq))
 			max_vq = vq;
 
 	if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
@@ -272,7 +278,7 @@ static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	 * maximum:
 	 */
 	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
-		if (vq_present(&vqs, vq) != sve_vq_available(vq))
+		if (vq_present(vqs, vq) != sve_vq_available(vq))
 			return -EINVAL;
 
 	/* Can't run with no vector lengths at all: */
@@ -453,19 +459,34 @@ static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
 {
 	unsigned int i;
 	int n = 0;
-	const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
 
 	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
-		/*
-		 * The KVM_REG_ARM64_SVE regs must be used instead of
-		 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
-		 * SVE-enabled vcpus:
-		 */
-		if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(i))
+		u64 reg = KVM_REG_ARM64 | KVM_REG_ARM_CORE | i;
+		int size = core_reg_size_from_offset(vcpu, i);
+
+		if (size < 0)
+			continue;
+
+		switch (size) {
+		case sizeof(__u32):
+			reg |= KVM_REG_SIZE_U32;
+			break;
+
+		case sizeof(__u64):
+			reg |= KVM_REG_SIZE_U64;
+			break;
+
+		case sizeof(__uint128_t):
+			reg |= KVM_REG_SIZE_U128;
+			break;
+
+		default:
+			WARN_ON(1);
 			continue;
+		}
 
 		if (uindices) {
-			if (put_user(core_reg | i, uindices))
+			if (put_user(reg, uindices))
 				return -EFAULT;
 			uindices++;
 		}
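As context for the KVM_GET_REG_LIST change above: after this merge each core register index carries its real size (U32/U64/U128) rather than being reported uniformly as U64, and SVE-enabled vcpus no longer advertise the FPSIMD V-registers through KVM_REG_ARM_CORE. The sketch below is a hypothetical userspace consumer, not code from the patch; the helper name dump_core_regs(), the fixed 16-byte buffer, and the assumption of an arm64 host with an already-created vcpu_fd are illustrative. It relies only on the documented KVM_GET_REG_LIST / KVM_GET_ONE_REG ioctls, and on KVM reporting the required list size via E2BIG when n is too small (the same probing pattern the kvm selftests use).

/*
 * Illustrative only -- not part of the merge.  Assumes an arm64 host and an
 * existing vcpu_fd; dump_core_regs() is a made-up helper name.
 */
#include <errno.h>
#include <linux/kvm.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static int dump_core_regs(int vcpu_fd)
{
	struct kvm_reg_list probe = { .n = 0 };
	struct kvm_reg_list *list;
	uint64_t i;

	/* With n == 0 the ioctl fails with E2BIG and writes back the real count. */
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe) == 0 || errno != E2BIG)
		return -1;

	list = calloc(1, sizeof(*list) + probe.n * sizeof(uint64_t));
	if (!list)
		return -1;
	list->n = probe.n;

	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
		perror("KVM_GET_REG_LIST");
		free(list);
		return -1;
	}

	for (i = 0; i < list->n; i++) {
		uint64_t id = list->reg[i];
		/* Bits 55:52 of the ID encode log2 of the register size in bytes. */
		size_t size = 1UL << ((id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT);
		uint8_t buf[16] = { 0 };	/* large enough for a U128 core reg */
		struct kvm_one_reg reg = { .id = id, .addr = (uintptr_t)buf };

		/* Only look at KVM_REG_ARM_CORE entries in this sketch. */
		if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM_CORE)
			continue;

		if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0) {
			perror("KVM_GET_ONE_REG");
			continue;
		}

		printf("core reg 0x%016llx: %zu bytes\n",
		       (unsigned long long)id, size);
	}

	free(list);
	return 0;
}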