author    | Linus Torvalds | 2022-05-26 14:20:14 -0700
committer | Linus Torvalds | 2022-05-26 14:20:14 -0700
commit    | bf9095424d027e942e1d1ee74977e17b7df8e455 (patch)
tree      | 57659cf68b7df09005bc5ada4d315d66472cebf3 /tools
parent    | 98931dd95fd489fcbfa97da563505a6f071d7c77 (diff)
parent    | ffd1925a596ce68bed7d81c61cb64bc35f788a9d (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm updates from Paolo Bonzini:
"S390:
- ultravisor communication device driver
- fix TEID on terminating storage key ops
RISC-V:
- Added Sv57x4 support for G-stage page table
- Added range based local HFENCE functions
- Added remote HFENCE functions based on VCPU requests
- Added ISA extension registers in ONE_REG interface
- Updated KVM RISC-V maintainers entry to cover selftests support
ARM:
- Add support for the ARMv8.6 WFxT extension
- Guard pages for the EL2 stacks
- Trap and emulate AArch32 ID registers to hide unsupported features
- Ability to select and save/restore the set of hypercalls exposed to
the guest
- Support for PSCI-initiated suspend in collaboration with userspace
- GICv3 register-based LPI invalidation support
- Move host PMU event merging into the vcpu data structure
- GICv3 ITS save/restore fixes
- The usual set of small-scale cleanups and fixes
x86:
- New ioctls to get/set TSC frequency for a whole VM
- Allow userspace to opt out of hypercall patching
- Only do MSR filtering for MSRs accessed by rdmsr/wrmsr
AMD SEV improvements:
- Add KVM_EXIT_SHUTDOWN metadata for SEV-ES
- V_TSC_AUX support
Nested virtualization improvements for AMD:
- Support for "nested nested" optimizations (nested vVMLOAD/VMSAVE,
nested vGIF)
- Allow AVIC to co-exist with a nested guest running
- Fixes for LBR virtualization when a nested guest is running, and
nested LBR virtualization support
- PAUSE filtering for nested hypervisors
Guest support:
- Decoupling of vcpu_is_preempted from PV spinlocks"
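The "TSC frequency for a whole VM" item above is the VM-scoped form of KVM_SET_TSC_KHZ/KVM_GET_TSC_KHZ, exercised by the new tsc_scaling_sync selftest included in this pull. A minimal userspace sketch, assuming a VM fd from KVM_CREATE_VM and kernel headers new enough to carry KVM_CAP_VM_TSC_CONTROL (error handling trimmed):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Set a VM-wide TSC frequency, mirroring what tsc_scaling_sync does with
 * vm_ioctl(vm, KVM_SET_TSC_KHZ, ...). Returns 0 on success. */
static int set_vm_tsc_khz(int vm_fd, unsigned long tsc_khz)
{
	/* The capability advertises VM-scoped TSC frequency control */
	if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_VM_TSC_CONTROL) <= 0)
		return -1;

	/* Same ioctl number as the long-standing per-vCPU variant, now
	 * also accepted on the VM fd; the frequency becomes the default
	 * for vCPUs created afterwards, as in the selftest. */
	return ioctl(vm_fd, KVM_SET_TSC_KHZ, tsc_khz);
}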
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (199 commits)
KVM: x86: Fix the intel_pt PMI handling wrongly considered from guest
KVM: selftests: x86: Sync the new name of the test case to .gitignore
Documentation: kvm: reorder ARM-specific section about KVM_SYSTEM_EVENT_SUSPEND
x86, kvm: use correct GFP flags for preemption disabled
KVM: LAPIC: Drop pending LAPIC timer injection when canceling the timer
x86/kvm: Alloc dummy async #PF token outside of raw spinlock
KVM: x86: avoid calling x86 emulator without a decoded instruction
KVM: SVM: Use kzalloc for sev ioctl interfaces to prevent kernel data leak
x86/fpu: KVM: Set the base guest FPU uABI size to sizeof(struct kvm_xsave)
s390/uv_uapi: depend on CONFIG_S390
KVM: selftests: x86: Fix test failure on arch lbr capable platforms
KVM: LAPIC: Trace LAPIC timer expiration on every vmentry
KVM: s390: selftest: Test suppression indication on key prot exception
KVM: s390: Don't indicate suppression on dirtying, failing memop
selftests: drivers/s390x: Add uvdevice tests
drivers/s390/char: Add Ultravisor io device
MAINTAINERS: Update KVM RISC-V entry to cover selftests support
RISC-V: KVM: Introduce ISA extension register
RISC-V: KVM: Cleanup stale TLB entries when host CPU changes
RISC-V: KVM: Add remote HFENCE functions based on VCPU requests
...
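Two of the commits above ("drivers/s390/char: Add Ultravisor io device" and the uvdevice selftest) introduce a /dev/uv node driven through a uvio_ioctl_cb control block, as the test_uvdevice.c diff further down shows. A hedged sketch of one attestation request, assuming the asm/uvdevice.h UAPI header from this series and an s390x protected-virtualization guest (buffer sizes copied from the selftest; a real request additionally needs a valid attestation request control block):

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/uvdevice.h>	/* UAPI header added by this series */

/* Issue one attestation ioctl against /dev/uv; returns the ioctl rc */
static int uv_attest(void)
{
	uint8_t arcb[0x180] = {0};	/* request control block (placeholder) */
	uint8_t meas[64] = {0};		/* measurement written by the UV */
	struct uvio_attest att = {
		.arcb_addr = (uint64_t)arcb,
		.arcb_len  = sizeof(arcb),
		.meas_addr = (uint64_t)meas,
		.meas_len  = sizeof(meas),
	};
	struct uvio_ioctl_cb cb = {
		.argument_addr = (uint64_t)&att,
		.argument_len  = sizeof(att),
	};
	int fd = open("/dev/uv", O_RDWR), rc;

	if (fd < 0)
		return -1;
	rc = ioctl(fd, UVIO_IOCTL_ATT, &cb);
	close(fd);
	return rc;
}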
Diffstat (limited to 'tools')
23 files changed, 1840 insertions, 174 deletions
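The diff opens with a tools/ copy of the SMCCC header, whose ARM_SMCCC_CALL_VAL macro packs a function ID from a call-type bit (31), a calling-convention bit (30), an owner field (bits 29:24) and a 16-bit function number. As a worked example using only constants from that header, the vendor-hypervisor UID query exercised by the new aarch64 hypercalls test encodes like this (include path illustrative):

#include <stdio.h>
#include <linux/arm-smccc.h>	/* the tools/include copy added below */

int main(void)
{
	/* FAST_CALL (1) << 31 | SMC_32 (0) << 30 |
	 * OWNER_VENDOR_HYP (6) << 24 | FUNC_QUERY_CALL_UID (0xff01) */
	unsigned int id = ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,
					     ARM_SMCCC_SMC_32,
					     ARM_SMCCC_OWNER_VENDOR_HYP,
					     ARM_SMCCC_FUNC_QUERY_CALL_UID);

	printf("0x%08x\n", id);	/* prints 0x8600ff01 */
	return 0;
}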
diff --git a/tools/include/linux/arm-smccc.h b/tools/include/linux/arm-smccc.h new file mode 100644 index 000000000000..63ce9bebccd3 --- /dev/null +++ b/tools/include/linux/arm-smccc.h @@ -0,0 +1,193 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015, Linaro Limited + */ +#ifndef __LINUX_ARM_SMCCC_H +#define __LINUX_ARM_SMCCC_H + +#include <linux/const.h> + +/* + * This file provides common defines for ARM SMC Calling Convention as + * specified in + * https://developer.arm.com/docs/den0028/latest + * + * This code is up-to-date with version DEN 0028 C + */ + +#define ARM_SMCCC_STD_CALL _AC(0,U) +#define ARM_SMCCC_FAST_CALL _AC(1,U) +#define ARM_SMCCC_TYPE_SHIFT 31 + +#define ARM_SMCCC_SMC_32 0 +#define ARM_SMCCC_SMC_64 1 +#define ARM_SMCCC_CALL_CONV_SHIFT 30 + +#define ARM_SMCCC_OWNER_MASK 0x3F +#define ARM_SMCCC_OWNER_SHIFT 24 + +#define ARM_SMCCC_FUNC_MASK 0xFFFF + +#define ARM_SMCCC_IS_FAST_CALL(smc_val) \ + ((smc_val) & (ARM_SMCCC_FAST_CALL << ARM_SMCCC_TYPE_SHIFT)) +#define ARM_SMCCC_IS_64(smc_val) \ + ((smc_val) & (ARM_SMCCC_SMC_64 << ARM_SMCCC_CALL_CONV_SHIFT)) +#define ARM_SMCCC_FUNC_NUM(smc_val) ((smc_val) & ARM_SMCCC_FUNC_MASK) +#define ARM_SMCCC_OWNER_NUM(smc_val) \ + (((smc_val) >> ARM_SMCCC_OWNER_SHIFT) & ARM_SMCCC_OWNER_MASK) + +#define ARM_SMCCC_CALL_VAL(type, calling_convention, owner, func_num) \ + (((type) << ARM_SMCCC_TYPE_SHIFT) | \ + ((calling_convention) << ARM_SMCCC_CALL_CONV_SHIFT) | \ + (((owner) & ARM_SMCCC_OWNER_MASK) << ARM_SMCCC_OWNER_SHIFT) | \ + ((func_num) & ARM_SMCCC_FUNC_MASK)) + +#define ARM_SMCCC_OWNER_ARCH 0 +#define ARM_SMCCC_OWNER_CPU 1 +#define ARM_SMCCC_OWNER_SIP 2 +#define ARM_SMCCC_OWNER_OEM 3 +#define ARM_SMCCC_OWNER_STANDARD 4 +#define ARM_SMCCC_OWNER_STANDARD_HYP 5 +#define ARM_SMCCC_OWNER_VENDOR_HYP 6 +#define ARM_SMCCC_OWNER_TRUSTED_APP 48 +#define ARM_SMCCC_OWNER_TRUSTED_APP_END 49 +#define ARM_SMCCC_OWNER_TRUSTED_OS 50 +#define ARM_SMCCC_OWNER_TRUSTED_OS_END 63 + +#define ARM_SMCCC_FUNC_QUERY_CALL_UID 0xff01 + +#define ARM_SMCCC_QUIRK_NONE 0 +#define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */ + +#define ARM_SMCCC_VERSION_1_0 0x10000 +#define ARM_SMCCC_VERSION_1_1 0x10001 +#define ARM_SMCCC_VERSION_1_2 0x10002 +#define ARM_SMCCC_VERSION_1_3 0x10003 + +#define ARM_SMCCC_1_3_SVE_HINT 0x10000 + +#define ARM_SMCCC_VERSION_FUNC_ID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, 0) + +#define ARM_SMCCC_ARCH_FEATURES_FUNC_ID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, 1) + +#define ARM_SMCCC_ARCH_SOC_ID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, 2) + +#define ARM_SMCCC_ARCH_WORKAROUND_1 \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, 0x8000) + +#define ARM_SMCCC_ARCH_WORKAROUND_2 \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, 0x7fff) + +#define ARM_SMCCC_ARCH_WORKAROUND_3 \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, 0x3fff) + +#define ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_VENDOR_HYP, \ + ARM_SMCCC_FUNC_QUERY_CALL_UID) + +/* KVM UID value: 28b46fb6-2ec5-11e9-a9ca-4b564d003a74 */ +#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0 0xb66fb428U +#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1 0xe911c52eU +#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2 0x564bcaa9U +#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3 0x743a004dU + +/* KVM "vendor specific" services */ +#define 
ARM_SMCCC_KVM_FUNC_FEATURES 0 +#define ARM_SMCCC_KVM_FUNC_PTP 1 +#define ARM_SMCCC_KVM_FUNC_FEATURES_2 127 +#define ARM_SMCCC_KVM_NUM_FUNCS 128 + +#define ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_VENDOR_HYP, \ + ARM_SMCCC_KVM_FUNC_FEATURES) + +#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED 1 + +/* + * ptp_kvm is a feature used for time sync between vm and host. + * ptp_kvm module in guest kernel will get service from host using + * this hypercall ID. + */ +#define ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_VENDOR_HYP, \ + ARM_SMCCC_KVM_FUNC_PTP) + +/* ptp_kvm counter type ID */ +#define KVM_PTP_VIRT_COUNTER 0 +#define KVM_PTP_PHYS_COUNTER 1 + +/* Paravirtualised time calls (defined by ARM DEN0057A) */ +#define ARM_SMCCC_HV_PV_TIME_FEATURES \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_64, \ + ARM_SMCCC_OWNER_STANDARD_HYP, \ + 0x20) + +#define ARM_SMCCC_HV_PV_TIME_ST \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_64, \ + ARM_SMCCC_OWNER_STANDARD_HYP, \ + 0x21) + +/* TRNG entropy source calls (defined by ARM DEN0098) */ +#define ARM_SMCCC_TRNG_VERSION \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_STANDARD, \ + 0x50) + +#define ARM_SMCCC_TRNG_FEATURES \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_STANDARD, \ + 0x51) + +#define ARM_SMCCC_TRNG_GET_UUID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_STANDARD, \ + 0x52) + +#define ARM_SMCCC_TRNG_RND32 \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_STANDARD, \ + 0x53) + +#define ARM_SMCCC_TRNG_RND64 \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_64, \ + ARM_SMCCC_OWNER_STANDARD, \ + 0x53) + +/* + * Return codes defined in ARM DEN 0070A + * ARM DEN 0070A is now merged/consolidated into ARM DEN 0028 C + */ +#define SMCCC_RET_SUCCESS 0 +#define SMCCC_RET_NOT_SUPPORTED -1 +#define SMCCC_RET_NOT_REQUIRED -2 +#define SMCCC_RET_INVALID_PARAMETER -3 + +#endif /*__LINUX_ARM_SMCCC_H*/ diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 0aedcd76cf0f..de11992dc577 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile @@ -11,6 +11,7 @@ TARGETS += cpufreq TARGETS += cpu-hotplug TARGETS += damon TARGETS += drivers/dma-buf +TARGETS += drivers/s390x/uvdevice TARGETS += efivarfs TARGETS += exec TARGETS += filesystems diff --git a/tools/testing/selftests/drivers/.gitignore b/tools/testing/selftests/drivers/.gitignore index ca74f2e1c719..09e23b5afa96 100644 --- a/tools/testing/selftests/drivers/.gitignore +++ b/tools/testing/selftests/drivers/.gitignore @@ -1,2 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only /dma-buf/udmabuf +/s390x/uvdevice/test_uvdevice diff --git a/tools/testing/selftests/drivers/s390x/uvdevice/Makefile b/tools/testing/selftests/drivers/s390x/uvdevice/Makefile new file mode 100644 index 000000000000..5e701d2708d4 --- /dev/null +++ b/tools/testing/selftests/drivers/s390x/uvdevice/Makefile @@ -0,0 +1,22 @@ +include ../../../../../build/Build.include + +UNAME_M := $(shell uname -m) + +ifneq ($(UNAME_M),s390x) +nothing: +.PHONY: all clean run_tests install +.SILENT: +else + +TEST_GEN_PROGS := test_uvdevice + +top_srcdir ?= ../../../../../.. 
+KSFT_KHDR_INSTALL := 1 +khdr_dir = $(top_srcdir)/usr/include +LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include + +CFLAGS += -Wall -Werror -static -I$(khdr_dir) -I$(LINUX_TOOL_ARCH_INCLUDE) + +include ../../../lib.mk + +endif diff --git a/tools/testing/selftests/drivers/s390x/uvdevice/config b/tools/testing/selftests/drivers/s390x/uvdevice/config new file mode 100644 index 000000000000..f28a04b99eff --- /dev/null +++ b/tools/testing/selftests/drivers/s390x/uvdevice/config @@ -0,0 +1 @@ +CONFIG_S390_UV_UAPI=y diff --git a/tools/testing/selftests/drivers/s390x/uvdevice/test_uvdevice.c b/tools/testing/selftests/drivers/s390x/uvdevice/test_uvdevice.c new file mode 100644 index 000000000000..ea0cdc37b44f --- /dev/null +++ b/tools/testing/selftests/drivers/s390x/uvdevice/test_uvdevice.c @@ -0,0 +1,276 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * selftest for the Ultravisor UAPI device + * + * Copyright IBM Corp. 2022 + * Author(s): Steffen Eiden <seiden@linux.ibm.com> + */ + +#include <stdint.h> +#include <fcntl.h> +#include <errno.h> +#include <sys/ioctl.h> +#include <sys/mman.h> + +#include <asm/uvdevice.h> + +#include "../../../kselftest_harness.h" + +#define UV_PATH "/dev/uv" +#define BUFFER_SIZE 0x200 +FIXTURE(uvio_fixture) { + int uv_fd; + struct uvio_ioctl_cb uvio_ioctl; + uint8_t buffer[BUFFER_SIZE]; + __u64 fault_page; +}; + +FIXTURE_VARIANT(uvio_fixture) { + unsigned long ioctl_cmd; + uint32_t arg_size; +}; + +FIXTURE_VARIANT_ADD(uvio_fixture, att) { + .ioctl_cmd = UVIO_IOCTL_ATT, + .arg_size = sizeof(struct uvio_attest), +}; + +FIXTURE_SETUP(uvio_fixture) +{ + self->uv_fd = open(UV_PATH, O_ACCMODE); + + self->uvio_ioctl.argument_addr = (__u64)self->buffer; + self->uvio_ioctl.argument_len = variant->arg_size; + self->fault_page = + (__u64)mmap(NULL, (size_t)getpagesize(), PROT_NONE, MAP_ANONYMOUS, -1, 0); +} + +FIXTURE_TEARDOWN(uvio_fixture) +{ + if (self->uv_fd) + close(self->uv_fd); + munmap((void *)self->fault_page, (size_t)getpagesize()); +} + +TEST_F(uvio_fixture, fault_ioctl_arg) +{ + int rc, errno_cache; + + rc = ioctl(self->uv_fd, variant->ioctl_cmd, NULL); + errno_cache = errno; + ASSERT_EQ(rc, -1); + ASSERT_EQ(errno_cache, EFAULT); + + rc = ioctl(self->uv_fd, variant->ioctl_cmd, self->fault_page); + errno_cache = errno; + ASSERT_EQ(rc, -1); + ASSERT_EQ(errno_cache, EFAULT); +} + +TEST_F(uvio_fixture, fault_uvio_arg) +{ + int rc, errno_cache; + + self->uvio_ioctl.argument_addr = 0; + rc = ioctl(self->uv_fd, variant->ioctl_cmd, &self->uvio_ioctl); + errno_cache = errno; + ASSERT_EQ(rc, -1); + ASSERT_EQ(errno_cache, EFAULT); + + self->uvio_ioctl.argument_addr = self->fault_page; + rc = ioctl(self->uv_fd, variant->ioctl_cmd, &self->uvio_ioctl); + errno_cache = errno; + ASSERT_EQ(rc, -1); + ASSERT_EQ(errno_cache, EFAULT); +} + +/* + * Test to verify that IOCTLs with invalid values in the ioctl_control block + * are rejected. 
+ */ +TEST_F(uvio_fixture, inval_ioctl_cb) +{ + int rc, errno_cache; + + self->uvio_ioctl.argument_len = 0; + rc = ioctl(self->uv_fd, variant->ioctl_cmd, &self->uvio_ioctl); + errno_cache = errno; + ASSERT_EQ(rc, -1); + ASSERT_EQ(errno_cache, EINVAL); + + self->uvio_ioctl.argument_len = (uint32_t)-1; + rc = ioctl(self->uv_fd, variant->ioctl_cmd, &self->uvio_ioctl); + errno_cache = errno; + ASSERT_EQ(rc, -1); + ASSERT_EQ(errno_cache, EINVAL); + self->uvio_ioctl.argument_len = variant->arg_size; + + self->uvio_ioctl.flags = (uint32_t)-1; + rc = ioctl(self->uv_fd, variant->ioctl_cmd, &self->uvio_ioctl); + errno_cache = errno; + ASSERT_EQ(rc, -1); + ASSERT_EQ(errno_cache, EINVAL); + self->uvio_ioctl.flags = 0; + + memset(self->uvio_ioctl.reserved14, 0xff, sizeof(self->uvio_ioctl.reserved14)); + rc = ioctl(self->uv_fd, variant->ioctl_cmd, &self->uvio_ioctl); + errno_cache = errno; + ASSERT_EQ(rc, -1); + ASSERT_EQ(errno_cache, EINVAL); + + memset(&self->uvio_ioctl, 0x11, sizeof(self->uvio_ioctl)); + rc = ioctl(self->uv_fd, variant->ioctl_cmd, &self->uvio_ioctl); + ASSERT_EQ(rc, -1); +} + +TEST_F(uvio_fixture, inval_ioctl_cmd) +{ + int rc, errno_cache; + uint8_t nr = _IOC_NR(variant->ioctl_cmd); + unsigned long cmds[] = { + _IOWR('a', nr, struct uvio_ioctl_cb), + _IOWR(UVIO_TYPE_UVC, nr, int), + _IO(UVIO_TYPE_UVC, nr), + _IOR(UVIO_TYPE_UVC, nr, struct uvio_ioctl_cb), + _IOW(UVIO_TYPE_UVC, nr, struct uvio_ioctl_cb), + }; + + for (size_t i = 0; i < ARRAY_SIZE(cmds); i++) { + rc = ioctl(self->uv_fd, cmds[i], &self->uvio_ioctl); + errno_cache = errno; + ASSERT_EQ(rc, -1); + ASSERT_EQ(errno_cache, ENOTTY); + } +} + +struct test_attest_buffer { + uint8_t arcb[0x180]; + uint8_t meas[64]; + uint8_t add[32]; +}; + +FIXTURE(attest_fixture) { + int uv_fd; + struct uvio_ioctl_cb uvio_ioctl; + struct uvio_attest uvio_attest; + struct test_attest_buffer attest_buffer; + __u64 fault_page; +}; + +FIXTURE_SETUP(attest_fixture) +{ + self->uv_fd = open(UV_PATH, O_ACCMODE); + + self->uvio_ioctl.argument_addr = (__u64)&self->uvio_attest; + self->uvio_ioctl.argument_len = sizeof(self->uvio_attest); + + self->uvio_attest.arcb_addr = (__u64)&self->attest_buffer.arcb; + self->uvio_attest.arcb_len = sizeof(self->attest_buffer.arcb); + + self->uvio_attest.meas_addr = (__u64)&self->attest_buffer.meas; + self->uvio_attest.meas_len = sizeof(self->attest_buffer.meas); + + self->uvio_attest.add_data_addr = (__u64)&self->attest_buffer.add; + self->uvio_attest.add_data_len = sizeof(self->attest_buffer.add); + self->fault_page = + (__u64)mmap(NULL, (size_t)getpagesize(), PROT_NONE, MAP_ANONYMOUS, -1, 0); +} + +FIXTURE_TEARDOWN(attest_fixture) +{ + if (self->uv_fd) + close(self->uv_fd); + munmap((void *)self->fault_page, (size_t)getpagesize()); +} + +static void att_inval_sizes_test(uint32_t *size, uint32_t max_size, bool test_zero, + struct __test_metadata *_metadata, + FIXTURE_DATA(attest_fixture) *self) +{ + int rc, errno_cache; + uint32_t tmp = *size; + + if (test_zero) { + *size = 0; + rc = ioctl(self->uv_fd, UVIO_IOCTL_ATT, &self->uvio_ioctl); + errno_cache = errno; + ASSERT_EQ(rc, -1); + ASSERT_EQ(errno_cache, EINVAL); + } + *size = max_size + 1; + rc = ioctl(self->uv_fd, UVIO_IOCTL_ATT, &self->uvio_ioctl); + errno_cache = errno; + ASSERT_EQ(rc, -1); + ASSERT_EQ(errno_cache, EINVAL); + *size = tmp; +} + +/* + * Test to verify that attestation IOCTLs with invalid values in the UVIO + * attestation control block are rejected. 
+ */ +TEST_F(attest_fixture, att_inval_request) +{ + int rc, errno_cache; + + att_inval_sizes_test(&self->uvio_attest.add_data_len, UVIO_ATT_ADDITIONAL_MAX_LEN, + false, _metadata, self); + att_inval_sizes_test(&self->uvio_attest.meas_len, UVIO_ATT_MEASUREMENT_MAX_LEN, + true, _metadata, self); + att_inval_sizes_test(&self->uvio_attest.arcb_len, UVIO_ATT_ARCB_MAX_LEN, + true, _metadata, self); + + self->uvio_attest.reserved136 = (uint16_t)-1; + rc = ioctl(self->uv_fd, UVIO_IOCTL_ATT, &self->uvio_ioctl); + errno_cache = errno; + ASSERT_EQ(rc, -1); + ASSERT_EQ(errno_cache, EINVAL); + + memset(&self->uvio_attest, 0x11, sizeof(self->uvio_attest)); + rc = ioctl(self->uv_fd, UVIO_IOCTL_ATT, &self->uvio_ioctl); + ASSERT_EQ(rc, -1); +} + +static void att_inval_addr_test(__u64 *addr, struct __test_metadata *_metadata, + FIXTURE_DATA(attest_fixture) *self) +{ + int rc, errno_cache; + __u64 tmp = *addr; + + *addr = 0; + rc = ioctl(self->uv_fd, UVIO_IOCTL_ATT, &self->uvio_ioctl); + errno_cache = errno; + ASSERT_EQ(rc, -1); + ASSERT_EQ(errno_cache, EFAULT); + *addr = self->fault_page; + rc = ioctl(self->uv_fd, UVIO_IOCTL_ATT, &self->uvio_ioctl); + errno_cache = errno; + ASSERT_EQ(rc, -1); + ASSERT_EQ(errno_cache, EFAULT); + *addr = tmp; +} + +TEST_F(attest_fixture, att_inval_addr) +{ + att_inval_addr_test(&self->uvio_attest.arcb_addr, _metadata, self); + att_inval_addr_test(&self->uvio_attest.add_data_addr, _metadata, self); + att_inval_addr_test(&self->uvio_attest.meas_addr, _metadata, self); +} + +static void __attribute__((constructor)) __constructor_order_last(void) +{ + if (!__constructor_order) + __constructor_order = _CONSTRUCTOR_ORDER_BACKWARD; +} + +int main(int argc, char **argv) +{ + int fd = open(UV_PATH, O_ACCMODE); + + if (fd < 0) + ksft_exit_skip("No uv-device or cannot access " UV_PATH "\n" + "Enable CONFIG_S390_UV_UAPI and check the access rights on " + UV_PATH ".\n"); + close(fd); + return test_harness_run(argc, argv); +} diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore index 0b0e4402bba6..4509a3a7eeae 100644 --- a/tools/testing/selftests/kvm/.gitignore +++ b/tools/testing/selftests/kvm/.gitignore @@ -2,7 +2,8 @@ /aarch64/arch_timer /aarch64/debug-exceptions /aarch64/get-reg-list -/aarch64/psci_cpu_on_test +/aarch64/hypercalls +/aarch64/psci_test /aarch64/vcpu_width_config /aarch64/vgic_init /aarch64/vgic_irq @@ -16,6 +17,7 @@ /x86_64/debug_regs /x86_64/evmcs_test /x86_64/emulator_error_test +/x86_64/fix_hypercall_test /x86_64/get_msr_index_features /x86_64/kvm_clock_test /x86_64/kvm_pv_test @@ -53,7 +55,7 @@ /x86_64/xen_shinfo_test /x86_64/xen_vmcall_test /x86_64/xss_msr_test -/x86_64/vmx_pmu_msrs_test +/x86_64/vmx_pmu_caps_test /access_tracking_perf_test /demand_paging_test /dirty_log_test diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 681b173aa87c..81470a99ed1c 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -48,6 +48,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/cr4_cpuid_sync_test TEST_GEN_PROGS_x86_64 += x86_64/get_msr_index_features TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test TEST_GEN_PROGS_x86_64 += x86_64/emulator_error_test +TEST_GEN_PROGS_x86_64 += x86_64/fix_hypercall_test TEST_GEN_PROGS_x86_64 += x86_64/hyperv_clock TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid TEST_GEN_PROGS_x86_64 += x86_64/hyperv_features @@ -65,6 +66,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/state_test TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test 
TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test TEST_GEN_PROGS_x86_64 += x86_64/svm_int_ctl_test +TEST_GEN_PROGS_x86_64 += x86_64/tsc_scaling_sync TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test TEST_GEN_PROGS_x86_64 += x86_64/userspace_io_test TEST_GEN_PROGS_x86_64 += x86_64/userspace_msr_exit_test @@ -81,7 +83,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/xapic_state_test TEST_GEN_PROGS_x86_64 += x86_64/xss_msr_test TEST_GEN_PROGS_x86_64 += x86_64/debug_regs TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test -TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test +TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_caps_test TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test TEST_GEN_PROGS_x86_64 += x86_64/sev_migrate_tests @@ -105,7 +107,8 @@ TEST_GEN_PROGS_x86_64 += system_counter_offset_test TEST_GEN_PROGS_aarch64 += aarch64/arch_timer TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list -TEST_GEN_PROGS_aarch64 += aarch64/psci_cpu_on_test +TEST_GEN_PROGS_aarch64 += aarch64/hypercalls +TEST_GEN_PROGS_aarch64 += aarch64/psci_test TEST_GEN_PROGS_aarch64 += aarch64/vcpu_width_config TEST_GEN_PROGS_aarch64 += aarch64/vgic_init TEST_GEN_PROGS_aarch64 += aarch64/vgic_irq diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list.c b/tools/testing/selftests/kvm/aarch64/get-reg-list.c index 0b571f3fe64c..d3a7dbfcbb3d 100644 --- a/tools/testing/selftests/kvm/aarch64/get-reg-list.c +++ b/tools/testing/selftests/kvm/aarch64/get-reg-list.c @@ -294,6 +294,11 @@ static void print_reg(struct vcpu_config *c, __u64 id) "%s: Unexpected bits set in FW reg id: 0x%llx", config_name(c), id); printf("\tKVM_REG_ARM_FW_REG(%lld),\n", id & 0xffff); break; + case KVM_REG_ARM_FW_FEAT_BMAP: + TEST_ASSERT(id == KVM_REG_ARM_FW_FEAT_BMAP_REG(id & 0xffff), + "%s: Unexpected bits set in the bitmap feature FW reg id: 0x%llx", config_name(c), id); + printf("\tKVM_REG_ARM_FW_FEAT_BMAP_REG(%lld),\n", id & 0xffff); + break; case KVM_REG_ARM64_SVE: if (has_cap(c, KVM_CAP_ARM_SVE)) printf("\t%s,\n", sve_id_to_str(c, id)); @@ -692,6 +697,9 @@ static __u64 base_regs[] = { KVM_REG_ARM_FW_REG(1), /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 */ KVM_REG_ARM_FW_REG(2), /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 */ KVM_REG_ARM_FW_REG(3), /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3 */ + KVM_REG_ARM_FW_FEAT_BMAP_REG(0), /* KVM_REG_ARM_STD_BMAP */ + KVM_REG_ARM_FW_FEAT_BMAP_REG(1), /* KVM_REG_ARM_STD_HYP_BMAP */ + KVM_REG_ARM_FW_FEAT_BMAP_REG(2), /* KVM_REG_ARM_VENDOR_HYP_BMAP */ ARM64_SYS_REG(3, 3, 14, 3, 1), /* CNTV_CTL_EL0 */ ARM64_SYS_REG(3, 3, 14, 3, 2), /* CNTV_CVAL_EL0 */ ARM64_SYS_REG(3, 3, 14, 0, 2), diff --git a/tools/testing/selftests/kvm/aarch64/hypercalls.c b/tools/testing/selftests/kvm/aarch64/hypercalls.c new file mode 100644 index 000000000000..41e0210b7a5e --- /dev/null +++ b/tools/testing/selftests/kvm/aarch64/hypercalls.c @@ -0,0 +1,336 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* hypercalls: Check the ARM64's psuedo-firmware bitmap register interface. + * + * The test validates the basic hypercall functionalities that are exposed + * via the psuedo-firmware bitmap register. This includes the registers' + * read/write behavior before and after the VM has started, and if the + * hypercalls are properly masked or unmasked to the guest when disabled or + * enabled from the KVM userspace, respectively. 
+ */ + +#include <errno.h> +#include <linux/arm-smccc.h> +#include <asm/kvm.h> +#include <kvm_util.h> + +#include "processor.h" + +#define FW_REG_ULIMIT_VAL(max_feat_bit) (GENMASK(max_feat_bit, 0)) + +/* Last valid bits of the bitmapped firmware registers */ +#define KVM_REG_ARM_STD_BMAP_BIT_MAX 0 +#define KVM_REG_ARM_STD_HYP_BMAP_BIT_MAX 0 +#define KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_MAX 1 + +struct kvm_fw_reg_info { + uint64_t reg; /* Register definition */ + uint64_t max_feat_bit; /* Bit that represents the upper limit of the feature-map */ +}; + +#define FW_REG_INFO(r) \ + { \ + .reg = r, \ + .max_feat_bit = r##_BIT_MAX, \ + } + +static const struct kvm_fw_reg_info fw_reg_info[] = { + FW_REG_INFO(KVM_REG_ARM_STD_BMAP), + FW_REG_INFO(KVM_REG_ARM_STD_HYP_BMAP), + FW_REG_INFO(KVM_REG_ARM_VENDOR_HYP_BMAP), +}; + +enum test_stage { + TEST_STAGE_REG_IFACE, + TEST_STAGE_HVC_IFACE_FEAT_DISABLED, + TEST_STAGE_HVC_IFACE_FEAT_ENABLED, + TEST_STAGE_HVC_IFACE_FALSE_INFO, + TEST_STAGE_END, +}; + +static int stage = TEST_STAGE_REG_IFACE; + +struct test_hvc_info { + uint32_t func_id; + uint64_t arg1; +}; + +#define TEST_HVC_INFO(f, a1) \ + { \ + .func_id = f, \ + .arg1 = a1, \ + } + +static const struct test_hvc_info hvc_info[] = { + /* KVM_REG_ARM_STD_BMAP */ + TEST_HVC_INFO(ARM_SMCCC_TRNG_VERSION, 0), + TEST_HVC_INFO(ARM_SMCCC_TRNG_FEATURES, ARM_SMCCC_TRNG_RND64), + TEST_HVC_INFO(ARM_SMCCC_TRNG_GET_UUID, 0), + TEST_HVC_INFO(ARM_SMCCC_TRNG_RND32, 0), + TEST_HVC_INFO(ARM_SMCCC_TRNG_RND64, 0), + + /* KVM_REG_ARM_STD_HYP_BMAP */ + TEST_HVC_INFO(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_HV_PV_TIME_FEATURES), + TEST_HVC_INFO(ARM_SMCCC_HV_PV_TIME_FEATURES, ARM_SMCCC_HV_PV_TIME_ST), + TEST_HVC_INFO(ARM_SMCCC_HV_PV_TIME_ST, 0), + + /* KVM_REG_ARM_VENDOR_HYP_BMAP */ + TEST_HVC_INFO(ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID, + ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID), + TEST_HVC_INFO(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, 0), + TEST_HVC_INFO(ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID, KVM_PTP_VIRT_COUNTER), +}; + +/* Feed false hypercall info to test the KVM behavior */ +static const struct test_hvc_info false_hvc_info[] = { + /* Feature support check against a different family of hypercalls */ + TEST_HVC_INFO(ARM_SMCCC_TRNG_FEATURES, ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID), + TEST_HVC_INFO(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_TRNG_RND64), + TEST_HVC_INFO(ARM_SMCCC_HV_PV_TIME_FEATURES, ARM_SMCCC_TRNG_RND64), +}; + +static void guest_test_hvc(const struct test_hvc_info *hc_info) +{ + unsigned int i; + struct arm_smccc_res res; + unsigned int hvc_info_arr_sz; + + hvc_info_arr_sz = + hc_info == hvc_info ? 
ARRAY_SIZE(hvc_info) : ARRAY_SIZE(false_hvc_info); + + for (i = 0; i < hvc_info_arr_sz; i++, hc_info++) { + memset(&res, 0, sizeof(res)); + smccc_hvc(hc_info->func_id, hc_info->arg1, 0, 0, 0, 0, 0, 0, &res); + + switch (stage) { + case TEST_STAGE_HVC_IFACE_FEAT_DISABLED: + case TEST_STAGE_HVC_IFACE_FALSE_INFO: + GUEST_ASSERT_3(res.a0 == SMCCC_RET_NOT_SUPPORTED, + res.a0, hc_info->func_id, hc_info->arg1); + break; + case TEST_STAGE_HVC_IFACE_FEAT_ENABLED: + GUEST_ASSERT_3(res.a0 != SMCCC_RET_NOT_SUPPORTED, + res.a0, hc_info->func_id, hc_info->arg1); + break; + default: + GUEST_ASSERT_1(0, stage); + } + } +} + +static void guest_code(void) +{ + while (stage != TEST_STAGE_END) { + switch (stage) { + case TEST_STAGE_REG_IFACE: + break; + case TEST_STAGE_HVC_IFACE_FEAT_DISABLED: + case TEST_STAGE_HVC_IFACE_FEAT_ENABLED: + guest_test_hvc(hvc_info); + break; + case TEST_STAGE_HVC_IFACE_FALSE_INFO: + guest_test_hvc(false_hvc_info); + break; + default: + GUEST_ASSERT_1(0, stage); + } + + GUEST_SYNC(stage); + } + + GUEST_DONE(); +} + +static int set_fw_reg(struct kvm_vm *vm, uint64_t id, uint64_t val) +{ + struct kvm_one_reg reg = { + .id = id, + .addr = (uint64_t)&val, + }; + + return _vcpu_ioctl(vm, 0, KVM_SET_ONE_REG, ®); +} + +static void get_fw_reg(struct kvm_vm *vm, uint64_t id, uint64_t *addr) +{ + struct kvm_one_reg reg = { + .id = id, + .addr = (uint64_t)addr, + }; + + vcpu_ioctl(vm, 0, KVM_GET_ONE_REG, ®); +} + +struct st_time { + uint32_t rev; + uint32_t attr; + uint64_t st_time; +}; + +#define STEAL_TIME_SIZE ((sizeof(struct st_time) + 63) & ~63) +#define ST_GPA_BASE (1 << 30) + +static void steal_time_init(struct kvm_vm *vm) +{ + uint64_t st_ipa = (ulong)ST_GPA_BASE; + unsigned int gpages; + struct kvm_device_attr dev = { + .group = KVM_ARM_VCPU_PVTIME_CTRL, + .attr = KVM_ARM_VCPU_PVTIME_IPA, + .addr = (uint64_t)&st_ipa, + }; + + gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE); + vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, gpages, 0); + + vcpu_ioctl(vm, 0, KVM_SET_DEVICE_ATTR, &dev); +} + +static void test_fw_regs_before_vm_start(struct kvm_vm *vm) +{ + uint64_t val; + unsigned int i; + int ret; + + for (i = 0; i < ARRAY_SIZE(fw_reg_info); i++) { + const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i]; + + /* First 'read' should be an upper limit of the features supported */ + get_fw_reg(vm, reg_info->reg, &val); + TEST_ASSERT(val == FW_REG_ULIMIT_VAL(reg_info->max_feat_bit), + "Expected all the features to be set for reg: 0x%lx; expected: 0x%lx; read: 0x%lx\n", + reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit), val); + + /* Test a 'write' by disabling all the features of the register map */ + ret = set_fw_reg(vm, reg_info->reg, 0); + TEST_ASSERT(ret == 0, + "Failed to clear all the features of reg: 0x%lx; ret: %d\n", + reg_info->reg, errno); + + get_fw_reg(vm, reg_info->reg, &val); + TEST_ASSERT(val == 0, + "Expected all the features to be cleared for reg: 0x%lx\n", reg_info->reg); + + /* + * Test enabling a feature that's not supported. + * Avoid this check if all the bits are occupied. 
+ */ + if (reg_info->max_feat_bit < 63) { + ret = set_fw_reg(vm, reg_info->reg, BIT(reg_info->max_feat_bit + 1)); + TEST_ASSERT(ret != 0 && errno == EINVAL, + "Unexpected behavior or return value (%d) while setting an unsupported feature for reg: 0x%lx\n", + errno, reg_info->reg); + } + } +} + +static void test_fw_regs_after_vm_start(struct kvm_vm *vm) +{ + uint64_t val; + unsigned int i; + int ret; + + for (i = 0; i < ARRAY_SIZE(fw_reg_info); i++) { + const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i]; + + /* + * Before starting the VM, the test clears all the bits. + * Check if that's still the case. + */ + get_fw_reg(vm, reg_info->reg, &val); + TEST_ASSERT(val == 0, + "Expected all the features to be cleared for reg: 0x%lx\n", + reg_info->reg); + + /* + * Since the VM has run at least once, KVM shouldn't allow modification of + * the registers and should return EBUSY. Set the registers and check for + * the expected errno. + */ + ret = set_fw_reg(vm, reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit)); + TEST_ASSERT(ret != 0 && errno == EBUSY, + "Unexpected behavior or return value (%d) while setting a feature while VM is running for reg: 0x%lx\n", + errno, reg_info->reg); + } +} + +static struct kvm_vm *test_vm_create(void) +{ + struct kvm_vm *vm; + + vm = vm_create_default(0, 0, guest_code); + + ucall_init(vm, NULL); + steal_time_init(vm); + + return vm; +} + +static struct kvm_vm *test_guest_stage(struct kvm_vm *vm) +{ + struct kvm_vm *ret_vm = vm; + + pr_debug("Stage: %d\n", stage); + + switch (stage) { + case TEST_STAGE_REG_IFACE: + test_fw_regs_after_vm_start(vm); + break; + case TEST_STAGE_HVC_IFACE_FEAT_DISABLED: + /* Start a new VM so that all the features are now enabled by default */ + kvm_vm_free(vm); + ret_vm = test_vm_create(); + break; + case TEST_STAGE_HVC_IFACE_FEAT_ENABLED: + case TEST_STAGE_HVC_IFACE_FALSE_INFO: + break; + default: + TEST_FAIL("Unknown test stage: %d\n", stage); + } + + stage++; + sync_global_to_guest(vm, stage); + + return ret_vm; +} + +static void test_run(void) +{ + struct kvm_vm *vm; + struct ucall uc; + bool guest_done = false; + + vm = test_vm_create(); + + test_fw_regs_before_vm_start(vm); + + while (!guest_done) { + vcpu_run(vm, 0); + + switch (get_ucall(vm, 0, &uc)) { + case UCALL_SYNC: + vm = test_guest_stage(vm); + break; + case UCALL_DONE: + guest_done = true; + break; + case UCALL_ABORT: + TEST_FAIL("%s at %s:%ld\n\tvalues: 0x%lx, 0x%lx; 0x%lx, stage: %u", + (const char *)uc.args[0], __FILE__, uc.args[1], + uc.args[2], uc.args[3], uc.args[4], stage); + break; + default: + TEST_FAIL("Unexpected guest exit\n"); + } + } + + kvm_vm_free(vm); +} + +int main(void) +{ + setbuf(stdout, NULL); + + test_run(); + return 0; +} diff --git a/tools/testing/selftests/kvm/aarch64/psci_cpu_on_test.c b/tools/testing/selftests/kvm/aarch64/psci_cpu_on_test.c deleted file mode 100644 index 4c5f6814030f..000000000000 --- a/tools/testing/selftests/kvm/aarch64/psci_cpu_on_test.c +++ /dev/null @@ -1,121 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * psci_cpu_on_test - Test that the observable state of a vCPU targeted by the - * CPU_ON PSCI call matches what the caller requested. - * - * Copyright (c) 2021 Google LLC. - * - * This is a regression test for a race between KVM servicing the PSCI call and - * userspace reading the vCPUs registers. 
- */ - -#define _GNU_SOURCE - -#include <linux/psci.h> - -#include "kvm_util.h" -#include "processor.h" -#include "test_util.h" - -#define VCPU_ID_SOURCE 0 -#define VCPU_ID_TARGET 1 - -#define CPU_ON_ENTRY_ADDR 0xfeedf00dul -#define CPU_ON_CONTEXT_ID 0xdeadc0deul - -static uint64_t psci_cpu_on(uint64_t target_cpu, uint64_t entry_addr, - uint64_t context_id) -{ - register uint64_t x0 asm("x0") = PSCI_0_2_FN64_CPU_ON; - register uint64_t x1 asm("x1") = target_cpu; - register uint64_t x2 asm("x2") = entry_addr; - register uint64_t x3 asm("x3") = context_id; - - asm("hvc #0" - : "=r"(x0) - : "r"(x0), "r"(x1), "r"(x2), "r"(x3) - : "memory"); - - return x0; -} - -static uint64_t psci_affinity_info(uint64_t target_affinity, - uint64_t lowest_affinity_level) -{ - register uint64_t x0 asm("x0") = PSCI_0_2_FN64_AFFINITY_INFO; - register uint64_t x1 asm("x1") = target_affinity; - register uint64_t x2 asm("x2") = lowest_affinity_level; - - asm("hvc #0" - : "=r"(x0) - : "r"(x0), "r"(x1), "r"(x2) - : "memory"); - - return x0; -} - -static void guest_main(uint64_t target_cpu) -{ - GUEST_ASSERT(!psci_cpu_on(target_cpu, CPU_ON_ENTRY_ADDR, CPU_ON_CONTEXT_ID)); - uint64_t target_state; - - do { - target_state = psci_affinity_info(target_cpu, 0); - - GUEST_ASSERT((target_state == PSCI_0_2_AFFINITY_LEVEL_ON) || - (target_state == PSCI_0_2_AFFINITY_LEVEL_OFF)); - } while (target_state != PSCI_0_2_AFFINITY_LEVEL_ON); - - GUEST_DONE(); -} - -int main(void) -{ - uint64_t target_mpidr, obs_pc, obs_x0; - struct kvm_vcpu_init init; - struct kvm_vm *vm; - struct ucall uc; - - vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR); - kvm_vm_elf_load(vm, program_invocation_name); - ucall_init(vm, NULL); - - vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init); - init.features[0] |= (1 << KVM_ARM_VCPU_PSCI_0_2); - - aarch64_vcpu_add_default(vm, VCPU_ID_SOURCE, &init, guest_main); - - /* - * make sure the target is already off when executing the test. - */ - init.features[0] |= (1 << KVM_ARM_VCPU_POWER_OFF); - aarch64_vcpu_add_default(vm, VCPU_ID_TARGET, &init, guest_main); - - get_reg(vm, VCPU_ID_TARGET, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &target_mpidr); - vcpu_args_set(vm, VCPU_ID_SOURCE, 1, target_mpidr & MPIDR_HWID_BITMASK); - vcpu_run(vm, VCPU_ID_SOURCE); - - switch (get_ucall(vm, VCPU_ID_SOURCE, &uc)) { - case UCALL_DONE: - break; - case UCALL_ABORT: - TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0], __FILE__, - uc.args[1]); - break; - default: - TEST_FAIL("Unhandled ucall: %lu", uc.cmd); - } - - get_reg(vm, VCPU_ID_TARGET, ARM64_CORE_REG(regs.pc), &obs_pc); - get_reg(vm, VCPU_ID_TARGET, ARM64_CORE_REG(regs.regs[0]), &obs_x0); - - TEST_ASSERT(obs_pc == CPU_ON_ENTRY_ADDR, - "unexpected target cpu pc: %lx (expected: %lx)", - obs_pc, CPU_ON_ENTRY_ADDR); - TEST_ASSERT(obs_x0 == CPU_ON_CONTEXT_ID, - "unexpected target context id: %lx (expected: %lx)", - obs_x0, CPU_ON_CONTEXT_ID); - - kvm_vm_free(vm); - return 0; -} diff --git a/tools/testing/selftests/kvm/aarch64/psci_test.c b/tools/testing/selftests/kvm/aarch64/psci_test.c new file mode 100644 index 000000000000..88541de21c41 --- /dev/null +++ b/tools/testing/selftests/kvm/aarch64/psci_test.c @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * psci_cpu_on_test - Test that the observable state of a vCPU targeted by the + * CPU_ON PSCI call matches what the caller requested. + * + * Copyright (c) 2021 Google LLC. + * + * This is a regression test for a race between KVM servicing the PSCI call and + * userspace reading the vCPUs registers. 
+ */ + +#define _GNU_SOURCE + +#include <linux/psci.h> + +#include "kvm_util.h" +#include "processor.h" +#include "test_util.h" + +#define VCPU_ID_SOURCE 0 +#define VCPU_ID_TARGET 1 + +#define CPU_ON_ENTRY_ADDR 0xfeedf00dul +#define CPU_ON_CONTEXT_ID 0xdeadc0deul + +static uint64_t psci_cpu_on(uint64_t target_cpu, uint64_t entry_addr, + uint64_t context_id) +{ + struct arm_smccc_res res; + + smccc_hvc(PSCI_0_2_FN64_CPU_ON, target_cpu, entry_addr, context_id, + 0, 0, 0, 0, &res); + + return res.a0; +} + +static uint64_t psci_affinity_info(uint64_t target_affinity, + uint64_t lowest_affinity_level) +{ + struct arm_smccc_res res; + + smccc_hvc(PSCI_0_2_FN64_AFFINITY_INFO, target_affinity, lowest_affinity_level, + 0, 0, 0, 0, 0, &res); + + return res.a0; +} + +static uint64_t psci_system_suspend(uint64_t entry_addr, uint64_t context_id) +{ + struct arm_smccc_res res; + + smccc_hvc(PSCI_1_0_FN64_SYSTEM_SUSPEND, entry_addr, context_id, + 0, 0, 0, 0, 0, &res); + + return res.a0; +} + +static uint64_t psci_features(uint32_t func_id) +{ + struct arm_smccc_res res; + + smccc_hvc(PSCI_1_0_FN_PSCI_FEATURES, func_id, 0, 0, 0, 0, 0, 0, &res); + + return res.a0; +} + +static void vcpu_power_off(struct kvm_vm *vm, uint32_t vcpuid) +{ + struct kvm_mp_state mp_state = { + .mp_state = KVM_MP_STATE_STOPPED, + }; + + vcpu_set_mp_state(vm, vcpuid, &mp_state); +} + +static struct kvm_vm *setup_vm(void *guest_code) +{ + struct kvm_vcpu_init init; + struct kvm_vm *vm; + + vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR); + kvm_vm_elf_load(vm, program_invocation_name); + ucall_init(vm, NULL); + + vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init); + init.features[0] |= (1 << KVM_ARM_VCPU_PSCI_0_2); + + aarch64_vcpu_add_default(vm, VCPU_ID_SOURCE, &init, guest_code); + aarch64_vcpu_add_default(vm, VCPU_ID_TARGET, &init, guest_code); + + return vm; +} + +static void enter_guest(struct kvm_vm *vm, uint32_t vcpuid) +{ + struct ucall uc; + + vcpu_run(vm, vcpuid); + if (get_ucall(vm, vcpuid, &uc) == UCALL_ABORT) + TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0], __FILE__, + uc.args[1]); +} + +static void assert_vcpu_reset(struct kvm_vm *vm, uint32_t vcpuid) +{ + uint64_t obs_pc, obs_x0; + + get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), &obs_pc); + get_reg(vm, vcpuid, ARM64_CORE_REG(regs.regs[0]), &obs_x0); + + TEST_ASSERT(obs_pc == CPU_ON_ENTRY_ADDR, + "unexpected target cpu pc: %lx (expected: %lx)", + obs_pc, CPU_ON_ENTRY_ADDR); + TEST_ASSERT(obs_x0 == CPU_ON_CONTEXT_ID, + "unexpected target context id: %lx (expected: %lx)", + obs_x0, CPU_ON_CONTEXT_ID); +} + +static void guest_test_cpu_on(uint64_t target_cpu) +{ + uint64_t target_state; + + GUEST_ASSERT(!psci_cpu_on(target_cpu, CPU_ON_ENTRY_ADDR, CPU_ON_CONTEXT_ID)); + + do { + target_state = psci_affinity_info(target_cpu, 0); + + GUEST_ASSERT((target_state == PSCI_0_2_AFFINITY_LEVEL_ON) || + (target_state == PSCI_0_2_AFFINITY_LEVEL_OFF)); + } while (target_state != PSCI_0_2_AFFINITY_LEVEL_ON); + + GUEST_DONE(); +} + +static void host_test_cpu_on(void) +{ + uint64_t target_mpidr; + struct kvm_vm *vm; + struct ucall uc; + + vm = setup_vm(guest_test_cpu_on); + + /* + * make sure the target is already off when executing the test. 
+ */ + vcpu_power_off(vm, VCPU_ID_TARGET); + + get_reg(vm, VCPU_ID_TARGET, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &target_mpidr); + vcpu_args_set(vm, VCPU_ID_SOURCE, 1, target_mpidr & MPIDR_HWID_BITMASK); + enter_guest(vm, VCPU_ID_SOURCE); + + if (get_ucall(vm, VCPU_ID_SOURCE, &uc) != UCALL_DONE) + TEST_FAIL("Unhandled ucall: %lu", uc.cmd); + + assert_vcpu_reset(vm, VCPU_ID_TARGET); + kvm_vm_free(vm); +} + +static void enable_system_suspend(struct kvm_vm *vm) +{ + struct kvm_enable_cap cap = { + .cap = KVM_CAP_ARM_SYSTEM_SUSPEND, + }; + + vm_enable_cap(vm, &cap); +} + +static void guest_test_system_suspend(void) +{ + uint64_t ret; + + /* assert that SYSTEM_SUSPEND is discoverable */ + GUEST_ASSERT(!psci_features(PSCI_1_0_FN_SYSTEM_SUSPEND)); + GUEST_ASSERT(!psci_features(PSCI_1_0_FN64_SYSTEM_SUSPEND)); + + ret = psci_system_suspend(CPU_ON_ENTRY_ADDR, CPU_ON_CONTEXT_ID); + GUEST_SYNC(ret); +} + +static void host_test_system_suspend(void) +{ + struct kvm_run *run; + struct kvm_vm *vm; + + vm = setup_vm(guest_test_system_suspend); + enable_system_suspend(vm); + + vcpu_power_off(vm, VCPU_ID_TARGET); + run = vcpu_state(vm, VCPU_ID_SOURCE); + + enter_guest(vm, VCPU_ID_SOURCE); + + TEST_ASSERT(run->exit_reason == KVM_EXIT_SYSTEM_EVENT, + "Unhandled exit reason: %u (%s)", + run->exit_reason, exit_reason_str(run->exit_reason)); + TEST_ASSERT(run->system_event.type == KVM_SYSTEM_EVENT_SUSPEND, + "Unhandled system event: %u (expected: %u)", + run->system_event.type, KVM_SYSTEM_EVENT_SUSPEND); + + kvm_vm_free(vm); +} + +int main(void) +{ + if (!kvm_check_cap(KVM_CAP_ARM_SYSTEM_SUSPEND)) { + print_skip("KVM_CAP_ARM_SYSTEM_SUSPEND not supported"); + exit(KSFT_SKIP); + } + + host_test_cpu_on(); + host_test_system_suspend(); + return 0; +} diff --git a/tools/testing/selftests/kvm/include/aarch64/processor.h b/tools/testing/selftests/kvm/include/aarch64/processor.h index 8f9f46979a00..59ece9d4e0d1 100644 --- a/tools/testing/selftests/kvm/include/aarch64/processor.h +++ b/tools/testing/selftests/kvm/include/aarch64/processor.h @@ -185,4 +185,26 @@ static inline void local_irq_disable(void) asm volatile("msr daifset, #3" : : : "memory"); } +/** + * struct arm_smccc_res - Result from SMC/HVC call + * @a0-a3 result values from registers 0 to 3 + */ +struct arm_smccc_res { + unsigned long a0; + unsigned long a1; + unsigned long a2; + unsigned long a3; +}; + +/** + * smccc_hvc - Invoke a SMCCC function using the hvc conduit + * @function_id: the SMCCC function to be called + * @arg0-arg6: SMCCC function arguments, corresponding to registers x1-x7 + * @res: pointer to write the return values from registers x0-x3 + * + */ +void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1, + uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5, + uint64_t arg6, struct arm_smccc_res *res); + #endif /* SELFTEST_KVM_PROCESSOR_H */ diff --git a/tools/testing/selftests/kvm/include/riscv/processor.h b/tools/testing/selftests/kvm/include/riscv/processor.h index eca5c622efd2..4fcfd1c0389d 100644 --- a/tools/testing/selftests/kvm/include/riscv/processor.h +++ b/tools/testing/selftests/kvm/include/riscv/processor.h @@ -119,10 +119,12 @@ static inline void set_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id, #define SATP_ASID_SHIFT 44 #define SATP_ASID_MASK _AC(0xFFFF, UL) -#define SBI_EXT_EXPERIMENTAL_START 0x08000000 -#define SBI_EXT_EXPERIMENTAL_END 0x08FFFFFF +#define SBI_EXT_EXPERIMENTAL_START 0x08000000 +#define SBI_EXT_EXPERIMENTAL_END 0x08FFFFFF -#define KVM_RISCV_SELFTESTS_SBI_EXT SBI_EXT_EXPERIMENTAL_END 
+#define KVM_RISCV_SELFTESTS_SBI_EXT SBI_EXT_EXPERIMENTAL_END +#define KVM_RISCV_SELFTESTS_SBI_UCALL 0 +#define KVM_RISCV_SELFTESTS_SBI_UNEXP 1 struct sbiret { long error; diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c index 9343d82519b4..6a041289fa80 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/processor.c +++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c @@ -500,3 +500,28 @@ void __attribute__((constructor)) init_guest_modes(void) { guest_modes_append_default(); } + +void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1, + uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5, + uint64_t arg6, struct arm_smccc_res *res) +{ + asm volatile("mov w0, %w[function_id]\n" + "mov x1, %[arg0]\n" + "mov x2, %[arg1]\n" + "mov x3, %[arg2]\n" + "mov x4, %[arg3]\n" + "mov x5, %[arg4]\n" + "mov x6, %[arg5]\n" + "mov x7, %[arg6]\n" + "hvc #0\n" + "mov %[res0], x0\n" + "mov %[res1], x1\n" + "mov %[res2], x2\n" + "mov %[res3], x3\n" + : [res0] "=r"(res->a0), [res1] "=r"(res->a1), + [res2] "=r"(res->a2), [res3] "=r"(res->a3) + : [function_id] "r"(function_id), [arg0] "r"(arg0), + [arg1] "r"(arg1), [arg2] "r"(arg2), [arg3] "r"(arg3), + [arg4] "r"(arg4), [arg5] "r"(arg5), [arg6] "r"(arg6) + : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7"); +} diff --git a/tools/testing/selftests/kvm/lib/riscv/processor.c b/tools/testing/selftests/kvm/lib/riscv/processor.c index 3961487a4870..abc0ae5a4fe1 100644 --- a/tools/testing/selftests/kvm/lib/riscv/processor.c +++ b/tools/testing/selftests/kvm/lib/riscv/processor.c @@ -268,10 +268,11 @@ void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent) core.regs.t3, core.regs.t4, core.regs.t5, core.regs.t6); } -static void __aligned(16) guest_hang(void) +static void __aligned(16) guest_unexp_trap(void) { - while (1) - ; + sbi_ecall(KVM_RISCV_SELFTESTS_SBI_EXT, + KVM_RISCV_SELFTESTS_SBI_UNEXP, + 0, 0, 0, 0, 0, 0); } void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code) @@ -310,7 +311,7 @@ void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code) /* Setup default exception vector of guest */ set_reg(vm, vcpuid, RISCV_CSR_REG(stvec), - (unsigned long)guest_hang); + (unsigned long)guest_unexp_trap); } void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...) @@ -350,7 +351,7 @@ void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...) case 7: id = RISCV_CORE_REG(regs.a7); break; - }; + } set_reg(vm, vcpuid, id, va_arg(ap, uint64_t)); } diff --git a/tools/testing/selftests/kvm/lib/riscv/ucall.c b/tools/testing/selftests/kvm/lib/riscv/ucall.c index 9e42d8248fa6..8550f424d093 100644 --- a/tools/testing/selftests/kvm/lib/riscv/ucall.c +++ b/tools/testing/selftests/kvm/lib/riscv/ucall.c @@ -60,8 +60,9 @@ void ucall(uint64_t cmd, int nargs, ...) 
uc.args[i] = va_arg(va, uint64_t); va_end(va); - sbi_ecall(KVM_RISCV_SELFTESTS_SBI_EXT, 0, (vm_vaddr_t)&uc, - 0, 0, 0, 0, 0); + sbi_ecall(KVM_RISCV_SELFTESTS_SBI_EXT, + KVM_RISCV_SELFTESTS_SBI_UCALL, + (vm_vaddr_t)&uc, 0, 0, 0, 0, 0); } uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc) @@ -73,14 +74,24 @@ uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc) memset(uc, 0, sizeof(*uc)); if (run->exit_reason == KVM_EXIT_RISCV_SBI && - run->riscv_sbi.extension_id == KVM_RISCV_SELFTESTS_SBI_EXT && - run->riscv_sbi.function_id == 0) { - memcpy(&ucall, addr_gva2hva(vm, run->riscv_sbi.args[0]), - sizeof(ucall)); - - vcpu_run_complete_io(vm, vcpu_id); - if (uc) - memcpy(uc, &ucall, sizeof(ucall)); + run->riscv_sbi.extension_id == KVM_RISCV_SELFTESTS_SBI_EXT) { + switch (run->riscv_sbi.function_id) { + case KVM_RISCV_SELFTESTS_SBI_UCALL: + memcpy(&ucall, addr_gva2hva(vm, + run->riscv_sbi.args[0]), sizeof(ucall)); + + vcpu_run_complete_io(vm, vcpu_id); + if (uc) + memcpy(uc, &ucall, sizeof(ucall)); + + break; + case KVM_RISCV_SELFTESTS_SBI_UNEXP: + vcpu_dump(stderr, vm, vcpu_id, 2); + TEST_ASSERT(0, "Unexpected trap taken by guest"); + break; + default: + break; + } } return ucall.cmd; diff --git a/tools/testing/selftests/kvm/s390x/memop.c b/tools/testing/selftests/kvm/s390x/memop.c index b04c2c1b3c30..49f26f544127 100644 --- a/tools/testing/selftests/kvm/s390x/memop.c +++ b/tools/testing/selftests/kvm/s390x/memop.c @@ -10,6 +10,8 @@ #include <string.h> #include <sys/ioctl.h> +#include <linux/bits.h> + #include "test_util.h" #include "kvm_util.h" @@ -194,6 +196,7 @@ static int err_memop_ioctl(struct test_vcpu vcpu, struct kvm_s390_mem_op *ksmo) #define SIDA_OFFSET(o) ._sida_offset = 1, .sida_offset = (o) #define AR(a) ._ar = 1, .ar = (a) #define KEY(a) .f_key = 1, .key = (a) +#define INJECT .f_inject = 1 #define CHECK_N_DO(f, ...) ({ f(__VA_ARGS__, CHECK_ONLY); f(__VA_ARGS__); }) @@ -430,9 +433,18 @@ static void test_copy_key_fetch_prot(void) TEST_ASSERT(rv == 4, "Should result in protection exception"); \ }) +static void guest_error_key(void) +{ + GUEST_SYNC(STAGE_INITED); + set_storage_key_range(mem1, PAGE_SIZE, 0x18); + set_storage_key_range(mem1 + PAGE_SIZE, sizeof(mem1) - PAGE_SIZE, 0x98); + GUEST_SYNC(STAGE_SKEYS_SET); + GUEST_SYNC(STAGE_IDLED); +} + static void test_errors_key(void) { - struct test_default t = test_default_init(guest_copy_key_fetch_prot); + struct test_default t = test_default_init(guest_error_key); HOST_SYNC(t.vcpu, STAGE_INITED); HOST_SYNC(t.vcpu, STAGE_SKEYS_SET); @@ -446,6 +458,37 @@ static void test_errors_key(void) kvm_vm_free(t.kvm_vm); } +static void test_termination(void) +{ + struct test_default t = test_default_init(guest_error_key); + uint64_t prefix; + uint64_t teid; + uint64_t teid_mask = BIT(63 - 56) | BIT(63 - 60) | BIT(63 - 61); + uint64_t psw[2]; + + HOST_SYNC(t.vcpu, STAGE_INITED); + HOST_SYNC(t.vcpu, STAGE_SKEYS_SET); + + /* vcpu, mismatching keys after first page */ + ERR_PROT_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), KEY(1), INJECT); + /* + * The memop injected a program exception and the test needs to check the + * Translation-Exception Identification (TEID). It is necessary to run + * the guest in order to be able to read the TEID from guest memory. + * Set the guest program new PSW, so the guest state is not clobbered. 
+ */ + prefix = t.run->s.regs.prefix; + psw[0] = t.run->psw_mask; + psw[1] = t.run->psw_addr; + MOP(t.vm, ABSOLUTE, WRITE, psw, sizeof(psw), GADDR(prefix + 464)); + HOST_SYNC(t.vcpu, STAGE_IDLED); + MOP(t.vm, ABSOLUTE, READ, &teid, sizeof(teid), GADDR(prefix + 168)); + /* Bits 56, 60, 61 form a code, 0 being the only one allowing for termination */ + ASSERT_EQ(teid & teid_mask, 0); + + kvm_vm_free(t.kvm_vm); +} + static void test_errors_key_storage_prot_override(void) { struct test_default t = test_default_init(guest_copy_key_fetch_prot); @@ -668,6 +711,7 @@ int main(int argc, char *argv[]) test_copy_key_fetch_prot(); test_copy_key_fetch_prot_override(); test_errors_key(); + test_termination(); test_errors_key_storage_prot_override(); test_errors_key_fetch_prot_override_not_enabled(); test_errors_key_fetch_prot_override_enabled(); diff --git a/tools/testing/selftests/kvm/steal_time.c b/tools/testing/selftests/kvm/steal_time.c index 62f2eb9ee3d5..8c4e811bd586 100644 --- a/tools/testing/selftests/kvm/steal_time.c +++ b/tools/testing/selftests/kvm/steal_time.c @@ -118,17 +118,10 @@ struct st_time { static int64_t smccc(uint32_t func, uint64_t arg) { - unsigned long ret; + struct arm_smccc_res res; - asm volatile( - "mov w0, %w1\n" - "mov x1, %2\n" - "hvc #0\n" - "mov %0, x0\n" - : "=r" (ret) : "r" (func), "r" (arg) : - "x0", "x1", "x2", "x3"); - - return ret; + smccc_hvc(func, arg, 0, 0, 0, 0, 0, 0, &res); + return res.a0; } static void check_status(struct st_time *st) diff --git a/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c b/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c new file mode 100644 index 000000000000..1f5c32146f3d --- /dev/null +++ b/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020, Google LLC. 
+ * + * Tests for KVM paravirtual feature disablement + */ +#include <asm/kvm_para.h> +#include <linux/kvm_para.h> +#include <linux/stringify.h> +#include <stdint.h> + +#include "apic.h" +#include "test_util.h" +#include "kvm_util.h" +#include "processor.h" + +#define VCPU_ID 0 + +static bool ud_expected; + +static void guest_ud_handler(struct ex_regs *regs) +{ + GUEST_ASSERT(ud_expected); + GUEST_DONE(); +} + +extern unsigned char svm_hypercall_insn; +static uint64_t svm_do_sched_yield(uint8_t apic_id) +{ + uint64_t ret; + + asm volatile("mov %1, %%rax\n\t" + "mov %2, %%rbx\n\t" + "svm_hypercall_insn:\n\t" + "vmmcall\n\t" + "mov %%rax, %0\n\t" + : "=r"(ret) + : "r"((uint64_t)KVM_HC_SCHED_YIELD), "r"((uint64_t)apic_id) + : "rax", "rbx", "memory"); + + return ret; +} + +extern unsigned char vmx_hypercall_insn; +static uint64_t vmx_do_sched_yield(uint8_t apic_id) +{ + uint64_t ret; + + asm volatile("mov %1, %%rax\n\t" + "mov %2, %%rbx\n\t" + "vmx_hypercall_insn:\n\t" + "vmcall\n\t" + "mov %%rax, %0\n\t" + : "=r"(ret) + : "r"((uint64_t)KVM_HC_SCHED_YIELD), "r"((uint64_t)apic_id) + : "rax", "rbx", "memory"); + + return ret; +} + +static void assert_hypercall_insn(unsigned char *exp_insn, unsigned char *obs_insn) +{ + uint32_t exp = 0, obs = 0; + + memcpy(&exp, exp_insn, sizeof(exp)); + memcpy(&obs, obs_insn, sizeof(obs)); + + GUEST_ASSERT_EQ(exp, obs); +} + +static void guest_main(void) +{ + unsigned char *native_hypercall_insn, *hypercall_insn; + uint8_t apic_id; + + apic_id = GET_APIC_ID_FIELD(xapic_read_reg(APIC_ID)); + + if (is_intel_cpu()) { + native_hypercall_insn = &vmx_hypercall_insn; + hypercall_insn = &svm_hypercall_insn; + svm_do_sched_yield(apic_id); + } else if (is_amd_cpu()) { + native_hypercall_insn = &svm_hypercall_insn; + hypercall_insn = &vmx_hypercall_insn; + vmx_do_sched_yield(apic_id); + } else { + GUEST_ASSERT(0); + /* unreachable */ + return; + } + + GUEST_ASSERT(!ud_expected); + assert_hypercall_insn(native_hypercall_insn, hypercall_insn); + GUEST_DONE(); +} + +static void setup_ud_vector(struct kvm_vm *vm) +{ + vm_init_descriptor_tables(vm); + vcpu_init_descriptor_tables(vm, VCPU_ID); + vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler); +} + +static void enter_guest(struct kvm_vm *vm) +{ + struct kvm_run *run; + struct ucall uc; + + run = vcpu_state(vm, VCPU_ID); + + vcpu_run(vm, VCPU_ID); + switch (get_ucall(vm, VCPU_ID, &uc)) { + case UCALL_SYNC: + pr_info("%s: %016lx\n", (const char *)uc.args[2], uc.args[3]); + break; + case UCALL_DONE: + return; + case UCALL_ABORT: + TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0], __FILE__, uc.args[1]); + default: + TEST_FAIL("Unhandled ucall: %ld\nexit_reason: %u (%s)", + uc.cmd, run->exit_reason, exit_reason_str(run->exit_reason)); + } +} + +static void test_fix_hypercall(void) +{ + struct kvm_vm *vm; + + vm = vm_create_default(VCPU_ID, 0, guest_main); + setup_ud_vector(vm); + + ud_expected = false; + sync_global_to_guest(vm, ud_expected); + + virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA); + + enter_guest(vm); +} + +static void test_fix_hypercall_disabled(void) +{ + struct kvm_enable_cap cap = {0}; + struct kvm_vm *vm; + + vm = vm_create_default(VCPU_ID, 0, guest_main); + setup_ud_vector(vm); + + cap.cap = KVM_CAP_DISABLE_QUIRKS2; + cap.args[0] = KVM_X86_QUIRK_FIX_HYPERCALL_INSN; + vm_enable_cap(vm, &cap); + + ud_expected = true; + sync_global_to_guest(vm, ud_expected); + + virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA); + + enter_guest(vm); +} + +int main(void) +{ + if 
(!(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) & KVM_X86_QUIRK_FIX_HYPERCALL_INSN)) { + print_skip("KVM_X86_QUIRK_HYPERCALL_INSN not supported"); + exit(KSFT_SKIP); + } + + test_fix_hypercall(); + test_fix_hypercall_disabled(); +} diff --git a/tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c b/tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c new file mode 100644 index 000000000000..f0083d8cfe98 --- /dev/null +++ b/tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * svm_vmcall_test + * + * Copyright © 2021 Amazon.com, Inc. or its affiliates. + * + * Xen shared_info / pvclock testing + */ + +#include "test_util.h" +#include "kvm_util.h" +#include "processor.h" + +#include <stdint.h> +#include <time.h> +#include <sched.h> +#include <signal.h> +#include <pthread.h> + +#define NR_TEST_VCPUS 20 + +static struct kvm_vm *vm; +pthread_spinlock_t create_lock; + +#define TEST_TSC_KHZ 2345678UL +#define TEST_TSC_OFFSET 200000000 + +uint64_t tsc_sync; +static void guest_code(void) +{ + uint64_t start_tsc, local_tsc, tmp; + + start_tsc = rdtsc(); + do { + tmp = READ_ONCE(tsc_sync); + local_tsc = rdtsc(); + WRITE_ONCE(tsc_sync, local_tsc); + if (unlikely(local_tsc < tmp)) + GUEST_SYNC_ARGS(0, local_tsc, tmp, 0, 0); + + } while (local_tsc - start_tsc < 5000 * TEST_TSC_KHZ); + + GUEST_DONE(); +} + + +static void *run_vcpu(void *_cpu_nr) +{ + unsigned long cpu = (unsigned long)_cpu_nr; + unsigned long failures = 0; + static bool first_cpu_done; + + /* The kernel is fine, but vm_vcpu_add_default() needs locking */ + pthread_spin_lock(&create_lock); + + vm_vcpu_add_default(vm, cpu, guest_code); + + if (!first_cpu_done) { + first_cpu_done = true; + vcpu_set_msr(vm, cpu, MSR_IA32_TSC, TEST_TSC_OFFSET); + } + + pthread_spin_unlock(&create_lock); + + for (;;) { + volatile struct kvm_run *run = vcpu_state(vm, cpu); + struct ucall uc; + + vcpu_run(vm, cpu); + TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, + "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n", + run->exit_reason, + exit_reason_str(run->exit_reason)); + + switch (get_ucall(vm, cpu, &uc)) { + case UCALL_DONE: + goto out; + + case UCALL_SYNC: + printf("Guest %ld sync %lx %lx %ld\n", cpu, uc.args[2], uc.args[3], uc.args[2] - uc.args[3]); + failures++; + break; + + default: + TEST_FAIL("Unknown ucall %lu", uc.cmd); + } + } + out: + return (void *)failures; +} + +int main(int argc, char *argv[]) +{ + if (!kvm_check_cap(KVM_CAP_VM_TSC_CONTROL)) { + print_skip("KVM_CAP_VM_TSC_CONTROL not available"); + exit(KSFT_SKIP); + } + + vm = vm_create_default_with_vcpus(0, DEFAULT_STACK_PGS * NR_TEST_VCPUS, 0, guest_code, NULL); + vm_ioctl(vm, KVM_SET_TSC_KHZ, (void *) TEST_TSC_KHZ); + + pthread_spin_init(&create_lock, PTHREAD_PROCESS_PRIVATE); + pthread_t cpu_threads[NR_TEST_VCPUS]; + unsigned long cpu; + for (cpu = 0; cpu < NR_TEST_VCPUS; cpu++) + pthread_create(&cpu_threads[cpu], NULL, run_vcpu, (void *)cpu); + + unsigned long failures = 0; + for (cpu = 0; cpu < NR_TEST_VCPUS; cpu++) { + void *this_cpu_failures; + pthread_join(cpu_threads[cpu], &this_cpu_failures); + failures += (unsigned long)this_cpu_failures; + } + + TEST_ASSERT(!failures, "TSC sync failed"); + pthread_spin_destroy(&create_lock); + kvm_vm_free(vm); + return 0; +} diff --git a/tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c index 2454a1f2ca0c..97b7fd4a9a3d 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c +++ 
b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c @@ -1,15 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 /* - * VMX-pmu related msrs test + * Test for the VMX-pmu perf capability MSR * * Copyright (C) 2021 Intel Corporation * - * Test to check the effect of various CPUID settings - * on the MSR_IA32_PERF_CAPABILITIES MSR, and check that - * whatever we write with KVM_SET_MSR is _not_ modified - * in the guest and test it can be retrieved with KVM_GET_MSR. - * - * Test to check that invalid LBR formats are rejected. + * Test to check the effect of various CPUID settings on the + * MSR_IA32_PERF_CAPABILITIES MSR; check that whatever we write + * with KVM_SET_MSR is _not_ modified by the guest, that it can + * be retrieved with KVM_GET_MSR, and that invalid LBR formats + * are rejected. */ #define _GNU_SOURCE /* for program_invocation_short_name */ @@ -107,8 +106,11 @@ int main(int argc, char *argv[]) ASSERT_EQ(vcpu_get_msr(vm, VCPU_ID, MSR_IA32_PERF_CAPABILITIES), (u64)host_cap.lbr_format); /* testcase 3, check invalid LBR format is rejected */ - ret = _vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_LBR_FMT); + /* Note, on Arch LBR capable platforms the LBR_FMT field in the perf + * capability MSR is 0x3f, so use a truly invalid format, 0x30, instead. */ + ret = _vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, 0x30); TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail."); + printf("Completed perf capability tests.\n"); kvm_vm_free(vm); } diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c index bcd370827859..7a51bb648fbb 100644 --- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c +++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c @@ -38,12 +38,36 @@ #define EVTCHN_VECTOR 0x10 +#define EVTCHN_TEST1 15 +#define EVTCHN_TEST2 66 +#define EVTCHN_TIMER 13 + static struct kvm_vm *vm; #define XEN_HYPERCALL_MSR 0x40000000 #define MIN_STEAL_TIME 50000 +#define __HYPERVISOR_set_timer_op 15 +#define __HYPERVISOR_sched_op 29 +#define __HYPERVISOR_event_channel_op 32 + +#define SCHEDOP_poll 3 + +#define EVTCHNOP_send 4 + +#define EVTCHNSTAT_interdomain 2 + +struct evtchn_send { + u32 port; +}; + +struct sched_poll { + u32 *ports; + unsigned int nr_ports; + u64 timeout; +}; + struct pvclock_vcpu_time_info { u32 version; u32 pad0; @@ -106,15 +130,25 @@ struct { struct kvm_irq_routing_entry entries[2]; } irq_routes; +bool guest_saw_irq; + static void evtchn_handler(struct ex_regs *regs) { struct vcpu_info *vi = (void *)VCPU_INFO_VADDR; vi->evtchn_upcall_pending = 0; vi->evtchn_pending_sel = 0; + guest_saw_irq = true; GUEST_SYNC(0x20); } +static void guest_wait_for_irq(void) +{ + while (!guest_saw_irq) + __asm__ __volatile__ ("rep nop" : : : "memory"); + guest_saw_irq = false; +} + static void guest_code(void) { struct vcpu_runstate_info *rs = (void *)RUNSTATE_VADDR; @@ -127,6 +161,8 @@ static void guest_code(void) /* Trigger an interrupt injection */ GUEST_SYNC(0); + guest_wait_for_irq(); + /* Test having the host set runstates manually */ GUEST_SYNC(RUNSTATE_runnable); GUEST_ASSERT(rs->time[RUNSTATE_runnable] != 0); @@ -167,14 +203,132 @@ static void guest_code(void) /* Now deliver an *unmasked* interrupt */ GUEST_SYNC(8); - while (!si->evtchn_pending[1]) - __asm__ __volatile__ ("rep nop" : : : "memory"); + guest_wait_for_irq(); /* Change memslots and deliver an interrupt */ GUEST_SYNC(9); - for (;;) - __asm__ __volatile__ ("rep nop" : : : "memory"); + guest_wait_for_irq(); + + /* Deliver event 
channel with KVM_XEN_HVM_EVTCHN_SEND */ + GUEST_SYNC(10); + + guest_wait_for_irq(); + + GUEST_SYNC(11); + + /* Our turn. Deliver event channel (to ourselves) with the + * EVTCHNOP_send hypercall. */ + unsigned long rax; + struct evtchn_send s = { .port = 127 }; + __asm__ __volatile__ ("vmcall" : + "=a" (rax) : + "a" (__HYPERVISOR_event_channel_op), + "D" (EVTCHNOP_send), + "S" (&s)); + + GUEST_ASSERT(rax == 0); + + guest_wait_for_irq(); + + GUEST_SYNC(12); + + /* Deliver "outbound" event channel to an eventfd which + * happens to be one of our own irqfds. */ + s.port = 197; + __asm__ __volatile__ ("vmcall" : + "=a" (rax) : + "a" (__HYPERVISOR_event_channel_op), + "D" (EVTCHNOP_send), + "S" (&s)); + + GUEST_ASSERT(rax == 0); + + guest_wait_for_irq(); + + GUEST_SYNC(13); + + /* Set a timer 100ms in the future. */ + __asm__ __volatile__ ("vmcall" : + "=a" (rax) : + "a" (__HYPERVISOR_set_timer_op), + "D" (rs->state_entry_time + 100000000)); + GUEST_ASSERT(rax == 0); + + GUEST_SYNC(14); + + /* Now wait for the timer */ + guest_wait_for_irq(); + + GUEST_SYNC(15); + + /* The host has 'restored' the timer. Just wait for it. */ + guest_wait_for_irq(); + + GUEST_SYNC(16); + + /* Poll for an event channel port which is already set */ + u32 ports[1] = { EVTCHN_TIMER }; + struct sched_poll p = { + .ports = ports, + .nr_ports = 1, + .timeout = 0, + }; + + __asm__ __volatile__ ("vmcall" : + "=a" (rax) : + "a" (__HYPERVISOR_sched_op), + "D" (SCHEDOP_poll), + "S" (&p)); + + GUEST_ASSERT(rax == 0); + + GUEST_SYNC(17); + + /* Poll for an unset port and wait for the timeout. */ + p.timeout = 100000000; + __asm__ __volatile__ ("vmcall" : + "=a" (rax) : + "a" (__HYPERVISOR_sched_op), + "D" (SCHEDOP_poll), + "S" (&p)); + + GUEST_ASSERT(rax == 0); + + GUEST_SYNC(18); + + /* A timer will wake the masked port we're waiting on, while we poll */ + p.timeout = 0; + __asm__ __volatile__ ("vmcall" : + "=a" (rax) : + "a" (__HYPERVISOR_sched_op), + "D" (SCHEDOP_poll), + "S" (&p)); + + GUEST_ASSERT(rax == 0); + + GUEST_SYNC(19); + + /* A timer will wake an *unmasked* port, which should wake us with an + * actual interrupt, while we're polling on a different port. 
*/ + ports[0]++; + p.timeout = 0; + __asm__ __volatile__ ("vmcall" : + "=a" (rax) : + "a" (__HYPERVISOR_sched_op), + "D" (SCHEDOP_poll), + "S" (&p)); + + GUEST_ASSERT(rax == 0); + + guest_wait_for_irq(); + + GUEST_SYNC(20); + + /* Timer should have fired already */ + guest_wait_for_irq(); + + GUEST_SYNC(21); } static int cmp_timespec(struct timespec *a, struct timespec *b) @@ -190,9 +344,13 @@ static int cmp_timespec(struct timespec *a, struct timespec *b) else return 0; } +struct vcpu_info *vinfo; static void handle_alrm(int sig) { + if (vinfo) + printf("evtchn_upcall_pending 0x%x\n", vinfo->evtchn_upcall_pending); + vcpu_dump(stdout, vm, VCPU_ID, 0); TEST_FAIL("IRQ delivery timed out"); } @@ -212,6 +370,7 @@ int main(int argc, char *argv[]) bool do_runstate_tests = !!(xen_caps & KVM_XEN_HVM_CONFIG_RUNSTATE); bool do_eventfd_tests = !!(xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL); + bool do_evtchn_tests = do_eventfd_tests && !!(xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_SEND); clock_gettime(CLOCK_REALTIME, &min_ts); @@ -232,6 +391,12 @@ int main(int argc, char *argv[]) .flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL, .msr = XEN_HYPERCALL_MSR, }; + + /* Let the kernel know that we *will* use it for sending all + * event channels, which lets it intercept SCHEDOP_poll */ + if (do_evtchn_tests) + hvmc.flags |= KVM_XEN_HVM_CONFIG_EVTCHN_SEND; + vm_ioctl(vm, KVM_XEN_HVM_CONFIG, &hvmc); struct kvm_xen_hvm_attr lm = { @@ -294,7 +459,7 @@ int main(int argc, char *argv[]) /* Unexpected, but not a KVM failure */ if (irq_fd[0] == -1 || irq_fd[1] == -1) - do_eventfd_tests = false; + do_evtchn_tests = do_eventfd_tests = false; } if (do_eventfd_tests) { @@ -302,13 +467,13 @@ int main(int argc, char *argv[]) irq_routes.entries[0].gsi = 32; irq_routes.entries[0].type = KVM_IRQ_ROUTING_XEN_EVTCHN; - irq_routes.entries[0].u.xen_evtchn.port = 15; + irq_routes.entries[0].u.xen_evtchn.port = EVTCHN_TEST1; irq_routes.entries[0].u.xen_evtchn.vcpu = VCPU_ID; irq_routes.entries[0].u.xen_evtchn.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL; irq_routes.entries[1].gsi = 33; irq_routes.entries[1].type = KVM_IRQ_ROUTING_XEN_EVTCHN; - irq_routes.entries[1].u.xen_evtchn.port = 66; + irq_routes.entries[1].u.xen_evtchn.port = EVTCHN_TEST2; irq_routes.entries[1].u.xen_evtchn.vcpu = VCPU_ID; irq_routes.entries[1].u.xen_evtchn.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL; @@ -329,7 +494,39 @@ int main(int argc, char *argv[]) sigaction(SIGALRM, &sa, NULL); } - struct vcpu_info *vinfo = addr_gpa2hva(vm, VCPU_INFO_VADDR); + struct kvm_xen_vcpu_attr tmr = { + .type = KVM_XEN_VCPU_ATTR_TYPE_TIMER, + .u.timer.port = EVTCHN_TIMER, + .u.timer.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL, + .u.timer.expires_ns = 0 + }; + + if (do_evtchn_tests) { + struct kvm_xen_hvm_attr inj = { + .type = KVM_XEN_ATTR_TYPE_EVTCHN, + .u.evtchn.send_port = 127, + .u.evtchn.type = EVTCHNSTAT_interdomain, + .u.evtchn.flags = 0, + .u.evtchn.deliver.port.port = EVTCHN_TEST1, + .u.evtchn.deliver.port.vcpu = VCPU_ID + 1, + .u.evtchn.deliver.port.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL, + }; + vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &inj); + + /* Test migration to a different vCPU */ + inj.u.evtchn.flags = KVM_XEN_EVTCHN_UPDATE; + inj.u.evtchn.deliver.port.vcpu = VCPU_ID; + vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &inj); + + inj.u.evtchn.send_port = 197; + inj.u.evtchn.deliver.eventfd.port = 0; + inj.u.evtchn.deliver.eventfd.fd = irq_fd[1]; + inj.u.evtchn.flags = 0; + vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &inj); + + vcpu_ioctl(vm, VCPU_ID, 
KVM_XEN_VCPU_SET_ATTR, &tmr); + } + vinfo = addr_gpa2hva(vm, VCPU_INFO_VADDR); vinfo->evtchn_upcall_pending = 0; struct vcpu_runstate_info *rs = addr_gpa2hva(vm, RUNSTATE_ADDR); @@ -422,7 +619,7 @@ int main(int argc, char *argv[]) goto done; if (verbose) printf("Testing masked event channel\n"); - shinfo->evtchn_mask[0] = 0x8000; + shinfo->evtchn_mask[0] = 1UL << EVTCHN_TEST1; eventfd_write(irq_fd[0], 1UL); alarm(1); break; @@ -439,6 +636,9 @@ break; case 9: + TEST_ASSERT(!evtchn_irq_expected, + "Expected event channel IRQ but it didn't happen"); + shinfo->evtchn_pending[1] = 0; if (verbose) printf("Testing event channel after memslot change\n"); vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, @@ -448,12 +648,153 @@ alarm(1); break; + case 10: + TEST_ASSERT(!evtchn_irq_expected, + "Expected event channel IRQ but it didn't happen"); + if (!do_evtchn_tests) + goto done; + + shinfo->evtchn_pending[0] = 0; + if (verbose) + printf("Testing injection with KVM_XEN_HVM_EVTCHN_SEND\n"); + + struct kvm_irq_routing_xen_evtchn e; + e.port = EVTCHN_TEST2; + e.vcpu = VCPU_ID; + e.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL; + + vm_ioctl(vm, KVM_XEN_HVM_EVTCHN_SEND, &e); + evtchn_irq_expected = true; + alarm(1); + break; + + case 11: + TEST_ASSERT(!evtchn_irq_expected, + "Expected event channel IRQ but it didn't happen"); + shinfo->evtchn_pending[1] = 0; + + if (verbose) + printf("Testing guest EVTCHNOP_send direct to evtchn\n"); + evtchn_irq_expected = true; + alarm(1); + break; + + case 12: + TEST_ASSERT(!evtchn_irq_expected, + "Expected event channel IRQ but it didn't happen"); + shinfo->evtchn_pending[0] = 0; + + if (verbose) + printf("Testing guest EVTCHNOP_send to eventfd\n"); + evtchn_irq_expected = true; + alarm(1); + break; + + case 13: + TEST_ASSERT(!evtchn_irq_expected, + "Expected event channel IRQ but it didn't happen"); + shinfo->evtchn_pending[1] = 0; + + if (verbose) + printf("Testing guest oneshot timer\n"); + break; + + case 14: + memset(&tmr, 0, sizeof(tmr)); + tmr.type = KVM_XEN_VCPU_ATTR_TYPE_TIMER; + vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_GET_ATTR, &tmr); + TEST_ASSERT(tmr.u.timer.port == EVTCHN_TIMER, + "Timer port not returned"); + TEST_ASSERT(tmr.u.timer.priority == KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL, + "Timer priority not returned"); + TEST_ASSERT(tmr.u.timer.expires_ns > rs->state_entry_time, + "Timer expiry not returned"); + evtchn_irq_expected = true; + alarm(1); + break; + + case 15: + TEST_ASSERT(!evtchn_irq_expected, + "Expected event channel IRQ but it didn't happen"); + shinfo->evtchn_pending[0] = 0; + + if (verbose) + printf("Testing restored oneshot timer\n"); + + tmr.u.timer.expires_ns = rs->state_entry_time + 100000000; + vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &tmr); + evtchn_irq_expected = true; + alarm(1); + break; + + case 16: + TEST_ASSERT(!evtchn_irq_expected, + "Expected event channel IRQ but it didn't happen"); + + if (verbose) + printf("Testing SCHEDOP_poll with already pending event\n"); + shinfo->evtchn_pending[0] = shinfo->evtchn_mask[0] = 1UL << EVTCHN_TIMER; + alarm(1); + break; + + case 17: + if (verbose) + printf("Testing SCHEDOP_poll timeout\n"); + shinfo->evtchn_pending[0] = 0; + alarm(1); + break; + + case 18: + if (verbose) + printf("Testing SCHEDOP_poll wake on masked event\n"); + + tmr.u.timer.expires_ns = rs->state_entry_time + 100000000; + vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &tmr); + alarm(1); + break; + + case 19: + shinfo->evtchn_pending[0] = 
shinfo->evtchn_mask[0] = 0; + if (verbose) + printf("Testing SCHEDOP_poll wake on unmasked event\n"); + + evtchn_irq_expected = true; + tmr.u.timer.expires_ns = rs->state_entry_time + 100000000; + vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &tmr); + + /* Read it back and check the pending time is reported correctly */ + tmr.u.timer.expires_ns = 0; + vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_GET_ATTR, &tmr); + TEST_ASSERT(tmr.u.timer.expires_ns == rs->state_entry_time + 100000000, + "Timer not reported pending"); + alarm(1); + break; + + case 20: + TEST_ASSERT(!evtchn_irq_expected, + "Expected event channel IRQ but it didn't happen"); + /* Read timer and check it is no longer pending */ + vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_GET_ATTR, &tmr); + TEST_ASSERT(!tmr.u.timer.expires_ns, "Timer still reported pending"); + + shinfo->evtchn_pending[0] = 0; + if (verbose) + printf("Testing timer in the past\n"); + + evtchn_irq_expected = true; + tmr.u.timer.expires_ns = rs->state_entry_time - 100000000ULL; + vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &tmr); + alarm(1); + break; + + case 21: + TEST_ASSERT(!evtchn_irq_expected, + "Expected event channel IRQ but it didn't happen"); + goto done; + case 0x20: TEST_ASSERT(evtchn_irq_expected, "Unexpected event channel IRQ"); evtchn_irq_expected = false; - if (shinfo->evtchn_pending[1] && - shinfo->evtchn_pending[0]) - goto done; break; } break; @@ -466,6 +807,7 @@ } done: + alarm(0); clock_gettime(CLOCK_REALTIME, &max_ts);
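Taken together, the new xen_shinfo_test cases cover direct host-side injection with KVM_XEN_HVM_EVTCHN_SEND, guest EVTCHNOP_send to both in-kernel ports and eventfds, SCHEDOP_poll interception, and the save/restorable in-kernel Xen timer. The direct-injection path exercised in case 10 reduces to a single VM ioctl; a minimal sketch, assuming only the uAPI used above, with xen_evtchn_send() as an illustrative wrapper rather than anything from the test:

/* Sketch, not part of the selftest: inject a Xen event channel
 * without an irqfd, mirroring case 10 above. */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int xen_evtchn_send(int vm_fd, __u32 port, __u32 vcpu)
{
	struct kvm_irq_routing_xen_evtchn e = {
		.port = port,
		.vcpu = vcpu,
		.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
	};

	/*
	 * Sets the port pending in the shared info page and kicks the
	 * target vCPU, much like an in-kernel EVTCHNOP_send.
	 */
	return ioctl(vm_fd, KVM_XEN_HVM_EVTCHN_SEND, &e);
}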