author	Radim Krčmář	2017-08-05 00:12:49 +0200
committer	Paolo Bonzini	2017-08-07 16:11:50 +0200
commit	d6321d493319bfd406c484e8359c6101cbda39d3 (patch)
tree	97b80d5e78b3b5490e377942ca0fbe43bf65a21f /arch/x86/kvm/cpuid.h
parent	c6bd18011ff8ea23473a1f4c6d934f761879081d (diff)
KVM: x86: generalize guest_cpuid_has_ helpers
This patch turns guest_cpuid_has_XYZ(cpuid) into guest_cpuid_has(cpuid, X86_FEATURE_XYZ), which gets rid of many very similar helpers.

When seeing an X86_FEATURE_*, we can know which CPUID leaf and register it belongs to, but this information isn't in common code, so we recreate it for KVM.

Add some BUILD_BUG_ONs to make sure the mapping is only used with compile-time-constant features and stays within the bounds of the table.

Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
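As a sketch of what the conversion looks like at a call site (the stub declarations and the X86_FEATURE_XSAVE value below are stand-ins added here for illustration so the snippet compiles on its own; they are not taken from this patch):

/*
 * Illustration only, not part of this patch: a hypothetical call site
 * before and after the conversion.  The real declarations live in the
 * kernel headers.
 */
#include <stdbool.h>

struct kvm_vcpu;                                        /* opaque stand-in */
#define X86_FEATURE_XSAVE (4 * 32 + 26)                 /* "word * 32 + bit" encoding */

bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu);      /* old per-feature helper */
bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned x86_feature); /* new generic helper */

static inline bool vcpu_supports_xsave_old(struct kvm_vcpu *vcpu)
{
	return guest_cpuid_has_xsave(vcpu);             /* before this patch */
}

static inline bool vcpu_supports_xsave_new(struct kvm_vcpu *vcpu)
{
	return guest_cpuid_has(vcpu, X86_FEATURE_XSAVE); /* after this patch */
}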
Diffstat (limited to 'arch/x86/kvm/cpuid.h')
-rw-r--r--	arch/x86/kvm/cpuid.h	| 170
1 file changed, 57 insertions(+), 113 deletions(-)
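The core idea of the reverse_cpuid table added below can be shown with a small self-contained sketch: an X86_FEATURE_* constant encodes "word * 32 + bit", the word part selects a (function, index, register) triple, and the bit part selects the bit inside that register. All names, table contents, and the feature value in this sketch are simplified stand-ins, not the kernel's definitions:

/*
 * Standalone sketch (not from the patch) of the reverse-CPUID lookup.
 * The constants below are simplified stand-ins.
 */
#include <stdio.h>

enum { MY_CPUID_EAX, MY_CPUID_EBX, MY_CPUID_ECX, MY_CPUID_EDX };

struct my_cpuid_reg {
	unsigned int function;
	unsigned int index;
	int reg;
};

/* Word 0 stands in for a leaf-1 ECX word; word 1 for a leaf-7 EBX word. */
static const struct my_cpuid_reg my_reverse_cpuid[] = {
	[0] = { 1, 0, MY_CPUID_ECX },
	[1] = { 7, 0, MY_CPUID_EBX },
};

/* Hypothetical feature constant: word 1, bit 20 ("word * 32 + bit"). */
#define MY_FEATURE_EXAMPLE (1 * 32 + 20)

int main(void)
{
	unsigned int word = MY_FEATURE_EXAMPLE / 32;
	unsigned int bit = MY_FEATURE_EXAMPLE % 32;
	const struct my_cpuid_reg *r = &my_reverse_cpuid[word];

	printf("look up CPUID.%#x.%u, register %d, bit %u\n",
	       r->function, r->index, r->reg, bit);
	return 0;
}

In the patch itself, x86_feature_cpuid() performs this word-to-leaf lookup at compile time and guest_cpuid_get_register() then picks the matching register out of the vCPU's cached CPUID entry.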
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index c723d64657d0..4e9ac93b4f3a 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -3,6 +3,7 @@
#include "x86.h"
#include <asm/cpu.h>
+#include <asm/processor.h>
int kvm_update_cpuid(struct kvm_vcpu *vcpu);
bool kvm_mpx_supported(void);
@@ -29,95 +30,78 @@ static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
return vcpu->arch.maxphyaddr;
}
-static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid_entry2 *best;
+struct cpuid_reg {
+ u32 function;
+ u32 index;
+ int reg;
+};
- if (!static_cpu_has(X86_FEATURE_XSAVE))
- return false;
-
- best = kvm_find_cpuid_entry(vcpu, 1, 0);
- return best && (best->ecx & bit(X86_FEATURE_XSAVE));
-}
+static const struct cpuid_reg reverse_cpuid[] = {
+ [CPUID_1_EDX] = { 1, 0, CPUID_EDX},
+ [CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX},
+ [CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
+ [CPUID_1_ECX] = { 1, 0, CPUID_ECX},
+ [CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
+ [CPUID_8000_0001_ECX] = {0xc0000001, 0, CPUID_ECX},
+ [CPUID_7_0_EBX] = { 7, 0, CPUID_EBX},
+ [CPUID_D_1_EAX] = { 0xd, 1, CPUID_EAX},
+ [CPUID_F_0_EDX] = { 0xf, 0, CPUID_EDX},
+ [CPUID_F_1_EDX] = { 0xf, 1, CPUID_EDX},
+ [CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
+ [CPUID_6_EAX] = { 6, 0, CPUID_EAX},
+ [CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
+ [CPUID_7_ECX] = { 7, 0, CPUID_ECX},
+ [CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
+};
-static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu)
+static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned x86_feature)
{
- struct kvm_cpuid_entry2 *best;
+ unsigned x86_leaf = x86_feature / 32;
- best = kvm_find_cpuid_entry(vcpu, 1, 0);
- return best && (best->edx & bit(X86_FEATURE_MTRR));
-}
+ BUILD_BUG_ON(!__builtin_constant_p(x86_leaf));
+ BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
+ BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
-static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid_entry2 *best;
-
- best = kvm_find_cpuid_entry(vcpu, 7, 0);
- return best && (best->ebx & bit(X86_FEATURE_TSC_ADJUST));
-}
-
-static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid_entry2 *best;
-
- best = kvm_find_cpuid_entry(vcpu, 7, 0);
- return best && (best->ebx & bit(X86_FEATURE_SMEP));
+ return reverse_cpuid[x86_leaf];
}
-static inline bool guest_cpuid_has_smap(struct kvm_vcpu *vcpu)
+static __always_inline int *guest_cpuid_get_register(struct kvm_vcpu *vcpu, unsigned x86_feature)
{
- struct kvm_cpuid_entry2 *best;
-
- best = kvm_find_cpuid_entry(vcpu, 7, 0);
- return best && (best->ebx & bit(X86_FEATURE_SMAP));
-}
-
-static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid_entry2 *best;
-
- best = kvm_find_cpuid_entry(vcpu, 7, 0);
- return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
-}
-
-static inline bool guest_cpuid_has_pku(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid_entry2 *best;
-
- best = kvm_find_cpuid_entry(vcpu, 7, 0);
- return best && (best->ecx & bit(X86_FEATURE_PKU));
-}
-
-static inline bool guest_cpuid_has_longmode(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid_entry2 *best;
-
- best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
- return best && (best->edx & bit(X86_FEATURE_LM));
-}
+ struct kvm_cpuid_entry2 *entry;
+ const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
-static inline bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid_entry2 *best;
+ entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
+ if (!entry)
+ return NULL;
- best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
- return best && (best->ecx & bit(X86_FEATURE_OSVW));
+ switch (cpuid.reg) {
+ case CPUID_EAX:
+ return &entry->eax;
+ case CPUID_EBX:
+ return &entry->ebx;
+ case CPUID_ECX:
+ return &entry->ecx;
+ case CPUID_EDX:
+ return &entry->edx;
+ default:
+ BUILD_BUG();
+ return NULL;
+ }
}
-static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu)
+static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned x86_feature)
{
- struct kvm_cpuid_entry2 *best;
+ int *reg;
- best = kvm_find_cpuid_entry(vcpu, 1, 0);
- return best && (best->ecx & bit(X86_FEATURE_PCID));
-}
+ if (x86_feature == X86_FEATURE_XSAVE &&
+ !static_cpu_has(X86_FEATURE_XSAVE))
+ return false;
-static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid_entry2 *best;
+ reg = guest_cpuid_get_register(vcpu, x86_feature);
+ if (!reg)
+ return false;
- best = kvm_find_cpuid_entry(vcpu, 1, 0);
- return best && (best->ecx & bit(X86_FEATURE_X2APIC));
+ return *reg & bit(x86_feature);
}
static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
@@ -128,46 +112,6 @@ static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
}
-static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid_entry2 *best;
-
- best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
- return best && (best->edx & bit(X86_FEATURE_GBPAGES));
-}
-
-static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid_entry2 *best;
-
- best = kvm_find_cpuid_entry(vcpu, 7, 0);
- return best && (best->ebx & bit(X86_FEATURE_RTM));
-}
-
-static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid_entry2 *best;
-
- best = kvm_find_cpuid_entry(vcpu, 7, 0);
- return best && (best->ebx & bit(X86_FEATURE_MPX));
-}
-
-static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid_entry2 *best;
-
- best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
- return best && (best->edx & bit(X86_FEATURE_RDTSCP));
-}
-
-static inline bool guest_cpuid_has_nrips(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid_entry2 *best;
-
- best = kvm_find_cpuid_entry(vcpu, 0x8000000a, 0);
- return best && (best->edx & bit(X86_FEATURE_NRIPS));
-}
-
static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;