Diffstat (limited to 'drivers/kvm')
-rw-r--r--  drivers/kvm/Kconfig        |   4
-rw-r--r--  drivers/kvm/kvm.h          |   8
-rw-r--r--  drivers/kvm/kvm_main.c     |  38
-rw-r--r--  drivers/kvm/kvm_svm.h      |   2
-rw-r--r--  drivers/kvm/kvm_vmx.h      |   2
-rw-r--r--  drivers/kvm/mmu.c          |  17
-rw-r--r--  drivers/kvm/paging_tmpl.h  |  20
-rw-r--r--  drivers/kvm/svm.c          |  78
-rw-r--r--  drivers/kvm/vmx.c          |  62
-rw-r--r--  drivers/kvm/x86_emulate.c  |   8
-rw-r--r--  drivers/kvm/x86_emulate.h  |   2
11 files changed, 92 insertions(+), 149 deletions(-)
diff --git a/drivers/kvm/Kconfig b/drivers/kvm/Kconfig
index 36412e90f09b..703cc88d1ef9 100644
--- a/drivers/kvm/Kconfig
+++ b/drivers/kvm/Kconfig
@@ -1,6 +1,8 @@
 #
 # KVM configuration
 #
+menu "Virtualization"
+
 config KVM
 	tristate "Kernel-based Virtual Machine (KVM) support"
 	depends on X86 && EXPERIMENTAL
@@ -31,3 +33,5 @@ config KVM_AMD
 	---help---
 	  Provides support for KVM on AMD processors equipped with the AMD-V
 	  (SVM) extensions.
+
+endmenu
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 5785d0870ab6..930e04ce1af6 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -140,7 +140,7 @@ enum {
 	VCPU_REGS_RBP = 5,
 	VCPU_REGS_RSI = 6,
 	VCPU_REGS_RDI = 7,
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	VCPU_REGS_R8 = 8,
 	VCPU_REGS_R9 = 9,
 	VCPU_REGS_R10 = 10,
@@ -375,7 +375,7 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr0);
 void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr0);
 void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 void set_efer(struct kvm_vcpu *vcpu, u64 efer);
 #endif
 
@@ -485,7 +485,7 @@ static inline unsigned long read_tr_base(void)
 	return segment_base(tr);
 }
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 static inline unsigned long read_msr(unsigned long msr)
 {
 	u64 value;
@@ -533,7 +533,7 @@ static inline u32 get_rdx_init_val(void)
 #define TSS_REDIRECTION_SIZE (256 / 8)
 #define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 
 /*
  * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64. Therefore
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index b6b8a41b5ec8..fd1bb870545c 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -72,18 +72,7 @@ static struct dentry *debugfs_dir;
 #define CR8_RESEVED_BITS (~0x0fULL)
 #define EFER_RESERVED_BITS 0xfffffffffffff2fe
 
-struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
-{
-	int i;
-
-	for (i = 0; i < vcpu->nmsrs; ++i)
-		if (vcpu->guest_msrs[i].index == msr)
-			return &vcpu->guest_msrs[i];
-	return 0;
-}
-EXPORT_SYMBOL_GPL(find_msr_entry);
-
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 // LDT or TSS descriptor in the GDT. 16 bytes.
 struct segment_descriptor_64 {
 	struct segment_descriptor s;
@@ -115,7 +104,7 @@ unsigned long segment_base(u16 selector)
 	}
 	d = (struct segment_descriptor *)(table_base + (selector & ~7));
 	v = d->base_low | ((ul)d->base_mid << 16) | ((ul)d->base_high << 24);
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	if (d->system == 0
 	    && (d->type == 2 || d->type == 9 || d->type == 11))
 		v |= ((ul)((struct segment_descriptor_64 *)d)->base_higher) << 32;
@@ -216,7 +205,6 @@ static struct kvm_vcpu *vcpu_load(struct kvm *kvm, int vcpu_slot)
 static void vcpu_put(struct kvm_vcpu *vcpu)
 {
 	kvm_arch_ops->vcpu_put(vcpu);
-	put_cpu();
 	mutex_unlock(&vcpu->mutex);
 }
 
@@ -351,7 +339,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	}
 
 	if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 		if ((vcpu->shadow_efer & EFER_LME)) {
 			int cs_db, cs_l;
 
@@ -1120,12 +1108,10 @@ static int get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 	return kvm_arch_ops->get_msr(vcpu, msr_index, pdata);
 }
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 
 void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
-	struct vmx_msr_entry *msr;
-
 	if (efer & EFER_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
 		       efer);
@@ -1140,16 +1126,12 @@ void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 		return;
 	}
 
+	kvm_arch_ops->set_efer(vcpu, efer);
+
 	efer &= ~EFER_LMA;
 	efer |= vcpu->shadow_efer & EFER_LMA;
 
 	vcpu->shadow_efer = efer;
-
-	msr = find_msr_entry(vcpu, MSR_EFER);
-
-	if (!(efer & EFER_LMA))
-		efer &= ~EFER_LME;
-	msr->data = efer;
 }
 EXPORT_SYMBOL_GPL(set_efer);
 
@@ -1243,7 +1225,7 @@ static int kvm_dev_ioctl_get_regs(struct kvm *kvm, struct kvm_regs *regs)
 	regs->rdi = vcpu->regs[VCPU_REGS_RDI];
 	regs->rsp = vcpu->regs[VCPU_REGS_RSP];
 	regs->rbp = vcpu->regs[VCPU_REGS_RBP];
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	regs->r8 = vcpu->regs[VCPU_REGS_R8];
 	regs->r9 = vcpu->regs[VCPU_REGS_R9];
 	regs->r10 = vcpu->regs[VCPU_REGS_R10];
@@ -1287,7 +1269,7 @@ static int kvm_dev_ioctl_set_regs(struct kvm *kvm, struct kvm_regs *regs)
 	vcpu->regs[VCPU_REGS_RDI] = regs->rdi;
 	vcpu->regs[VCPU_REGS_RSP] = regs->rsp;
 	vcpu->regs[VCPU_REGS_RBP] = regs->rbp;
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	vcpu->regs[VCPU_REGS_R8] = regs->r8;
 	vcpu->regs[VCPU_REGS_R9] = regs->r9;
 	vcpu->regs[VCPU_REGS_R10] = regs->r10;
@@ -1401,7 +1383,7 @@ static int kvm_dev_ioctl_set_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
 	vcpu->cr8 = sregs->cr8;
 
 	mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	kvm_arch_ops->set_efer(vcpu, sregs->efer);
 #endif
 	vcpu->apic_base = sregs->apic_base;
@@ -1434,7 +1416,7 @@ static int kvm_dev_ioctl_set_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
 static u32 msrs_to_save[] = {
 	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
 	MSR_K6_STAR,
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
 #endif
 	MSR_IA32_TIME_STAMP_COUNTER,
diff --git a/drivers/kvm/kvm_svm.h b/drivers/kvm/kvm_svm.h
index 7d7f2aa10960..74cc862f4935 100644
--- a/drivers/kvm/kvm_svm.h
+++ b/drivers/kvm/kvm_svm.h
@@ -9,7 +9,7 @@
 #include "kvm.h"
 
 static const u32 host_save_msrs[] = {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
 	MSR_FS_BASE, MSR_GS_BASE,
 #endif
diff --git a/drivers/kvm/kvm_vmx.h b/drivers/kvm/kvm_vmx.h
index 87e12d2bfa16..d139f73fb6e1 100644
--- a/drivers/kvm/kvm_vmx.h
+++ b/drivers/kvm/kvm_vmx.h
@@ -1,7 +1,7 @@
 #ifndef __KVM_VMX_H
 #define __KVM_VMX_H
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 /*
  * avoid save/load MSR_SYSCALL_MASK and MSR_LSTAR by std vt
  * mechanism (cpu bug AA24)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 4e29d9b7211c..3d367cbfe1f9 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -61,22 +61,9 @@
 
 
 #define PT32_PTE_COPY_MASK \
-	(PT_PRESENT_MASK | PT_PWT_MASK | PT_PCD_MASK | \
-	PT_ACCESSED_MASK | PT_DIRTY_MASK | PT_PAT_MASK | \
-	PT_GLOBAL_MASK )
-
-#define PT32_NON_PTE_COPY_MASK \
-	(PT_PRESENT_MASK | PT_PWT_MASK | PT_PCD_MASK | \
-	PT_ACCESSED_MASK | PT_DIRTY_MASK)
-
-
-#define PT64_PTE_COPY_MASK \
-	(PT64_NX_MASK | PT32_PTE_COPY_MASK)
-
-#define PT64_NON_PTE_COPY_MASK \
-	(PT64_NX_MASK | PT32_NON_PTE_COPY_MASK)
-
+	(PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_DIRTY_MASK | PT_GLOBAL_MASK)
 
+#define PT64_PTE_COPY_MASK (PT64_NX_MASK | PT32_PTE_COPY_MASK)
 
 #define PT_FIRST_AVAIL_BITS_SHIFT 9
 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 765c2e1a048e..a9771b4c5bb8 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -32,7 +32,6 @@
 	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
 	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
 	#define PT_PTE_COPY_MASK PT64_PTE_COPY_MASK
-	#define PT_NON_PTE_COPY_MASK PT64_NON_PTE_COPY_MASK
 #elif PTTYPE == 32
 	#define pt_element_t u32
 	#define guest_walker guest_walker32
@@ -43,7 +42,6 @@
 	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
 	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
 	#define PT_PTE_COPY_MASK PT32_PTE_COPY_MASK
-	#define PT_NON_PTE_COPY_MASK PT32_NON_PTE_COPY_MASK
 #else
 	#error Invalid PTTYPE value
 #endif
@@ -105,9 +103,7 @@ static void FNAME(set_pde)(struct kvm_vcpu *vcpu, u64 guest_pde,
 	if (PTTYPE == 32 && is_cpuid_PSE36())
 		gaddr |= (guest_pde & PT32_DIR_PSE36_MASK) <<
 			(32 - PT32_DIR_PSE36_SHIFT);
-	*shadow_pte = (guest_pde & (PT_NON_PTE_COPY_MASK | PT_GLOBAL_MASK)) |
-		((guest_pde & PT_DIR_PAT_MASK) >>
-		 (PT_DIR_PAT_SHIFT - PT_PAT_SHIFT));
+	*shadow_pte = guest_pde & PT_PTE_COPY_MASK;
 	set_pte_common(vcpu, shadow_pte, gaddr,
 		       guest_pde & PT_DIRTY_MASK, access_bits);
 }
@@ -162,6 +158,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 		u32 index = SHADOW_PT_INDEX(addr, level);
 		u64 *shadow_ent = ((u64 *)__va(shadow_addr)) + index;
 		pt_element_t *guest_ent;
+		u64 shadow_pte;
 
 		if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) {
 			if (level == PT_PAGE_TABLE_LEVEL)
@@ -204,14 +201,11 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 		shadow_addr = kvm_mmu_alloc_page(vcpu, shadow_ent);
 		if (!VALID_PAGE(shadow_addr))
 			return ERR_PTR(-ENOMEM);
-		if (!kvm_arch_ops->is_long_mode(vcpu) && level == 3)
-			*shadow_ent = shadow_addr |
-				(*guest_ent & (PT_PRESENT_MASK | PT_PWT_MASK | PT_PCD_MASK));
-		else {
-			*shadow_ent = shadow_addr |
-				(*guest_ent & PT_NON_PTE_COPY_MASK);
-			*shadow_ent |= (PT_WRITABLE_MASK | PT_USER_MASK);
-		}
+		shadow_pte = shadow_addr | PT_PRESENT_MASK;
+		if (vcpu->mmu.root_level > 3 || level != 3)
+			shadow_pte |= PT_ACCESSED_MASK
+				| PT_WRITABLE_MASK | PT_USER_MASK;
+		*shadow_ent = shadow_pte;
 		prev_shadow_ent = shadow_ent;
 	}
 }
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index a33a89c68138..0e6bc8c649ce 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -287,7 +287,7 @@ static void svm_hardware_enable(void *garbage)
 
 	struct svm_cpu_data *svm_data;
 	uint64_t efer;
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	struct desc_ptr gdt_descr;
 #else
 	struct Xgt_desc_struct gdt_descr;
@@ -377,6 +377,7 @@ static __init int svm_hardware_setup(void)
 	void *msrpm_va;
 	int r;
 
+	kvm_emulator_want_group7_invlpg();
 
 	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);
 
@@ -397,7 +398,7 @@ static __init int svm_hardware_setup(void)
 	memset(msrpm_va, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
 	msrpm_base = page_to_pfn(msrpm_pages) << PAGE_SHIFT;
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	set_msr_interception(msrpm_va, MSR_GS_BASE, 1, 1);
 	set_msr_interception(msrpm_va, MSR_FS_BASE, 1, 1);
 	set_msr_interception(msrpm_va, MSR_KERNEL_GS_BASE, 1, 1);
@@ -704,7 +705,7 @@ static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
 
 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	if (vcpu->shadow_efer & KVM_EFER_LME) {
 		if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) {
 			vcpu->shadow_efer |= KVM_EFER_LMA;
@@ -1097,7 +1098,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 	case MSR_IA32_APICBASE:
 		*data = vcpu->apic_base;
 		break;
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	case MSR_STAR:
 		*data = vcpu->svm->vmcb->save.star;
 		break;
@@ -1149,7 +1150,7 @@ static int rdmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 {
 	switch (ecx) {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	case MSR_EFER:
 		set_efer(vcpu, data);
 		break;
@@ -1172,7 +1173,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 	case MSR_IA32_APICBASE:
 		vcpu->apic_base = data;
 		break;
-#ifdef __x86_64___
+#ifdef CONFIG_X86_64_
 	case MSR_STAR:
 		vcpu->svm->vmcb->save.star = data;
 		break;
@@ -1345,53 +1346,18 @@ static void kvm_reput_irq(struct kvm_vcpu *vcpu)
 
 static void save_db_regs(unsigned long *db_regs)
 {
-#ifdef __x86_64__
-	asm ("mov %%dr0, %%rax \n\t"
-	     "mov %%rax, %[dr0] \n\t"
-	     "mov %%dr1, %%rax \n\t"
-	     "mov %%rax, %[dr1] \n\t"
-	     "mov %%dr2, %%rax \n\t"
-	     "mov %%rax, %[dr2] \n\t"
-	     "mov %%dr3, %%rax \n\t"
-	     "mov %%rax, %[dr3] \n\t"
-	     : [dr0] "=m"(db_regs[0]),
-	       [dr1] "=m"(db_regs[1]),
-	       [dr2] "=m"(db_regs[2]),
-	       [dr3] "=m"(db_regs[3])
-	     : : "rax");
-#else
-	asm ("mov %%dr0, %%eax \n\t"
-	     "mov %%eax, %[dr0] \n\t"
-	     "mov %%dr1, %%eax \n\t"
-	     "mov %%eax, %[dr1] \n\t"
-	     "mov %%dr2, %%eax \n\t"
-	     "mov %%eax, %[dr2] \n\t"
-	     "mov %%dr3, %%eax \n\t"
-	     "mov %%eax, %[dr3] \n\t"
-	     : [dr0] "=m"(db_regs[0]),
-	       [dr1] "=m"(db_regs[1]),
-	       [dr2] "=m"(db_regs[2]),
-	       [dr3] "=m"(db_regs[3])
-	     : : "eax");
-#endif
+	asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
+	asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1]));
+	asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2]));
+	asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3]));
 }
 
 static void load_db_regs(unsigned long *db_regs)
 {
-	asm volatile ("mov %[dr0], %%dr0 \n\t"
-		      "mov %[dr1], %%dr1 \n\t"
-		      "mov %[dr2], %%dr2 \n\t"
-		      "mov %[dr3], %%dr3 \n\t"
-		      :
-		      : [dr0] "r"(db_regs[0]),
-			[dr1] "r"(db_regs[1]),
-			[dr2] "r"(db_regs[2]),
-			[dr3] "r"(db_regs[3])
-#ifdef __x86_64__
-		      : "rax");
-#else
-		      : "eax");
-#endif
+	asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0]));
+	asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1]));
+	asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2]));
+	asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
 }
 
 static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
@@ -1422,7 +1388,7 @@ again:
 		load_db_regs(vcpu->svm->db_regs);
 	}
 	asm volatile (
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 		"push %%rbx; push %%rcx; push %%rdx;"
 		"push %%rsi; push %%rdi; push %%rbp;"
 		"push %%r8;  push %%r9;  push %%r10; push %%r11;"
@@ -1432,7 +1398,7 @@ again:
 		"push %%esi; push %%edi; push %%ebp;"
 #endif
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 		"mov %c[rbx](%[vcpu]), %%rbx \n\t"
 		"mov %c[rcx](%[vcpu]), %%rcx \n\t"
 		"mov %c[rdx](%[vcpu]), %%rdx \n\t"
@@ -1456,7 +1422,7 @@ again:
 		"mov %c[rbp](%[vcpu]), %%ebp \n\t"
 #endif
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 		/* Enter guest mode */
 		"push %%rax \n\t"
 		"mov %c[svm](%[vcpu]), %%rax \n\t"
@@ -1477,7 +1443,7 @@ again:
 #endif
 
 		/* Save guest registers, load host registers */
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 		"mov %%rbx, %c[rbx](%[vcpu]) \n\t"
 		"mov %%rcx, %c[rcx](%[vcpu]) \n\t"
 		"mov %%rdx, %c[rdx](%[vcpu]) \n\t"
@@ -1518,7 +1484,7 @@ again:
 		  [rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
 		  [rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
 		  [rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP]))
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 		  ,[r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])),
 		  [r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])),
 		  [r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
@@ -1663,9 +1629,7 @@ static struct kvm_arch_ops svm_arch_ops = {
 
 static int __init svm_init(void)
 {
-	kvm_emulator_want_group7_invlpg();
-	kvm_init_arch(&svm_arch_ops, THIS_MODULE);
-	return 0;
+	return kvm_init_arch(&svm_arch_ops, THIS_MODULE);
 }
 
 static void __exit svm_exit(void)
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index bda7a7ae2167..f0f0b1a781f8 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -22,6 +22,7 @@
 #include <linux/mm.h>
 #include <linux/highmem.h>
 #include <asm/io.h>
+#include <asm/desc.h>
 
 #include "segment_descriptor.h"
 
@@ -33,7 +34,7 @@ MODULE_LICENSE("GPL");
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 #define HOST_IS_64 1
 #else
 #define HOST_IS_64 0
@@ -70,15 +71,13 @@ static struct kvm_vmx_segment_field {
 };
 
 static const u32 vmx_msr_index[] = {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
 #endif
 	MSR_EFER, MSR_K6_STAR,
 };
 #define NR_VMX_MSR (sizeof(vmx_msr_index) / sizeof(*vmx_msr_index))
 
-struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr);
-
 static inline int is_page_fault(u32 intr_info)
 {
 	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
@@ -92,6 +91,16 @@ static inline int is_external_interrupt(u32 intr_info)
 		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
 }
 
+static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
+{
+	int i;
+
+	for (i = 0; i < vcpu->nmsrs; ++i)
+		if (vcpu->guest_msrs[i].index == msr)
+			return &vcpu->guest_msrs[i];
+	return 0;
+}
+
 static void vmcs_clear(struct vmcs *vmcs)
 {
 	u64 phys_addr = __pa(vmcs);
@@ -137,7 +146,7 @@ static u32 vmcs_read32(unsigned long field)
 
 static u64 vmcs_read64(unsigned long field)
 {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	return vmcs_readl(field);
 #else
 	return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
@@ -167,7 +176,7 @@ static void vmcs_write32(unsigned long field, u32 value)
 
 static void vmcs_write64(unsigned long field, u64 value)
 {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	vmcs_writel(field, value);
 #else
 	vmcs_writel(field, value);
@@ -296,7 +305,7 @@ static void guest_write_tsc(u64 guest_tsc)
 
 static void reload_tss(void)
 {
-#ifndef __x86_64__
+#ifndef CONFIG_X86_64
 
 	/*
 	 * VT restores TR but not its size.  Useless.
@@ -327,7 +336,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 	}
 
 	switch (msr_index) {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	case MSR_FS_BASE:
 		data = vmcs_readl(GUEST_FS_BASE);
 		break;
@@ -390,7 +399,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 {
 	struct vmx_msr_entry *msr;
 	switch (msr_index) {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	case MSR_FS_BASE:
 		vmcs_writel(GUEST_FS_BASE, data);
 		break;
@@ -525,7 +534,7 @@ static __init void hardware_enable(void *garbage)
 	u64 old;
 
 	rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
-	if ((old & 5) == 0)
+	if ((old & 5) != 5)
 		/* enable and lock */
 		wrmsrl(MSR_IA32_FEATURE_CONTROL, old | 5);
 	write_cr4(read_cr4() | CR4_VMXE); /* FIXME: not cpu hotplug safe */
@@ -725,7 +734,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 	fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);
 }
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 
 static void enter_lmode(struct kvm_vcpu *vcpu)
 {
@@ -767,7 +776,7 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	if (!vcpu->rmode.active && !(cr0 & CR0_PE_MASK))
 		enter_rmode(vcpu);
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	if (vcpu->shadow_efer & EFER_LME) {
 		if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK))
 			enter_lmode(vcpu);
@@ -808,7 +817,7 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	vcpu->cr4 = cr4;
 }
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 
 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
@@ -883,6 +892,8 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
 		ar |= (var->db & 1) << 14;
 		ar |= (var->g & 1) << 15;
 	}
+	if (ar == 0) /* a 0 value means unusable */
+		ar = AR_UNUSABLE_MASK;
 	vmcs_write32(sf->ar_bytes, ar);
 }
 
@@ -1095,7 +1106,7 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
 	vmcs_write16(HOST_FS_SELECTOR, read_fs());    /* 22.2.4 */
 	vmcs_write16(HOST_GS_SELECTOR, read_gs());    /* 22.2.4 */
 	vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	rdmsrl(MSR_FS_BASE, a);
 	vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
 	rdmsrl(MSR_GS_BASE, a);
@@ -1164,8 +1175,10 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
 			       VM_ENTRY_CONTROLS, 0);
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */
 
+#ifdef CONFIG_X86_64
 	vmcs_writel(VIRTUAL_APIC_PAGE_ADDR, 0);
 	vmcs_writel(TPR_THRESHOLD, 0);
+#endif
 
 	vmcs_writel(CR0_GUEST_HOST_MASK, KVM_GUEST_CR0_MASK);
 	vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
@@ -1173,7 +1186,7 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
 	vcpu->cr0 = 0x60000010;
 	vmx_set_cr0(vcpu, vcpu->cr0); // enter rmode
 	vmx_set_cr4(vcpu, 0);
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	vmx_set_efer(vcpu, 0);
 #endif
@@ -1689,7 +1702,7 @@ again:
 		vmcs_write16(HOST_GS_SELECTOR, 0);
 	}
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
 	vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
 #else
@@ -1713,7 +1726,7 @@ again:
 	asm (
 		/* Store host registers */
 		"pushf \n\t"
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 		"push %%rax; push %%rbx; push %%rdx;"
 		"push %%rsi; push %%rdi; push %%rbp;"
 		"push %%r8;  push %%r9;  push %%r10; push %%r11;"
@@ -1727,7 +1740,7 @@ again:
 		/* Check if vmlaunch of vmresume is needed */
 		"cmp $0, %1 \n\t"
 		/* Load guest registers.  Don't clobber flags. */
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 		"mov %c[cr2](%3), %%rax \n\t"
 		"mov %%rax, %%cr2 \n\t"
 		"mov %c[rax](%3), %%rax \n\t"
@@ -1764,7 +1777,7 @@ again:
 		".globl kvm_vmx_return \n\t"
 		"kvm_vmx_return: "
 		/* Save guest registers, load host registers, keep flags */
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 		"xchg %3,       0(%%rsp) \n\t"
 		"mov %%rax, %c[rax](%3) \n\t"
 		"mov %%rbx, %c[rbx](%3) \n\t"
@@ -1816,7 +1829,7 @@ again:
 		[rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
 		[rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
 		[rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP])),
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 		[r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])),
 		[r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])),
 		[r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
@@ -1837,7 +1850,7 @@ again:
 	fx_save(vcpu->guest_fx_image);
 	fx_restore(vcpu->host_fx_image);
 
-#ifndef __x86_64__
+#ifndef CONFIG_X86_64
 	asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
 #endif
 
@@ -1855,7 +1868,7 @@ again:
 		 */
 		local_irq_disable();
 		load_gs(gs_sel);
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 		wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
 #endif
 		local_irq_enable();
@@ -1965,7 +1978,7 @@ static struct kvm_arch_ops vmx_arch_ops = {
 	.set_cr0_no_modeswitch = vmx_set_cr0_no_modeswitch,
 	.set_cr3 = vmx_set_cr3,
 	.set_cr4 = vmx_set_cr4,
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	.set_efer = vmx_set_efer,
 #endif
 	.get_idt = vmx_get_idt,
@@ -1989,8 +2002,7 @@ static struct kvm_arch_ops vmx_arch_ops = {
 
 static int __init vmx_init(void)
 {
-	kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
-	return 0;
+	return kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
 }
 
 static void __exit vmx_exit(void)
diff --git a/drivers/kvm/x86_emulate.c b/drivers/kvm/x86_emulate.c
index 7e838bf0592d..1bff3e925fda 100644
--- a/drivers/kvm/x86_emulate.c
+++ b/drivers/kvm/x86_emulate.c
@@ -238,7 +238,7 @@ struct operand {
  * any modified flags.
  */
 
-#if defined(__x86_64__)
+#if defined(CONFIG_X86_64)
 #define _LO32 "k"		/* force 32-bit operand */
 #define _STK  "%%rsp"		/* stack pointer */
 #elif defined(__i386__)
@@ -385,7 +385,7 @@ struct operand {
 	} while (0)
 
 /* Emulate an instruction with quadword operands (x86/64 only). */
-#if defined(__x86_64__)
+#if defined(CONFIG_X86_64)
 #define __emulate_2op_8byte(_op, _src, _dst, _eflags, _qx, _qy) \
 	do {								\
 		__asm__ __volatile__ (					\
@@ -495,7 +495,7 @@ x86_emulate_memop(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	case X86EMUL_MODE_PROT32:
 		op_bytes = ad_bytes = 4;
 		break;
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	case X86EMUL_MODE_PROT64:
 		op_bytes = 4;
 		ad_bytes = 8;
@@ -1341,7 +1341,7 @@ twobyte_special_insn:
 			}
 			break;
 		}
-#elif defined(__x86_64__)
+#elif defined(CONFIG_X86_64)
 		{
 			unsigned long old, new;
 			if ((rc = ops->read_emulated(cr2, &old, 8, ctxt)) != 0)
diff --git a/drivers/kvm/x86_emulate.h b/drivers/kvm/x86_emulate.h
index 658b58de30fc..5d41bd55125e 100644
--- a/drivers/kvm/x86_emulate.h
+++ b/drivers/kvm/x86_emulate.h
@@ -162,7 +162,7 @@ struct x86_emulate_ctxt {
 /* Host execution mode. */
 #if defined(__i386__)
 #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32
-#elif defined(__x86_64__)
+#elif defined(CONFIG_X86_64)
 #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
 #endif