Diffstat (limited to 'arch/x86/kvm')
 arch/x86/kvm/cpuid.c      |  2 +-
 arch/x86/kvm/emulate.c    |  2 +-
 arch/x86/kvm/hyperv.c     |  2 +-
 arch/x86/kvm/irq_comm.c   |  2 +-
 arch/x86/kvm/lapic.c      |  6 +++---
 arch/x86/kvm/mmu/mmu.c    |  2 +-
 arch/x86/kvm/svm/nested.c | 47 +++++++++++++++++++++++++++++++---------------
 arch/x86/kvm/svm/svm.c    |  4 +++-
 arch/x86/kvm/vmx/vmx.c    | 12 ++++++------
 arch/x86/kvm/x86.c        | 11 ++++-------
 10 files changed, 54 insertions(+), 36 deletions(-)
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 3fd6eec202d7..7456f9ad424b 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -371,7 +371,7 @@ void kvm_set_cpu_caps(void)
 		F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
 		F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
 		F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) |
-		F(SERIALIZE)
+		F(SERIALIZE) | F(TSXLDTRK)
 	);
 
 	/* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 1d450d7710d6..2f6510de6b0c 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -3028,7 +3028,7 @@ static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
 	case 0xa4:	/* movsb */
 	case 0xa5:	/* movsd/w */
 		*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
-		/* fall through */
+		fallthrough;
 	case 0xaa:	/* stosb */
 	case 0xab:	/* stosd/w */
 		*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 814d3aee5cef..1d330564eed8 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1779,7 +1779,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
 		ret = kvm_hvcall_signal_event(vcpu, fast, ingpa);
 		if (ret != HV_STATUS_INVALID_PORT_ID)
 			break;
-		/* fall through - maybe userspace knows this conn_id. */
+		fallthrough;	/* maybe userspace knows this conn_id */
 	case HVCALL_POST_MESSAGE:
 		/* don't bother userspace if it has no way to handle it */
 		if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) {
diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
index c47d2acec529..4aa1c2e00e2a 100644
--- a/arch/x86/kvm/irq_comm.c
+++ b/arch/x86/kvm/irq_comm.c
@@ -285,7 +285,7 @@ int kvm_set_routing_entry(struct kvm *kvm,
 		switch (ue->u.irqchip.irqchip) {
 		case KVM_IRQCHIP_PIC_SLAVE:
 			e->irqchip.pin += PIC_NUM_PINS / 2;
-			/* fall through */
+			fallthrough;
 		case KVM_IRQCHIP_PIC_MASTER:
 			if (ue->u.irqchip.pin >= PIC_NUM_PINS / 2)
 				return -EINVAL;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 5ccbee7165a2..35cca2e0c802 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1053,7 +1053,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 	switch (delivery_mode) {
 	case APIC_DM_LOWEST:
 		vcpu->arch.apic_arb_prio++;
-		/* fall through */
+		fallthrough;
 	case APIC_DM_FIXED:
 		if (unlikely(trig_mode && !level))
 			break;
@@ -1341,7 +1341,7 @@ static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
 		break;
 	case APIC_TASKPRI:
 		report_tpr_access(apic, false);
-		/* fall thru */
+		fallthrough;
 	default:
 		val = kvm_lapic_get_reg(apic, offset);
 		break;
@@ -2027,7 +2027,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
 
 	case APIC_LVT0:
 		apic_manage_nmi_watchdog(apic, val);
-		/* fall through */
+		fallthrough;
 	case APIC_LVTTHMR:
 	case APIC_LVTPC:
 	case APIC_LVT1:
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 76c5826e29a2..71aa3da2a0b7 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4422,7 +4422,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 			rsvd_bits(maxphyaddr, 51);
 		rsvd_check->rsvd_bits_mask[1][4] =
 			rsvd_check->rsvd_bits_mask[0][4];
-		/* fall through */
+		fallthrough;
 	case PT64_ROOT_4LEVEL:
 		rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd |
 			nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index e90bc436f584..598a769f1961 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -1062,10 +1062,14 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb *hsave = svm->nested.hsave;
 	struct vmcb __user *user_vmcb = (struct vmcb __user *)
 		&user_kvm_nested_state->data.svm[0];
-	struct vmcb_control_area ctl;
-	struct vmcb_save_area save;
+	struct vmcb_control_area *ctl;
+	struct vmcb_save_area *save;
+	int ret;
 	u32 cr0;
 
+	BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
+		     KVM_STATE_NESTED_SVM_VMCB_SIZE);
+
 	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
 		return -EINVAL;
@@ -1097,13 +1101,22 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
 	if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
 		return -EINVAL;
 	if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
 		return -EINVAL;
-	if (copy_from_user(&ctl, &user_vmcb->control, sizeof(ctl)))
-		return -EFAULT;
-	if (copy_from_user(&save, &user_vmcb->save, sizeof(save)))
-		return -EFAULT;
-	if (!nested_vmcb_check_controls(&ctl))
-		return -EINVAL;
+	ret = -ENOMEM;
+	ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
+	save = kzalloc(sizeof(*save), GFP_KERNEL);
+	if (!ctl || !save)
+		goto out_free;
+
+	ret = -EFAULT;
+	if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
+		goto out_free;
+	if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
+		goto out_free;
+
+	ret = -EINVAL;
+	if (!nested_vmcb_check_controls(ctl))
+		goto out_free;
 
 	/*
 	 * Processor state contains L2 state.  Check that it is
@@ -1111,15 +1124,15 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
 	 */
 	cr0 = kvm_read_cr0(vcpu);
 	if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
-		return -EINVAL;
+		goto out_free;
 
 	/*
 	 * Validate host state saved from before VMRUN (see
 	 * nested_svm_check_permissions).
 	 * TODO: validate reserved bits for all saved state.
 	 */
-	if (!(save.cr0 & X86_CR0_PG))
-		return -EINVAL;
+	if (!(save->cr0 & X86_CR0_PG))
+		goto out_free;
 
 	/*
 	 * All checks done, we can enter guest mode.  L1 control fields
@@ -1128,10 +1141,10 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
 	 * contains saved L1 state.
 	 */
 	copy_vmcb_control_area(&hsave->control, &svm->vmcb->control);
-	hsave->save = save;
+	hsave->save = *save;
 
 	svm->nested.vmcb = kvm_state->hdr.svm.vmcb_pa;
-	load_nested_vmcb_control(svm, &ctl);
+	load_nested_vmcb_control(svm, ctl);
 	nested_prepare_vmcb_control(svm);
 
 	if (!nested_svm_vmrun_msrpm(svm))
@@ -1139,7 +1152,13 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
 
 out_set_gif:
 	svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
-	return 0;
+
+	ret = 0;
+out_free:
+	kfree(save);
+	kfree(ctl);
+
+	return ret;
 }
 
 struct kvm_x86_nested_ops svm_nested_ops = {
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 5764b87379cf..c91acabf18d0 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2668,7 +2668,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	case MSR_IA32_APICBASE:
 		if (kvm_vcpu_apicv_active(vcpu))
 			avic_update_vapic_bar(to_svm(vcpu), data);
-		/* Fall through */
+		fallthrough;
 	default:
 		return kvm_set_msr_common(vcpu, msr);
 	}
@@ -4170,6 +4170,8 @@ static struct kvm_x86_init_ops svm_init_ops __initdata = {
 
 static int __init svm_init(void)
 {
+	__unused_size_checks();
+
 	return kvm_init(&svm_init_ops, sizeof(struct vcpu_svm),
 			__alignof__(struct vcpu_svm), THIS_MODULE);
 }
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 75cd720c9e8c..8646a797b7a8 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4654,7 +4654,7 @@ static bool rmode_exception(struct kvm_vcpu *vcpu, int vec)
 			vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
 			return false;
-		/* fall through */
+		fallthrough;
 	case DB_VECTOR:
 		return !(vcpu->guest_debug &
 			 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP));
@@ -4827,7 +4827,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
 		}
 		kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
 		kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
-		/* fall through */
+		fallthrough;
 	case BP_VECTOR:
 		/*
 		 * Update instruction length as we may reinject #BP from
@@ -5257,7 +5257,7 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
 			error_code =
 				vmcs_read32(IDT_VECTORING_ERROR_CODE);
 		}
-		/* fall through */
+		fallthrough;
 	case INTR_TYPE_SOFT_EXCEPTION:
 		kvm_clear_exception_queue(vcpu);
 		break;
@@ -5610,7 +5610,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
 		 * global flush. If needed, we could optimize this later by
 		 * keeping track of global entries in shadow page tables.
 		 */
-		/* fall-through */
+		fallthrough;
 	case INVPCID_TYPE_ALL_INCL_GLOBAL:
 		kvm_mmu_unload(vcpu);
 		return kvm_skip_emulated_instruction(vcpu);
@@ -6579,7 +6579,7 @@ static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
 		break;
 	case INTR_TYPE_SOFT_EXCEPTION:
 		vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
-		/* fall through */
+		fallthrough;
 	case INTR_TYPE_HARD_EXCEPTION:
 		if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
 			u32 err = vmcs_read32(error_code_field);
@@ -6589,7 +6589,7 @@ static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
 		break;
 	case INTR_TYPE_SOFT_INTR:
 		vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
-		/* fall through */
+		fallthrough;
 	case INTR_TYPE_EXT_INTR:
 		kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR);
 		break;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e3de0fe5af37..1994602a0851 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1116,14 +1116,12 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
 		vcpu->arch.eff_db[dr] = val;
 		break;
 	case 4:
-		/* fall through */
 	case 6:
 		if (!kvm_dr6_valid(val))
 			return -1; /* #GP */
 		vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
 		break;
 	case 5:
-		/* fall through */
 	default: /* 7 */
 		if (!kvm_dr7_valid(val))
 			return -1; /* #GP */
@@ -1154,12 +1152,10 @@ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
 		*val = vcpu->arch.db[array_index_nospec(dr, size)];
 		break;
 	case 4:
-		/* fall through */
 	case 6:
 		*val = vcpu->arch.dr6;
 		break;
 	case 5:
-		/* fall through */
 	default: /* 7 */
 		*val = vcpu->arch.dr7;
 		break;
@@ -3051,7 +3047,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
 	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
 	case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
-		pr = true; /* fall through */
+		pr = true;
+		fallthrough;
 	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
 	case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
 		if (kvm_pmu_is_valid_msr(vcpu, msr))
@@ -4362,7 +4359,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 	case KVM_CAP_HYPERV_SYNIC2:
 		if (cap->args[0])
 			return -EINVAL;
-		/* fall through */
+		fallthrough;
 
 	case KVM_CAP_HYPERV_SYNIC:
 		if (!irqchip_in_kernel(vcpu->kvm))
@@ -8675,7 +8672,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
 			vcpu->arch.pv.pv_unhalted = false;
 			vcpu->arch.mp_state =
 				KVM_MP_STATE_RUNNABLE;
-			/* fall through */
+			fallthrough;
 		case KVM_MP_STATE_RUNNABLE:
 			vcpu->arch.apf.halted = false;
 			break;
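
Most of the churn above is mechanical: the ad-hoc /* fall through */ comments are replaced by the fallthrough pseudo-keyword, which compilers can check with -Wimplicit-fallthrough. A minimal sketch of the idea, assuming a gcc/clang toolchain (simplified; the kernel's actual definition lives in include/linux/compiler_attributes.h and carries more guards):

#ifndef __has_attribute
# define __has_attribute(x) 0		/* compilers without __has_attribute */
#endif

#if __has_attribute(__fallthrough__)
# define fallthrough	__attribute__((__fallthrough__))
#else
# define fallthrough	do {} while (0)	/* fallthrough */
#endif

int classify(int c)
{
	int n = 0;

	switch (c) {
	case 0:
		n++;
		fallthrough;	/* deliberate: case 0 also runs case 1 */
	case 1:
		n++;
		break;
	default:
		n = -1;
		break;
	}
	return n;
}

Unlike the comment convention, a fallthrough; that is not immediately followed by a case or default label is diagnosed by the compiler rather than only by static checkers.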
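The one non-mechanical change is in svm_set_nested_state(): struct vmcb_control_area and struct vmcb_save_area move from the kernel stack to the heap, with every failure path funneled through a single out_free label so neither buffer can leak. A standalone sketch of that allocate/validate/free idiom, with hypothetical names (not the KVM code itself):

#include <linux/slab.h>
#include <linux/uaccess.h>

/* Allocate both buffers up front, then route every failure through
 * one label. kfree(NULL) is a no-op, so a partially failed allocation
 * is cleaned up safely by the same exit path.
 */
static int copy_pair_from_user(const void __user *ua, const void __user *ub,
			       size_t len_a, size_t len_b)
{
	void *a, *b;
	int ret;

	ret = -ENOMEM;
	a = kzalloc(len_a, GFP_KERNEL);
	b = kzalloc(len_b, GFP_KERNEL);
	if (!a || !b)
		goto out_free;

	ret = -EFAULT;
	if (copy_from_user(a, ua, len_a))
		goto out_free;
	if (copy_from_user(b, ub, len_b))
		goto out_free;

	ret = 0;	/* ... consume a and b here ... */

out_free:
	kfree(b);
	kfree(a);
	return ret;
}

Keeping the two structures off the stack matters because kernel stacks on x86-64 are small and ioctl paths like this one can already sit fairly deep in the call chain.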
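The new BUILD_BUG_ON() pins the ABI assumption those copies rely on: both structures are read out of the fixed-size data.svm[] blob, so together they must fit in KVM_STATE_NESTED_SVM_VMCB_SIZE, and a violation now breaks the build instead of silently reading beyond the user-supplied region. A toy version of the underlying trick (the kernel's real macro, in <linux/build_bug.h>, additionally produces a readable error message):

/* A negative array size is a compile error, so this expression fails
 * to compile exactly when cond is true, and costs nothing at run time.
 */
#define MY_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

struct hdr { unsigned int magic; unsigned int len; };

static inline void hdr_size_checks(void)
{
	MY_BUILD_BUG_ON(sizeof(struct hdr) > 64);	/* ok: 8 <= 64 */
}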