Diffstat (limited to 'arch/x86/kvm/lapic.c')
-rw-r--r-- | arch/x86/kvm/lapic.c | 227
1 file changed, 146 insertions, 81 deletions
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 9322e6340a74..80a2020c4db4 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -68,6 +68,39 @@ static bool lapic_timer_advance_dynamic __read_mostly;
 /* step-by-step approximation to mitigate fluctuation */
 #define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8
 
+static inline void __kvm_lapic_set_reg(char *regs, int reg_off, u32 val)
+{
+	*((u32 *) (regs + reg_off)) = val;
+}
+
+static inline void kvm_lapic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
+{
+	__kvm_lapic_set_reg(apic->regs, reg_off, val);
+}
+
+static __always_inline u64 __kvm_lapic_get_reg64(char *regs, int reg)
+{
+	BUILD_BUG_ON(reg != APIC_ICR);
+	return *((u64 *) (regs + reg));
+}
+
+static __always_inline u64 kvm_lapic_get_reg64(struct kvm_lapic *apic, int reg)
+{
+	return __kvm_lapic_get_reg64(apic->regs, reg);
+}
+
+static __always_inline void __kvm_lapic_set_reg64(char *regs, int reg, u64 val)
+{
+	BUILD_BUG_ON(reg != APIC_ICR);
+	*((u64 *) (regs + reg)) = val;
+}
+
+static __always_inline void kvm_lapic_set_reg64(struct kvm_lapic *apic,
+						int reg, u64 val)
+{
+	__kvm_lapic_set_reg64(apic->regs, reg, val);
+}
+
 static inline int apic_test_vector(int vec, void *bitmap)
 {
 	return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
@@ -113,7 +146,8 @@ static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
 
 static bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
 {
-	return pi_inject_timer && kvm_vcpu_apicv_active(vcpu);
+	return pi_inject_timer && kvm_vcpu_apicv_active(vcpu) &&
+	       (kvm_mwait_in_guest(vcpu->kvm) || kvm_hlt_in_guest(vcpu->kvm));
 }
 
 bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu)
@@ -491,8 +525,7 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
 	if (unlikely(vcpu->arch.apicv_active)) {
 		/* need to update RVI */
 		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
-		static_call(kvm_x86_hwapic_irr_update)(vcpu,
-				apic_find_highest_irr(apic));
+		static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
 	} else {
 		apic->irr_pending = false;
 		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
@@ -522,7 +555,7 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
 	 * just set SVI.
 	 */
 	if (unlikely(vcpu->arch.apicv_active))
-		static_call(kvm_x86_hwapic_isr_update)(vcpu, vec);
+		static_call_cond(kvm_x86_hwapic_isr_update)(vcpu, vec);
 	else {
 		++apic->isr_count;
 		BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
@@ -570,8 +603,7 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
 	 * and must be left alone.
 	 */
 	if (unlikely(vcpu->arch.apicv_active))
-		static_call(kvm_x86_hwapic_isr_update)(vcpu,
-				apic_find_highest_isr(apic));
+		static_call_cond(kvm_x86_hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
 	else {
 		--apic->isr_count;
 		BUG_ON(apic->isr_count < 0);
@@ -1276,6 +1308,9 @@ void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
 {
 	struct kvm_lapic_irq irq;
 
+	/* KVM has no delay and should always clear the BUSY/PENDING flag. */
+	WARN_ON_ONCE(icr_low & APIC_ICR_BUSY);
+
 	irq.vector = icr_low & APIC_VECTOR_MASK;
 	irq.delivery_mode = icr_low & APIC_MODE_MASK;
 	irq.dest_mode = icr_low & APIC_DEST_MASK;
@@ -1292,6 +1327,7 @@ void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
 
 	kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
 }
+EXPORT_SYMBOL_GPL(kvm_apic_send_ipi);
 
 static u32 apic_get_tmcct(struct kvm_lapic *apic)
 {
@@ -1375,8 +1411,8 @@ static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
 #define APIC_REGS_MASK(first, count) \
 	(APIC_REG_MASK(first) * ((1ull << (count)) - 1))
 
-int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
-		       void *data)
+static int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
+			      void *data)
 {
 	unsigned char alignment = offset & 0xf;
 	u32 result;
@@ -1394,7 +1430,6 @@ int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
 		APIC_REGS_MASK(APIC_IRR, APIC_ISR_NR) |
 		APIC_REG_MASK(APIC_ESR) |
 		APIC_REG_MASK(APIC_ICR) |
-		APIC_REG_MASK(APIC_ICR2) |
 		APIC_REG_MASK(APIC_LVTT) |
 		APIC_REG_MASK(APIC_LVTTHMR) |
 		APIC_REG_MASK(APIC_LVTPC) |
@@ -1405,9 +1440,16 @@ int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
 		APIC_REG_MASK(APIC_TMCCT) |
 		APIC_REG_MASK(APIC_TDCR);
 
-	/* ARBPRI is not valid on x2APIC */
+	/*
+	 * ARBPRI and ICR2 are not valid in x2APIC mode.  WARN if KVM reads ICR
+	 * in x2APIC mode as it's an 8-byte register in x2APIC and needs to be
+	 * manually handled by the caller.
+	 */
 	if (!apic_x2apic_mode(apic))
-		valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI);
+		valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI) |
+				  APIC_REG_MASK(APIC_ICR2);
+	else
+		WARN_ON_ONCE(offset == APIC_ICR);
 
 	if (alignment + len > 4)
 		return 1;
@@ -1432,7 +1474,6 @@ int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
 	}
 	return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_lapic_reg_read);
 
 static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
 {
@@ -1993,7 +2034,7 @@ static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
 	}
 }
 
-int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
+static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
 {
 	int ret = 0;
 
@@ -2052,16 +2093,18 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
 		break;
 	}
 	case APIC_ICR:
+		WARN_ON_ONCE(apic_x2apic_mode(apic));
+
 		/* No delay here, so we always clear the pending bit */
-		val &= ~(1 << 12);
+		val &= ~APIC_ICR_BUSY;
 		kvm_apic_send_ipi(apic, val, kvm_lapic_get_reg(apic, APIC_ICR2));
 		kvm_lapic_set_reg(apic, APIC_ICR, val);
 		break;
-
 	case APIC_ICR2:
-		if (!apic_x2apic_mode(apic))
-			val &= 0xff000000;
-		kvm_lapic_set_reg(apic, APIC_ICR2, val);
+		if (apic_x2apic_mode(apic))
+			ret = 1;
+		else
+			kvm_lapic_set_reg(apic, APIC_ICR2, val & 0xff000000);
 		break;
 
 	case APIC_LVT0:
@@ -2121,10 +2164,9 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
 		break;
 
 	case APIC_SELF_IPI:
-		if (apic_x2apic_mode(apic)) {
-			kvm_lapic_reg_write(apic, APIC_ICR,
-					    APIC_DEST_SELF | (val & APIC_VECTOR_MASK));
-		} else
+		if (apic_x2apic_mode(apic))
+			kvm_apic_send_ipi(apic, APIC_DEST_SELF | (val & APIC_VECTOR_MASK), 0);
+		else
 			ret = 1;
 		break;
 	default:
@@ -2132,11 +2174,15 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
 		break;
 	}
 
+	/*
+	 * Recalculate APIC maps if necessary, e.g. if the software enable bit
+	 * was toggled, the APIC ID changed, etc...  The maps are marked dirty
+	 * on relevant changes, i.e. this is a nop for most writes.
+	 */
 	kvm_recalculate_apic_map(apic->vcpu->kvm);
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(kvm_lapic_reg_write);
 
 static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
 			    gpa_t address, int len, const void *data)
@@ -2180,12 +2226,7 @@ EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
 /* emulate APIC access in a trap manner */
 void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
 {
-	u32 val = 0;
-
-	/* hw has done the conditional check and inst decode */
-	offset &= 0xff0;
-
-	kvm_lapic_reg_read(vcpu->arch.apic, offset, 4, &val);
+	u32 val = kvm_lapic_get_reg(vcpu->arch.apic, offset);
 
 	/* TODO: optimize to just emulate side effect w/o one more write */
 	kvm_lapic_reg_write(vcpu->arch.apic, offset, val);
@@ -2242,10 +2283,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
 
 void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
-	struct kvm_lapic *apic = vcpu->arch.apic;
-
-	apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
-		     | (kvm_lapic_get_reg(apic, APIC_TASKPRI) & 4));
+	apic_set_tpr(vcpu->arch.apic, (cr8 & 0x0f) << 4);
 }
 
 u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
@@ -2287,7 +2325,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
 			kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
 
 	if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE))
-		static_call(kvm_x86_set_virtual_apic_mode)(vcpu);
+		static_call_cond(kvm_x86_set_virtual_apic_mode)(vcpu);
 
 	apic->base_address = apic->vcpu->arch.apic_base &
 			     MSR_IA32_APICBASE_BASE;
@@ -2356,8 +2394,12 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 	if (!apic_x2apic_mode(apic))
 		kvm_apic_set_ldr(apic, 0);
 	kvm_lapic_set_reg(apic, APIC_ESR, 0);
-	kvm_lapic_set_reg(apic, APIC_ICR, 0);
-	kvm_lapic_set_reg(apic, APIC_ICR2, 0);
+	if (!apic_x2apic_mode(apic)) {
+		kvm_lapic_set_reg(apic, APIC_ICR, 0);
+		kvm_lapic_set_reg(apic, APIC_ICR2, 0);
+	} else {
+		kvm_lapic_set_reg64(apic, APIC_ICR, 0);
+	}
 	kvm_lapic_set_reg(apic, APIC_TDCR, 0);
 	kvm_lapic_set_reg(apic, APIC_TMICT, 0);
 	for (i = 0; i < 8; i++) {
@@ -2373,9 +2415,9 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 	vcpu->arch.pv_eoi.msr_val = 0;
 	apic_update_ppr(apic);
 	if (vcpu->arch.apicv_active) {
-		static_call(kvm_x86_apicv_post_state_restore)(vcpu);
-		static_call(kvm_x86_hwapic_irr_update)(vcpu, -1);
-		static_call(kvm_x86_hwapic_isr_update)(vcpu, -1);
+		static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
+		static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, -1);
+		static_call_cond(kvm_x86_hwapic_isr_update)(vcpu, -1);
 	}
 
 	vcpu->arch.apic_arb_prio = 0;
@@ -2574,6 +2616,7 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
 	if (apic_x2apic_mode(vcpu->arch.apic)) {
 		u32 *id = (u32 *)(s->regs + APIC_ID);
 		u32 *ldr = (u32 *)(s->regs + APIC_LDR);
+		u64 icr;
 
 		if (vcpu->kvm->arch.x2apic_format) {
 			if (*id != vcpu->vcpu_id)
@@ -2585,9 +2628,21 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
 				*id <<= 24;
 		}
 
-		/* In x2APIC mode, the LDR is fixed and based on the id */
-		if (set)
+		/*
+		 * In x2APIC mode, the LDR is fixed and based on the id.  And
+		 * ICR is internally a single 64-bit register, but needs to be
+		 * split to ICR+ICR2 in userspace for backwards compatibility.
+		 */
+		if (set) {
 			*ldr = kvm_apic_calc_x2apic_ldr(*id);
+
+			icr = __kvm_lapic_get_reg(s->regs, APIC_ICR) |
+			      (u64)__kvm_lapic_get_reg(s->regs, APIC_ICR2) << 32;
+			__kvm_lapic_set_reg64(s->regs, APIC_ICR, icr);
+		} else {
+			icr = __kvm_lapic_get_reg64(s->regs, APIC_ICR);
+			__kvm_lapic_set_reg(s->regs, APIC_ICR2, icr >> 32);
+		}
 	}
 
 	return 0;
@@ -2638,11 +2693,9 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
 	kvm_apic_update_apicv(vcpu);
 	apic->highest_isr_cache = -1;
 	if (vcpu->arch.apicv_active) {
-		static_call(kvm_x86_apicv_post_state_restore)(vcpu);
-		static_call(kvm_x86_hwapic_irr_update)(vcpu,
-				apic_find_highest_irr(apic));
-		static_call(kvm_x86_hwapic_isr_update)(vcpu,
-				apic_find_highest_isr(apic));
+		static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
+		static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
+		static_call_cond(kvm_x86_hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
 	}
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	if (ioapic_in_kernel(vcpu->kvm))
@@ -2779,73 +2832,85 @@ int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
 	return 0;
 }
 
-int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data)
 {
-	struct kvm_lapic *apic = vcpu->arch.apic;
-	u32 reg = (msr - APIC_BASE_MSR) << 4;
+	data &= ~APIC_ICR_BUSY;
 
-	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
-		return 1;
+	kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32));
+	kvm_lapic_set_reg64(apic, APIC_ICR, data);
+	trace_kvm_apic_write(APIC_ICR, data);
+	return 0;
+}
+
+static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data)
+{
+	u32 low;
+
+	if (reg == APIC_ICR) {
+		*data = kvm_lapic_get_reg64(apic, APIC_ICR);
+		return 0;
+	}
 
-	if (reg == APIC_ICR2)
+	if (kvm_lapic_reg_read(apic, reg, 4, &low))
 		return 1;
 
-	/* if this is ICR write vector before command */
+	*data = low;
+
+	return 0;
+}
+
+static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data)
+{
+	/*
+	 * ICR is a 64-bit register in x2APIC mode (and Hyper-V PV vAPIC) and
	 * can be written as such, all other registers remain accessible only
+	 * through 32-bit reads/writes.
+	 */
 	if (reg == APIC_ICR)
-		kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
+		return kvm_x2apic_icr_write(apic, data);
+
 	return kvm_lapic_reg_write(apic, reg, (u32)data);
 }
 
-int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
+int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
-	u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;
+	u32 reg = (msr - APIC_BASE_MSR) << 4;
 
 	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
 		return 1;
 
-	if (reg == APIC_DFR || reg == APIC_ICR2)
-		return 1;
+	return kvm_lapic_msr_write(apic, reg, data);
+}
 
-	if (kvm_lapic_reg_read(apic, reg, 4, &low))
+int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
+{
+	struct kvm_lapic *apic = vcpu->arch.apic;
+	u32 reg = (msr - APIC_BASE_MSR) << 4;
+
+	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
 		return 1;
 
-	if (reg == APIC_ICR)
-		kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);
-	*data = (((u64)high) << 32) | low;
+	if (reg == APIC_DFR)
+		return 1;
 
-	return 0;
+	return kvm_lapic_msr_read(apic, reg, data);
 }
 
 int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
 {
-	struct kvm_lapic *apic = vcpu->arch.apic;
-
 	if (!lapic_in_kernel(vcpu))
 		return 1;
 
-	/* if this is ICR write vector before command */
-	if (reg == APIC_ICR)
-		kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
-	return kvm_lapic_reg_write(apic, reg, (u32)data);
+	return kvm_lapic_msr_write(vcpu->arch.apic, reg, data);
 }
 
 int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
 {
-	struct kvm_lapic *apic = vcpu->arch.apic;
-	u32 low, high = 0;
-
 	if (!lapic_in_kernel(vcpu))
 		return 1;
 
-	if (kvm_lapic_reg_read(apic, reg, 4, &low))
-		return 1;
-	if (reg == APIC_ICR)
-		kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);
-
-	*data = (((u64)high) << 32) | low;
-
-	return 0;
+	return kvm_lapic_msr_read(vcpu->arch.apic, reg, data);
 }
 
 int kvm_lapic_set_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
@@ -2933,7 +2998,7 @@ int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
 		/* evaluate pending_events before reading the vector */
 		smp_rmb();
 		sipi_vector = apic->sipi_vector;
-		kvm_x86_ops.vcpu_deliver_sipi_vector(vcpu, sipi_vector);
+		static_call(kvm_x86_vcpu_deliver_sipi_vector)(vcpu, sipi_vector);
 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 	}
 }
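
A note on the kvm_apic_state_fixup() hunk above: in x2APIC mode KVM now tracks ICR as a single 64-bit register internally, while the userspace-visible kvm_lapic_state layout keeps the legacy pair of 32-bit ICR and ICR2 registers, so the fixup merges the pair on restore ("set") and copies the high half back into ICR2 on save. The following standalone C sketch (hypothetical helper names, not kernel code) illustrates just that merge/split arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Merge the userspace-visible 32-bit ICR/ICR2 pair into a 64-bit ICR. */
static uint64_t icr_merge(uint32_t icr, uint32_t icr2)
{
	return (uint64_t)icr | ((uint64_t)icr2 << 32);
}

/* Split a 64-bit ICR back into the legacy ICR/ICR2 pair. */
static void icr_split(uint64_t icr64, uint32_t *icr, uint32_t *icr2)
{
	*icr = (uint32_t)icr64;
	*icr2 = (uint32_t)(icr64 >> 32);
}

int main(void)
{
	/* Destination APIC ID 3 in the high half, vector 0xfe in the low. */
	uint64_t icr64 = ((uint64_t)3 << 32) | 0xfe;
	uint32_t icr, icr2;

	icr_split(icr64, &icr, &icr2);
	printf("ICR=%#x ICR2=%#x\n", icr, icr2);

	/* The round trip is lossless: the pair carries exactly 64 bits. */
	printf("merged=%#llx\n", (unsigned long long)icr_merge(icr, icr2));
	return 0;
}

Because the conversion is pure bit layout, state saved by an older userspace as an ICR/ICR2 pair restores cleanly into the new 64-bit representation, and vice versa.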