commit     66dcff86ba40eebb5133cccf450878f2bba102ef (patch)
tree       e7eb49ad9316989a529b00303d2dd2cffa61a7f5 /arch/s390
parent     91ed9e8a32d9a76adc59c83f8b40024076cf8a02 (diff)
parent     2c4aa55a6af070262cca425745e8e54310e96b8d (diff)
author     Linus Torvalds  2014-12-18 16:05:28 -0800
committer  Linus Torvalds  2014-12-18 16:05:28 -0800
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM update from Paolo Bonzini:
"3.19 changes for KVM:
- spring cleaning: removed support for IA64, and for hardware-
assisted virtualization on the PPC970
- ARM, PPC, s390 all had only small fixes
For x86:
- small performance improvements (though only on weird guests)
- usual round of hardware-compliancy fixes from Nadav
- APICv fixes
- XSAVES support for hosts and guests. XSAVES hosts were broken
because the (non-KVM) XSAVES patches inadvertently changed the KVM
userspace ABI whenever XSAVES was enabled; hence, this part is
going to stable. Guest support is just a matter of exposing the
feature and CPUID leaves support"
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (179 commits)
KVM: move APIC types to arch/x86/
KVM: PPC: Book3S: Enable in-kernel XICS emulation by default
KVM: PPC: Book3S HV: Improve H_CONFER implementation
KVM: PPC: Book3S HV: Fix endianness of instruction obtained from HEIR register
KVM: PPC: Book3S HV: Remove code for PPC970 processors
KVM: PPC: Book3S HV: Tracepoints for KVM HV guest interactions
KVM: PPC: Book3S HV: Simplify locking around stolen time calculations
arch: powerpc: kvm: book3s_paired_singles.c: Remove unused function
arch: powerpc: kvm: book3s_pr.c: Remove unused function
arch: powerpc: kvm: book3s.c: Remove some unused functions
arch: powerpc: kvm: book3s_32_mmu.c: Remove unused function
KVM: PPC: Book3S HV: Check wait conditions before sleeping in kvmppc_vcore_blocked
KVM: PPC: Book3S HV: ptes are big endian
KVM: PPC: Book3S HV: Fix inaccuracies in ICP emulation for H_IPI
KVM: PPC: Book3S HV: Fix KSM memory corruption
KVM: PPC: Book3S HV: Fix an issue where guest is paused on receiving HMI
KVM: PPC: Book3S HV: Fix computation of tlbie operand
KVM: PPC: Book3S HV: Add missing HPTE unlock
KVM: PPC: BookE: Improve irq inject tracepoint
arm/arm64: KVM: Require in-kernel vgic for the arch timers
...
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/include/asm/kvm_host.h  |   99
-rw-r--r--  arch/s390/include/asm/pgalloc.h   |    1
-rw-r--r--  arch/s390/include/asm/sigp.h      |    1
-rw-r--r--  arch/s390/kvm/gaccess.c           |   40
-rw-r--r--  arch/s390/kvm/intercept.c         |   20
-rw-r--r--  arch/s390/kvm/interrupt.c         | 1044
-rw-r--r--  arch/s390/kvm/kvm-s390.c          |   22
-rw-r--r--  arch/s390/kvm/kvm-s390.h          |   11
-rw-r--r--  arch/s390/kvm/priv.c              |   95
-rw-r--r--  arch/s390/kvm/sigp.c              |  305
-rw-r--r--  arch/s390/mm/pgtable.c            |   41
11 files changed, 1092 insertions(+), 587 deletions(-)
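The bulk of the s390 churn below replaces the per-VCPU linked list of struct kvm_s390_interrupt_info with a fixed set of pending bits plus a typed payload area. As a minimal user-space sketch (not kernel code; the struct and helper here are reduced stand-ins for the patch's kvm_s390_local_interrupt and __inject_prog):

    #include <stdio.h>

    /* Priority-ordered pending-interrupt bits, mirroring enum irq_types. */
    enum { IRQ_PEND_MCHK_EX, IRQ_PEND_SVC, IRQ_PEND_PROG, IRQ_PEND_COUNT };

    struct pgm_payload { unsigned short code; };

    /* One payload slot per interrupt class; the bitmap says which are armed. */
    struct local_int {
        unsigned long pending_irqs;   /* bit n set => irq type n pending */
        struct pgm_payload pgm;       /* payload for IRQ_PEND_PROG */
    };

    static void inject_prog(struct local_int *li, unsigned short code)
    {
        li->pgm.code = code;          /* stash payload first ... */
        li->pending_irqs |= 1UL << IRQ_PEND_PROG;  /* ... then arm the bit */
    }

    int main(void)
    {
        struct local_int li = { 0 };
        inject_prog(&li, 0x0006);
        printf("pending=%#lx pgm=%#x\n", li.pending_irqs, li.pgm.code);
        return 0;
    }

A key design consequence: injecting the same interrupt class twice overwrites the payload instead of queueing, which is why classes that genuinely coalesce (SIGP emergency) grow their own bitmap in the patch.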
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 2175f911a73a..9cba74d5d853 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -123,7 +123,7 @@ struct kvm_s390_sie_block {
 #define ICPT_PARTEXEC 0x38
 #define ICPT_IOINST 0x40
     __u8    icptcode;       /* 0x0050 */
-    __u8    reserved51;     /* 0x0051 */
+    __u8    icptstatus;     /* 0x0051 */
     __u16   ihcpu;          /* 0x0052 */
     __u8    reserved54[2];  /* 0x0054 */
     __u16   ipa;            /* 0x0056 */
@@ -226,10 +226,17 @@ struct kvm_vcpu_stat {
     u32 instruction_sigp_sense_running;
     u32 instruction_sigp_external_call;
     u32 instruction_sigp_emergency;
+    u32 instruction_sigp_cond_emergency;
+    u32 instruction_sigp_start;
     u32 instruction_sigp_stop;
+    u32 instruction_sigp_stop_store_status;
+    u32 instruction_sigp_store_status;
     u32 instruction_sigp_arch;
     u32 instruction_sigp_prefix;
     u32 instruction_sigp_restart;
+    u32 instruction_sigp_init_cpu_reset;
+    u32 instruction_sigp_cpu_reset;
+    u32 instruction_sigp_unknown;
     u32 diagnose_10;
     u32 diagnose_44;
     u32 diagnose_9c;
@@ -288,6 +295,79 @@ struct kvm_vcpu_stat {
 #define PGM_PER                 0x80
 #define PGM_CRYPTO_OPERATION    0x119
 
+/* irq types in order of priority */
+enum irq_types {
+    IRQ_PEND_MCHK_EX = 0,
+    IRQ_PEND_SVC,
+    IRQ_PEND_PROG,
+    IRQ_PEND_MCHK_REP,
+    IRQ_PEND_EXT_IRQ_KEY,
+    IRQ_PEND_EXT_MALFUNC,
+    IRQ_PEND_EXT_EMERGENCY,
+    IRQ_PEND_EXT_EXTERNAL,
+    IRQ_PEND_EXT_CLOCK_COMP,
+    IRQ_PEND_EXT_CPU_TIMER,
+    IRQ_PEND_EXT_TIMING,
+    IRQ_PEND_EXT_SERVICE,
+    IRQ_PEND_EXT_HOST,
+    IRQ_PEND_PFAULT_INIT,
+    IRQ_PEND_PFAULT_DONE,
+    IRQ_PEND_VIRTIO,
+    IRQ_PEND_IO_ISC_0,
+    IRQ_PEND_IO_ISC_1,
+    IRQ_PEND_IO_ISC_2,
+    IRQ_PEND_IO_ISC_3,
+    IRQ_PEND_IO_ISC_4,
+    IRQ_PEND_IO_ISC_5,
+    IRQ_PEND_IO_ISC_6,
+    IRQ_PEND_IO_ISC_7,
+    IRQ_PEND_SIGP_STOP,
+    IRQ_PEND_RESTART,
+    IRQ_PEND_SET_PREFIX,
+    IRQ_PEND_COUNT
+};
+
+/*
+ * Repressible (non-floating) machine check interrupts
+ * subclass bits in MCIC
+ */
+#define MCHK_EXTD_BIT 58
+#define MCHK_DEGR_BIT 56
+#define MCHK_WARN_BIT 55
+#define MCHK_REP_MASK ((1UL << MCHK_DEGR_BIT) | \
+                       (1UL << MCHK_EXTD_BIT) | \
+                       (1UL << MCHK_WARN_BIT))
+
+/* Exigent machine check interrupts subclass bits in MCIC */
+#define MCHK_SD_BIT 63
+#define MCHK_PD_BIT 62
+#define MCHK_EX_MASK ((1UL << MCHK_SD_BIT) | (1UL << MCHK_PD_BIT))
+
+#define IRQ_PEND_EXT_MASK ((1UL << IRQ_PEND_EXT_IRQ_KEY)    | \
+                           (1UL << IRQ_PEND_EXT_CLOCK_COMP) | \
+                           (1UL << IRQ_PEND_EXT_CPU_TIMER)  | \
+                           (1UL << IRQ_PEND_EXT_MALFUNC)    | \
+                           (1UL << IRQ_PEND_EXT_EMERGENCY)  | \
+                           (1UL << IRQ_PEND_EXT_EXTERNAL)   | \
+                           (1UL << IRQ_PEND_EXT_TIMING)     | \
+                           (1UL << IRQ_PEND_EXT_HOST)       | \
+                           (1UL << IRQ_PEND_EXT_SERVICE)    | \
+                           (1UL << IRQ_PEND_VIRTIO)         | \
+                           (1UL << IRQ_PEND_PFAULT_INIT)    | \
+                           (1UL << IRQ_PEND_PFAULT_DONE))
+
+#define IRQ_PEND_IO_MASK ((1UL << IRQ_PEND_IO_ISC_0) | \
+                          (1UL << IRQ_PEND_IO_ISC_1) | \
+                          (1UL << IRQ_PEND_IO_ISC_2) | \
+                          (1UL << IRQ_PEND_IO_ISC_3) | \
+                          (1UL << IRQ_PEND_IO_ISC_4) | \
+                          (1UL << IRQ_PEND_IO_ISC_5) | \
+                          (1UL << IRQ_PEND_IO_ISC_6) | \
+                          (1UL << IRQ_PEND_IO_ISC_7))
+
+#define IRQ_PEND_MCHK_MASK ((1UL << IRQ_PEND_MCHK_REP) | \
+                            (1UL << IRQ_PEND_MCHK_EX))
+
 struct kvm_s390_interrupt_info {
     struct list_head list;
     u64 type;
@@ -306,14 +386,25 @@ struct kvm_s390_interrupt_info {
 #define ACTION_STORE_ON_STOP (1<<0)
 #define ACTION_STOP_ON_STOP  (1<<1)
 
+struct kvm_s390_irq_payload {
+    struct kvm_s390_io_info io;
+    struct kvm_s390_ext_info ext;
+    struct kvm_s390_pgm_info pgm;
+    struct kvm_s390_emerg_info emerg;
+    struct kvm_s390_extcall_info extcall;
+    struct kvm_s390_prefix_info prefix;
+    struct kvm_s390_mchk_info mchk;
+};
+
 struct kvm_s390_local_interrupt {
     spinlock_t lock;
-    struct list_head list;
-    atomic_t active;
     struct kvm_s390_float_interrupt *float_int;
     wait_queue_head_t *wq;
     atomic_t *cpuflags;
     unsigned int action_bits;
+    DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
+    struct kvm_s390_irq_payload irq;
+    unsigned long pending_irqs;
 };
 
 struct kvm_s390_float_interrupt {
@@ -434,6 +525,8 @@ struct kvm_arch{
     int user_cpu_state_ctrl;
     struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
     wait_queue_head_t ipte_wq;
+    int ipte_lock_count;
+    struct mutex ipte_mutex;
     spinlock_t start_stop_lock;
     struct kvm_s390_crypto crypto;
 };
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index e510b9460efa..3009c2ba46d2 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -24,6 +24,7 @@ void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
                           unsigned long key, bool nq);
+unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr);
 
 static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 {
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index 49576115dbb7..fad4ae23ece0 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -10,6 +10,7 @@
 #define SIGP_RESTART                  6
 #define SIGP_STOP_AND_STORE_STATUS    9
 #define SIGP_INITIAL_CPU_RESET       11
+#define SIGP_CPU_RESET               12
 #define SIGP_SET_PREFIX              13
 #define SIGP_STORE_STATUS_AT_ADDRESS 14
 #define SIGP_SET_ARCHITECTURE        18
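Because enum irq_types above is declared in priority order, picking the next interrupt to deliver reduces to a find-first-set over the pending mask. A portable sketch of that idea (first_bit is a stand-in for the kernel's find_first_bit):

    #include <stdio.h>

    enum { IRQ_PEND_MCHK_EX, IRQ_PEND_SVC, IRQ_PEND_PROG, IRQ_PEND_COUNT };

    /* Portable stand-in for the kernel's find_first_bit(). */
    static int first_bit(unsigned long mask, int nbits)
    {
        for (int i = 0; i < nbits; i++)
            if (mask & (1UL << i))
                return i;
        return nbits;             /* "none pending", like IRQ_PEND_COUNT */
    }

    int main(void)
    {
        unsigned long pending = (1UL << IRQ_PEND_PROG) | (1UL << IRQ_PEND_SVC);
        int t;

        /* Lower bit index == higher priority, so SVC is delivered first. */
        while ((t = first_bit(pending, IRQ_PEND_COUNT)) != IRQ_PEND_COUNT) {
            printf("deliver irq type %d\n", t);
            pending &= ~(1UL << t);
        }
        return 0;
    }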
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 0f961a1c64b3..8b9ccf02a2c5 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -207,8 +207,6 @@ union raddress {
     unsigned long pfra : 52; /* Page-Frame Real Address */
 };
 
-static int ipte_lock_count;
-static DEFINE_MUTEX(ipte_mutex);
 
 int ipte_lock_held(struct kvm_vcpu *vcpu)
 {
@@ -216,47 +214,51 @@ int ipte_lock_held(struct kvm_vcpu *vcpu)
     if (vcpu->arch.sie_block->eca & 1)
         return ic->kh != 0;
-    return ipte_lock_count != 0;
+    return vcpu->kvm->arch.ipte_lock_count != 0;
 }
 
 static void ipte_lock_simple(struct kvm_vcpu *vcpu)
 {
     union ipte_control old, new, *ic;
 
-    mutex_lock(&ipte_mutex);
-    ipte_lock_count++;
-    if (ipte_lock_count > 1)
+    mutex_lock(&vcpu->kvm->arch.ipte_mutex);
+    vcpu->kvm->arch.ipte_lock_count++;
+    if (vcpu->kvm->arch.ipte_lock_count > 1)
         goto out;
     ic = &vcpu->kvm->arch.sca->ipte_control;
     do {
-        old = ACCESS_ONCE(*ic);
+        old = *ic;
+        barrier();
         while (old.k) {
             cond_resched();
-            old = ACCESS_ONCE(*ic);
+            old = *ic;
+            barrier();
         }
         new = old;
         new.k = 1;
     } while (cmpxchg(&ic->val, old.val, new.val) != old.val);
 out:
-    mutex_unlock(&ipte_mutex);
+    mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
 }
 
 static void ipte_unlock_simple(struct kvm_vcpu *vcpu)
 {
     union ipte_control old, new, *ic;
 
-    mutex_lock(&ipte_mutex);
-    ipte_lock_count--;
-    if (ipte_lock_count)
+    mutex_lock(&vcpu->kvm->arch.ipte_mutex);
+    vcpu->kvm->arch.ipte_lock_count--;
+    if (vcpu->kvm->arch.ipte_lock_count)
         goto out;
     ic = &vcpu->kvm->arch.sca->ipte_control;
     do {
-        new = old = ACCESS_ONCE(*ic);
+        old = *ic;
+        barrier();
+        new = old;
         new.k = 0;
     } while (cmpxchg(&ic->val, old.val, new.val) != old.val);
     wake_up(&vcpu->kvm->arch.ipte_wq);
 out:
-    mutex_unlock(&ipte_mutex);
+    mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
 }
 
 static void ipte_lock_siif(struct kvm_vcpu *vcpu)
@@ -265,10 +267,12 @@ static void ipte_lock_siif(struct kvm_vcpu *vcpu)
     ic = &vcpu->kvm->arch.sca->ipte_control;
     do {
-        old = ACCESS_ONCE(*ic);
+        old = *ic;
+        barrier();
         while (old.kg) {
             cond_resched();
-            old = ACCESS_ONCE(*ic);
+            old = *ic;
+            barrier();
         }
         new = old;
         new.k = 1;
@@ -282,7 +286,9 @@ static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
     ic = &vcpu->kvm->arch.sca->ipte_control;
     do {
-        new = old = ACCESS_ONCE(*ic);
+        old = *ic;
+        barrier();
+        new = old;
         new.kh--;
         if (!new.kh)
             new.k = 0;
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index eaf46291d361..81c77ab8102e 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -38,6 +38,19 @@ static const intercept_handler_t instruction_handlers[256] = {
     [0xeb] = kvm_s390_handle_eb,
 };
 
+void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilc)
+{
+    struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
+
+    /* Use the length of the EXECUTE instruction if necessary */
+    if (sie_block->icptstatus & 1) {
+        ilc = (sie_block->icptstatus >> 4) & 0x6;
+        if (!ilc)
+            ilc = 4;
+    }
+    sie_block->gpsw.addr = __rewind_psw(sie_block->gpsw, ilc);
+}
+
 static int handle_noop(struct kvm_vcpu *vcpu)
 {
     switch (vcpu->arch.sie_block->icptcode) {
@@ -244,7 +257,7 @@ static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
 static int handle_external_interrupt(struct kvm_vcpu *vcpu)
 {
     u16 eic = vcpu->arch.sie_block->eic;
-    struct kvm_s390_interrupt irq;
+    struct kvm_s390_irq irq;
     psw_t newpsw;
     int rc;
@@ -269,7 +282,7 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
         if (kvm_s390_si_ext_call_pending(vcpu))
             return 0;
         irq.type = KVM_S390_INT_EXTERNAL_CALL;
-        irq.parm = vcpu->arch.sie_block->extcpuaddr;
+        irq.u.extcall.code = vcpu->arch.sie_block->extcpuaddr;
         break;
     default:
         return -EOPNOTSUPP;
@@ -288,7 +301,6 @@
  */
 static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
 {
-    psw_t *psw = &vcpu->arch.sie_block->gpsw;
     unsigned long srcaddr, dstaddr;
     int reg1, reg2, rc;
@@ -310,7 +322,7 @@
     if (rc != 0)
         return rc;
 
-    psw->addr = __rewind_psw(*psw, 4);
+    kvm_s390_rewind_psw(vcpu, 4);
 
     return 0;
 }
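ipte_lock_simple() above takes the guest-visible lock bit with a classic compare-and-swap retry loop. The same shape in portable C11 atomics, as a sketch only (the k bit is modelled as one bit of a 64-bit control word; IPTE_K is an invented stand-in):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define IPTE_K (1ULL << 63)   /* stand-in for ipte_control.k */

    static _Atomic uint64_t ipte_control;

    static void ipte_lock(void)
    {
        uint64_t old, new;
        do {
            old = atomic_load(&ipte_control);
            while (old & IPTE_K)                   /* someone holds k: spin */
                old = atomic_load(&ipte_control);  /* kernel cond_resched()s */
            new = old | IPTE_K;
            /* CAS fails if *ic changed under us; then retry from the top. */
        } while (!atomic_compare_exchange_weak(&ipte_control, &old, new));
    }

    static void ipte_unlock(void)
    {
        atomic_fetch_and(&ipte_control, ~IPTE_K);
    }

    int main(void)
    {
        ipte_lock();
        printf("held: %d\n", !!(atomic_load(&ipte_control) & IPTE_K));
        ipte_unlock();
        return 0;
    }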
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index a39838457f01..f00f31e66cd8 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -16,6 +16,7 @@
 #include <linux/mmu_context.h>
 #include <linux/signal.h>
 #include <linux/slab.h>
+#include <linux/bitmap.h>
 #include <asm/asm-offsets.h>
 #include <asm/uaccess.h>
 #include "kvm-s390.h"
@@ -27,8 +28,8 @@
 #define IOINT_CSSID_MASK 0x03fc0000
 #define IOINT_AI_MASK 0x04000000
 #define PFAULT_INIT 0x0600
-
-static int __must_check deliver_ckc_interrupt(struct kvm_vcpu *vcpu);
+#define PFAULT_DONE 0x0680
+#define VIRTIO_PARAM 0x0d00
 
 static int is_ioint(u64 type)
 {
@@ -136,6 +137,31 @@ static int __must_check __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
     return 0;
 }
 
+static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu)
+{
+    return vcpu->arch.local_int.pending_irqs;
+}
+
+static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu)
+{
+    unsigned long active_mask = pending_local_irqs(vcpu);
+
+    if (psw_extint_disabled(vcpu))
+        active_mask &= ~IRQ_PEND_EXT_MASK;
+    if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
+        __clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
+    if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
+        __clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
+    if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
+        __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
+    if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
+        __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
+    if (psw_mchk_disabled(vcpu))
+        active_mask &= ~IRQ_PEND_MCHK_MASK;
+
+    return active_mask;
+}
+
 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
 {
     atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
@@ -170,26 +196,45 @@ static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
     atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
 }
 
+static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
+{
+    if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK))
+        return;
+    if (psw_extint_disabled(vcpu))
+        __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
+    else
+        vcpu->arch.sie_block->lctl |= LCTL_CR0;
+}
+
+static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
+{
+    if (!(pending_local_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
+        return;
+    if (psw_mchk_disabled(vcpu))
+        vcpu->arch.sie_block->ictl |= ICTL_LPSW;
+    else
+        vcpu->arch.sie_block->lctl |= LCTL_CR14;
+}
+
+/* Set interception request for non-deliverable local interrupts */
+static void set_intercept_indicators_local(struct kvm_vcpu *vcpu)
+{
+    set_intercept_indicators_ext(vcpu);
+    set_intercept_indicators_mchk(vcpu);
+}
+
 static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
                                       struct kvm_s390_interrupt_info *inti)
 {
     switch (inti->type) {
-    case KVM_S390_INT_EXTERNAL_CALL:
-    case KVM_S390_INT_EMERGENCY:
     case KVM_S390_INT_SERVICE:
-    case KVM_S390_INT_PFAULT_INIT:
     case KVM_S390_INT_PFAULT_DONE:
     case KVM_S390_INT_VIRTIO:
-    case KVM_S390_INT_CLOCK_COMP:
-    case KVM_S390_INT_CPU_TIMER:
         if (psw_extint_disabled(vcpu))
             __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
         else
             vcpu->arch.sie_block->lctl |= LCTL_CR0;
         break;
-    case KVM_S390_SIGP_STOP:
-        __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
-        break;
     case KVM_S390_MCHK:
         if (psw_mchk_disabled(vcpu))
             vcpu->arch.sie_block->ictl |= ICTL_LPSW;
@@ -226,13 +271,236 @@ static u16 get_ilc(struct kvm_vcpu *vcpu)
     }
 }
 
-static int __must_check __deliver_prog_irq(struct kvm_vcpu *vcpu,
-                                           struct kvm_s390_pgm_info *pgm_info)
+static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
+{
+    struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+    int rc;
+
+    trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
+                                     0, 0);
+
+    rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
+                      (u16 *)__LC_EXT_INT_CODE);
+    rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
+    rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+                         &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+    rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+    clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
+    return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
+{
+    struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+    int rc;
+
+    trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
+                                     0, 0);
+
+    rc = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
+                      (u16 __user *)__LC_EXT_INT_CODE);
+    rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
+    rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+                         &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+    rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+    clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
+    return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
+{
+    struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+    struct kvm_s390_ext_info ext;
+    int rc;
+
+    spin_lock(&li->lock);
+    ext = li->irq.ext;
+    clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
+    li->irq.ext.ext_params2 = 0;
+    spin_unlock(&li->lock);
+
+    VCPU_EVENT(vcpu, 4, "interrupt: pfault init parm:%x,parm64:%llx",
+               0, ext.ext_params2);
+    trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+                                     KVM_S390_INT_PFAULT_INIT,
+                                     0, ext.ext_params2);
+
+    rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
+    rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
+    rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+                         &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+    rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+    rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
+    return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
+{
+    struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+    struct kvm_s390_mchk_info mchk;
+    int rc;
+
+    spin_lock(&li->lock);
+    mchk = li->irq.mchk;
+    /*
+     * If there was an exigent machine check pending, then any repressible
+     * machine checks that might have been pending are indicated along
+     * with it, so always clear both bits
+     */
+    clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
+    clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
+    memset(&li->irq.mchk, 0, sizeof(mchk));
+    spin_unlock(&li->lock);
+
+    VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
+               mchk.mcic);
+    trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
+                                     mchk.cr14, mchk.mcic);
+
+    rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
+    rc |= put_guest_lc(vcpu, mchk.mcic,
+                       (u64 __user *) __LC_MCCK_CODE);
+    rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
+                       (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
+    rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
+                         &mchk.fixed_logout, sizeof(mchk.fixed_logout));
+    rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
+                         &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+    rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
+                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+    return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
+{
+    struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+    int rc;
+
+    VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
+    vcpu->stat.deliver_restart_signal++;
+    trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
+
+    rc = write_guest_lc(vcpu,
+                        offsetof(struct _lowcore, restart_old_psw),
+                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+    rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
+                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+    clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
+    return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_stop(struct kvm_vcpu *vcpu)
+{
+    VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
+    vcpu->stat.deliver_stop_signal++;
+    trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_SIGP_STOP,
+                                     0, 0);
+
+    __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
+    clear_bit(IRQ_PEND_SIGP_STOP, &vcpu->arch.local_int.pending_irqs);
+    return 0;
+}
+
+static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
+{
+    struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+    struct kvm_s390_prefix_info prefix;
+
+    spin_lock(&li->lock);
+    prefix = li->irq.prefix;
+    li->irq.prefix.address = 0;
+    clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
+    spin_unlock(&li->lock);
+
+    VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", prefix.address);
+    vcpu->stat.deliver_prefix_signal++;
+    trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+                                     KVM_S390_SIGP_SET_PREFIX,
+                                     prefix.address, 0);
+
+    kvm_s390_set_prefix(vcpu, prefix.address);
+    return 0;
+}
+
+static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
+{
+    struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+    int rc;
+    int cpu_addr;
+
+    spin_lock(&li->lock);
+    cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
+    clear_bit(cpu_addr, li->sigp_emerg_pending);
+    if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
+        clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
+    spin_unlock(&li->lock);
+
+    VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
+    vcpu->stat.deliver_emergency_signal++;
+    trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
+                                     cpu_addr, 0);
+
+    rc = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
+                      (u16 *)__LC_EXT_INT_CODE);
+    rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
+    rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+                         &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+    rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+    return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
+{
+    struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+    struct kvm_s390_extcall_info extcall;
+    int rc;
+
+    spin_lock(&li->lock);
+    extcall = li->irq.extcall;
+    li->irq.extcall.code = 0;
+    clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
+    spin_unlock(&li->lock);
+
+    VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
+    vcpu->stat.deliver_external_call++;
+    trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+                                     KVM_S390_INT_EXTERNAL_CALL,
+                                     extcall.code, 0);
+
+    rc = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
+                      (u16 *)__LC_EXT_INT_CODE);
+    rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
+    rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+                         &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+    rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
+                        sizeof(psw_t));
+    return rc ? -EFAULT : 0;
+}
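deliverable_local_irqs() above filters the pending mask against the guest's PSW external mask and the per-subclass enable bits in control register 0. A condensed sketch of that filtering, using the same cr0 constants the patch uses (0x2000 external call, 0x4000 emergency):

    #include <stdio.h>

    enum { IRQ_PEND_EXT_EMERGENCY, IRQ_PEND_EXT_EXTERNAL, IRQ_PEND_COUNT };

    #define IRQ_PEND_EXT_MASK ((1UL << IRQ_PEND_EXT_EMERGENCY) | \
                               (1UL << IRQ_PEND_EXT_EXTERNAL))

    static unsigned long deliverable(unsigned long pending,
                                     int psw_ext_disabled, unsigned long cr0)
    {
        unsigned long active = pending;

        if (psw_ext_disabled)
            active &= ~IRQ_PEND_EXT_MASK;   /* PSW masks all externals */
        if (!(cr0 & 0x2000ul))
            active &= ~(1UL << IRQ_PEND_EXT_EXTERNAL);
        if (!(cr0 & 0x4000ul))
            active &= ~(1UL << IRQ_PEND_EXT_EMERGENCY);
        return active;
    }

    int main(void)
    {
        unsigned long pending = 1UL << IRQ_PEND_EXT_EMERGENCY;
        printf("%#lx\n", deliverable(pending, 0, 0x4000ul)); /* bit kept */
        printf("%#lx\n", deliverable(pending, 1, 0x4000ul)); /* -> 0 */
        return 0;
    }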
+static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
 {
+    struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+    struct kvm_s390_pgm_info pgm_info;
     int rc = 0;
     u16 ilc = get_ilc(vcpu);
 
-    switch (pgm_info->code & ~PGM_PER) {
+    spin_lock(&li->lock);
+    pgm_info = li->irq.pgm;
+    clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
+    memset(&li->irq.pgm, 0, sizeof(pgm_info));
+    spin_unlock(&li->lock);
+
+    VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
+               pgm_info.code, ilc);
+    vcpu->stat.deliver_program_int++;
+    trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
+                                     pgm_info.code, 0);
+
+    switch (pgm_info.code & ~PGM_PER) {
     case PGM_AFX_TRANSLATION:
     case PGM_ASX_TRANSLATION:
     case PGM_EX_TRANSLATION:
@@ -243,7 +511,7 @@
     case PGM_PRIMARY_AUTHORITY:
     case PGM_SECONDARY_AUTHORITY:
     case PGM_SPACE_SWITCH:
-        rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
+        rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                           (u64 *)__LC_TRANS_EXC_CODE);
         break;
     case PGM_ALEN_TRANSLATION:
@@ -252,7 +520,7 @@
     case PGM_ASTE_SEQUENCE:
     case PGM_ASTE_VALIDITY:
     case PGM_EXTENDED_AUTHORITY:
-        rc = put_guest_lc(vcpu, pgm_info->exc_access_id,
+        rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
                           (u8 *)__LC_EXC_ACCESS_ID);
         break;
     case PGM_ASCE_TYPE:
@@ -261,247 +529,208 @@
     case PGM_REGION_SECOND_TRANS:
     case PGM_REGION_THIRD_TRANS:
     case PGM_SEGMENT_TRANSLATION:
-        rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
+        rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                           (u64 *)__LC_TRANS_EXC_CODE);
-        rc |= put_guest_lc(vcpu, pgm_info->exc_access_id,
+        rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
                            (u8 *)__LC_EXC_ACCESS_ID);
-        rc |= put_guest_lc(vcpu, pgm_info->op_access_id,
+        rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
                            (u8 *)__LC_OP_ACCESS_ID);
         break;
     case PGM_MONITOR:
-        rc = put_guest_lc(vcpu, pgm_info->mon_class_nr,
-                          (u64 *)__LC_MON_CLASS_NR);
-        rc |= put_guest_lc(vcpu, pgm_info->mon_code,
+        rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
+                          (u16 *)__LC_MON_CLASS_NR);
+        rc |= put_guest_lc(vcpu, pgm_info.mon_code,
                            (u64 *)__LC_MON_CODE);
         break;
     case PGM_DATA:
-        rc = put_guest_lc(vcpu, pgm_info->data_exc_code,
+        rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
                           (u32 *)__LC_DATA_EXC_CODE);
         break;
     case PGM_PROTECTION:
-        rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
+        rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                           (u64 *)__LC_TRANS_EXC_CODE);
-        rc |= put_guest_lc(vcpu, pgm_info->exc_access_id,
+        rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
                            (u8 *)__LC_EXC_ACCESS_ID);
         break;
     }
 
-    if (pgm_info->code & PGM_PER) {
-        rc |= put_guest_lc(vcpu, pgm_info->per_code,
+    if (pgm_info.code & PGM_PER) {
+        rc |= put_guest_lc(vcpu, pgm_info.per_code,
                            (u8 *) __LC_PER_CODE);
-        rc |= put_guest_lc(vcpu, pgm_info->per_atmid,
+        rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
                            (u8 *)__LC_PER_ATMID);
-        rc |= put_guest_lc(vcpu, pgm_info->per_address,
+        rc |= put_guest_lc(vcpu, pgm_info.per_address,
                            (u64 *) __LC_PER_ADDRESS);
-        rc |= put_guest_lc(vcpu, pgm_info->per_access_id,
+        rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
                            (u8 *) __LC_PER_ACCESS_ID);
     }
 
     rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
-    rc |= put_guest_lc(vcpu, pgm_info->code,
+    rc |= put_guest_lc(vcpu, pgm_info.code,
                        (u16 *)__LC_PGM_INT_CODE);
     rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
                          &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
     rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
                         &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+    return rc ? -EFAULT : 0;
+}
 
-    return rc;
+static int __must_check __deliver_service(struct kvm_vcpu *vcpu,
+                                          struct kvm_s390_interrupt_info *inti)
+{
+    int rc;
+
+    VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
+               inti->ext.ext_params);
+    vcpu->stat.deliver_service_signal++;
+    trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
+                                     inti->ext.ext_params, 0);
+
+    rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
+    rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
+    rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+                         &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+    rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+    rc |= put_guest_lc(vcpu, inti->ext.ext_params,
+                       (u32 *)__LC_EXT_PARAMS);
+    return rc ? -EFAULT : 0;
 }
 
-static int __must_check __do_deliver_interrupt(struct kvm_vcpu *vcpu,
-                                               struct kvm_s390_interrupt_info *inti)
+static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu,
+                                              struct kvm_s390_interrupt_info *inti)
 {
-    const unsigned short table[] = { 2, 4, 4, 6 };
-    int rc = 0;
+    int rc;
+
+    trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+                                     KVM_S390_INT_PFAULT_DONE, 0,
+                                     inti->ext.ext_params2);
+
+    rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
+    rc |= put_guest_lc(vcpu, PFAULT_DONE, (u16 *)__LC_EXT_CPU_ADDR);
+    rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+                         &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+    rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+    rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
+                       (u64 *)__LC_EXT_PARAMS2);
+    return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu,
+                                         struct kvm_s390_interrupt_info *inti)
+{
+    int rc;
+
+    VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
+               inti->ext.ext_params, inti->ext.ext_params2);
+    vcpu->stat.deliver_virtio_interrupt++;
+    trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
+                                     inti->ext.ext_params,
+                                     inti->ext.ext_params2);
+
+    rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
+    rc |= put_guest_lc(vcpu, VIRTIO_PARAM, (u16 *)__LC_EXT_CPU_ADDR);
+    rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+                         &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+    rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+    rc |= put_guest_lc(vcpu, inti->ext.ext_params,
+                       (u32 *)__LC_EXT_PARAMS);
+    rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
+                       (u64 *)__LC_EXT_PARAMS2);
+    return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
+                                     struct kvm_s390_interrupt_info *inti)
+{
+    int rc;
+
+    VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
+    vcpu->stat.deliver_io_int++;
+    trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
+                                     ((__u32)inti->io.subchannel_id << 16) |
+                                     inti->io.subchannel_nr,
+                                     ((__u64)inti->io.io_int_parm << 32) |
+                                     inti->io.io_int_word);
+
+    rc = put_guest_lc(vcpu, inti->io.subchannel_id,
+                      (u16 *)__LC_SUBCHANNEL_ID);
+    rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
+                       (u16 *)__LC_SUBCHANNEL_NR);
+    rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
+                       (u32 *)__LC_IO_INT_PARM);
+    rc |= put_guest_lc(vcpu, inti->io.io_int_word,
+                       (u32 *)__LC_IO_INT_WORD);
+    rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
+                         &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+    rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
+                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+    return rc ? -EFAULT : 0;
+}
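Every __deliver_* helper above accumulates the result of several lowcore stores with rc |= ... and collapses any failure to a single -EFAULT. The idiom in isolation, with an invented write_field() standing in for put_guest_lc()/write_guest_lc():

    #include <stdio.h>

    /* Hypothetical stand-in for a guest lowcore store. */
    static int write_field(int fail) { return fail ? -1 : 0; }

    static int deliver(void)
    {
        int rc;

        rc  = write_field(0);    /* ext interrupt code */
        rc |= write_field(0);    /* cpu address */
        rc |= write_field(1);    /* old PSW -- simulate a fault */
        /* One failed store poisons rc; the caller sees one -EFAULT. */
        return rc ? -14 /* -EFAULT */ : 0;
    }

    int main(void)
    {
        printf("rc=%d\n", deliver());
        return 0;
    }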
+static int __must_check __deliver_mchk_floating(struct kvm_vcpu *vcpu,
+                                                struct kvm_s390_interrupt_info *inti)
+{
+    struct kvm_s390_mchk_info *mchk = &inti->mchk;
+    int rc;
+
+    VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
+               mchk->mcic);
+    trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
+                                     mchk->cr14, mchk->mcic);
+
+    rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
+    rc |= put_guest_lc(vcpu, mchk->mcic,
+                       (u64 __user *) __LC_MCCK_CODE);
+    rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
+                       (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
+    rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
+                         &mchk->fixed_logout, sizeof(mchk->fixed_logout));
+    rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
+                         &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+    rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
+                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+    return rc ? -EFAULT : 0;
+}
+
+typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);
+
+static const deliver_irq_t deliver_irq_funcs[] = {
+    [IRQ_PEND_MCHK_EX]        = __deliver_machine_check,
+    [IRQ_PEND_PROG]           = __deliver_prog,
+    [IRQ_PEND_EXT_EMERGENCY]  = __deliver_emergency_signal,
+    [IRQ_PEND_EXT_EXTERNAL]   = __deliver_external_call,
+    [IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
+    [IRQ_PEND_EXT_CPU_TIMER]  = __deliver_cpu_timer,
+    [IRQ_PEND_RESTART]        = __deliver_restart,
+    [IRQ_PEND_SIGP_STOP]      = __deliver_stop,
+    [IRQ_PEND_SET_PREFIX]     = __deliver_set_prefix,
+    [IRQ_PEND_PFAULT_INIT]    = __deliver_pfault_init,
+};
+
+static int __must_check __deliver_floating_interrupt(struct kvm_vcpu *vcpu,
+                                                     struct kvm_s390_interrupt_info *inti)
+{
+    int rc;
 
     switch (inti->type) {
-    case KVM_S390_INT_EMERGENCY:
-        VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
-        vcpu->stat.deliver_emergency_signal++;
-        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
-                                         inti->emerg.code, 0);
-        rc = put_guest_lc(vcpu, 0x1201, (u16 *)__LC_EXT_INT_CODE);
-        rc |= put_guest_lc(vcpu, inti->emerg.code,
-                           (u16 *)__LC_EXT_CPU_ADDR);
-        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
-                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
-                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-        break;
-    case KVM_S390_INT_EXTERNAL_CALL:
-        VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
-        vcpu->stat.deliver_external_call++;
-        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
-                                         inti->extcall.code, 0);
-        rc = put_guest_lc(vcpu, 0x1202, (u16 *)__LC_EXT_INT_CODE);
-        rc |= put_guest_lc(vcpu, inti->extcall.code,
-                           (u16 *)__LC_EXT_CPU_ADDR);
-        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
-                             &vcpu->arch.sie_block->gpsw,
-                             sizeof(psw_t));
-        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
-                            &vcpu->arch.sie_block->gpsw,
-                            sizeof(psw_t));
-        break;
-    case KVM_S390_INT_CLOCK_COMP:
-        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
-                                         inti->ext.ext_params, 0);
-        rc = deliver_ckc_interrupt(vcpu);
-        break;
-    case KVM_S390_INT_CPU_TIMER:
-        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
-                                         inti->ext.ext_params, 0);
-        rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
-                          (u16 *)__LC_EXT_INT_CODE);
-        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
-                             &vcpu->arch.sie_block->gpsw,
-                             sizeof(psw_t));
-        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
-                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-        rc |= put_guest_lc(vcpu, inti->ext.ext_params,
-                           (u32 *)__LC_EXT_PARAMS);
-        break;
     case KVM_S390_INT_SERVICE:
-        VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
-                   inti->ext.ext_params);
-        vcpu->stat.deliver_service_signal++;
-        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
-                                         inti->ext.ext_params, 0);
-        rc = put_guest_lc(vcpu, 0x2401, (u16 *)__LC_EXT_INT_CODE);
-        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
-                             &vcpu->arch.sie_block->gpsw,
-                             sizeof(psw_t));
-        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
-                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-        rc |= put_guest_lc(vcpu, inti->ext.ext_params,
-                           (u32 *)__LC_EXT_PARAMS);
-        break;
-    case KVM_S390_INT_PFAULT_INIT:
-        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
-                                         inti->ext.ext_params2);
-        rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
-                          (u16 *) __LC_EXT_INT_CODE);
-        rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
-        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
-                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
-                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-        rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
-                           (u64 *) __LC_EXT_PARAMS2);
+        rc = __deliver_service(vcpu, inti);
         break;
     case KVM_S390_INT_PFAULT_DONE:
-        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
-                                         inti->ext.ext_params2);
-        rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE);
-        rc |= put_guest_lc(vcpu, 0x0680, (u16 *)__LC_EXT_CPU_ADDR);
-        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
-                             &vcpu->arch.sie_block->gpsw,
-                             sizeof(psw_t));
-        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
-                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-        rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
-                           (u64 *)__LC_EXT_PARAMS2);
+        rc = __deliver_pfault_done(vcpu, inti);
         break;
     case KVM_S390_INT_VIRTIO:
-        VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
-                   inti->ext.ext_params, inti->ext.ext_params2);
-        vcpu->stat.deliver_virtio_interrupt++;
-        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
-                                         inti->ext.ext_params,
-                                         inti->ext.ext_params2);
-        rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE);
-        rc |= put_guest_lc(vcpu, 0x0d00, (u16 *)__LC_EXT_CPU_ADDR);
-        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
-                             &vcpu->arch.sie_block->gpsw,
-                             sizeof(psw_t));
-        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
-                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-        rc |= put_guest_lc(vcpu, inti->ext.ext_params,
-                           (u32 *)__LC_EXT_PARAMS);
-        rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
-                           (u64 *)__LC_EXT_PARAMS2);
-        break;
-    case KVM_S390_SIGP_STOP:
-        VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
-        vcpu->stat.deliver_stop_signal++;
-        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
-                                         0, 0);
-        __set_intercept_indicator(vcpu, inti);
-        break;
-
-    case KVM_S390_SIGP_SET_PREFIX:
-        VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
-                   inti->prefix.address);
-        vcpu->stat.deliver_prefix_signal++;
-        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
-                                         inti->prefix.address, 0);
-        kvm_s390_set_prefix(vcpu, inti->prefix.address);
-        break;
-
-    case KVM_S390_RESTART:
-        VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
-        vcpu->stat.deliver_restart_signal++;
-        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
-                                         0, 0);
-        rc = write_guest_lc(vcpu,
-                            offsetof(struct _lowcore, restart_old_psw),
-                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-        rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
-                            &vcpu->arch.sie_block->gpsw,
-                            sizeof(psw_t));
+        rc = __deliver_virtio(vcpu, inti);
         break;
-    case KVM_S390_PROGRAM_INT:
-        VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
-                   inti->pgm.code,
-                   table[vcpu->arch.sie_block->ipa >> 14]);
-        vcpu->stat.deliver_program_int++;
-        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
-                                         inti->pgm.code, 0);
-        rc = __deliver_prog_irq(vcpu, &inti->pgm);
-        break;
-
     case KVM_S390_MCHK:
-        VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
-                   inti->mchk.mcic);
-        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
-                                         inti->mchk.cr14,
-                                         inti->mchk.mcic);
-        rc = kvm_s390_vcpu_store_status(vcpu,
-                                        KVM_S390_STORE_STATUS_PREFIXED);
-        rc |= put_guest_lc(vcpu, inti->mchk.mcic, (u64 *)__LC_MCCK_CODE);
-        rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
-                             &vcpu->arch.sie_block->gpsw,
-                             sizeof(psw_t));
-        rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
-                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+        rc = __deliver_mchk_floating(vcpu, inti);
         break;
-
     case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-    {
-        __u32 param0 = ((__u32)inti->io.subchannel_id << 16) |
-                       inti->io.subchannel_nr;
-        __u64 param1 = ((__u64)inti->io.io_int_parm << 32) |
-                       inti->io.io_int_word;
-        VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
-        vcpu->stat.deliver_io_int++;
-        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
-                                         param0, param1);
-        rc = put_guest_lc(vcpu, inti->io.subchannel_id,
-                          (u16 *)__LC_SUBCHANNEL_ID);
-        rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
-                           (u16 *)__LC_SUBCHANNEL_NR);
-        rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
-                           (u32 *)__LC_IO_INT_PARM);
-        rc |= put_guest_lc(vcpu, inti->io.io_int_word,
-                           (u32 *)__LC_IO_INT_WORD);
-        rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
-                             &vcpu->arch.sie_block->gpsw,
-                             sizeof(psw_t));
-        rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
-                            &vcpu->arch.sie_block->gpsw,
-                            sizeof(psw_t));
+        rc = __deliver_io(vcpu, inti);
         break;
-    }
     default:
         BUG();
     }
@@ -509,19 +738,6 @@
     return rc;
 }
 
-static int __must_check deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
-{
-    int rc;
-
-    rc = put_guest_lc(vcpu, 0x1004, (u16 __user *)__LC_EXT_INT_CODE);
-    rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
-                         &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-    rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
-                        &vcpu->arch.sie_block->gpsw,
-                        sizeof(psw_t));
-    return rc;
-}
-
 /* Check whether SIGP interpretation facility has an external call pending */
 int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu)
 {
@@ -538,20 +754,11 @@
 
 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 {
-    struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
     struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
     struct kvm_s390_interrupt_info *inti;
-    int rc = 0;
+    int rc;
 
-    if (atomic_read(&li->active)) {
-        spin_lock(&li->lock);
-        list_for_each_entry(inti, &li->list, list)
-            if (__interrupt_is_deliverable(vcpu, inti)) {
-                rc = 1;
-                break;
-            }
-        spin_unlock(&li->lock);
-    }
+    rc = !!deliverable_local_irqs(vcpu);
 
     if ((!rc) && atomic_read(&fi->active)) {
         spin_lock(&fi->lock);
@@ -643,18 +850,15 @@ enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
 void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
 {
     struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
-    struct kvm_s390_interrupt_info *n, *inti = NULL;
 
     spin_lock(&li->lock);
-    list_for_each_entry_safe(inti, n, &li->list, list) {
-        list_del(&inti->list);
-        kfree(inti);
-    }
-    atomic_set(&li->active, 0);
+    li->pending_irqs = 0;
+    bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
+    memset(&li->irq, 0, sizeof(li->irq));
     spin_unlock(&li->lock);
 
     /* clear pending external calls set by sigp interpretation facility */
-    atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
+    atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
     atomic_clear_mask(SIGP_CTRL_C,
                       &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl);
 }
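deliver_irq_funcs[] above maps each pending bit to its delivery routine, so delivery becomes table dispatch instead of one big switch. The pattern reduced to standard C:

    #include <stdio.h>

    enum { IRQ_A, IRQ_B, IRQ_COUNT };

    typedef int (*deliver_fn)(void);

    static int deliver_a(void) { puts("deliver A"); return 0; }
    static int deliver_b(void) { puts("deliver B"); return 0; }

    static const deliver_fn deliver_funcs[IRQ_COUNT] = {
        [IRQ_A] = deliver_a,
        [IRQ_B] = deliver_b,
    };

    int main(void)
    {
        unsigned long pending = (1UL << IRQ_B) | (1UL << IRQ_A);

        for (int t = 0; t < IRQ_COUNT; t++)
            if (pending & (1UL << t)) {
                deliver_fn f = deliver_funcs[t];
                if (f)           /* table may have holes, as in the patch */
                    f();
                pending &= ~(1UL << t);
            }
        return 0;
    }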
@@ -664,34 +868,35 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
     struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
     struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
     struct kvm_s390_interrupt_info *n, *inti = NULL;
+    deliver_irq_t func;
     int deliver;
     int rc = 0;
+    unsigned long irq_type;
+    unsigned long deliverable_irqs;
 
     __reset_intercept_indicators(vcpu);
-    if (atomic_read(&li->active)) {
-        do {
-            deliver = 0;
-            spin_lock(&li->lock);
-            list_for_each_entry_safe(inti, n, &li->list, list) {
-                if (__interrupt_is_deliverable(vcpu, inti)) {
-                    list_del(&inti->list);
-                    deliver = 1;
-                    break;
-                }
-                __set_intercept_indicator(vcpu, inti);
-            }
-            if (list_empty(&li->list))
-                atomic_set(&li->active, 0);
-            spin_unlock(&li->lock);
-            if (deliver) {
-                rc = __do_deliver_interrupt(vcpu, inti);
-                kfree(inti);
-            }
-        } while (!rc && deliver);
-    }
 
-    if (!rc && kvm_cpu_has_pending_timer(vcpu))
-        rc = deliver_ckc_interrupt(vcpu);
+    /* pending ckc conditions might have been invalidated */
+    clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
+    if (kvm_cpu_has_pending_timer(vcpu))
+        set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
+
+    do {
+        deliverable_irqs = deliverable_local_irqs(vcpu);
+        /* bits are in the order of interrupt priority */
+        irq_type = find_first_bit(&deliverable_irqs, IRQ_PEND_COUNT);
+        if (irq_type == IRQ_PEND_COUNT)
+            break;
+        func = deliver_irq_funcs[irq_type];
+        if (!func) {
+            WARN_ON_ONCE(func == NULL);
+            clear_bit(irq_type, &li->pending_irqs);
+            continue;
+        }
+        rc = func(vcpu);
+    } while (!rc && irq_type != IRQ_PEND_COUNT);
+
+    set_intercept_indicators_local(vcpu);
 
     if (!rc && atomic_read(&fi->active)) {
         do {
@@ -710,7 +915,7 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
             atomic_set(&fi->active, 0);
             spin_unlock(&fi->lock);
             if (deliver) {
-                rc = __do_deliver_interrupt(vcpu, inti);
+                rc = __deliver_floating_interrupt(vcpu, inti);
                 kfree(inti);
             }
         } while (!rc && deliver);
@@ -719,23 +924,26 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
     return rc;
 }
 
-int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
+static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 {
     struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
-    struct kvm_s390_interrupt_info *inti;
 
-    inti = kzalloc(sizeof(*inti), GFP_KERNEL);
-    if (!inti)
-        return -ENOMEM;
+    li->irq.pgm = irq->u.pgm;
+    set_bit(IRQ_PEND_PROG, &li->pending_irqs);
+    return 0;
+}
 
-    inti->type = KVM_S390_PROGRAM_INT;
-    inti->pgm.code = code;
+int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
+{
+    struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+    struct kvm_s390_irq irq;
 
     VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
-    trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1);
+    trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, code,
+                               0, 1);
     spin_lock(&li->lock);
-    list_add(&inti->list, &li->list);
-    atomic_set(&li->active, 1);
+    irq.u.pgm.code = code;
+    __inject_prog(vcpu, &irq);
     BUG_ON(waitqueue_active(li->wq));
     spin_unlock(&li->lock);
     return 0;
@@ -745,27 +953,166 @@ int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
                              struct kvm_s390_pgm_info *pgm_info)
 {
     struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
-    struct kvm_s390_interrupt_info *inti;
-
-    inti = kzalloc(sizeof(*inti), GFP_KERNEL);
-    if (!inti)
-        return -ENOMEM;
+    struct kvm_s390_irq irq;
+    int rc;
 
     VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)",
               pgm_info->code);
     trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
                                pgm_info->code, 0, 1);
-
-    inti->type = KVM_S390_PROGRAM_INT;
-    memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm));
     spin_lock(&li->lock);
-    list_add(&inti->list, &li->list);
-    atomic_set(&li->active, 1);
+    irq.u.pgm = *pgm_info;
+    rc = __inject_prog(vcpu, &irq);
     BUG_ON(waitqueue_active(li->wq));
     spin_unlock(&li->lock);
+    return rc;
+}
+
+static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+{
+    struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+
+    VCPU_EVENT(vcpu, 3, "inject: external irq params:%x, params2:%llx",
+               irq->u.ext.ext_params, irq->u.ext.ext_params2);
+    trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
+                               irq->u.ext.ext_params,
+                               irq->u.ext.ext_params2, 2);
+
+    li->irq.ext = irq->u.ext;
+    set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
+    atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
     return 0;
 }
 
+int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+{
+    struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+    struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
+
+    VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
+               irq->u.extcall.code);
+    trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
+                               irq->u.extcall.code, 0, 2);
+
+    *extcall = irq->u.extcall;
+    set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
+    atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+    return 0;
+}
+
+static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+{
+    struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+    struct kvm_s390_prefix_info *prefix = &li->irq.prefix;
+
+    VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
+               prefix->address);
+    trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
+                               prefix->address, 0, 2);
+
+    *prefix = irq->u.prefix;
+    set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
+    return 0;
+}
+
+static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+{
+    struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+
+    trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0, 2);
+
+    li->action_bits |= ACTION_STOP_ON_STOP;
+    set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
+    return 0;
+}
+
+static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
+                                 struct kvm_s390_irq *irq)
+{
+    struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+
+    VCPU_EVENT(vcpu, 3, "inject: restart type %llx", irq->type);
+    trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0, 2);
+
+    set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
+    return 0;
+}
+
+static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
+                                   struct kvm_s390_irq *irq)
+{
+    struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+    struct kvm_s390_emerg_info *emerg = &li->irq.emerg;
+
+    VCPU_EVENT(vcpu, 3, "inject: emergency %u\n",
+               irq->u.emerg.code);
+    trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
+                               emerg->code, 0, 2);
+
+    set_bit(emerg->code, li->sigp_emerg_pending);
+    set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
+    atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+    return 0;
+}
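SIGP emergency signals are now coalesced per source CPU in sigp_emerg_pending: injection sets the source-CPU bit, and delivery pops one source at a time, dropping the summary bit only when the bitmap empties. A reduced sketch of that bookkeeping (MAX_VCPUS kept at word size so a plain unsigned long can stand in for the kernel bitmap):

    #include <stdio.h>

    #define MAX_VCPUS 64

    struct local_int {
        unsigned long sigp_emerg_pending;   /* one bit per source vcpu */
        int emergency_summary;              /* IRQ_PEND_EXT_EMERGENCY */
    };

    static void inject_emergency(struct local_int *li, int src)
    {
        li->sigp_emerg_pending |= 1UL << src;
        li->emergency_summary = 1;
    }

    static int deliver_emergency(struct local_int *li)
    {
        int src;

        for (src = 0; src < MAX_VCPUS; src++)
            if (li->sigp_emerg_pending & (1UL << src))
                break;
        li->sigp_emerg_pending &= ~(1UL << src);
        if (!li->sigp_emerg_pending)
            li->emergency_summary = 0;
        return src;                  /* cpu address stored for the guest */
    }

    int main(void)
    {
        struct local_int li = { 0 };

        inject_emergency(&li, 3);
        inject_emergency(&li, 7);
        while (li.emergency_summary)
            printf("emergency from cpu %d\n", deliver_emergency(&li));
        return 0;
    }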
+static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+{
+    struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+    struct kvm_s390_mchk_info *mchk = &li->irq.mchk;
+
+    VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
+               mchk->mcic);
+    trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
+                               mchk->mcic, 2);
+
+    /*
+     * Because repressible machine checks can be indicated along with
+     * exigent machine checks (PoP, Chapter 11, Interruption action)
+     * we need to combine cr14, mcic and external damage code.
+     * Failing storage address and the logout area should not be or'ed
+     * together, we just indicate the last occurrence of the corresponding
+     * machine check
+     */
+    mchk->cr14 |= irq->u.mchk.cr14;
+    mchk->mcic |= irq->u.mchk.mcic;
+    mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
+    mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
+    memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
+           sizeof(mchk->fixed_logout));
+    if (mchk->mcic & MCHK_EX_MASK)
+        set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
+    else if (mchk->mcic & MCHK_REP_MASK)
+        set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
+    return 0;
+}
+
+static int __inject_ckc(struct kvm_vcpu *vcpu)
+{
+    struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+
+    VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CLOCK_COMP);
+    trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
+                               0, 0, 2);
+
+    set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
+    atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+    return 0;
+}
+
+static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
+{
+    struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+
+    VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CPU_TIMER);
+    trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
+                               0, 0, 2);
+
+    set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
+    atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+    return 0;
+}
+
 struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
                                                     u64 cr6, u64 schid)
 {
@@ -851,7 +1198,17 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
     dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
     li = &dst_vcpu->arch.local_int;
     spin_lock(&li->lock);
-    atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+    switch (inti->type) {
+    case KVM_S390_MCHK:
+        atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
+        break;
+    case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+        atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
+        break;
+    default:
+        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+        break;
+    }
     spin_unlock(&li->lock);
     kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
 unlock_fi:
@@ -920,92 +1277,85 @@ void kvm_s390_reinject_io_int(struct kvm *kvm,
     __inject_vm(kvm, inti);
 }
 
-int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
-                         struct kvm_s390_interrupt *s390int)
+int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
+                       struct kvm_s390_irq *irq)
 {
-    struct kvm_s390_local_interrupt *li;
-    struct kvm_s390_interrupt_info *inti;
+    irq->type = s390int->type;
+    switch (irq->type) {
+    case KVM_S390_PROGRAM_INT:
+        if (s390int->parm & 0xffff0000)
+            return -EINVAL;
+        irq->u.pgm.code = s390int->parm;
+        break;
+    case KVM_S390_SIGP_SET_PREFIX:
+        irq->u.prefix.address = s390int->parm;
+        break;
+    case KVM_S390_INT_EXTERNAL_CALL:
+        if (irq->u.extcall.code & 0xffff0000)
+            return -EINVAL;
+        irq->u.extcall.code = s390int->parm;
+        break;
+    case KVM_S390_INT_EMERGENCY:
+        if (irq->u.emerg.code & 0xffff0000)
+            return -EINVAL;
+        irq->u.emerg.code = s390int->parm;
+        break;
+    case KVM_S390_MCHK:
+        irq->u.mchk.mcic = s390int->parm64;
+        break;
+    }
+    return 0;
+}
 
-    inti = kzalloc(sizeof(*inti), GFP_KERNEL);
-    if (!inti)
-        return -ENOMEM;
+int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+{
+    struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+    int rc;
 
-    switch (s390int->type) {
+    spin_lock(&li->lock);
+    switch (irq->type) {
     case KVM_S390_PROGRAM_INT:
-        if (s390int->parm & 0xffff0000) {
-            kfree(inti);
-            return -EINVAL;
-        }
-        inti->type = s390int->type;
-        inti->pgm.code = s390int->parm;
         VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
-                   s390int->parm);
+                   irq->u.pgm.code);
+        rc = __inject_prog(vcpu, irq);
         break;
     case KVM_S390_SIGP_SET_PREFIX:
-        inti->prefix.address = s390int->parm;
-        inti->type = s390int->type;
-        VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
-                   s390int->parm);
+        rc = __inject_set_prefix(vcpu, irq);
         break;
     case KVM_S390_SIGP_STOP:
+        rc = __inject_sigp_stop(vcpu, irq);
+        break;
     case KVM_S390_RESTART:
+        rc = __inject_sigp_restart(vcpu, irq);
+        break;
     case KVM_S390_INT_CLOCK_COMP:
+        rc = __inject_ckc(vcpu);
+        break;
     case KVM_S390_INT_CPU_TIMER:
-        VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
-        inti->type = s390int->type;
+        rc = __inject_cpu_timer(vcpu);
         break;
     case KVM_S390_INT_EXTERNAL_CALL:
-        if (s390int->parm & 0xffff0000) {
-            kfree(inti);
-            return -EINVAL;
-        }
-        VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
-                   s390int->parm);
-        inti->type = s390int->type;
-        inti->extcall.code = s390int->parm;
+        rc = __inject_extcall(vcpu, irq);
        break;
     case KVM_S390_INT_EMERGENCY:
-        if (s390int->parm & 0xffff0000) {
-            kfree(inti);
-            return -EINVAL;
-        }
-        VCPU_EVENT(vcpu, 3, "inject: emergency %u\n", s390int->parm);
-        inti->type = s390int->type;
-        inti->emerg.code = s390int->parm;
+        rc = __inject_sigp_emergency(vcpu, irq);
         break;
     case KVM_S390_MCHK:
-        VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
-                   s390int->parm64);
-        inti->type = s390int->type;
-        inti->mchk.mcic = s390int->parm64;
+        rc = __inject_mchk(vcpu, irq);
         break;
     case KVM_S390_INT_PFAULT_INIT:
-        inti->type = s390int->type;
-        inti->ext.ext_params2 = s390int->parm64;
+        rc = __inject_pfault_init(vcpu, irq);
         break;
     case KVM_S390_INT_VIRTIO:
     case KVM_S390_INT_SERVICE:
     case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
     default:
-        kfree(inti);
-        return -EINVAL;
+        rc = -EINVAL;
     }
-    trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, s390int->type, s390int->parm,
-                               s390int->parm64, 2);
-
-    li = &vcpu->arch.local_int;
-    spin_lock(&li->lock);
-    if (inti->type == KVM_S390_PROGRAM_INT)
-        list_add(&inti->list, &li->list);
-    else
-        list_add_tail(&inti->list, &li->list);
-    atomic_set(&li->active, 1);
-    if (inti->type == KVM_S390_SIGP_STOP)
-        li->action_bits |= ACTION_STOP_ON_STOP;
-    atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
     spin_unlock(&li->lock);
-    kvm_s390_vcpu_wakeup(vcpu);
-    return 0;
+    if (!rc)
+        kvm_s390_vcpu_wakeup(vcpu);
+    return rc;
 }
 
 void kvm_s390_clear_float_irqs(struct kvm *kvm)
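s390int_to_s390irq() above keeps the legacy KVM_S390_INTERRUPT ABI alive by translating the old parm/parm64 pair into the typed kvm_s390_irq payload, rejecting out-of-range values up front. A reduced sketch of the shape of that translation (the structs and TYPE_PROGRAM_INT constant are invented stand-ins for the uapi definitions):

    #include <stdint.h>
    #include <stdio.h>

    struct s390_interrupt { uint64_t type; uint32_t parm; uint64_t parm64; };
    struct s390_irq { uint64_t type; union { struct { uint16_t code; } pgm; } u; };

    #define TYPE_PROGRAM_INT 1

    static int s390int_to_s390irq(const struct s390_interrupt *in,
                                  struct s390_irq *out)
    {
        out->type = in->type;
        switch (in->type) {
        case TYPE_PROGRAM_INT:
            if (in->parm & 0xffff0000)   /* program codes are 16 bit */
                return -22;              /* -EINVAL */
            out->u.pgm.code = (uint16_t)in->parm;
            return 0;
        default:
            return 0;                    /* other types elided here */
        }
    }

    int main(void)
    {
        struct s390_interrupt in = { TYPE_PROGRAM_INT, 0x10006, 0 };
        struct s390_irq out;
        printf("rc=%d\n", s390int_to_s390irq(&in, &out)); /* rejected */
        return 0;
    }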
"instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) }, + { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) }, { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) }, { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) }, { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) }, + { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) }, + { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) }, + { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) }, { "diagnose_10", VCPU_STAT(diagnose_10) }, { "diagnose_44", VCPU_STAT(diagnose_44) }, { "diagnose_9c", VCPU_STAT(diagnose_9c) }, @@ -453,6 +460,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) spin_lock_init(&kvm->arch.float_int.lock); INIT_LIST_HEAD(&kvm->arch.float_int.list); init_waitqueue_head(&kvm->arch.ipte_wq); + mutex_init(&kvm->arch.ipte_mutex); debug_register_view(kvm->arch.dbf, &debug_sprintf_view); VM_EVENT(kvm, 3, "%s", "vm created"); @@ -711,7 +719,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, } spin_lock_init(&vcpu->arch.local_int.lock); - INIT_LIST_HEAD(&vcpu->arch.local_int.list); vcpu->arch.local_int.float_int = &kvm->arch.float_int; vcpu->arch.local_int.wq = &vcpu->wq; vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags; @@ -1114,13 +1121,15 @@ static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token, unsigned long token) { struct kvm_s390_interrupt inti; - inti.parm64 = token; + struct kvm_s390_irq irq; if (start_token) { - inti.type = KVM_S390_INT_PFAULT_INIT; - WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti)); + irq.u.ext.ext_params2 = token; + irq.type = KVM_S390_INT_PFAULT_INIT; + WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq)); } else { inti.type = KVM_S390_INT_PFAULT_DONE; + inti.parm64 = token; WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); } } @@ -1614,11 +1623,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp, switch (ioctl) { case KVM_S390_INTERRUPT: { struct kvm_s390_interrupt s390int; + struct kvm_s390_irq s390irq; r = -EFAULT; if (copy_from_user(&s390int, argp, sizeof(s390int))) break; - r = kvm_s390_inject_vcpu(vcpu, &s390int); + if (s390int_to_s390irq(&s390int, &s390irq)) + return -EINVAL; + r = kvm_s390_inject_vcpu(vcpu, &s390irq); break; } case KVM_S390_STORE_STATUS: diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 244d02303182..a8f3d9b71c11 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -24,8 +24,6 @@ typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu); /* declare vfacilities extern */ extern unsigned long *vfacilities; -int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu); - /* Transactional Memory Execution related macros */ #define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & 0x10)) #define TDB_FORMAT1 1 @@ -144,7 +142,7 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm); int __must_check kvm_s390_inject_vm(struct kvm *kvm, struct kvm_s390_interrupt *s390int); int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, - struct kvm_s390_interrupt *s390int); + struct kvm_s390_irq *irq); int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, u64 cr6, u64 schid); @@ -152,6 +150,10 @@ void kvm_s390_reinject_io_int(struct kvm *kvm, struct kvm_s390_interrupt_info *inti); int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked); +/* 
implemented in intercept.c */ +void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilc); +int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu); + /* implemented in priv.c */ int is_valid_psw(psw_t *psw); int kvm_s390_handle_b2(struct kvm_vcpu *vcpu); @@ -222,6 +224,9 @@ static inline int kvm_s390_inject_prog_cond(struct kvm_vcpu *vcpu, int rc) return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); } +int s390int_to_s390irq(struct kvm_s390_interrupt *s390int, + struct kvm_s390_irq *s390irq); + /* implemented in interrupt.c */ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); int psw_extint_disabled(struct kvm_vcpu *vcpu); diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index f47cb0c6d906..1be578d64dfc 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -180,21 +180,18 @@ static int handle_skey(struct kvm_vcpu *vcpu) if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); - vcpu->arch.sie_block->gpsw.addr = - __rewind_psw(vcpu->arch.sie_block->gpsw, 4); + kvm_s390_rewind_psw(vcpu, 4); VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation"); return 0; } static int handle_ipte_interlock(struct kvm_vcpu *vcpu) { - psw_t *psw = &vcpu->arch.sie_block->gpsw; - vcpu->stat.instruction_ipte_interlock++; - if (psw_bits(*psw).p) + if (psw_bits(vcpu->arch.sie_block->gpsw).p) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu)); - psw->addr = __rewind_psw(*psw, 4); + kvm_s390_rewind_psw(vcpu, 4); VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation"); return 0; } @@ -650,10 +647,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; - if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { - if (kvm_s390_check_low_addr_protection(vcpu, start)) - return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); - } + start = kvm_s390_logical_to_effective(vcpu, start); switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) { case 0x00000000: @@ -669,6 +663,12 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) default: return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); } + + if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { + if (kvm_s390_check_low_addr_protection(vcpu, start)) + return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); + } + while (start < end) { unsigned long useraddr, abs_addr; @@ -725,8 +725,7 @@ static int handle_essa(struct kvm_vcpu *vcpu) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); /* Rewind PSW to repeat the ESSA instruction */ - vcpu->arch.sie_block->gpsw.addr = - __rewind_psw(vcpu->arch.sie_block->gpsw, 4); + kvm_s390_rewind_psw(vcpu, 4); vcpu->arch.sie_block->cbrlo &= PAGE_MASK; /* reset nceo */ cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo); down_read(&gmap->mm->mmap_sem); @@ -769,8 +768,8 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu) { int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; int reg3 = vcpu->arch.sie_block->ipa & 0x000f; - u32 val = 0; - int reg, rc; + int reg, rc, nr_regs; + u32 ctl_array[16]; u64 ga; vcpu->stat.instruction_lctl++; @@ -786,19 +785,20 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu) VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga); + nr_regs = ((reg3 - reg1) & 0xf) + 1; + rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32)); + if (rc) + return kvm_s390_inject_prog_cond(vcpu, rc); reg = reg1; + nr_regs = 
0; do { - rc = read_guest(vcpu, ga, &val, sizeof(val)); - if (rc) - return kvm_s390_inject_prog_cond(vcpu, rc); vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul; - vcpu->arch.sie_block->gcr[reg] |= val; - ga += 4; + vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++]; if (reg == reg3) break; reg = (reg + 1) % 16; } while (1); - + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); return 0; } @@ -806,9 +806,9 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu) { int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; int reg3 = vcpu->arch.sie_block->ipa & 0x000f; + int reg, rc, nr_regs; + u32 ctl_array[16]; u64 ga; - u32 val; - int reg, rc; vcpu->stat.instruction_stctl++; @@ -824,26 +824,24 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu) trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga); reg = reg1; + nr_regs = 0; do { - val = vcpu->arch.sie_block->gcr[reg] & 0x00000000fffffffful; - rc = write_guest(vcpu, ga, &val, sizeof(val)); - if (rc) - return kvm_s390_inject_prog_cond(vcpu, rc); - ga += 4; + ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg]; if (reg == reg3) break; reg = (reg + 1) % 16; } while (1); - - return 0; + rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32)); + return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0; } static int handle_lctlg(struct kvm_vcpu *vcpu) { int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; int reg3 = vcpu->arch.sie_block->ipa & 0x000f; - u64 ga, val; - int reg, rc; + int reg, rc, nr_regs; + u64 ctl_array[16]; + u64 ga; vcpu->stat.instruction_lctlg++; @@ -855,22 +853,22 @@ static int handle_lctlg(struct kvm_vcpu *vcpu) if (ga & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - reg = reg1; - VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga); + nr_regs = ((reg3 - reg1) & 0xf) + 1; + rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64)); + if (rc) + return kvm_s390_inject_prog_cond(vcpu, rc); + reg = reg1; + nr_regs = 0; do { - rc = read_guest(vcpu, ga, &val, sizeof(val)); - if (rc) - return kvm_s390_inject_prog_cond(vcpu, rc); - vcpu->arch.sie_block->gcr[reg] = val; - ga += 8; + vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++]; if (reg == reg3) break; reg = (reg + 1) % 16; } while (1); - + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); return 0; } @@ -878,8 +876,9 @@ static int handle_stctg(struct kvm_vcpu *vcpu) { int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; int reg3 = vcpu->arch.sie_block->ipa & 0x000f; - u64 ga, val; - int reg, rc; + int reg, rc, nr_regs; + u64 ctl_array[16]; + u64 ga; vcpu->stat.instruction_stctg++; @@ -891,23 +890,19 @@ static int handle_stctg(struct kvm_vcpu *vcpu) if (ga & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - reg = reg1; - VCPU_EVENT(vcpu, 5, "stctg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga); + reg = reg1; + nr_regs = 0; do { - val = vcpu->arch.sie_block->gcr[reg]; - rc = write_guest(vcpu, ga, &val, sizeof(val)); - if (rc) - return kvm_s390_inject_prog_cond(vcpu, rc); - ga += 8; + ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg]; if (reg == reg3) break; reg = (reg + 1) % 16; } while (1); - - return 0; + rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64)); + return rc ? 
kvm_s390_inject_prog_cond(vcpu, rc) : 0; } static const intercept_handler_t eb_handlers[256] = { diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index cf243ba3d50f..6651f9f73973 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -20,20 +20,13 @@ #include "kvm-s390.h" #include "trace.h" -static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, +static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu, u64 *reg) { struct kvm_s390_local_interrupt *li; - struct kvm_vcpu *dst_vcpu = NULL; int cpuflags; int rc; - if (cpu_addr >= KVM_MAX_VCPUS) - return SIGP_CC_NOT_OPERATIONAL; - - dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); - if (!dst_vcpu) - return SIGP_CC_NOT_OPERATIONAL; li = &dst_vcpu->arch.local_int; cpuflags = atomic_read(li->cpuflags); @@ -48,55 +41,53 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, rc = SIGP_CC_STATUS_STORED; } - VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc); + VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", dst_vcpu->vcpu_id, + rc); return rc; } -static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr) +static int __inject_sigp_emergency(struct kvm_vcpu *vcpu, + struct kvm_vcpu *dst_vcpu) { - struct kvm_s390_interrupt s390int = { + struct kvm_s390_irq irq = { .type = KVM_S390_INT_EMERGENCY, - .parm = vcpu->vcpu_id, + .u.emerg.code = vcpu->vcpu_id, }; - struct kvm_vcpu *dst_vcpu = NULL; int rc = 0; - if (cpu_addr < KVM_MAX_VCPUS) - dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); - if (!dst_vcpu) - return SIGP_CC_NOT_OPERATIONAL; - - rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int); + rc = kvm_s390_inject_vcpu(dst_vcpu, &irq); if (!rc) - VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr); + VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", + dst_vcpu->vcpu_id); return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED; } -static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr, +static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu) +{ + return __inject_sigp_emergency(vcpu, dst_vcpu); +} + +static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, + struct kvm_vcpu *dst_vcpu, u16 asn, u64 *reg) { - struct kvm_vcpu *dst_vcpu = NULL; const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT; u16 p_asn, s_asn; psw_t *psw; u32 flags; - if (cpu_addr < KVM_MAX_VCPUS) - dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); - if (!dst_vcpu) - return SIGP_CC_NOT_OPERATIONAL; flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags); psw = &dst_vcpu->arch.sie_block->gpsw; p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff; /* Primary ASN */ s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff; /* Secondary ASN */ - /* Deliver the emergency signal? */ + /* Inject the emergency signal? 
*/ if (!(flags & CPUSTAT_STOPPED) || (psw->mask & psw_int_mask) != psw_int_mask || ((flags & CPUSTAT_WAIT) && psw->addr != 0) || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) { - return __sigp_emergency(vcpu, cpu_addr); + return __inject_sigp_emergency(vcpu, dst_vcpu); } else { *reg &= 0xffffffff00000000UL; *reg |= SIGP_STATUS_INCORRECT_STATE; @@ -104,23 +95,19 @@ static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr, } } -static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr) +static int __sigp_external_call(struct kvm_vcpu *vcpu, + struct kvm_vcpu *dst_vcpu) { - struct kvm_s390_interrupt s390int = { + struct kvm_s390_irq irq = { .type = KVM_S390_INT_EXTERNAL_CALL, - .parm = vcpu->vcpu_id, + .u.extcall.code = vcpu->vcpu_id, }; - struct kvm_vcpu *dst_vcpu = NULL; int rc; - if (cpu_addr < KVM_MAX_VCPUS) - dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); - if (!dst_vcpu) - return SIGP_CC_NOT_OPERATIONAL; - - rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int); + rc = kvm_s390_inject_vcpu(dst_vcpu, &irq); if (!rc) - VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr); + VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", + dst_vcpu->vcpu_id); return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED; } @@ -128,29 +115,20 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr) static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action) { struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int; - struct kvm_s390_interrupt_info *inti; int rc = SIGP_CC_ORDER_CODE_ACCEPTED; - inti = kzalloc(sizeof(*inti), GFP_ATOMIC); - if (!inti) - return -ENOMEM; - inti->type = KVM_S390_SIGP_STOP; - spin_lock(&li->lock); if (li->action_bits & ACTION_STOP_ON_STOP) { /* another SIGP STOP is pending */ - kfree(inti); rc = SIGP_CC_BUSY; goto out; } if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) { - kfree(inti); if ((action & ACTION_STORE_ON_STOP) != 0) rc = -ESHUTDOWN; goto out; } - list_add_tail(&inti->list, &li->list); - atomic_set(&li->active, 1); + set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs); li->action_bits |= action; atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); kvm_s390_vcpu_wakeup(dst_vcpu); @@ -160,23 +138,27 @@ out: return rc; } -static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action) +static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu) { - struct kvm_vcpu *dst_vcpu = NULL; int rc; - if (cpu_addr >= KVM_MAX_VCPUS) - return SIGP_CC_NOT_OPERATIONAL; + rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP); + VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", dst_vcpu->vcpu_id); - dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); - if (!dst_vcpu) - return SIGP_CC_NOT_OPERATIONAL; + return rc; +} - rc = __inject_sigp_stop(dst_vcpu, action); +static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu, + struct kvm_vcpu *dst_vcpu, u64 *reg) +{ + int rc; - VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr); + rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP | + ACTION_STORE_ON_STOP); + VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x", + dst_vcpu->vcpu_id); - if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) { + if (rc == -ESHUTDOWN) { /* If the CPU has already been stopped, we still have * to save the status when doing stop-and-store. This * has to be done after unlocking all spinlocks. 
*/ @@ -212,18 +194,12 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter) return rc; } -static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, - u64 *reg) +static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu, + u32 address, u64 *reg) { struct kvm_s390_local_interrupt *li; - struct kvm_vcpu *dst_vcpu = NULL; - struct kvm_s390_interrupt_info *inti; int rc; - if (cpu_addr < KVM_MAX_VCPUS) - dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); - if (!dst_vcpu) - return SIGP_CC_NOT_OPERATIONAL; li = &dst_vcpu->arch.local_int; /* @@ -238,46 +214,34 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, return SIGP_CC_STATUS_STORED; } - inti = kzalloc(sizeof(*inti), GFP_KERNEL); - if (!inti) - return SIGP_CC_BUSY; - spin_lock(&li->lock); /* cpu must be in stopped state */ if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) { *reg &= 0xffffffff00000000UL; *reg |= SIGP_STATUS_INCORRECT_STATE; rc = SIGP_CC_STATUS_STORED; - kfree(inti); goto out_li; } - inti->type = KVM_S390_SIGP_SET_PREFIX; - inti->prefix.address = address; - - list_add_tail(&inti->list, &li->list); - atomic_set(&li->active, 1); + li->irq.prefix.address = address; + set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs); kvm_s390_vcpu_wakeup(dst_vcpu); rc = SIGP_CC_ORDER_CODE_ACCEPTED; - VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address); + VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", dst_vcpu->vcpu_id, + address); out_li: spin_unlock(&li->lock); return rc; } -static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id, - u32 addr, u64 *reg) +static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, + struct kvm_vcpu *dst_vcpu, + u32 addr, u64 *reg) { - struct kvm_vcpu *dst_vcpu = NULL; int flags; int rc; - if (cpu_id < KVM_MAX_VCPUS) - dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_id); - if (!dst_vcpu) - return SIGP_CC_NOT_OPERATIONAL; - spin_lock(&dst_vcpu->arch.local_int.lock); flags = atomic_read(dst_vcpu->arch.local_int.cpuflags); spin_unlock(&dst_vcpu->arch.local_int.lock); @@ -297,19 +261,12 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id, return rc; } -static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr, - u64 *reg) +static int __sigp_sense_running(struct kvm_vcpu *vcpu, + struct kvm_vcpu *dst_vcpu, u64 *reg) { struct kvm_s390_local_interrupt *li; - struct kvm_vcpu *dst_vcpu = NULL; int rc; - if (cpu_addr >= KVM_MAX_VCPUS) - return SIGP_CC_NOT_OPERATIONAL; - - dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); - if (!dst_vcpu) - return SIGP_CC_NOT_OPERATIONAL; li = &dst_vcpu->arch.local_int; if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) { /* running */ @@ -321,26 +278,19 @@ static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr, rc = SIGP_CC_STATUS_STORED; } - VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr, - rc); + VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", + dst_vcpu->vcpu_id, rc); return rc; } -/* Test whether the destination CPU is available and not busy */ -static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr) +static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu, + struct kvm_vcpu *dst_vcpu, u8 order_code) { - struct kvm_s390_local_interrupt *li; - int rc = SIGP_CC_ORDER_CODE_ACCEPTED; - struct kvm_vcpu *dst_vcpu = NULL; - - if (cpu_addr >= KVM_MAX_VCPUS) - return SIGP_CC_NOT_OPERATIONAL; + struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int; + /* handle (RE)START in user space */ 
+ int rc = -EOPNOTSUPP; - dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); - if (!dst_vcpu) - return SIGP_CC_NOT_OPERATIONAL; - li = &dst_vcpu->arch.local_int; spin_lock(&li->lock); if (li->action_bits & ACTION_STOP_ON_STOP) rc = SIGP_CC_BUSY; @@ -349,90 +299,131 @@ static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr) return rc; } -int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) +static int __prepare_sigp_cpu_reset(struct kvm_vcpu *vcpu, + struct kvm_vcpu *dst_vcpu, u8 order_code) { - int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; - int r3 = vcpu->arch.sie_block->ipa & 0x000f; - u32 parameter; - u16 cpu_addr = vcpu->run->s.regs.gprs[r3]; - u8 order_code; - int rc; + /* handle (INITIAL) CPU RESET in user space */ + return -EOPNOTSUPP; +} - /* sigp in userspace can exit */ - if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) - return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); +static int __prepare_sigp_unknown(struct kvm_vcpu *vcpu, + struct kvm_vcpu *dst_vcpu) +{ + /* handle unknown orders in user space */ + return -EOPNOTSUPP; +} - order_code = kvm_s390_get_base_disp_rs(vcpu); +static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code, + u16 cpu_addr, u32 parameter, u64 *status_reg) +{ + int rc; + struct kvm_vcpu *dst_vcpu; - if (r1 % 2) - parameter = vcpu->run->s.regs.gprs[r1]; - else - parameter = vcpu->run->s.regs.gprs[r1 + 1]; + if (cpu_addr >= KVM_MAX_VCPUS) + return SIGP_CC_NOT_OPERATIONAL; + + dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); + if (!dst_vcpu) + return SIGP_CC_NOT_OPERATIONAL; - trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter); switch (order_code) { case SIGP_SENSE: vcpu->stat.instruction_sigp_sense++; - rc = __sigp_sense(vcpu, cpu_addr, - &vcpu->run->s.regs.gprs[r1]); + rc = __sigp_sense(vcpu, dst_vcpu, status_reg); break; case SIGP_EXTERNAL_CALL: vcpu->stat.instruction_sigp_external_call++; - rc = __sigp_external_call(vcpu, cpu_addr); + rc = __sigp_external_call(vcpu, dst_vcpu); break; case SIGP_EMERGENCY_SIGNAL: vcpu->stat.instruction_sigp_emergency++; - rc = __sigp_emergency(vcpu, cpu_addr); + rc = __sigp_emergency(vcpu, dst_vcpu); break; case SIGP_STOP: vcpu->stat.instruction_sigp_stop++; - rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP); + rc = __sigp_stop(vcpu, dst_vcpu); break; case SIGP_STOP_AND_STORE_STATUS: - vcpu->stat.instruction_sigp_stop++; - rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP | - ACTION_STOP_ON_STOP); + vcpu->stat.instruction_sigp_stop_store_status++; + rc = __sigp_stop_and_store_status(vcpu, dst_vcpu, status_reg); break; case SIGP_STORE_STATUS_AT_ADDRESS: - rc = __sigp_store_status_at_addr(vcpu, cpu_addr, parameter, - &vcpu->run->s.regs.gprs[r1]); - break; - case SIGP_SET_ARCHITECTURE: - vcpu->stat.instruction_sigp_arch++; - rc = __sigp_set_arch(vcpu, parameter); + vcpu->stat.instruction_sigp_store_status++; + rc = __sigp_store_status_at_addr(vcpu, dst_vcpu, parameter, + status_reg); break; case SIGP_SET_PREFIX: vcpu->stat.instruction_sigp_prefix++; - rc = __sigp_set_prefix(vcpu, cpu_addr, parameter, - &vcpu->run->s.regs.gprs[r1]); + rc = __sigp_set_prefix(vcpu, dst_vcpu, parameter, status_reg); break; case SIGP_COND_EMERGENCY_SIGNAL: - rc = __sigp_conditional_emergency(vcpu, cpu_addr, parameter, - &vcpu->run->s.regs.gprs[r1]); + vcpu->stat.instruction_sigp_cond_emergency++; + rc = __sigp_conditional_emergency(vcpu, dst_vcpu, parameter, + status_reg); break; case SIGP_SENSE_RUNNING: vcpu->stat.instruction_sigp_sense_running++; - rc = __sigp_sense_running(vcpu, cpu_addr, - 
&vcpu->run->s.regs.gprs[r1]); + rc = __sigp_sense_running(vcpu, dst_vcpu, status_reg); break; case SIGP_START: - rc = sigp_check_callable(vcpu, cpu_addr); - if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) - rc = -EOPNOTSUPP; /* Handle START in user space */ + vcpu->stat.instruction_sigp_start++; + rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code); break; case SIGP_RESTART: vcpu->stat.instruction_sigp_restart++; - rc = sigp_check_callable(vcpu, cpu_addr); - if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) { - VCPU_EVENT(vcpu, 4, - "sigp restart %x to handle userspace", - cpu_addr); - /* user space must know about restart */ - rc = -EOPNOTSUPP; - } + rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code); + break; + case SIGP_INITIAL_CPU_RESET: + vcpu->stat.instruction_sigp_init_cpu_reset++; + rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code); + break; + case SIGP_CPU_RESET: + vcpu->stat.instruction_sigp_cpu_reset++; + rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code); + break; + default: + vcpu->stat.instruction_sigp_unknown++; + rc = __prepare_sigp_unknown(vcpu, dst_vcpu); + } + + if (rc == -EOPNOTSUPP) + VCPU_EVENT(vcpu, 4, + "sigp order %u -> cpu %x: handled in user space", + order_code, dst_vcpu->vcpu_id); + + return rc; +} + +int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) +{ + int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; + int r3 = vcpu->arch.sie_block->ipa & 0x000f; + u32 parameter; + u16 cpu_addr = vcpu->run->s.regs.gprs[r3]; + u8 order_code; + int rc; + + /* sigp in userspace can exit */ + if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) + return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); + + order_code = kvm_s390_get_base_disp_rs(vcpu); + + if (r1 % 2) + parameter = vcpu->run->s.regs.gprs[r1]; + else + parameter = vcpu->run->s.regs.gprs[r1 + 1]; + + trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter); + switch (order_code) { + case SIGP_SET_ARCHITECTURE: + vcpu->stat.instruction_sigp_arch++; + rc = __sigp_set_arch(vcpu, parameter); break; default: - return -EOPNOTSUPP; + rc = handle_sigp_dst(vcpu, order_code, cpu_addr, + parameter, + &vcpu->run->s.regs.gprs[r1]); } if (rc < 0) diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 71c7eff2c89f..be99357d238c 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -844,7 +844,7 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, down_read(&mm->mmap_sem); retry: - ptep = get_locked_pte(current->mm, addr, &ptl); + ptep = get_locked_pte(mm, addr, &ptl); if (unlikely(!ptep)) { up_read(&mm->mmap_sem); return -EFAULT; @@ -888,6 +888,45 @@ retry: } EXPORT_SYMBOL(set_guest_storage_key); +unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr) +{ + spinlock_t *ptl; + pgste_t pgste; + pte_t *ptep; + uint64_t physaddr; + unsigned long key = 0; + + down_read(&mm->mmap_sem); + ptep = get_locked_pte(mm, addr, &ptl); + if (unlikely(!ptep)) { + up_read(&mm->mmap_sem); + return -EFAULT; + } + pgste = pgste_get_lock(ptep); + + if (pte_val(*ptep) & _PAGE_INVALID) { + key |= (pgste_val(pgste) & PGSTE_ACC_BITS) >> 56; + key |= (pgste_val(pgste) & PGSTE_FP_BIT) >> 56; + key |= (pgste_val(pgste) & PGSTE_GR_BIT) >> 48; + key |= (pgste_val(pgste) & PGSTE_GC_BIT) >> 48; + } else { + physaddr = pte_val(*ptep) & PAGE_MASK; + key = page_get_storage_key(physaddr); + + /* Reflect guest's logical view, not physical */ + if (pgste_val(pgste) & PGSTE_GR_BIT) + key |= _PAGE_REFERENCED; + if (pgste_val(pgste) & PGSTE_GC_BIT) + key |= _PAGE_CHANGED; + } + + 
pgste_set_unlock(ptep, pgste);
+	pte_unmap_unlock(ptep, ptl);
+	up_read(&mm->mmap_sem);
+	return key;
+}
+EXPORT_SYMBOL(get_guest_storage_key);
+
 #else /* CONFIG_PGSTE */
 
 static inline int page_table_with_pgste(struct page *page)
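
A few illustrative sketches of the techniques in this diff follow. The interrupt rework in interrupt.c replaces the old pattern of allocating a kvm_s390_interrupt_info and queueing it on a list with typed __inject_*() helpers dispatched under the per-vcpu lock; the legacy KVM_S390_INTERRUPT ioctl payload is first converted by s390int_to_s390irq(), and floating-only types now fail with -EINVAL. Below is a minimal userspace model of that dispatch; all type and helper names are stand-ins, only the control flow mirrors the patch.

/* Simplified model of the reworked kvm_s390_inject_vcpu() dispatch:
 * validate/convert first, take the per-vcpu lock, dispatch by irq
 * type, and wake the target vcpu only on success. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

enum irq_type { IRQ_PROGRAM, IRQ_EMERGENCY, IRQ_UNSUPPORTED };

struct s390_irq {
	enum irq_type type;
	uint32_t parm;		/* pgm code or emergency source cpu */
};

static int inject_prog(struct s390_irq *irq)
{
	if (irq->parm & 0xffff0000)	/* program codes are 16 bit */
		return -EINVAL;
	printf("inject: program check %u\n", (unsigned)irq->parm);
	return 0;
}

static int inject_emergency(struct s390_irq *irq)
{
	if (irq->parm & 0xffff0000)	/* cpu addresses are 16 bit */
		return -EINVAL;
	printf("inject: emergency from cpu %u\n", (unsigned)irq->parm);
	return 0;
}

static int inject_vcpu(struct s390_irq *irq)
{
	int rc;

	/* spin_lock(&li->lock) would go here in the kernel */
	switch (irq->type) {
	case IRQ_PROGRAM:
		rc = inject_prog(irq);
		break;
	case IRQ_EMERGENCY:
		rc = inject_emergency(irq);
		break;
	default:
		rc = -EINVAL;	/* floating-only types are rejected here */
	}
	/* spin_unlock(&li->lock) */

	if (!rc)
		printf("wake up target vcpu\n");
	return rc;
}

int main(void)
{
	struct s390_irq irq = { .type = IRQ_PROGRAM, .parm = 6 };

	return inject_vcpu(&irq) ? 1 : 0;
}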
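Several priv.c handlers (skey, ipte interlock, essa) now retry by calling a shared kvm_s390_rewind_psw() instead of open-coding __rewind_psw(). The operation simply steps the PSW address back by the instruction length so the instruction is re-executed. A tiny model, with the 24/31/64-bit addressing-mode masks simplified to one illustrative mask:

/* Model of PSW rewind: subtract the instruction length from the
 * address part of the PSW so the intercepted instruction retries.
 * The addressing-mode mask below is illustrative only. */
#include <stdint.h>
#include <stdio.h>

struct psw { uint64_t mask, addr; };

static void rewind_psw(struct psw *psw, int ilc)
{
	uint64_t addr_mask = 0x000000ffffffffffull;	/* illustrative */

	psw->addr = (psw->addr - ilc) & addr_mask;
}

int main(void)
{
	struct psw psw = { .mask = 0, .addr = 0x2004 };

	rewind_psw(&psw, 4);	/* retry a 4-byte instruction */
	printf("psw.addr = 0x%llx\n", (unsigned long long)psw.addr);
	return 0;
}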
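The lctl/stctl rework trades one guest access per control register for a single bulk read_guest()/write_guest() of up to 16 entries, sized as ((reg3 - reg1) & 0xf) + 1 to honor the wrap-around register range, followed by a TLB-flush request on the load side. A compilable model of the load path, with gcr[] and the guest buffer as stand-ins for the kernel's state:

/* Userspace model of the batched LCTL walk: one bulk copy of
 * ((reg3 - reg1) & 0xf) + 1 words, then a modulo-16 register walk
 * that patches only the low 32 bits of each 64-bit control reg. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t gcr[16];	/* control registers */

static void lctl(int reg1, int reg3, const uint32_t *guest_mem)
{
	uint32_t ctl_array[16];
	int nr_regs = ((reg3 - reg1) & 0xf) + 1;
	int reg = reg1;

	/* single bulk access instead of one read per register */
	memcpy(ctl_array, guest_mem, nr_regs * sizeof(uint32_t));

	nr_regs = 0;
	do {
		gcr[reg] &= 0xffffffff00000000ul;
		gcr[reg] |= ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;	/* wraps, e.g. lctl 14,1 */
	} while (1);
	/* the kernel additionally requests a TLB flush here */
}

int main(void)
{
	uint32_t guest[4] = { 1, 2, 3, 4 };

	lctl(14, 1, guest);	/* loads cr14, cr15, cr0, cr1 */
	printf("cr0=%llu cr15=%llu\n",
	       (unsigned long long)gcr[0], (unsigned long long)gcr[15]);
	return 0;
}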
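The sigp.c changes centralize destination handling: handle_sigp_dst() validates the CPU address and resolves the destination vcpu exactly once, every order increments its own statistics counter (including the new cond_emergency, start, stop_store_status, store_status, reset, and unknown counters), and orders that must complete in user space uniformly return -EOPNOTSUPP. A sketch of that shape; the order numbers, condition-code values, and vcpu table are illustrative:

/* Sketch of the centralized SIGP dispatch: destination validated
 * once, per-order counters, and a uniform -EOPNOTSUPP for orders
 * that are punted to user space. */
#include <errno.h>
#include <stdio.h>

#define MAX_VCPUS		4
#define CC_ORDER_ACCEPTED	0
#define CC_NOT_OPERATIONAL	3

enum { SIGP_SENSE = 0x01, SIGP_RESTART = 0x06, SIGP_CPU_RESET = 0x0c };

static struct { int exists; } vcpus[MAX_VCPUS] = { {1}, {1} };
static unsigned stat_sense, stat_restart, stat_cpu_reset, stat_unknown;

static int handle_sigp_dst(unsigned char order, unsigned short cpu_addr)
{
	if (cpu_addr >= MAX_VCPUS || !vcpus[cpu_addr].exists)
		return CC_NOT_OPERATIONAL;

	switch (order) {
	case SIGP_SENSE:
		stat_sense++;
		return CC_ORDER_ACCEPTED;
	case SIGP_RESTART:
		stat_restart++;
		return -EOPNOTSUPP;	/* (RE)START handled in user space */
	case SIGP_CPU_RESET:
		stat_cpu_reset++;
		return -EOPNOTSUPP;	/* resets handled in user space */
	default:
		stat_unknown++;
		return -EOPNOTSUPP;	/* unknown orders too */
	}
}

int main(void)
{
	printf("sense cpu 1 -> cc %d\n", handle_sigp_dst(SIGP_SENSE, 1));
	printf("sense cpu 9 -> cc %d\n", handle_sigp_dst(SIGP_SENSE, 9));
	printf("restart -> %d (punt to user space)\n",
	       handle_sigp_dst(SIGP_RESTART, 0));
	return 0;
}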
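Finally, the new get_guest_storage_key() in pgtable.c composes the key from two sources: if the pte is invalid, the ACC/F/R/C bits are recovered from the PGSTE software bits alone; otherwise the real storage key of the backing page is read and the guest's logical referenced/changed view from the PGSTE is merged in. A small model of that composition; the bit positions and the page_key() helper are invented for illustration:

/* Model of the key composition in get_guest_storage_key(): recover
 * the key from the pgste for an invalid pte, otherwise read the real
 * key and merge the guest's R/C view. Layout is illustrative. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_INVALID	0x1ull
#define KEY_REFERENCED	0x04u
#define KEY_CHANGED	0x02u

/* illustrative pgste layout: acc+f in bits 56..63, r/c near bit 48 */
#define PGSTE_ACC_F_BITS 0xf800000000000000ull
#define PGSTE_GR_BIT	 0x0004000000000000ull
#define PGSTE_GC_BIT	 0x0002000000000000ull

static unsigned page_key(uint64_t physaddr)
{
	(void)physaddr;
	return 0x60;	/* pretend hardware key for the mapped case */
}

static unsigned guest_storage_key(uint64_t pte, uint64_t pgste)
{
	unsigned key;

	if (pte & PAGE_INVALID) {
		/* page is paged out: key lives only in the pgste */
		key  = (unsigned)((pgste & PGSTE_ACC_F_BITS) >> 56);
		key |= (unsigned)((pgste & PGSTE_GR_BIT) >> 48);
		key |= (unsigned)((pgste & PGSTE_GC_BIT) >> 48);
	} else {
		key = page_key(pte & ~0xfffull);
		/* reflect the guest's logical R/C view, not the host's */
		if (pgste & PGSTE_GR_BIT)
			key |= KEY_REFERENCED;
		if (pgste & PGSTE_GC_BIT)
			key |= KEY_CHANGED;
	}
	return key;
}

int main(void)
{
	printf("invalid pte key: 0x%x\n",
	       guest_storage_key(PAGE_INVALID, PGSTE_GR_BIT));
	printf("mapped pte key:  0x%x\n",
	       guest_storage_key(0, PGSTE_GC_BIT));
	return 0;
}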