author     Linus Torvalds    2015-09-10 16:42:49 -0700
committer  Linus Torvalds    2015-09-10 16:42:49 -0700
commit     519f526d391b0ef775aeb04c4b6f632ea6b3ee50 (patch)
tree       36985d7882734c136fc3c9a48e9d9abf9e97c1f1 /arch/powerpc/kvm
parent     06ab838c2024db468855118087db16d8fa905ddc (diff)
parent     ba60c41ae392b473a1897faa0b8739fcb8759d69 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull more kvm updates from Paolo Bonzini:
"ARM:
- Full debug support for arm64
- Active state switching for timer interrupts
- Lazy FP/SIMD save/restore for arm64
- Generic ARMv8 target
PPC:
- Book3S: A few bug fixes
- Book3S: Allow micro-threading on POWER8
x86:
- Compiler warnings
Generic:
- Adaptive polling for guest halt"
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (49 commits)
kvm: irqchip: fix memory leak
kvm: move new trace event outside #ifdef CONFIG_KVM_ASYNC_PF
KVM: trace kvm_halt_poll_ns grow/shrink
KVM: dynamic halt-polling
KVM: make halt_poll_ns per-vCPU
Silence compiler warning in arch/x86/kvm/emulate.c
kvm: compile process_smi_save_seg_64() only for x86_64
KVM: x86: avoid uninitialized variable warning
KVM: PPC: Book3S: Fix typo in top comment about locking
KVM: PPC: Book3S: Fix size of the PSPB register
KVM: PPC: Book3S HV: Exit on H_DOORBELL if HOST_IPI is set
KVM: PPC: Book3S HV: Fix race in starting secondary threads
KVM: PPC: Book3S: correct width in XER handling
KVM: PPC: Book3S HV: Fix preempted vcore stolen time calculation
KVM: PPC: Book3S HV: Fix preempted vcore list locking
KVM: PPC: Book3S HV: Implement H_CLEAR_REF and H_CLEAR_MOD
KVM: PPC: Book3S HV: Fix bug in dirty page tracking
KVM: PPC: Book3S HV: Fix race in reading change bit when removing HPTE
KVM: PPC: Book3S HV: Implement dynamic micro-threading on POWER8
KVM: PPC: Book3S HV: Make use of unused threads when running guests
...
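
Note: the "dynamic halt-polling" and "make halt_poll_ns per-vCPU" entries above replace the single global polling window with a per-vCPU value that grows when a wakeup arrives shortly after polling gives up, and shrinks when the vCPU ends up sleeping for a long time anyway. That code lives in generic KVM, not in the arch/powerpc/kvm diff shown below; the following is only a rough sketch of the grow/shrink idea, with made-up constants and helper names rather than the kernel's own:

	/*
	 * Rough sketch of a per-vCPU grow/shrink heuristic for halt polling.
	 * Constants and names are illustrative, not the kernel's implementation.
	 */
	#include <stdint.h>

	#define POLL_NS_START   10000      /* hypothetical initial window */
	#define POLL_NS_MAX     500000     /* hypothetical cap */
	#define POLL_GROW       2          /* hypothetical grow factor */
	#define POLL_SHRINK     2          /* hypothetical shrink divisor */

	struct vcpu_poll {
		uint64_t halt_poll_ns;     /* per-vCPU busy-poll window */
	};

	/*
	 * Called after the vCPU is woken.  poll_hit means a wakeup arrived
	 * while we were still busy-polling; blocked_ns is how long the vCPU
	 * actually slept after the poll window expired.
	 */
	static void adjust_halt_poll(struct vcpu_poll *v, int poll_hit,
				     uint64_t blocked_ns)
	{
		if (poll_hit)
			return;             /* current window is good enough */

		if (blocked_ns < POLL_NS_MAX) {
			/* wakeup came soon after we gave up: grow the window */
			v->halt_poll_ns = v->halt_poll_ns ?
					  v->halt_poll_ns * POLL_GROW :
					  POLL_NS_START;
			if (v->halt_poll_ns > POLL_NS_MAX)
				v->halt_poll_ns = POLL_NS_MAX;
		} else {
			/* the vCPU slept a long time: polling was wasted */
			v->halt_poll_ns /= POLL_SHRINK;
		}
	}

In the real series the grow/shrink factors are themselves kvm module parameters, and each adjustment is reported through the kvm_halt_poll_ns grow/shrink trace event listed above.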
Diffstat (limited to 'arch/powerpc/kvm')
 arch/powerpc/kvm/Kconfig                  |   8
 arch/powerpc/kvm/book3s.c                 |   3
 arch/powerpc/kvm/book3s_32_mmu_host.c     |   1
 arch/powerpc/kvm/book3s_64_mmu_host.c     |   1
 arch/powerpc/kvm/book3s_64_mmu_hv.c       |   8
 arch/powerpc/kvm/book3s_emulate.c         |   1
 arch/powerpc/kvm/book3s_hv.c              | 664
 arch/powerpc/kvm/book3s_hv_builtin.c      |  32
 arch/powerpc/kvm/book3s_hv_rm_mmu.c       | 161
 arch/powerpc/kvm/book3s_hv_rm_xics.c      |   4
 arch/powerpc/kvm/book3s_hv_rmhandlers.S   | 137
 arch/powerpc/kvm/book3s_paired_singles.c  |   2
 arch/powerpc/kvm/book3s_segment.S         |   4
 arch/powerpc/kvm/book3s_xics.c            |   2
 arch/powerpc/kvm/booke.c                  |   1
 arch/powerpc/kvm/e500_mmu.c               |   2
 arch/powerpc/kvm/powerpc.c                |   2
 17 files changed, 899 insertions(+), 134 deletions(-)
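
Note: most of the churn below is in book3s_hv.c, which implements dynamic micro-threading: on POWER8 a whole core can be split at runtime into 2 or 4 subcores so that preempted vcores, possibly from different guests, can be piggybacked onto the same physical core. Which split depths are permitted is controlled by the new dynamic_mt_modes module parameter, a bitmask in which the value 2 allows 2-way splits and the value 4 allows 4-way splits (the default of 6 allows both, 0 disables splitting). Below is a standalone restatement of just that gate from subcore_config_ok() in the book3s_hv.c hunk; the names are local to this sketch, and the check that the core is currently unsplit, plus the power-of-two rounding of the thread count, are omitted:

	/*
	 * Standalone restatement of the dynamic_mt_modes gate used by
	 * subcore_config_ok() in the book3s_hv.c hunk below.  Names are
	 * local to this sketch; POWER8 has 8 hardware threads per core.
	 */
	#define SKETCH_MAX_SMT_THREADS	8

	static int split_mode_allowed(int dyn_mt_modes, int n_subcores,
				      int threads_per_subcore)
	{
		if (n_subcores > 1) {
			/* 2-way split not allowed: request becomes a 4-way split */
			if (!(dyn_mt_modes & 2))
				n_subcores = 4;
			/* more than two subcores requires 4-way split mode */
			if (n_subcores > 2 && !(dyn_mt_modes & 4))
				return 0;
		}
		/* every subcore must still fit in the core's hardware threads */
		return n_subcores * threads_per_subcore <= SKETCH_MAX_SMT_THREADS;
	}

With the default dynamic_mt_modes=6 both split depths are available; setting it to 0 turns dynamic splitting off.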
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index 3caec2c42105..c2024ac9d4e8 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig @@ -74,14 +74,14 @@ config KVM_BOOK3S_64 If unsure, say N. config KVM_BOOK3S_64_HV - tristate "KVM support for POWER7 and PPC970 using hypervisor mode in host" + tristate "KVM for POWER7 and later using hypervisor mode in host" depends on KVM_BOOK3S_64 && PPC_POWERNV select KVM_BOOK3S_HV_POSSIBLE select MMU_NOTIFIER select CMA ---help--- Support running unmodified book3s_64 guest kernels in - virtual machines on POWER7 and PPC970 processors that have + virtual machines on POWER7 and newer processors that have hypervisor mode available to the host. If you say Y here, KVM will use the hardware virtualization @@ -89,8 +89,8 @@ config KVM_BOOK3S_64_HV guest operating systems will run at full hardware speed using supervisor and user modes. However, this also means that KVM is not usable under PowerVM (pHyp), is only usable - on POWER7 (or later) processors and PPC970-family processors, - and cannot emulate a different processor from the host processor. + on POWER7 or later processors, and cannot emulate a + different processor from the host processor. If unsure, say N. diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 6d6398f4d632..d75bf325f54a 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -240,7 +240,8 @@ void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags) kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE); } -int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority) +static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, + unsigned int priority) { int deliver = 1; int vec = 0; diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c index 2035d16a9262..d5c9bfeb0c9c 100644 --- a/arch/powerpc/kvm/book3s_32_mmu_host.c +++ b/arch/powerpc/kvm/book3s_32_mmu_host.c @@ -26,6 +26,7 @@ #include <asm/machdep.h> #include <asm/mmu_context.h> #include <asm/hw_irq.h> +#include "book3s.h" /* #define DEBUG_MMU */ /* #define DEBUG_SR */ diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c index b982d925c710..79ad35abd196 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c @@ -28,6 +28,7 @@ #include <asm/mmu_context.h> #include <asm/hw_irq.h> #include "trace_pr.h" +#include "book3s.h" #define PTE_SIZE 12 diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index dab68b7af3f2..1f9c0a17f445 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -761,6 +761,8 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, /* Harvest R and C */ rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C); *rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT; + if (rcbits & HPTE_R_C) + kvmppc_update_rmap_change(rmapp, psize); if (rcbits & ~rev[i].guest_rpte) { rev[i].guest_rpte = ptel | rcbits; note_hpte_modification(kvm, &rev[i]); @@ -927,8 +929,12 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp) retry: lock_rmap(rmapp); if (*rmapp & KVMPPC_RMAP_CHANGED) { - *rmapp &= ~KVMPPC_RMAP_CHANGED; + long change_order = (*rmapp & KVMPPC_RMAP_CHG_ORDER) + >> KVMPPC_RMAP_CHG_SHIFT; + *rmapp &= ~(KVMPPC_RMAP_CHANGED | KVMPPC_RMAP_CHG_ORDER); npages_dirty = 1; + if (change_order > PAGE_SHIFT) + npages_dirty = 1ul << (change_order - PAGE_SHIFT); } 
if (!(*rmapp & KVMPPC_RMAP_PRESENT)) { unlock_rmap(rmapp); diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index 5a2bc4b0dfe5..2afdb9c0937d 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c @@ -23,6 +23,7 @@ #include <asm/reg.h> #include <asm/switch_to.h> #include <asm/time.h> +#include "book3s.h" #define OP_19_XOP_RFID 18 #define OP_19_XOP_RFI 50 diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index a9f753fb73a8..9754e6815e52 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -81,6 +81,12 @@ static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1); #define MPP_BUFFER_ORDER 3 #endif +static int dynamic_mt_modes = 6; +module_param(dynamic_mt_modes, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)"); +static int target_smt_mode; +module_param(target_smt_mode, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(target_smt_mode, "Target threads per core (0 = max)"); static void kvmppc_end_cede(struct kvm_vcpu *vcpu); static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu); @@ -114,7 +120,7 @@ static bool kvmppc_ipi_thread(int cpu) static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu) { - int cpu = vcpu->cpu; + int cpu; wait_queue_head_t *wqp; wqp = kvm_arch_vcpu_wq(vcpu); @@ -123,10 +129,11 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu) ++vcpu->stat.halt_wakeup; } - if (kvmppc_ipi_thread(cpu + vcpu->arch.ptid)) + if (kvmppc_ipi_thread(vcpu->arch.thread_cpu)) return; /* CPU points to the first thread of the core */ + cpu = vcpu->cpu; if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu)) smp_send_reschedule(cpu); } @@ -164,6 +171,27 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu) * they should never fail.) */ +static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc) +{ + unsigned long flags; + + spin_lock_irqsave(&vc->stoltb_lock, flags); + vc->preempt_tb = mftb(); + spin_unlock_irqrestore(&vc->stoltb_lock, flags); +} + +static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc) +{ + unsigned long flags; + + spin_lock_irqsave(&vc->stoltb_lock, flags); + if (vc->preempt_tb != TB_NIL) { + vc->stolen_tb += mftb() - vc->preempt_tb; + vc->preempt_tb = TB_NIL; + } + spin_unlock_irqrestore(&vc->stoltb_lock, flags); +} + static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) { struct kvmppc_vcore *vc = vcpu->arch.vcore; @@ -175,14 +203,9 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) * vcpu, and once it is set to this vcpu, only this task * ever sets it to NULL. 
*/ - if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) { - spin_lock_irqsave(&vc->stoltb_lock, flags); - if (vc->preempt_tb != TB_NIL) { - vc->stolen_tb += mftb() - vc->preempt_tb; - vc->preempt_tb = TB_NIL; - } - spin_unlock_irqrestore(&vc->stoltb_lock, flags); - } + if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING) + kvmppc_core_end_stolen(vc); + spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST && vcpu->arch.busy_preempt != TB_NIL) { @@ -197,11 +220,9 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu) struct kvmppc_vcore *vc = vcpu->arch.vcore; unsigned long flags; - if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) { - spin_lock_irqsave(&vc->stoltb_lock, flags); - vc->preempt_tb = mftb(); - spin_unlock_irqrestore(&vc->stoltb_lock, flags); - } + if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING) + kvmppc_core_start_stolen(vc); + spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) vcpu->arch.busy_preempt = mftb(); @@ -214,12 +235,12 @@ static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr) kvmppc_end_cede(vcpu); } -void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr) +static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr) { vcpu->arch.pvr = pvr; } -int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat) +static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat) { unsigned long pcr = 0; struct kvmppc_vcore *vc = vcpu->arch.vcore; @@ -259,7 +280,7 @@ int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat) return 0; } -void kvmppc_dump_regs(struct kvm_vcpu *vcpu) +static void kvmppc_dump_regs(struct kvm_vcpu *vcpu) { int r; @@ -292,7 +313,7 @@ void kvmppc_dump_regs(struct kvm_vcpu *vcpu) vcpu->arch.last_inst); } -struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id) +static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id) { int r; struct kvm_vcpu *v, *ret = NULL; @@ -641,7 +662,8 @@ static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target) spin_lock(&vcore->lock); if (target->arch.state == KVMPPC_VCPU_RUNNABLE && - vcore->vcore_state != VCORE_INACTIVE) + vcore->vcore_state != VCORE_INACTIVE && + vcore->runner) target = vcore->runner; spin_unlock(&vcore->lock); @@ -1431,6 +1453,7 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core) vcore->lpcr = kvm->arch.lpcr; vcore->first_vcpuid = core * threads_per_subcore; vcore->kvm = kvm; + INIT_LIST_HEAD(&vcore->preempt_list); vcore->mpp_buffer_is_valid = false; @@ -1655,6 +1678,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm, spin_unlock(&vcore->lock); vcpu->arch.vcore = vcore; vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid; + vcpu->arch.thread_cpu = -1; vcpu->arch.cpu_type = KVM_CPU_3S_64; kvmppc_sanity_check(vcpu); @@ -1749,6 +1773,7 @@ static int kvmppc_grab_hwthread(int cpu) /* Ensure the thread won't go into the kernel if it wakes */ tpaca->kvm_hstate.kvm_vcpu = NULL; + tpaca->kvm_hstate.kvm_vcore = NULL; tpaca->kvm_hstate.napping = 0; smp_wmb(); tpaca->kvm_hstate.hwthread_req = 1; @@ -1780,26 +1805,32 @@ static void kvmppc_release_hwthread(int cpu) tpaca = &paca[cpu]; tpaca->kvm_hstate.hwthread_req = 0; tpaca->kvm_hstate.kvm_vcpu = NULL; + tpaca->kvm_hstate.kvm_vcore = NULL; + tpaca->kvm_hstate.kvm_split_mode = NULL; } -static void kvmppc_start_thread(struct kvm_vcpu *vcpu) +static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc) { int cpu; struct 
paca_struct *tpaca; - struct kvmppc_vcore *vc = vcpu->arch.vcore; + struct kvmppc_vcore *mvc = vc->master_vcore; - if (vcpu->arch.timer_running) { - hrtimer_try_to_cancel(&vcpu->arch.dec_timer); - vcpu->arch.timer_running = 0; + cpu = vc->pcpu; + if (vcpu) { + if (vcpu->arch.timer_running) { + hrtimer_try_to_cancel(&vcpu->arch.dec_timer); + vcpu->arch.timer_running = 0; + } + cpu += vcpu->arch.ptid; + vcpu->cpu = mvc->pcpu; + vcpu->arch.thread_cpu = cpu; } - cpu = vc->pcpu + vcpu->arch.ptid; tpaca = &paca[cpu]; - tpaca->kvm_hstate.kvm_vcore = vc; - tpaca->kvm_hstate.ptid = vcpu->arch.ptid; - vcpu->cpu = vc->pcpu; - /* Order stores to hstate.kvm_vcore etc. before store to kvm_vcpu */ - smp_wmb(); tpaca->kvm_hstate.kvm_vcpu = vcpu; + tpaca->kvm_hstate.ptid = cpu - mvc->pcpu; + /* Order stores to hstate.kvm_vcpu etc. before store to kvm_vcore */ + smp_wmb(); + tpaca->kvm_hstate.kvm_vcore = mvc; if (cpu != smp_processor_id()) kvmppc_ipi_thread(cpu); } @@ -1812,12 +1843,12 @@ static void kvmppc_wait_for_nap(void) for (loops = 0; loops < 1000000; ++loops) { /* * Check if all threads are finished. - * We set the vcpu pointer when starting a thread + * We set the vcore pointer when starting a thread * and the thread clears it when finished, so we look - * for any threads that still have a non-NULL vcpu ptr. + * for any threads that still have a non-NULL vcore ptr. */ for (i = 1; i < threads_per_subcore; ++i) - if (paca[cpu + i].kvm_hstate.kvm_vcpu) + if (paca[cpu + i].kvm_hstate.kvm_vcore) break; if (i == threads_per_subcore) { HMT_medium(); @@ -1827,7 +1858,7 @@ static void kvmppc_wait_for_nap(void) } HMT_medium(); for (i = 1; i < threads_per_subcore; ++i) - if (paca[cpu + i].kvm_hstate.kvm_vcpu) + if (paca[cpu + i].kvm_hstate.kvm_vcore) pr_err("KVM: CPU %d seems to be stuck\n", cpu + i); } @@ -1890,6 +1921,278 @@ static void kvmppc_start_restoring_l2_cache(const struct kvmppc_vcore *vc) mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_WHOLE_TABLE); } +/* + * A list of virtual cores for each physical CPU. + * These are vcores that could run but their runner VCPU tasks are + * (or may be) preempted. + */ +struct preempted_vcore_list { + struct list_head list; + spinlock_t lock; +}; + +static DEFINE_PER_CPU(struct preempted_vcore_list, preempted_vcores); + +static void init_vcore_lists(void) +{ + int cpu; + + for_each_possible_cpu(cpu) { + struct preempted_vcore_list *lp = &per_cpu(preempted_vcores, cpu); + spin_lock_init(&lp->lock); + INIT_LIST_HEAD(&lp->list); + } +} + +static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc) +{ + struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores); + + vc->vcore_state = VCORE_PREEMPT; + vc->pcpu = smp_processor_id(); + if (vc->num_threads < threads_per_subcore) { + spin_lock(&lp->lock); + list_add_tail(&vc->preempt_list, &lp->list); + spin_unlock(&lp->lock); + } + + /* Start accumulating stolen time */ + kvmppc_core_start_stolen(vc); +} + +static void kvmppc_vcore_end_preempt(struct kvmppc_vcore *vc) +{ + struct preempted_vcore_list *lp; + + kvmppc_core_end_stolen(vc); + if (!list_empty(&vc->preempt_list)) { + lp = &per_cpu(preempted_vcores, vc->pcpu); + spin_lock(&lp->lock); + list_del_init(&vc->preempt_list); + spin_unlock(&lp->lock); + } + vc->vcore_state = VCORE_INACTIVE; +} + +/* + * This stores information about the virtual cores currently + * assigned to a physical core. 
+ */ +struct core_info { + int n_subcores; + int max_subcore_threads; + int total_threads; + int subcore_threads[MAX_SUBCORES]; + struct kvm *subcore_vm[MAX_SUBCORES]; + struct list_head vcs[MAX_SUBCORES]; +}; + +/* + * This mapping means subcores 0 and 1 can use threads 0-3 and 4-7 + * respectively in 2-way micro-threading (split-core) mode. + */ +static int subcore_thread_map[MAX_SUBCORES] = { 0, 4, 2, 6 }; + +static void init_core_info(struct core_info *cip, struct kvmppc_vcore *vc) +{ + int sub; + + memset(cip, 0, sizeof(*cip)); + cip->n_subcores = 1; + cip->max_subcore_threads = vc->num_threads; + cip->total_threads = vc->num_threads; + cip->subcore_threads[0] = vc->num_threads; + cip->subcore_vm[0] = vc->kvm; + for (sub = 0; sub < MAX_SUBCORES; ++sub) + INIT_LIST_HEAD(&cip->vcs[sub]); + list_add_tail(&vc->preempt_list, &cip->vcs[0]); +} + +static bool subcore_config_ok(int n_subcores, int n_threads) +{ + /* Can only dynamically split if unsplit to begin with */ + if (n_subcores > 1 && threads_per_subcore < MAX_SMT_THREADS) + return false; + if (n_subcores > MAX_SUBCORES) + return false; + if (n_subcores > 1) { + if (!(dynamic_mt_modes & 2)) + n_subcores = 4; + if (n_subcores > 2 && !(dynamic_mt_modes & 4)) + return false; + } + + return n_subcores * roundup_pow_of_two(n_threads) <= MAX_SMT_THREADS; +} + +static void init_master_vcore(struct kvmppc_vcore *vc) +{ + vc->master_vcore = vc; + vc->entry_exit_map = 0; + vc->in_guest = 0; + vc->napping_threads = 0; + vc->conferring_threads = 0; +} + +/* + * See if the existing subcores can be split into 3 (or fewer) subcores + * of at most two threads each, so we can fit in another vcore. This + * assumes there are at most two subcores and at most 6 threads in total. + */ +static bool can_split_piggybacked_subcores(struct core_info *cip) +{ + int sub, new_sub; + int large_sub = -1; + int thr; + int n_subcores = cip->n_subcores; + struct kvmppc_vcore *vc, *vcnext; + struct kvmppc_vcore *master_vc = NULL; + + for (sub = 0; sub < cip->n_subcores; ++sub) { + if (cip->subcore_threads[sub] <= 2) + continue; + if (large_sub >= 0) + return false; + large_sub = sub; + vc = list_first_entry(&cip->vcs[sub], struct kvmppc_vcore, + preempt_list); + if (vc->num_threads > 2) + return false; + n_subcores += (cip->subcore_threads[sub] - 1) >> 1; + } + if (n_subcores > 3 || large_sub < 0) + return false; + + /* + * Seems feasible, so go through and move vcores to new subcores. + * Note that when we have two or more vcores in one subcore, + * all those vcores must have only one thread each. 
+ */ + new_sub = cip->n_subcores; + thr = 0; + sub = large_sub; + list_for_each_entry_safe(vc, vcnext, &cip->vcs[sub], preempt_list) { + if (thr >= 2) { + list_del(&vc->preempt_list); + list_add_tail(&vc->preempt_list, &cip->vcs[new_sub]); + /* vc->num_threads must be 1 */ + if (++cip->subcore_threads[new_sub] == 1) { + cip->subcore_vm[new_sub] = vc->kvm; + init_master_vcore(vc); + master_vc = vc; + ++cip->n_subcores; + } else { + vc->master_vcore = master_vc; + ++new_sub; + } + } + thr += vc->num_threads; + } + cip->subcore_threads[large_sub] = 2; + cip->max_subcore_threads = 2; + + return true; +} + +static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip) +{ + int n_threads = vc->num_threads; + int sub; + + if (!cpu_has_feature(CPU_FTR_ARCH_207S)) + return false; + + if (n_threads < cip->max_subcore_threads) + n_threads = cip->max_subcore_threads; + if (subcore_config_ok(cip->n_subcores + 1, n_threads)) { + cip->max_subcore_threads = n_threads; + } else if (cip->n_subcores <= 2 && cip->total_threads <= 6 && + vc->num_threads <= 2) { + /* + * We may be able to fit another subcore in by + * splitting an existing subcore with 3 or 4 + * threads into two 2-thread subcores, or one + * with 5 or 6 threads into three subcores. + * We can only do this if those subcores have + * piggybacked virtual cores. + */ + if (!can_split_piggybacked_subcores(cip)) + return false; + } else { + return false; + } + + sub = cip->n_subcores; + ++cip->n_subcores; + cip->total_threads += vc->num_threads; + cip->subcore_threads[sub] = vc->num_threads; + cip->subcore_vm[sub] = vc->kvm; + init_master_vcore(vc); + list_del(&vc->preempt_list); + list_add_tail(&vc->preempt_list, &cip->vcs[sub]); + + return true; +} + +static bool can_piggyback_subcore(struct kvmppc_vcore *pvc, + struct core_info *cip, int sub) +{ + struct kvmppc_vcore *vc; + int n_thr; + + vc = list_first_entry(&cip->vcs[sub], struct kvmppc_vcore, + preempt_list); + + /* require same VM and same per-core reg values */ + if (pvc->kvm != vc->kvm || + pvc->tb_offset != vc->tb_offset || + pvc->pcr != vc->pcr || + pvc->lpcr != vc->lpcr) + return false; + + /* P8 guest with > 1 thread per core would see wrong TIR value */ + if (cpu_has_feature(CPU_FTR_ARCH_207S) && + (vc->num_threads > 1 || pvc->num_threads > 1)) + return false; + + n_thr = cip->subcore_threads[sub] + pvc->num_threads; + if (n_thr > cip->max_subcore_threads) { + if (!subcore_config_ok(cip->n_subcores, n_thr)) + return false; + cip->max_subcore_threads = n_thr; + } + + cip->total_threads += pvc->num_threads; + cip->subcore_threads[sub] = n_thr; + pvc->master_vcore = vc; + list_del(&pvc->preempt_list); + list_add_tail(&pvc->preempt_list, &cip->vcs[sub]); + + return true; +} + +/* + * Work out whether it is possible to piggyback the execution of + * vcore *pvc onto the execution of the other vcores described in *cip. 
+ */ +static bool can_piggyback(struct kvmppc_vcore *pvc, struct core_info *cip, + int target_threads) +{ + int sub; + + if (cip->total_threads + pvc->num_threads > target_threads) + return false; + for (sub = 0; sub < cip->n_subcores; ++sub) + if (cip->subcore_threads[sub] && + can_piggyback_subcore(pvc, cip, sub)) + return true; + + if (can_dynamic_split(pvc, cip)) + return true; + + return false; +} + static void prepare_threads(struct kvmppc_vcore *vc) { struct kvm_vcpu *vcpu, *vnext; @@ -1909,12 +2212,45 @@ static void prepare_threads(struct kvmppc_vcore *vc) } } -static void post_guest_process(struct kvmppc_vcore *vc) +static void collect_piggybacks(struct core_info *cip, int target_threads) +{ + struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores); + struct kvmppc_vcore *pvc, *vcnext; + + spin_lock(&lp->lock); + list_for_each_entry_safe(pvc, vcnext, &lp->list, preempt_list) { + if (!spin_trylock(&pvc->lock)) + continue; + prepare_threads(pvc); + if (!pvc->n_runnable) { + list_del_init(&pvc->preempt_list); + if (pvc->runner == NULL) { + pvc->vcore_state = VCORE_INACTIVE; + kvmppc_core_end_stolen(pvc); + } + spin_unlock(&pvc->lock); + continue; + } + if (!can_piggyback(pvc, cip, target_threads)) { + spin_unlock(&pvc->lock); + continue; + } + kvmppc_core_end_stolen(pvc); + pvc->vcore_state = VCORE_PIGGYBACK; + if (cip->total_threads >= target_threads) + break; + } + spin_unlock(&lp->lock); +} + +static void post_guest_process(struct kvmppc_vcore *vc, bool is_master) { + int still_running = 0; u64 now; long ret; struct kvm_vcpu *vcpu, *vnext; + spin_lock(&vc->lock); now = get_tb(); list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads, arch.run_list) { @@ -1933,17 +2269,36 @@ static void post_guest_process(struct kvmppc_vcore *vc) vcpu->arch.ret = ret; vcpu->arch.trap = 0; - if (vcpu->arch.ceded) { - if (!is_kvmppc_resume_guest(ret)) - kvmppc_end_cede(vcpu); - else + if (is_kvmppc_resume_guest(vcpu->arch.ret)) { + if (vcpu->arch.pending_exceptions) + kvmppc_core_prepare_to_enter(vcpu); + if (vcpu->arch.ceded) kvmppc_set_timer(vcpu); - } - if (!is_kvmppc_resume_guest(vcpu->arch.ret)) { + else + ++still_running; + } else { kvmppc_remove_runnable(vc, vcpu); wake_up(&vcpu->arch.cpu_run); } } + list_del_init(&vc->preempt_list); + if (!is_master) { + if (still_running > 0) { + kvmppc_vcore_preempt(vc); + } else if (vc->runner) { + vc->vcore_state = VCORE_PREEMPT; + kvmppc_core_start_stolen(vc); + } else { + vc->vcore_state = VCORE_INACTIVE; + } + if (vc->n_runnable > 0 && vc->runner == NULL) { + /* make sure there's a candidate runner awake */ + vcpu = list_first_entry(&vc->runnable_threads, + struct kvm_vcpu, arch.run_list); + wake_up(&vcpu->arch.cpu_run); + } + } + spin_unlock(&vc->lock); } /* @@ -1955,6 +2310,15 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) struct kvm_vcpu *vcpu, *vnext; int i; int srcu_idx; + struct core_info core_info; + struct kvmppc_vcore *pvc, *vcnext; + struct kvm_split_mode split_info, *sip; + int split, subcore_size, active; + int sub; + bool thr0_done; + unsigned long cmd_bit, stat_bit; + int pcpu, thr; + int target_threads; /* * Remove from the list any threads that have a signal pending @@ -1969,11 +2333,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) /* * Initialize *vc. 
*/ - vc->entry_exit_map = 0; + init_master_vcore(vc); vc->preempt_tb = TB_NIL; - vc->in_guest = 0; - vc->napping_threads = 0; - vc->conferring_threads = 0; /* * Make sure we are running on primary threads, and that secondary @@ -1991,24 +2352,120 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) goto out; } + /* + * See if we could run any other vcores on the physical core + * along with this one. + */ + init_core_info(&core_info, vc); + pcpu = smp_processor_id(); + target_threads = threads_per_subcore; + if (target_smt_mode && target_smt_mode < target_threads) + target_threads = target_smt_mode; + if (vc->num_threads < target_threads) + collect_piggybacks(&core_info, target_threads); + + /* Decide on micro-threading (split-core) mode */ + subcore_size = threads_per_subcore; + cmd_bit = stat_bit = 0; + split = core_info.n_subcores; + sip = NULL; + if (split > 1) { + /* threads_per_subcore must be MAX_SMT_THREADS (8) here */ + if (split == 2 && (dynamic_mt_modes & 2)) { + cmd_bit = HID0_POWER8_1TO2LPAR; + stat_bit = HID0_POWER8_2LPARMODE; + } else { + split = 4; + cmd_bit = HID0_POWER8_1TO4LPAR; + stat_bit = HID0_POWER8_4LPARMODE; + } + subcore_size = MAX_SMT_THREADS / split; + sip = &split_info; + memset(&split_info, 0, sizeof(split_info)); + split_info.rpr = mfspr(SPRN_RPR); + split_info.pmmar = mfspr(SPRN_PMMAR); + split_info.ldbar = mfspr(SPRN_LDBAR); + split_info.subcore_size = subcore_size; + for (sub = 0; sub < core_info.n_subcores; ++sub) + split_info.master_vcs[sub] = + list_first_entry(&core_info.vcs[sub], + struct kvmppc_vcore, preempt_list); + /* order writes to split_info before kvm_split_mode pointer */ + smp_wmb(); + } + pcpu = smp_processor_id(); + for (thr = 0; thr < threads_per_subcore; ++thr) + paca[pcpu + thr].kvm_hstate.kvm_split_mode = sip; + + /* Initiate micro-threading (split-core) if required */ + if (cmd_bit) { + unsigned long hid0 = mfspr(SPRN_HID0); + + hid0 |= cmd_bit | HID0_POWER8_DYNLPARDIS; + mb(); + mtspr(SPRN_HID0, hid0); + isync(); + for (;;) { + hid0 = mfspr(SPRN_HID0); + if (hid0 & stat_bit) + break; + cpu_relax(); + } + } - vc->pcpu = smp_processor_id(); - list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { - kvmppc_start_thread(vcpu); - kvmppc_create_dtl_entry(vcpu, vc); - trace_kvm_guest_enter(vcpu); + /* Start all the threads */ + active = 0; + for (sub = 0; sub < core_info.n_subcores; ++sub) { + thr = subcore_thread_map[sub]; + thr0_done = false; + active |= 1 << thr; + list_for_each_entry(pvc, &core_info.vcs[sub], preempt_list) { + pvc->pcpu = pcpu + thr; + list_for_each_entry(vcpu, &pvc->runnable_threads, + arch.run_list) { + kvmppc_start_thread(vcpu, pvc); + kvmppc_create_dtl_entry(vcpu, pvc); + trace_kvm_guest_enter(vcpu); + if (!vcpu->arch.ptid) + thr0_done = true; + active |= 1 << (thr + vcpu->arch.ptid); + } + /* + * We need to start the first thread of each subcore + * even if it doesn't have a vcpu. + */ + if (pvc->master_vcore == pvc && !thr0_done) + kvmppc_start_thread(NULL, pvc); + thr += pvc->num_threads; + } } - /* Set this explicitly in case thread 0 doesn't have a vcpu */ - get_paca()->kvm_hstate.kvm_vcore = vc; - get_paca()->kvm_hstate.ptid = 0; + /* + * Ensure that split_info.do_nap is set after setting + * the vcore pointer in the PACA of the secondaries. + */ + smp_mb(); + if (cmd_bit) + split_info.do_nap = 1; /* ask secondaries to nap when done */ + + /* + * When doing micro-threading, poke the inactive threads as well. 
+ * This gets them to the nap instruction after kvm_do_nap, + * which reduces the time taken to unsplit later. + */ + if (split > 1) + for (thr = 1; thr < threads_per_subcore; ++thr) + if (!(active & (1 << thr))) + kvmppc_ipi_thread(pcpu + thr); vc->vcore_state = VCORE_RUNNING; preempt_disable(); trace_kvmppc_run_core(vc, 0); - spin_unlock(&vc->lock); + for (sub = 0; sub < core_info.n_subcores; ++sub) + list_for_each_entry(pvc, &core_info.vcs[sub], preempt_list) + spin_unlock(&pvc->lock); kvm_guest_enter(); @@ -2019,32 +2476,58 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) __kvmppc_vcore_entry(); - spin_lock(&vc->lock); - if (vc->mpp_buffer) kvmppc_start_saving_l2_cache(vc); - /* disable sending of IPIs on virtual external irqs */ - list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) - vcpu->cpu = -1; - /* wait for secondary threads to finish writing their state to memory */ - kvmppc_wait_for_nap(); - for (i = 0; i < threads_per_subcore; ++i) - kvmppc_release_hwthread(vc->pcpu + i); + srcu_read_unlock(&vc->kvm->srcu, srcu_idx); + + spin_lock(&vc->lock); /* prevent other vcpu threads from doing kvmppc_start_thread() now */ vc->vcore_state = VCORE_EXITING; - spin_unlock(&vc->lock); - srcu_read_unlock(&vc->kvm->srcu, srcu_idx); + /* wait for secondary threads to finish writing their state to memory */ + kvmppc_wait_for_nap(); + + /* Return to whole-core mode if we split the core earlier */ + if (split > 1) { + unsigned long hid0 = mfspr(SPRN_HID0); + unsigned long loops = 0; + + hid0 &= ~HID0_POWER8_DYNLPARDIS; + stat_bit = HID0_POWER8_2LPARMODE | HID0_POWER8_4LPARMODE; + mb(); + mtspr(SPRN_HID0, hid0); + isync(); + for (;;) { + hid0 = mfspr(SPRN_HID0); + if (!(hid0 & stat_bit)) + break; + cpu_relax(); + ++loops; + } + split_info.do_nap = 0; + } + + /* Let secondaries go back to the offline loop */ + for (i = 0; i < threads_per_subcore; ++i) { + kvmppc_release_hwthread(pcpu + i); + if (sip && sip->napped[i]) + kvmppc_ipi_thread(pcpu + i); + } + + spin_unlock(&vc->lock); /* make sure updates to secondary vcpu structs are visible now */ smp_mb(); kvm_guest_exit(); - preempt_enable(); + for (sub = 0; sub < core_info.n_subcores; ++sub) + list_for_each_entry_safe(pvc, vcnext, &core_info.vcs[sub], + preempt_list) + post_guest_process(pvc, pvc == vc); spin_lock(&vc->lock); - post_guest_process(vc); + preempt_enable(); out: vc->vcore_state = VCORE_INACTIVE; @@ -2055,13 +2538,17 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) * Wait for some other vcpu thread to execute us, and * wake us up when we need to handle something in the host. */ -static void kvmppc_wait_for_exec(struct kvm_vcpu *vcpu, int wait_state) +static void kvmppc_wait_for_exec(struct kvmppc_vcore *vc, + struct kvm_vcpu *vcpu, int wait_state) { DEFINE_WAIT(wait); prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state); - if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) + if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { + spin_unlock(&vc->lock); schedule(); + spin_lock(&vc->lock); + } finish_wait(&vcpu->arch.cpu_run, &wait); } @@ -2137,9 +2624,21 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) * this thread straight away and have it join in. 
*/ if (!signal_pending(current)) { - if (vc->vcore_state == VCORE_RUNNING && !VCORE_IS_EXITING(vc)) { + if (vc->vcore_state == VCORE_PIGGYBACK) { + struct kvmppc_vcore *mvc = vc->master_vcore; + if (spin_trylock(&mvc->lock)) { + if (mvc->vcore_state == VCORE_RUNNING && + !VCORE_IS_EXITING(mvc)) { + kvmppc_create_dtl_entry(vcpu, vc); + kvmppc_start_thread(vcpu, vc); + trace_kvm_guest_enter(vcpu); + } + spin_unlock(&mvc->lock); + } + } else if (vc->vcore_state == VCORE_RUNNING && + !VCORE_IS_EXITING(vc)) { kvmppc_create_dtl_entry(vcpu, vc); - kvmppc_start_thread(vcpu); + kvmppc_start_thread(vcpu, vc); trace_kvm_guest_enter(vcpu); } else if (vc->vcore_state == VCORE_SLEEPING) { wake_up(&vc->wq); @@ -2149,10 +2648,11 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && !signal_pending(current)) { + if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL) + kvmppc_vcore_end_preempt(vc); + if (vc->vcore_state != VCORE_INACTIVE) { - spin_unlock(&vc->lock); - kvmppc_wait_for_exec(vcpu, TASK_INTERRUPTIBLE); - spin_lock(&vc->lock); + kvmppc_wait_for_exec(vc, vcpu, TASK_INTERRUPTIBLE); continue; } list_for_each_entry_safe(v, vn, &vc->runnable_threads, @@ -2179,10 +2679,11 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) if (n_ceded == vc->n_runnable) { kvmppc_vcore_blocked(vc); } else if (need_resched()) { - vc->vcore_state = VCORE_PREEMPT; + kvmppc_vcore_preempt(vc); /* Let something else run */ cond_resched_lock(&vc->lock); - vc->vcore_state = VCORE_INACTIVE; + if (vc->vcore_state == VCORE_PREEMPT) + kvmppc_vcore_end_preempt(vc); } else { kvmppc_run_core(vc); } @@ -2191,11 +2692,8 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && (vc->vcore_state == VCORE_RUNNING || - vc->vcore_state == VCORE_EXITING)) { - spin_unlock(&vc->lock); - kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE); - spin_lock(&vc->lock); - } + vc->vcore_state == VCORE_EXITING)) + kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE); if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { kvmppc_remove_runnable(vc, vcpu); @@ -2755,6 +3253,8 @@ static int kvmppc_book3s_init_hv(void) init_default_hcalls(); + init_vcore_lists(); + r = kvmppc_mmu_hv_init(); return r; } diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index ed2589d4593f..fd7006bf6b1a 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -110,14 +110,15 @@ void __init kvm_cma_reserve(void) long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target, unsigned int yield_count) { - struct kvmppc_vcore *vc = vcpu->arch.vcore; + struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore; + int ptid = local_paca->kvm_hstate.ptid; int threads_running; int threads_ceded; int threads_conferring; u64 stop = get_tb() + 10 * tb_ticks_per_usec; int rv = H_SUCCESS; /* => don't yield */ - set_bit(vcpu->arch.ptid, &vc->conferring_threads); + set_bit(ptid, &vc->conferring_threads); while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) { threads_running = VCORE_ENTRY_MAP(vc); threads_ceded = vc->napping_threads; @@ -127,7 +128,7 @@ long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target, break; } } - clear_bit(vcpu->arch.ptid, &vc->conferring_threads); + clear_bit(ptid, &vc->conferring_threads); return rv; } @@ -238,7 +239,8 @@ void kvmhv_commence_exit(int trap) { struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore; int 
ptid = local_paca->kvm_hstate.ptid; - int me, ee; + struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode; + int me, ee, i; /* Set our bit in the threads-exiting-guest map in the 0xff00 bits of vcore->entry_exit_map */ @@ -258,4 +260,26 @@ void kvmhv_commence_exit(int trap) */ if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER) kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid)); + + /* + * If we are doing dynamic micro-threading, interrupt the other + * subcores to pull them out of their guests too. + */ + if (!sip) + return; + + for (i = 0; i < MAX_SUBCORES; ++i) { + vc = sip->master_vcs[i]; + if (!vc) + break; + do { + ee = vc->entry_exit_map; + /* Already asked to exit? */ + if ((ee >> 8) != 0) + break; + } while (cmpxchg(&vc->entry_exit_map, ee, + ee | VCORE_EXIT_REQ) != ee); + if ((ee >> 8) == 0) + kvmhv_interrupt_vcore(vc, ee); + } } diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index b027a89737b6..c1df9bb1e413 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -12,6 +12,7 @@ #include <linux/kvm_host.h> #include <linux/hugetlb.h> #include <linux/module.h> +#include <linux/log2.h> #include <asm/tlbflush.h> #include <asm/kvm_ppc.h> @@ -97,25 +98,52 @@ void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev, } EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain); +/* Update the changed page order field of an rmap entry */ +void kvmppc_update_rmap_change(unsigned long *rmap, unsigned long psize) +{ + unsigned long order; + + if (!psize) + return; + order = ilog2(psize); + order <<= KVMPPC_RMAP_CHG_SHIFT; + if (order > (*rmap & KVMPPC_RMAP_CHG_ORDER)) + *rmap = (*rmap & ~KVMPPC_RMAP_CHG_ORDER) | order; +} +EXPORT_SYMBOL_GPL(kvmppc_update_rmap_change); + +/* Returns a pointer to the revmap entry for the page mapped by a HPTE */ +static unsigned long *revmap_for_hpte(struct kvm *kvm, unsigned long hpte_v, + unsigned long hpte_gr) +{ + struct kvm_memory_slot *memslot; + unsigned long *rmap; + unsigned long gfn; + + gfn = hpte_rpn(hpte_gr, hpte_page_size(hpte_v, hpte_gr)); + memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); + if (!memslot) + return NULL; + + rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]); + return rmap; +} + /* Remove this HPTE from the chain for a real page */ static void remove_revmap_chain(struct kvm *kvm, long pte_index, struct revmap_entry *rev, unsigned long hpte_v, unsigned long hpte_r) { struct revmap_entry *next, *prev; - unsigned long gfn, ptel, head; - struct kvm_memory_slot *memslot; + unsigned long ptel, head; unsigned long *rmap; unsigned long rcbits; rcbits = hpte_r & (HPTE_R_R | HPTE_R_C); ptel = rev->guest_rpte |= rcbits; - gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel)); - memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); - if (!memslot) + rmap = revmap_for_hpte(kvm, hpte_v, ptel); + if (!rmap) return; - - rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]); lock_rmap(rmap); head = *rmap & KVMPPC_RMAP_INDEX; @@ -131,6 +159,8 @@ static void remove_revmap_chain(struct kvm *kvm, long pte_index, *rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head; } *rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT; + if (rcbits & HPTE_R_C) + kvmppc_update_rmap_change(rmap, hpte_page_size(hpte_v, hpte_r)); unlock_rmap(rmap); } @@ -421,14 +451,20 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags, rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]); v = pte & ~HPTE_V_HVLOCK; if (v & HPTE_V_VALID) { - u64 pte1; - - pte1 = 
be64_to_cpu(hpte[1]); hpte[0] &= ~cpu_to_be64(HPTE_V_VALID); - rb = compute_tlbie_rb(v, pte1, pte_index); + rb = compute_tlbie_rb(v, be64_to_cpu(hpte[1]), pte_index); do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true); - /* Read PTE low word after tlbie to get final R/C values */ - remove_revmap_chain(kvm, pte_index, rev, v, pte1); + /* + * The reference (R) and change (C) bits in a HPT + * entry can be set by hardware at any time up until + * the HPTE is invalidated and the TLB invalidation + * sequence has completed. This means that when + * removing a HPTE, we need to re-read the HPTE after + * the invalidation sequence has completed in order to + * obtain reliable values of R and C. + */ + remove_revmap_chain(kvm, pte_index, rev, v, + be64_to_cpu(hpte[1])); } r = rev->guest_rpte & ~HPTE_GR_RESERVED; note_hpte_modification(kvm, rev); @@ -655,6 +691,105 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags, return H_SUCCESS; } +long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags, + unsigned long pte_index) +{ + struct kvm *kvm = vcpu->kvm; + __be64 *hpte; + unsigned long v, r, gr; + struct revmap_entry *rev; + unsigned long *rmap; + long ret = H_NOT_FOUND; + + if (pte_index >= kvm->arch.hpt_npte) + return H_PARAMETER; + + rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]); + hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4)); + while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) + cpu_relax(); + v = be64_to_cpu(hpte[0]); + r = be64_to_cpu(hpte[1]); + if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT))) + goto out; + + gr = rev->guest_rpte; + if (rev->guest_rpte & HPTE_R_R) { + rev->guest_rpte &= ~HPTE_R_R; + note_hpte_modification(kvm, rev); + } + if (v & HPTE_V_VALID) { + gr |= r & (HPTE_R_R | HPTE_R_C); + if (r & HPTE_R_R) { + kvmppc_clear_ref_hpte(kvm, hpte, pte_index); + rmap = revmap_for_hpte(kvm, v, gr); + if (rmap) { + lock_rmap(rmap); + *rmap |= KVMPPC_RMAP_REFERENCED; + unlock_rmap(rmap); + } + } + } + vcpu->arch.gpr[4] = gr; + ret = H_SUCCESS; + out: + unlock_hpte(hpte, v & ~HPTE_V_HVLOCK); + return ret; +} + +long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags, + unsigned long pte_index) +{ + struct kvm *kvm = vcpu->kvm; + __be64 *hpte; + unsigned long v, r, gr; + struct revmap_entry *rev; + unsigned long *rmap; + long ret = H_NOT_FOUND; + + if (pte_index >= kvm->arch.hpt_npte) + return H_PARAMETER; + + rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]); + hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4)); + while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) + cpu_relax(); + v = be64_to_cpu(hpte[0]); + r = be64_to_cpu(hpte[1]); + if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT))) + goto out; + + gr = rev->guest_rpte; + if (gr & HPTE_R_C) { + rev->guest_rpte &= ~HPTE_R_C; + note_hpte_modification(kvm, rev); + } + if (v & HPTE_V_VALID) { + /* need to make it temporarily absent so C is stable */ + hpte[0] |= cpu_to_be64(HPTE_V_ABSENT); + kvmppc_invalidate_hpte(kvm, hpte, pte_index); + r = be64_to_cpu(hpte[1]); + gr |= r & (HPTE_R_R | HPTE_R_C); + if (r & HPTE_R_C) { + unsigned long psize = hpte_page_size(v, r); + hpte[1] = cpu_to_be64(r & ~HPTE_R_C); + eieio(); + rmap = revmap_for_hpte(kvm, v, gr); + if (rmap) { + lock_rmap(rmap); + *rmap |= KVMPPC_RMAP_CHANGED; + kvmppc_update_rmap_change(rmap, psize); + unlock_rmap(rmap); + } + } + } + vcpu->arch.gpr[4] = gr; + ret = H_SUCCESS; + out: + unlock_hpte(hpte, v & ~HPTE_V_HVLOCK); + return ret; +} + void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep, unsigned long pte_index) { diff --git 
a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c index 00e45b6d4f24..24f58076d49e 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_xics.c +++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c @@ -67,14 +67,12 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu, } /* Check if the core is loaded, if not, too hard */ - cpu = vcpu->cpu; + cpu = vcpu->arch.thread_cpu; if (cpu < 0 || cpu >= nr_cpu_ids) { this_icp->rm_action |= XICS_RM_KICK_VCPU; this_icp->rm_kick_target = vcpu; return; } - /* In SMT cpu will always point to thread 0, we adjust it */ - cpu += vcpu->arch.ptid; smp_mb(); kvmhv_rm_send_ipi(cpu); diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index faa86e9c0551..2273dcacef39 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -128,6 +128,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) subf r4, r4, r3 mtspr SPRN_DEC, r4 + /* hwthread_req may have got set by cede or no vcpu, so clear it */ + li r0, 0 + stb r0, HSTATE_HWTHREAD_REQ(r13) + /* * For external and machine check interrupts, we need * to call the Linux handler to process the interrupt. @@ -215,7 +219,6 @@ kvm_novcpu_wakeup: ld r5, HSTATE_KVM_VCORE(r13) li r0, 0 stb r0, HSTATE_NAPPING(r13) - stb r0, HSTATE_HWTHREAD_REQ(r13) /* check the wake reason */ bl kvmppc_check_wake_reason @@ -315,10 +318,10 @@ kvm_start_guest: cmpdi r3, 0 bge kvm_no_guest - /* get vcpu pointer, NULL if we have no vcpu to run */ - ld r4,HSTATE_KVM_VCPU(r13) - cmpdi r4,0 - /* if we have no vcpu to run, go back to sleep */ + /* get vcore pointer, NULL if we have nothing to run */ + ld r5,HSTATE_KVM_VCORE(r13) + cmpdi r5,0 + /* if we have no vcore to run, go back to sleep */ beq kvm_no_guest kvm_secondary_got_guest: @@ -327,21 +330,42 @@ kvm_secondary_got_guest: ld r6, PACA_DSCR_DEFAULT(r13) std r6, HSTATE_DSCR(r13) - /* Order load of vcore, ptid etc. after load of vcpu */ + /* On thread 0 of a subcore, set HDEC to max */ + lbz r4, HSTATE_PTID(r13) + cmpwi r4, 0 + bne 63f + lis r6, 0x7fff + ori r6, r6, 0xffff + mtspr SPRN_HDEC, r6 + /* and set per-LPAR registers, if doing dynamic micro-threading */ + ld r6, HSTATE_SPLIT_MODE(r13) + cmpdi r6, 0 + beq 63f + ld r0, KVM_SPLIT_RPR(r6) + mtspr SPRN_RPR, r0 + ld r0, KVM_SPLIT_PMMAR(r6) + mtspr SPRN_PMMAR, r0 + ld r0, KVM_SPLIT_LDBAR(r6) + mtspr SPRN_LDBAR, r0 + isync +63: + /* Order load of vcpu after load of vcore */ lwsync + ld r4, HSTATE_KVM_VCPU(r13) bl kvmppc_hv_entry /* Back from the guest, go back to nap */ - /* Clear our vcpu pointer so we don't come back in early */ + /* Clear our vcpu and vcore pointers so we don't come back in early */ li r0, 0 + std r0, HSTATE_KVM_VCPU(r13) /* - * Once we clear HSTATE_KVM_VCPU(r13), the code in + * Once we clear HSTATE_KVM_VCORE(r13), the code in * kvmppc_run_core() is going to assume that all our vcpu * state is visible in memory. This lwsync makes sure * that that is true. */ lwsync - std r0, HSTATE_KVM_VCPU(r13) + std r0, HSTATE_KVM_VCORE(r13) /* * At this point we have finished executing in the guest. 
@@ -374,16 +398,71 @@ kvm_no_guest: b power7_wakeup_loss 53: HMT_LOW - ld r4, HSTATE_KVM_VCPU(r13) - cmpdi r4, 0 + ld r5, HSTATE_KVM_VCORE(r13) + cmpdi r5, 0 + bne 60f + ld r3, HSTATE_SPLIT_MODE(r13) + cmpdi r3, 0 + beq kvm_no_guest + lbz r0, KVM_SPLIT_DO_NAP(r3) + cmpwi r0, 0 beq kvm_no_guest HMT_MEDIUM + b kvm_unsplit_nap +60: HMT_MEDIUM b kvm_secondary_got_guest 54: li r0, KVM_HWTHREAD_IN_KVM stb r0, HSTATE_HWTHREAD_STATE(r13) b kvm_no_guest +/* + * Here the primary thread is trying to return the core to + * whole-core mode, so we need to nap. + */ +kvm_unsplit_nap: + /* + * Ensure that secondary doesn't nap when it has + * its vcore pointer set. + */ + sync /* matches smp_mb() before setting split_info.do_nap */ + ld r0, HSTATE_KVM_VCORE(r13) + cmpdi r0, 0 + bne kvm_no_guest + /* clear any pending message */ +BEGIN_FTR_SECTION + lis r6, (PPC_DBELL_SERVER << (63-36))@h + PPC_MSGCLR(6) +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) + /* Set kvm_split_mode.napped[tid] = 1 */ + ld r3, HSTATE_SPLIT_MODE(r13) + li r0, 1 + lhz r4, PACAPACAINDEX(r13) + clrldi r4, r4, 61 /* micro-threading => P8 => 8 threads/core */ + addi r4, r4, KVM_SPLIT_NAPPED + stbx r0, r3, r4 + /* Check the do_nap flag again after setting napped[] */ + sync + lbz r0, KVM_SPLIT_DO_NAP(r3) + cmpwi r0, 0 + beq 57f + li r3, (LPCR_PECEDH | LPCR_PECE0) >> 4 + mfspr r4, SPRN_LPCR + rlwimi r4, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1) + mtspr SPRN_LPCR, r4 + isync + std r0, HSTATE_SCRATCH0(r13) + ptesync + ld r0, HSTATE_SCRATCH0(r13) +1: cmpd r0, r0 + bne 1b + nap + b . + +57: li r0, 0 + stbx r0, r3, r4 + b kvm_no_guest + /****************************************************************************** * * * Entry code * @@ -854,7 +933,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) cmpwi r0, 0 bne 21f HMT_LOW -20: lbz r0, VCORE_IN_GUEST(r5) +20: lwz r3, VCORE_ENTRY_EXIT(r5) + cmpwi r3, 0x100 + bge no_switch_exit + lbz r0, VCORE_IN_GUEST(r5) cmpwi r0, 0 beq 20b HMT_MEDIUM @@ -870,7 +952,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) blt hdec_soon ld r6, VCPU_CTR(r4) - lwz r7, VCPU_XER(r4) + ld r7, VCPU_XER(r4) mtctr r6 mtxer r7 @@ -985,9 +1067,13 @@ secondary_too_late: #endif 11: b kvmhv_switch_to_host +no_switch_exit: + HMT_MEDIUM + li r12, 0 + b 12f hdec_soon: li r12, BOOK3S_INTERRUPT_HV_DECREMENTER - stw r12, VCPU_TRAP(r4) +12: stw r12, VCPU_TRAP(r4) mr r9, r4 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING addi r3, r4, VCPU_TB_RMEXIT @@ -1103,7 +1189,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) mfctr r3 mfxer r4 std r3, VCPU_CTR(r9) - stw r4, VCPU_XER(r9) + std r4, VCPU_XER(r9) /* If this is a page table miss then see if it's theirs or ours */ cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE @@ -1127,6 +1213,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL bne 3f lbz r0, HSTATE_HOST_IPI(r13) + cmpwi r0, 0 beq 4f b guest_exit_cont 3: @@ -1176,6 +1263,11 @@ mc_cont: ld r9, HSTATE_KVM_VCPU(r13) lwz r12, VCPU_TRAP(r9) + /* Stop others sending VCPU interrupts to this physical CPU */ + li r0, -1 + stw r0, VCPU_CPU(r9) + stw r0, VCPU_THREAD_CPU(r9) + /* Save guest CTRL register, set runlatch to 1 */ mfspr r6,SPRN_CTRLF stw r6,VCPU_CTRL(r9) @@ -1540,12 +1632,17 @@ kvmhv_switch_to_host: /* Primary thread waits for all the secondaries to exit guest */ 15: lwz r3,VCORE_ENTRY_EXIT(r5) - srwi r0,r3,8 + rlwinm r0,r3,32-8,0xff clrldi r3,r3,56 cmpw r3,r0 bne 15b isync + /* Did we actually switch to the guest at all? 
*/ + lbz r6, VCORE_IN_GUEST(r5) + cmpwi r6, 0 + beq 19f + /* Primary thread switches back to host partition */ ld r6,KVM_HOST_SDR1(r4) lwz r7,KVM_HOST_LPID(r4) @@ -1589,7 +1686,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 18: /* Signal secondary CPUs to continue */ stb r0,VCORE_IN_GUEST(r5) - lis r8,0x7fff /* MAX_INT@h */ +19: lis r8,0x7fff /* MAX_INT@h */ mtspr SPRN_HDEC,r8 16: ld r8,KVM_HOST_LPCR(r4) @@ -1675,7 +1772,7 @@ kvmppc_hdsi: bl kvmppc_msr_interrupt fast_interrupt_c_return: 6: ld r7, VCPU_CTR(r9) - lwz r8, VCPU_XER(r9) + ld r8, VCPU_XER(r9) mtctr r7 mtxer r8 mr r4, r9 @@ -1816,8 +1913,8 @@ hcall_real_table: .long DOTSYM(kvmppc_h_remove) - hcall_real_table .long DOTSYM(kvmppc_h_enter) - hcall_real_table .long DOTSYM(kvmppc_h_read) - hcall_real_table - .long 0 /* 0x10 - H_CLEAR_MOD */ - .long 0 /* 0x14 - H_CLEAR_REF */ + .long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table + .long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table .long DOTSYM(kvmppc_h_protect) - hcall_real_table .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table .long DOTSYM(kvmppc_h_put_tce) - hcall_real_table diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c index bd6ab1672ae6..a759d9adb0b6 100644 --- a/arch/powerpc/kvm/book3s_paired_singles.c +++ b/arch/powerpc/kvm/book3s_paired_singles.c @@ -352,7 +352,7 @@ static inline u32 inst_get_field(u32 inst, int msb, int lsb) return kvmppc_get_field(inst, msb + 32, lsb + 32); } -bool kvmppc_inst_is_paired_single(struct kvm_vcpu *vcpu, u32 inst) +static bool kvmppc_inst_is_paired_single(struct kvm_vcpu *vcpu, u32 inst) { if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)) return false; diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S index acee37cde840..ca8f174289bb 100644 --- a/arch/powerpc/kvm/book3s_segment.S +++ b/arch/powerpc/kvm/book3s_segment.S @@ -123,7 +123,7 @@ no_dcbz32_on: PPC_LL r8, SVCPU_CTR(r3) PPC_LL r9, SVCPU_LR(r3) lwz r10, SVCPU_CR(r3) - lwz r11, SVCPU_XER(r3) + PPC_LL r11, SVCPU_XER(r3) mtctr r8 mtlr r9 @@ -237,7 +237,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) mfctr r8 mflr r9 - stw r5, SVCPU_XER(r13) + PPC_STL r5, SVCPU_XER(r13) PPC_STL r6, SVCPU_FAULT_DAR(r13) stw r7, SVCPU_FAULT_DSISR(r13) PPC_STL r8, SVCPU_CTR(r13) diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c index c6ca7db64673..905e94a1370f 100644 --- a/arch/powerpc/kvm/book3s_xics.c +++ b/arch/powerpc/kvm/book3s_xics.c @@ -41,7 +41,7 @@ * ======= * * Each ICS has a spin lock protecting the information about the IRQ - * sources and avoiding simultaneous deliveries if the same interrupt. + * sources and avoiding simultaneous deliveries of the same interrupt. 
* * ICP operations are done via a single compare & swap transaction * (most ICP state fits in the union kvmppc_icp_state) diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index cc5842657161..ae458f0fd061 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -933,6 +933,7 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu, #endif break; case BOOKE_INTERRUPT_CRITICAL: + kvmppc_fill_pt_regs(®s); unknown_exception(®s); break; case BOOKE_INTERRUPT_DEBUG: diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c index 50860e919cb8..29911a07bcdb 100644 --- a/arch/powerpc/kvm/e500_mmu.c +++ b/arch/powerpc/kvm/e500_mmu.c @@ -377,7 +377,7 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea) | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); vcpu->arch.shared->mas1 = (vcpu->arch.shared->mas6 & MAS6_SPID0) - | (vcpu->arch.shared->mas6 & (MAS6_SAS ? MAS1_TS : 0)) + | ((vcpu->arch.shared->mas6 & MAS6_SAS) ? MAS1_TS : 0) | (vcpu->arch.shared->mas4 & MAS4_TSIZED(~0)); vcpu->arch.shared->mas2 &= MAS2_EPN; vcpu->arch.shared->mas2 |= vcpu->arch.shared->mas4 & diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index e5dde32fe71f..2e51289610e4 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -660,7 +660,7 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) return kvmppc_core_pending_dec(vcpu); } -enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer) +static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer) { struct kvm_vcpu *vcpu; |
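
Note: a small but easy-to-miss fix sits in the e500_mmu.c hunk above. The old expression mas6 & (MAS6_SAS ? MAS1_TS : 0) always reduces to mas6 & MAS1_TS, because MAS6_SAS is a nonzero constant, so the wrong bit was tested; the corrected form tests MAS6_SAS first and then selects MAS1_TS. A self-contained illustration of the difference (the bit values here are made up for the example, not the real e500 register definitions):

	#include <assert.h>

	#define SAS_BIT  0x0001   /* hypothetical "address space" bit in mas6 */
	#define TS_BIT   0x1000   /* hypothetical TS bit to set in mas1 */

	int main(void)
	{
		unsigned int mas6 = SAS_BIT;    /* SAS is set */

		/* buggy form: reduces to mas6 & TS_BIT, i.e. tests the wrong bit */
		unsigned int buggy = mas6 & (SAS_BIT ? TS_BIT : 0);

		/* fixed form: test SAS, then choose TS */
		unsigned int fixed = (mas6 & SAS_BIT) ? TS_BIT : 0;

		assert(buggy == 0);        /* TS lost even though SAS was set */
		assert(fixed == TS_BIT);   /* TS correctly propagated */
		return 0;
	}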