From e80639405c40127727812a0e1f8a65ba9979f146 Mon Sep 17 00:00:00 2001 From: Aneesh Kumar K.V Date: Wed, 7 Oct 2020 11:03:05 +0530 Subject: powerpc/mm: Update tlbiel loop on POWER10 With POWER10, a single tlbiel instruction invalidates all the congruence classes of the TLB, so we need to issue only one tlbiel with SET=0. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20201007053305.232879-1-aneesh.kumar@linux.ibm.com --- arch/powerpc/kvm/book3s_hv.c | 7 ++++++- arch/powerpc/kvm/book3s_hv_builtin.c | 11 ++++++++++- 2 files changed, 16 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index e3b1839fc251..0faafe6f8c4e 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -4949,7 +4949,12 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm) * Work out how many sets the TLB has, for the use of * the TLB invalidation loop in book3s_hv_rmhandlers.S. */ - if (radix_enabled()) + if (cpu_has_feature(CPU_FTR_ARCH_31)) { + /* + * P10 will flush all the congruence class with a single tlbiel + */ + kvm->arch.tlb_sets = 1; + } else if (radix_enabled()) kvm->arch.tlb_sets = POWER9_TLB_SETS_RADIX; /* 128 */ else if (cpu_has_feature(CPU_FTR_ARCH_300)) kvm->arch.tlb_sets = POWER9_TLB_SETS_HASH; /* 256 */ diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index 8f58dd20b362..8053efdf7ea7 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -694,6 +694,7 @@ static void wait_for_sync(struct kvm_split_mode *sip, int phase) void kvmhv_p9_set_lpcr(struct kvm_split_mode *sip) { + int num_sets; unsigned long rb, set; /* wait for every other thread to get to real mode */ @@ -704,11 +705,19 @@ void kvmhv_p9_set_lpcr(struct kvm_split_mode *sip) mtspr(SPRN_LPID, sip->lpidr_req); isync(); + /* + * P10 will flush all the congruence class with a single tlbiel + */ + if (cpu_has_feature(CPU_FTR_ARCH_31)) + num_sets = 1; + else + num_sets = POWER9_TLB_SETS_RADIX; + /* Invalidate the TLB on thread 0 */ if (local_paca->kvm_hstate.tid == 0) { sip->do_set = 0; asm volatile("ptesync" : : : "memory"); - for (set = 0; set < POWER9_TLB_SETS_RADIX; ++set) { + for (set = 0; set < num_sets; ++set) { rb = TLBIEL_INVAL_SET_LPID + (set << TLBIEL_INVAL_SET_SHIFT); asm volatile(PPC_TLBIEL(%0, %1, 0, 0, 0) : : -- cgit v1.2.3 From 120c0518ec321f33cdc4670059fb76e96ceb56eb Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Sun, 8 Nov 2020 16:57:36 +0000 Subject: powerpc: Replace RFI by rfi on book3s/32 and booke For book3s/32 and for booke, RFI is just an rfi. Only 40x has a non-trivial RFI. CONFIG_PPC_RTAS is never selected by 40x platforms. Make it more explicit by replacing RFI by rfi wherever possible.
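For reference, the 32-bit definitions involved look roughly like this (a paraphrased sketch of asm/ppc_asm.h, not the verbatim header; the exact 40x wording is an assumption):

    #ifdef CONFIG_40x
    #define RFI	rfi; b .	/* assumed: 40x must stop prefetch past the rfi */
    #else
    #define RFI	rfi		/* book3s/32 and booke: RFI is just rfi */
    #endif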
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/b901ddfdeb8a0a3b7cb59999599cdfde1bbfe834.1604854583.git.christophe.leroy@csgroup.eu --- arch/powerpc/kernel/entry_32.S | 6 +++--- arch/powerpc/kernel/head_book3s_32.S | 18 +++++++++--------- arch/powerpc/kernel/head_booke.h | 2 +- arch/powerpc/kvm/book3s_rmhandlers.S | 4 ++-- 4 files changed, 15 insertions(+), 15 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 8cdc8bcde703..e10e1167ffb1 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -1027,7 +1027,7 @@ exc_exit_restart: lwz r1,GPR1(r1) .globl exc_exit_restart_end exc_exit_restart_end: - RFI + rfi _ASM_NOKPROBE_SYMBOL(exc_exit_restart) _ASM_NOKPROBE_SYMBOL(exc_exit_restart_end) @@ -1356,7 +1356,7 @@ _GLOBAL(enter_rtas) stw r7, THREAD + RTAS_SP(r2) mtspr SPRN_SRR0,r8 mtspr SPRN_SRR1,r9 - RFI + rfi 1: tophys_novmstack r9, r1 #ifdef CONFIG_VMAP_STACK li r0, MSR_KERNEL & ~MSR_IR /* can take DTLB miss */ @@ -1371,6 +1371,6 @@ _GLOBAL(enter_rtas) stw r0, THREAD + RTAS_SP(r7) mtspr SPRN_SRR0,r8 mtspr SPRN_SRR1,r9 - RFI /* return to caller */ + rfi /* return to caller */ _ASM_NOKPROBE_SYMBOL(enter_rtas) #endif /* CONFIG_PPC_RTAS */ diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S index 5eb9eedac920..40e8c8ce4018 100644 --- a/arch/powerpc/kernel/head_book3s_32.S +++ b/arch/powerpc/kernel/head_book3s_32.S @@ -206,7 +206,7 @@ turn_on_mmu: lis r0,start_here@h ori r0,r0,start_here@l mtspr SPRN_SRR0,r0 - RFI /* enables MMU */ + rfi /* enables MMU */ /* * We need __secondary_hold as a place to hold the other cpus on @@ -769,13 +769,13 @@ fast_hash_page_return: mtcr r11 lwz r11, THR11(r10) mfspr r10, SPRN_SPRG_SCRATCH0 - RFI + rfi 1: /* ISI */ mtcr r11 mfspr r11, SPRN_SPRG_SCRATCH1 mfspr r10, SPRN_SPRG_SCRATCH0 - RFI + rfi stack_overflow: vmap_stack_overflow_exception @@ -910,7 +910,7 @@ __secondary_start: ori r3,r3,start_secondary@l mtspr SPRN_SRR0,r3 mtspr SPRN_SRR1,r4 - RFI + rfi #endif /* CONFIG_SMP */ #ifdef CONFIG_KVM_BOOK3S_HANDLER @@ -1038,7 +1038,7 @@ start_here: .align 4 mtspr SPRN_SRR0,r4 mtspr SPRN_SRR1,r3 - RFI + rfi /* Load up the kernel context */ 2: bl load_up_mmu @@ -1062,7 +1062,7 @@ start_here: ori r3,r3,start_kernel@l mtspr SPRN_SRR0,r3 mtspr SPRN_SRR1,r4 - RFI + rfi /* * void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next); @@ -1177,7 +1177,7 @@ _ENTRY(update_bats) .align 4 mtspr SPRN_SRR0, r4 mtspr SPRN_SRR1, r3 - RFI + rfi 1: bl clear_bats lis r3, BATS@ha addi r3, r3, BATS@l @@ -1196,7 +1196,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) mtmsr r3 mtspr SPRN_SRR0, r7 mtspr SPRN_SRR1, r6 - RFI + rfi flush_tlbs: lis r10, 0x40 @@ -1217,7 +1217,7 @@ mmu_off: mtspr SPRN_SRR0,r4 mtspr SPRN_SRR1,r3 sync - RFI + rfi /* We use one BAT to map up to 256M of RAM at _PAGE_OFFSET */ initial_bats: diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index 71c359d438b5..e26d35de27e5 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h @@ -176,7 +176,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV) #endif mtspr SPRN_SRR1,r10 mtspr SPRN_SRR0,r11 - RFI /* jump to handler, enable MMU */ + rfi /* jump to handler, enable MMU */ 99: b ret_from_kernel_syscall .endm diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S index 3dc129a254b5..b45b750fa77a 100644 --- 
a/arch/powerpc/kvm/book3s_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_rmhandlers.S @@ -36,8 +36,8 @@ #define FUNC(name) name -#define RFI_TO_KERNEL RFI -#define RFI_TO_GUEST RFI +#define RFI_TO_KERNEL rfi +#define RFI_TO_GUEST rfi .macro INTERRUPT_TRAMPOLINE intno -- cgit v1.2.3 From 067c9f9c98c8804b07751994c51d8557e440821e Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Sat, 28 Nov 2020 17:07:23 +1000 Subject: KVM: PPC: Book3S HV: Don't attempt to recover machine checks for FWNMI enabled guests Guests that can deal with machine checks would actually prefer the hypervisor not to try to recover for them. For example, if SLB multi-hits are recovered by the hypervisor by clearing the SLB then the guest will not be able to log the contents and debug its programming error. If guests don't register for FWNMI, they may not be so capable and so the hypervisor will continue to recover for those. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20201128070728.825934-4-npiggin@gmail.com --- arch/powerpc/kvm/book3s_hv_ras.c | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c index 6028628ea3ac..d4bca93b79f6 100644 --- a/arch/powerpc/kvm/book3s_hv_ras.c +++ b/arch/powerpc/kvm/book3s_hv_ras.c @@ -65,10 +65,9 @@ static void reload_slb(struct kvm_vcpu *vcpu) * On POWER7, see if we can handle a machine check that occurred inside * the guest in real mode, without switching to the host partition. */ -static void kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu) +static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu) { unsigned long srr1 = vcpu->arch.shregs.msr; - struct machine_check_event mce_evt; long handled = 1; if (srr1 & SRR1_MC_LDSTERR) { @@ -106,6 +105,21 @@ static void kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu) handled = 0; } + return handled; +} + +void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu) +{ + struct machine_check_event mce_evt; + long handled; + + if (vcpu->kvm->arch.fwnmi_enabled) { + /* FWNMI guests handle their own recovery */ + handled = 0; + } else { + handled = kvmppc_realmode_mc_power7(vcpu); + } + /* * Now get the event and stash it in the vcpu struct so it can * be handled by the primary thread in virtual mode. We can't @@ -122,11 +136,6 @@ static void kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu) vcpu->arch.mce_evt = mce_evt; } -void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu) -{ - kvmppc_realmode_mc_power7(vcpu); -} - /* Check if dynamic split is in force and return subcore size accordingly. */ static inline int kvmppc_cur_subcore_size(void) { -- cgit v1.2.3 From 1d15ffdfc94127d75e04a88344ee1ce8c79f05fd Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Sat, 28 Nov 2020 17:07:24 +1000 Subject: KVM: PPC: Book3S HV: Ratelimit machine check messages coming from guests A number of machine check exceptions are triggerable by the guest. Ratelimit these to avoid a guest flooding the host console and logs.
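In isolation, the fix below applies the stock <linux/ratelimit.h> pattern (a minimal sketch; the state name rs is arbitrary):

    #include <linux/ratelimit.h>

    /* One static state per call site: allow up to DEFAULT_RATELIMIT_BURST
     * (10) messages per DEFAULT_RATELIMIT_INTERVAL (5 * HZ), then suppress.
     */
    static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
                                  DEFAULT_RATELIMIT_BURST);

    if (__ratelimit(&rs))	/* nonzero while printing is still allowed */
            machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);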
Signed-off-by: Nicholas Piggin [mpe: Use dedicated ratelimit state, not printk_ratelimit()] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20201128070728.825934-5-npiggin@gmail.com --- arch/powerpc/kvm/book3s_hv.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 0faafe6f8c4e..cfaa91b27112 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -1327,9 +1327,15 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu, case BOOK3S_INTERRUPT_SYSTEM_RESET: r = RESUME_GUEST; break; - case BOOK3S_INTERRUPT_MACHINE_CHECK: - /* Print the MCE event to host console. */ - machine_check_print_event_info(&vcpu->arch.mce_evt, false, true); + case BOOK3S_INTERRUPT_MACHINE_CHECK: { + static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, + DEFAULT_RATELIMIT_BURST); + /* + * Print the MCE event to host console. Ratelimit so the guest + * can't flood the host log. + */ + if (__ratelimit(&rs)) + machine_check_print_event_info(&vcpu->arch.mce_evt,false, true); /* * If the guest can do FWNMI, exit to userspace so it can @@ -1357,6 +1363,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu, r = RESUME_HOST; break; + } case BOOK3S_INTERRUPT_PROGRAM: { ulong flags; @@ -1516,11 +1523,16 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu) r = RESUME_GUEST; break; case BOOK3S_INTERRUPT_MACHINE_CHECK: + { + static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, + DEFAULT_RATELIMIT_BURST); /* Pass the machine check to the L1 guest */ r = RESUME_HOST; /* Print the MCE event to host console. */ - machine_check_print_event_info(&vcpu->arch.mce_evt, false, true); + if (__ratelimit(&rs)) + machine_check_print_event_info(&vcpu->arch.mce_evt, false, true); break; + } /* * We get these next two if the guest accesses a page which it thinks * it has mapped but which is not actually present, either because -- cgit v1.2.3 From 9f378b9f007cc94beadea40df83cc62a76975c6f Mon Sep 17 00:00:00 2001 From: Aneesh Kumar K.V Date: Fri, 27 Nov 2020 10:14:04 +0530 Subject: KVM: PPC: BOOK3S: PR: Ignore UAMOR SPR With POWER7 and above we expect the CPU to support keys. The number of keys is firmware controlled, based on the device tree. PR KVM does not expose key details via the device tree, hence when running with PR KVM we run with MMU_FTR_KEY support disabled. But we can still get updates on UAMOR. Hence ignore accesses to these SPRs, and for mfspr return 0, indicating that no AMR/IAMR update is allowed.
Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20201127044424.40686-3-aneesh.kumar@linux.ibm.com --- arch/powerpc/kvm/book3s_emulate.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index 0effd48c8f4d..b08cc15f31c7 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c @@ -840,6 +840,9 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) case SPRN_MMCR1: case SPRN_MMCR2: case SPRN_UMMCR2: + case SPRN_UAMOR: + case SPRN_IAMR: + case SPRN_AMR: #endif break; unprivileged: @@ -1004,6 +1007,9 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val case SPRN_MMCR2: case SPRN_UMMCR2: case SPRN_TIR: + case SPRN_UAMOR: + case SPRN_IAMR: + case SPRN_AMR: #endif *spr_val = 0; break; -- cgit v1.2.3 From ff57698a9610fcf7d9c4469bf68c881eff22e2f8 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Thu, 22 Oct 2020 09:29:21 +0000 Subject: powerpc: Fix update form addressing in inline assembly In several places, inline assembly uses the "%Un" modifier to enable the use of instructions with update form addressing, but the associated "<>" constraint is missing. As mentioned in the previous patch, this fails with gcc 4.9, so "<>" can't be used directly. Use the UPD_CONSTR macro everywhere the %Un modifier is used. Signed-off-by: Christophe Leroy Reviewed-by: Segher Boessenkool Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/62eab5ca595485c192de1765bdac099f633a21d0.1603358942.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/atomic.h | 9 +++++---- arch/powerpc/include/asm/io.h | 4 ++-- arch/powerpc/kvm/powerpc.c | 4 ++-- 3 files changed, 9 insertions(+), 8 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index 8a55eb8cc97b..61c6e8b200e8 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h @@ -10,6 +10,7 @@ #include #include #include +#include /* * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with @@ -26,14 +27,14 @@ static __inline__ int atomic_read(const atomic_t *v) { int t; - __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter)); + __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"UPD_CONSTR(v->counter)); return t; } static __inline__ void atomic_set(atomic_t *v, int i) { - __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i)); + __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i)); } #define ATOMIC_OP(op, asm_op) \ @@ -316,14 +317,14 @@ static __inline__ s64 atomic64_read(const atomic64_t *v) { s64 t; - __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter)); + __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"UPD_CONSTR(v->counter)); return t; } static __inline__ void atomic64_set(atomic64_t *v, s64 i) { - __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i)); + __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i)); } #define ATOMIC64_OP(op, asm_op) \ diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index 2469b46ac2c4..273edd208ec5 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -122,7 +122,7 @@ static inline u##size name(const volatile u##size __iomem *addr) \ { \ u##size ret; \ __asm__ __volatile__("sync;"#insn"%U1%X1 %0,%1;twi 0,%0,0;isync"\ : 
"=r" (ret) : "m" (*addr) : "memory"); \ + : "=r" (ret) : "m"UPD_CONSTR (*addr) : "memory"); \ return ret; \ } @@ -130,7 +130,7 @@ static inline u##size name(const volatile u##size __iomem *addr) \ static inline void name(volatile u##size __iomem *addr, u##size val) \ { \ __asm__ __volatile__("sync;"#insn"%U0%X0 %1,%0" \ - : "=m" (*addr) : "r" (val) : "memory"); \ + : "=m"UPD_CONSTR (*addr) : "r" (val) : "memory"); \ mmiowb_set_pending(); \ } diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 13999123b735..cf52d26f49cd 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -1087,7 +1087,7 @@ static inline u64 sp_to_dp(u32 fprs) preempt_disable(); enable_kernel_fp(); - asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs) + asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m"UPD_CONSTR (fprd) : "m"UPD_CONSTR (fprs) : "fr0"); preempt_enable(); return fprd; @@ -1099,7 +1099,7 @@ static inline u32 dp_to_sp(u64 fprd) preempt_disable(); enable_kernel_fp(); - asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd) + asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m"UPD_CONSTR (fprs) : "m"UPD_CONSTR (fprd) : "fr0"); preempt_enable(); return fprs; -- cgit v1.2.3 From e89a8ca94bf583f2577fe722483f0304b3390aa2 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 6 Nov 2020 14:53:40 +1000 Subject: powerpc/64s: Remove MSR[ISF] bit No supported processor implements this mode. Setting the bit in MSR values can be a bit confusing (and would prevent the bit from ever being reused). Remove it. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20201106045340.1935841-1-npiggin@gmail.com --- arch/powerpc/include/asm/reg.h | 5 +---- arch/powerpc/kernel/entry_64.S | 2 +- arch/powerpc/kernel/head_64.S | 3 +-- arch/powerpc/kvm/book3s_pr.c | 2 +- 4 files changed, 4 insertions(+), 8 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index b9492f2b0608..e40a921d78f9 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -29,7 +29,6 @@ #include #define MSR_SF_LG 63 /* Enable 64 bit mode */ -#define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */ #define MSR_HV_LG 60 /* Hypervisor state */ #define MSR_TS_T_LG 34 /* Trans Mem state: Transactional */ #define MSR_TS_S_LG 33 /* Trans Mem state: Suspended */ @@ -69,13 +68,11 @@ #ifdef CONFIG_PPC64 #define MSR_SF __MASK(MSR_SF_LG) /* Enable 64 bit mode */ -#define MSR_ISF __MASK(MSR_ISF_LG) /* Interrupt 64b mode valid on 630 */ #define MSR_HV __MASK(MSR_HV_LG) /* Hypervisor state */ #define MSR_S __MASK(MSR_S_LG) /* Secure state */ #else /* so tests for these bits fail on 32-bit */ #define MSR_SF 0 -#define MSR_ISF 0 #define MSR_HV 0 #define MSR_S 0 #endif @@ -134,7 +131,7 @@ #define MSR_64BIT MSR_SF /* Server variant */ -#define __MSR (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV) +#define __MSR (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_HV) #ifdef __BIG_ENDIAN__ #define MSR_ __MSR #define MSR_IDLE (MSR_ME | MSR_SF | MSR_HV) diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index c9d59450fba0..aa1af139d947 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -969,7 +969,7 @@ _GLOBAL(enter_prom) mtsrr1 r11 rfi #else /* CONFIG_PPC_BOOK3E */ - LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE) + LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_LE) andc r11,r11,r12 mtsrr1 r11 RFI_TO_KERNEL diff --git 
a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index f63d01c78398..ece7f97bafff 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -870,8 +870,7 @@ enable_64b_mode: oris r11,r11,0x8000 /* CM bit set, we'll set ICM later */ mtmsr r11 #else /* CONFIG_PPC_BOOK3E */ - li r12,(MSR_64BIT | MSR_ISF)@highest - sldi r12,r12,48 + LOAD_REG_IMMEDIATE(r12, MSR_64BIT) or r11,r11,r12 mtmsrd r11 isync diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index b1fefa63e125..913944dc3620 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -239,7 +239,7 @@ static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) smsr |= (guest_msr & vcpu->arch.guest_owned_ext); /* 64-bit Process MSR values */ #ifdef CONFIG_PPC_BOOK3S_64 - smsr |= MSR_ISF | MSR_HV; + smsr |= MSR_HV; #endif #ifdef CONFIG_PPC_TRANSACTIONAL_MEM /* -- cgit v1.2.3 From 98983675008ab3ae9b37fc7a4bfa083998079215 Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Thu, 10 Dec 2020 18:14:38 +0100 Subject: KVM: PPC: Book3S HV: XIVE: Show detailed configuration in debug output This is useful to track allocation of the HW resources on a per-guest basis. Making sure IPIs are local to the chip of the vCPUs reduces rerouting between interrupt controllers and gives better performance in case of pinning. Checking the distribution of VP structures on the chips also helps in reducing PowerBUS traffic. [ clg: resurrected show_sources and reworked output ] Signed-off-by: Greg Kurz Signed-off-by: Cédric Le Goater Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20201210171450.1933725-2-clg@kaod.org --- arch/powerpc/kvm/book3s_xive.c | 76 +++++++++++++++++++++++++++++------ arch/powerpc/kvm/book3s_xive.h | 2 + arch/powerpc/kvm/book3s_xive_native.c | 21 ++++++++-- 3 files changed, 82 insertions(+), 17 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index 85215e79db42..773e8e8c0015 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -2128,9 +2128,8 @@ int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu) if (!q->qpage && !xc->esc_virq[i]) continue; - seq_printf(m, " [q%d]: ", i); - if (q->qpage) { + seq_printf(m, " q[%d]: ", i); idx = q->idx; i0 = be32_to_cpup(q->qpage + idx); idx = (idx + 1) & q->msk; @@ -2144,16 +2143,54 @@ int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu) irq_data_get_irq_handler_data(d); u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET); - seq_printf(m, "E:%c%c I(%d:%llx:%llx)", - (pq & XIVE_ESB_VAL_P) ? 'P' : 'p', - (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q', - xc->esc_virq[i], pq, xd->eoi_page); + seq_printf(m, " ESC %d %c%c EOI @%llx", + xc->esc_virq[i], + (pq & XIVE_ESB_VAL_P) ? 'P' : '-', + (pq & XIVE_ESB_VAL_Q) ? 'Q' : '-', + xd->eoi_page); seq_puts(m, "\n"); } } return 0; } +void kvmppc_xive_debug_show_sources(struct seq_file *m, + struct kvmppc_xive_src_block *sb) +{ + int i; + + seq_puts(m, " LISN HW/CHIP TYPE PQ EISN CPU/PRIO\n"); + for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { + struct kvmppc_xive_irq_state *state = &sb->irq_state[i]; + struct xive_irq_data *xd; + u64 pq; + u32 hw_num; + + if (!state->valid) + continue; + + kvmppc_xive_select_irq(state, &hw_num, &xd); + + pq = xive_vm_esb_load(xd, XIVE_ESB_GET); + + seq_printf(m, "%08x %08x/%02x", state->number, hw_num, + xd->src_chip); + if (state->lsi) + seq_printf(m, " %cLSI", state->asserted ? 
'^' : ' '); + else + seq_puts(m, " MSI"); + + seq_printf(m, " %s %c%c %08x % 4d/%d", + state->ipi_number == hw_num ? "IPI" : " PT", + pq & XIVE_ESB_VAL_P ? 'P' : '-', + pq & XIVE_ESB_VAL_Q ? 'Q' : '-', + state->eisn, state->act_server, + state->act_priority); + + seq_puts(m, "\n"); + } +} + static int xive_debug_show(struct seq_file *m, void *private) { struct kvmppc_xive *xive = m->private; @@ -2174,7 +2211,7 @@ static int xive_debug_show(struct seq_file *m, void *private) if (!kvm) return 0; - seq_printf(m, "=========\nVCPU state\n=========\n"); + seq_puts(m, "=========\nVCPU state\n=========\n"); kvm_for_each_vcpu(i, vcpu, kvm) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; @@ -2182,11 +2219,12 @@ static int xive_debug_show(struct seq_file *m, void *private) if (!xc) continue; - seq_printf(m, "cpu server %#x VP:%#x CPPR:%#x HWCPPR:%#x" - " MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n", - xc->server_num, xc->vp_id, xc->cppr, xc->hw_cppr, - xc->mfrr, xc->pending, - xc->stat_rm_h_xirr, xc->stat_vm_h_xirr); + seq_printf(m, "VCPU %d: VP:%#x/%02x\n" + " CPPR:%#x HWCPPR:%#x MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n", + xc->server_num, xc->vp_id, xc->vp_chip_id, + xc->cppr, xc->hw_cppr, + xc->mfrr, xc->pending, + xc->stat_rm_h_xirr, xc->stat_vm_h_xirr); kvmppc_xive_debug_show_queues(m, vcpu); @@ -2202,13 +2240,25 @@ static int xive_debug_show(struct seq_file *m, void *private) t_vm_h_ipi += xc->stat_vm_h_ipi; } - seq_printf(m, "Hcalls totals\n"); + seq_puts(m, "Hcalls totals\n"); seq_printf(m, " H_XIRR R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr); seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll); seq_printf(m, " H_CPPR R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr); seq_printf(m, " H_EOI R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi); seq_printf(m, " H_IPI R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi); + seq_puts(m, "=========\nSources\n=========\n"); + + for (i = 0; i <= xive->max_sbid; i++) { + struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; + + if (sb) { + arch_spin_lock(&sb->lock); + kvmppc_xive_debug_show_sources(m, sb); + arch_spin_unlock(&sb->lock); + } + } + return 0; } diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h index 382e3a56e789..d5d4fee7ac94 100644 --- a/arch/powerpc/kvm/book3s_xive.h +++ b/arch/powerpc/kvm/book3s_xive.h @@ -290,6 +290,8 @@ extern int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr); */ void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu); int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu); +void kvmppc_xive_debug_show_sources(struct seq_file *m, + struct kvmppc_xive_src_block *sb); struct kvmppc_xive_src_block *kvmppc_xive_create_src_block( struct kvmppc_xive *xive, int irq); void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb); diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c index a59a94f02733..7f120cf9c594 100644 --- a/arch/powerpc/kvm/book3s_xive_native.c +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -1219,18 +1219,31 @@ static int xive_native_debug_show(struct seq_file *m, void *private) if (!xc) continue; - seq_printf(m, "cpu server %#x VP=%#x NSR=%02x CPPR=%02x IBP=%02x PIPR=%02x w01=%016llx w2=%08x\n", - xc->server_num, xc->vp_id, + seq_printf(m, "VCPU %d: VP=%#x/%02x\n" + " NSR=%02x CPPR=%02x IBP=%02x PIPR=%02x w01=%016llx w2=%08x\n", + xc->server_num, xc->vp_id, xc->vp_chip_id, vcpu->arch.xive_saved_state.nsr, vcpu->arch.xive_saved_state.cppr, vcpu->arch.xive_saved_state.ipb, 
vcpu->arch.xive_saved_state.pipr, - vcpu->arch.xive_saved_state.w01, - (u32) vcpu->arch.xive_cam_word); + be64_to_cpu(vcpu->arch.xive_saved_state.w01), + be32_to_cpu(vcpu->arch.xive_cam_word)); kvmppc_xive_debug_show_queues(m, vcpu); } + seq_puts(m, "=========\nSources\n=========\n"); + + for (i = 0; i <= xive->max_sbid; i++) { + struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; + + if (sb) { + arch_spin_lock(&sb->lock); + kvmppc_xive_debug_show_sources(m, sb); + arch_spin_unlock(&sb->lock); + } + } + return 0; } -- cgit v1.2.3 From 4f1c3f7b08187e6b97701c7fb2dc6f3749566c62 Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Thu, 10 Dec 2020 18:14:39 +0100 Subject: powerpc/xive: Rename XIVE_IRQ_NO_EOI to show it's a flag This is a simple cleanup to easily identify all flags of the XIVE interrupt structure. The interrupts flagged with XIVE_IRQ_FLAG_NO_EOI are the escalations used to wake up vCPUs in KVM. They are handled very differently from the rest. Signed-off-by: Cédric Le Goater Reviewed-by: Greg Kurz Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20201210171450.1933725-3-clg@kaod.org --- arch/powerpc/include/asm/xive.h | 2 +- arch/powerpc/kvm/book3s_xive.c | 4 ++-- arch/powerpc/sysdev/xive/common.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h index 309b4d65b74f..d332dd9a18de 100644 --- a/arch/powerpc/include/asm/xive.h +++ b/arch/powerpc/include/asm/xive.h @@ -66,7 +66,7 @@ struct xive_irq_data { #define XIVE_IRQ_FLAG_H_INT_ESB 0x20 /* Special flag set by KVM for excalation interrupts */ -#define XIVE_IRQ_NO_EOI 0x80 +#define XIVE_IRQ_FLAG_NO_EOI 0x80 #define XIVE_INVALID_CHIP_ID -1 diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index 773e8e8c0015..7f60d1353d0e 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -219,7 +219,7 @@ int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio, /* In single escalation mode, we grab the ESB MMIO of the * interrupt and mask it. Also populate the VCPU v/raddr * of the ESB page for use by asm entry/exit code. Finally - * set the XIVE_IRQ_NO_EOI flag which will prevent the + * set the XIVE_IRQ_FLAG_NO_EOI flag which will prevent the * core code from performing an EOI on the escalation * interrupt, thus leaving it effectively masked after * it fires once. 
@@ -231,7 +231,7 @@ int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio, xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01); vcpu->arch.xive_esc_raddr = xd->eoi_page; vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio; - xd->flags |= XIVE_IRQ_NO_EOI; + xd->flags |= XIVE_IRQ_FLAG_NO_EOI; } return 0; diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index a80440af491a..65af34ac1fa2 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -416,7 +416,7 @@ static void xive_irq_eoi(struct irq_data *d) * been passed-through to a KVM guest */ if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d) && - !(xd->flags & XIVE_IRQ_NO_EOI)) + !(xd->flags & XIVE_IRQ_FLAG_NO_EOI)) xive_do_source_eoi(irqd_to_hwirq(d), xd); else xd->stale_p = true; -- cgit v1.2.3 From 4cc0e36df2c0a41fd38645ddde08d2bfba699b7a Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Thu, 10 Dec 2020 18:14:45 +0100 Subject: powerpc/xive: Remove P9 DD1 flag XIVE_IRQ_FLAG_SHIFT_BUG This flag was used to support the PHB4 LSIs on P9 DD1 and we have stopped supporting this CPU when DD2 came out. See skiboot commit: https://github.com/open-power/skiboot/commit/0b0d15e3c170 Signed-off-by: Cédric Le Goater Reviewed-by: Greg Kurz Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20201210171450.1933725-9-clg@kaod.org --- arch/powerpc/include/asm/opal-api.h | 2 +- arch/powerpc/include/asm/xive.h | 2 +- arch/powerpc/kvm/book3s_xive_native.c | 3 --- arch/powerpc/kvm/book3s_xive_template.c | 3 --- arch/powerpc/sysdev/xive/common.c | 9 --------- arch/powerpc/sysdev/xive/native.c | 2 -- 6 files changed, 2 insertions(+), 19 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h index 1dffa3cb16ba..48ee604ca39a 100644 --- a/arch/powerpc/include/asm/opal-api.h +++ b/arch/powerpc/include/asm/opal-api.h @@ -1091,7 +1091,7 @@ enum { OPAL_XIVE_IRQ_TRIGGER_PAGE = 0x00000001, OPAL_XIVE_IRQ_STORE_EOI = 0x00000002, OPAL_XIVE_IRQ_LSI = 0x00000004, - OPAL_XIVE_IRQ_SHIFT_BUG = 0x00000008, + OPAL_XIVE_IRQ_SHIFT_BUG = 0x00000008, /* P9 DD1.0 workaround */ OPAL_XIVE_IRQ_MASK_VIA_FW = 0x00000010, OPAL_XIVE_IRQ_EOI_VIA_FW = 0x00000020, }; diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h index d332dd9a18de..b3c039d0bb6e 100644 --- a/arch/powerpc/include/asm/xive.h +++ b/arch/powerpc/include/asm/xive.h @@ -60,7 +60,7 @@ struct xive_irq_data { }; #define XIVE_IRQ_FLAG_STORE_EOI 0x01 #define XIVE_IRQ_FLAG_LSI 0x02 -#define XIVE_IRQ_FLAG_SHIFT_BUG 0x04 +/* #define XIVE_IRQ_FLAG_SHIFT_BUG 0x04 */ /* P9 DD1.0 workaround */ #define XIVE_IRQ_FLAG_MASK_FW 0x08 #define XIVE_IRQ_FLAG_EOI_FW 0x10 #define XIVE_IRQ_FLAG_H_INT_ESB 0x20 diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c index 7f120cf9c594..76800c84f2a3 100644 --- a/arch/powerpc/kvm/book3s_xive_native.c +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -37,9 +37,6 @@ static u8 xive_vm_esb_load(struct xive_irq_data *xd, u32 offset) * ordering. 
*/ - if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG) - offset |= offset << 4; - val = in_be64(xd->eoi_mmio + offset); return (u8)val; } diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c index 4ad3c0279458..ece36e024a8f 100644 --- a/arch/powerpc/kvm/book3s_xive_template.c +++ b/arch/powerpc/kvm/book3s_xive_template.c @@ -61,9 +61,6 @@ static u8 GLUE(X_PFX,esb_load)(struct xive_irq_data *xd, u32 offset) if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI) offset |= XIVE_ESB_LD_ST_MO; - if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG) - offset |= offset << 4; - val =__x_readq(__x_eoi_page(xd) + offset); #ifdef __LITTLE_ENDIAN__ val >>= 64-8; diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index 61a5f08798e9..8499d0b24c1d 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -200,10 +200,6 @@ static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset) if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI) offset |= XIVE_ESB_LD_ST_MO; - /* Handle HW errata */ - if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG) - offset |= offset << 4; - if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw) val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0); else @@ -214,10 +210,6 @@ static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset) static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data) { - /* Handle HW errata */ - if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG) - offset |= offset << 4; - if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw) xive_ops->esb_rw(xd->hw_irq, offset, data, 1); else @@ -1312,7 +1304,6 @@ static const struct { } xive_irq_flags[] = { { XIVE_IRQ_FLAG_STORE_EOI, "STORE_EOI" }, { XIVE_IRQ_FLAG_LSI, "LSI" }, - { XIVE_IRQ_FLAG_SHIFT_BUG, "SHIFT_BUG" }, { XIVE_IRQ_FLAG_MASK_FW, "MASK_FW" }, { XIVE_IRQ_FLAG_EOI_FW, "EOI_FW" }, { XIVE_IRQ_FLAG_H_INT_ESB, "H_INT_ESB" }, diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c index c3182ec9ed65..f501b1640068 100644 --- a/arch/powerpc/sysdev/xive/native.c +++ b/arch/powerpc/sysdev/xive/native.c @@ -64,8 +64,6 @@ int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data) data->flags |= XIVE_IRQ_FLAG_STORE_EOI; if (opal_flags & OPAL_XIVE_IRQ_LSI) data->flags |= XIVE_IRQ_FLAG_LSI; - if (opal_flags & OPAL_XIVE_IRQ_SHIFT_BUG) - data->flags |= XIVE_IRQ_FLAG_SHIFT_BUG; if (opal_flags & OPAL_XIVE_IRQ_MASK_VIA_FW) data->flags |= XIVE_IRQ_FLAG_MASK_FW; if (opal_flags & OPAL_XIVE_IRQ_EOI_VIA_FW) -- cgit v1.2.3 From b5277d18c65e31ce51f6733ebdca3985a962fab5 Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Thu, 10 Dec 2020 18:14:46 +0100 Subject: powerpc/xive: Remove P9 DD1 flag XIVE_IRQ_FLAG_MASK_FW This flag was used to support the PHB4 LSIs on P9 DD1 and we have stopped supporting this CPU when DD2 came out. 
See skiboot commit: https://github.com/open-power/skiboot/commit/0b0d15e3c170 Signed-off-by: Cédric Le Goater Reviewed-by: Greg Kurz Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20201210171450.1933725-10-clg@kaod.org --- arch/powerpc/include/asm/opal-api.h | 2 +- arch/powerpc/include/asm/xive.h | 2 +- arch/powerpc/kvm/book3s_xive.c | 54 ++++++------------------------------- arch/powerpc/sysdev/xive/common.c | 40 +-------------------------- arch/powerpc/sysdev/xive/native.c | 2 -- 5 files changed, 11 insertions(+), 89 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h index 48ee604ca39a..0455b679c050 100644 --- a/arch/powerpc/include/asm/opal-api.h +++ b/arch/powerpc/include/asm/opal-api.h @@ -1092,7 +1092,7 @@ enum { OPAL_XIVE_IRQ_STORE_EOI = 0x00000002, OPAL_XIVE_IRQ_LSI = 0x00000004, OPAL_XIVE_IRQ_SHIFT_BUG = 0x00000008, /* P9 DD1.0 workaround */ - OPAL_XIVE_IRQ_MASK_VIA_FW = 0x00000010, + OPAL_XIVE_IRQ_MASK_VIA_FW = 0x00000010, /* P9 DD1.0 workaround */ OPAL_XIVE_IRQ_EOI_VIA_FW = 0x00000020, }; diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h index b3c039d0bb6e..8d5b0dcc253c 100644 --- a/arch/powerpc/include/asm/xive.h +++ b/arch/powerpc/include/asm/xive.h @@ -61,7 +61,7 @@ struct xive_irq_data { #define XIVE_IRQ_FLAG_STORE_EOI 0x01 #define XIVE_IRQ_FLAG_LSI 0x02 /* #define XIVE_IRQ_FLAG_SHIFT_BUG 0x04 */ /* P9 DD1.0 workaround */ -#define XIVE_IRQ_FLAG_MASK_FW 0x08 +/* #define XIVE_IRQ_FLAG_MASK_FW 0x08 */ /* P9 DD1.0 workaround */ #define XIVE_IRQ_FLAG_EOI_FW 0x10 #define XIVE_IRQ_FLAG_H_INT_ESB 0x20 diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index 7f60d1353d0e..87535bbe1d74 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -419,37 +419,16 @@ static u8 xive_lock_and_mask(struct kvmppc_xive *xive, /* Get the right irq */ kvmppc_xive_select_irq(state, &hw_num, &xd); + /* Set PQ to 10, return old P and old Q and remember them */ + val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10); + state->old_p = !!(val & 2); + state->old_q = !!(val & 1); + /* - * If the interrupt is marked as needing masking via - * firmware, we do it here. Firmware masking however - * is "lossy", it won't return the old p and q bits - * and won't set the interrupt to a state where it will - * record queued ones. If this is an issue we should do - * lazy masking instead. - * - * For now, we work around this in unmask by forcing - * an interrupt whenever we unmask a non-LSI via FW - * (if ever). + * Synchronize hardware to sensure the queues are updated when + * masking */ - if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) { - xive_native_configure_irq(hw_num, - kvmppc_xive_vp(xive, state->act_server), - MASKED, state->number); - /* set old_p so we can track if an H_EOI was done */ - state->old_p = true; - state->old_q = false; - } else { - /* Set PQ to 10, return old P and old Q and remember them */ - val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10); - state->old_p = !!(val & 2); - state->old_q = !!(val & 1); - - /* - * Synchronize hardware to sensure the queues are updated - * when masking - */ - xive_native_sync_source(hw_num); - } + xive_native_sync_source(hw_num); return old_prio; } @@ -483,23 +462,6 @@ static void xive_finish_unmask(struct kvmppc_xive *xive, /* Get the right irq */ kvmppc_xive_select_irq(state, &hw_num, &xd); - /* - * See comment in xive_lock_and_mask() concerning masking - * via firmware. 
- */ - if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) { - xive_native_configure_irq(hw_num, - kvmppc_xive_vp(xive, state->act_server), - state->act_priority, state->number); - /* If an EOI is needed, do it here */ - if (!state->old_p) - xive_vm_source_eoi(hw_num, xd); - /* If this is not an LSI, force a trigger */ - if (!(xd->flags & OPAL_XIVE_IRQ_LSI)) - xive_irq_trigger(xd); - goto bail; - } - /* Old Q set, set PQ to 11 */ if (state->old_q) xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11); diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index 8499d0b24c1d..9165d4834b2d 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -424,9 +424,7 @@ static void xive_irq_eoi(struct irq_data *d) } /* - * Helper used to mask and unmask an interrupt source. This - * is only called for normal interrupts that do not require - * masking/unmasking via firmware. + * Helper used to mask and unmask an interrupt source. */ static void xive_do_source_set_mask(struct xive_irq_data *xd, bool mask) @@ -673,20 +671,6 @@ static void xive_irq_unmask(struct irq_data *d) pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd); - /* - * This is a workaround for PCI LSI problems on P9, for - * these, we call FW to set the mask. The problems might - * be fixed by P9 DD2.0, if that is the case, firmware - * will no longer set that flag. - */ - if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) { - unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); - xive_ops->configure_irq(hw_irq, - get_hard_smp_processor_id(xd->target), - xive_irq_priority, d->irq); - return; - } - xive_do_source_set_mask(xd, false); } @@ -696,20 +680,6 @@ static void xive_irq_mask(struct irq_data *d) pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd); - /* - * This is a workaround for PCI LSI problems on P9, for - * these, we call OPAL to set the mask. The problems might - * be fixed by P9 DD2.0, if that is the case, firmware - * will no longer set that flag. 
- */ - if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) { - unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); - xive_ops->configure_irq(hw_irq, - get_hard_smp_processor_id(xd->target), - 0xff, d->irq); - return; - } - xive_do_source_set_mask(xd, true); } @@ -852,13 +822,6 @@ static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state) int rc; u8 pq; - /* - * We only support this on interrupts that do not require - * firmware calls for masking and unmasking - */ - if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) - return -EIO; - /* * This is called by KVM with state non-NULL for enabling * pass-through or NULL for disabling it @@ -1304,7 +1267,6 @@ static const struct { } xive_irq_flags[] = { { XIVE_IRQ_FLAG_STORE_EOI, "STORE_EOI" }, { XIVE_IRQ_FLAG_LSI, "LSI" }, - { XIVE_IRQ_FLAG_MASK_FW, "MASK_FW" }, { XIVE_IRQ_FLAG_EOI_FW, "EOI_FW" }, { XIVE_IRQ_FLAG_H_INT_ESB, "H_INT_ESB" }, { XIVE_IRQ_FLAG_NO_EOI, "NO_EOI" }, diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c index f501b1640068..6c04ac1f3a1f 100644 --- a/arch/powerpc/sysdev/xive/native.c +++ b/arch/powerpc/sysdev/xive/native.c @@ -64,8 +64,6 @@ int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data) data->flags |= XIVE_IRQ_FLAG_STORE_EOI; if (opal_flags & OPAL_XIVE_IRQ_LSI) data->flags |= XIVE_IRQ_FLAG_LSI; - if (opal_flags & OPAL_XIVE_IRQ_MASK_VIA_FW) - data->flags |= XIVE_IRQ_FLAG_MASK_FW; if (opal_flags & OPAL_XIVE_IRQ_EOI_VIA_FW) data->flags |= XIVE_IRQ_FLAG_EOI_FW; data->eoi_page = be64_to_cpu(eoi_page); -- cgit v1.2.3 From cf58b746665d0177b86d42d18e60985fa1fdb909 Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Thu, 10 Dec 2020 18:14:47 +0100 Subject: powerpc/xive: Remove P9 DD1 flag XIVE_IRQ_FLAG_EOI_FW This flag was used to support the P9 DD1 and we have stopped supporting this CPU when DD2 came out. See skiboot commit: https://github.com/open-power/skiboot/commit/0b0d15e3c170 Also, remove eoi handler which is now unused. 
Signed-off-by: Cédric Le Goater Reviewed-by: Greg Kurz Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20201210171450.1933725-11-clg@kaod.org --- arch/powerpc/include/asm/opal-api.h | 2 +- arch/powerpc/include/asm/xive.h | 2 +- arch/powerpc/kvm/book3s_xive_template.c | 2 -- arch/powerpc/sysdev/xive/common.c | 14 +------------- arch/powerpc/sysdev/xive/native.c | 12 ------------ arch/powerpc/sysdev/xive/spapr.c | 6 ------ arch/powerpc/sysdev/xive/xive-internal.h | 1 - 7 files changed, 3 insertions(+), 36 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h index 0455b679c050..0b63ba7d5917 100644 --- a/arch/powerpc/include/asm/opal-api.h +++ b/arch/powerpc/include/asm/opal-api.h @@ -1093,7 +1093,7 @@ enum { OPAL_XIVE_IRQ_LSI = 0x00000004, OPAL_XIVE_IRQ_SHIFT_BUG = 0x00000008, /* P9 DD1.0 workaround */ OPAL_XIVE_IRQ_MASK_VIA_FW = 0x00000010, /* P9 DD1.0 workaround */ - OPAL_XIVE_IRQ_EOI_VIA_FW = 0x00000020, + OPAL_XIVE_IRQ_EOI_VIA_FW = 0x00000020, /* P9 DD1.0 workaround */ }; /* Flags for OPAL_XIVE_GET/SET_QUEUE_INFO */ diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h index 8d5b0dcc253c..9a312b975ca8 100644 --- a/arch/powerpc/include/asm/xive.h +++ b/arch/powerpc/include/asm/xive.h @@ -62,7 +62,7 @@ struct xive_irq_data { #define XIVE_IRQ_FLAG_LSI 0x02 /* #define XIVE_IRQ_FLAG_SHIFT_BUG 0x04 */ /* P9 DD1.0 workaround */ /* #define XIVE_IRQ_FLAG_MASK_FW 0x08 */ /* P9 DD1.0 workaround */ -#define XIVE_IRQ_FLAG_EOI_FW 0x10 +/* #define XIVE_IRQ_FLAG_EOI_FW 0x10 */ /* P9 DD1.0 workaround */ #define XIVE_IRQ_FLAG_H_INT_ESB 0x20 /* Special flag set by KVM for excalation interrupts */ diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c index ece36e024a8f..b0015e05d99a 100644 --- a/arch/powerpc/kvm/book3s_xive_template.c +++ b/arch/powerpc/kvm/book3s_xive_template.c @@ -74,8 +74,6 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd) /* If the XIVE supports the new "store EOI facility, use it */ if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) __x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI); - else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) - opal_int_eoi(hw_irq); else if (xd->flags & XIVE_IRQ_FLAG_LSI) { /* * For LSIs the HW EOI cycle is used rather than PQ bits, diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index 9165d4834b2d..3e91ec4bb49e 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -354,18 +354,7 @@ static void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd) /* If the XIVE supports the new "store EOI facility, use it */ if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0); - else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) { - /* - * The FW told us to call it. This happens for some - * interrupt sources that need additional HW whacking - * beyond the ESB manipulation. For example LPC interrupts - * on P9 DD1.0 needed a latch to be clared in the LPC bridge - * itself. The Firmware will take care of it. 
- */ - if (WARN_ON_ONCE(!xive_ops->eoi)) - return; - xive_ops->eoi(hw_irq); - } else { + else { u8 eoi_val; /* @@ -1267,7 +1256,6 @@ static const struct { } xive_irq_flags[] = { { XIVE_IRQ_FLAG_STORE_EOI, "STORE_EOI" }, { XIVE_IRQ_FLAG_LSI, "LSI" }, - { XIVE_IRQ_FLAG_EOI_FW, "EOI_FW" }, { XIVE_IRQ_FLAG_H_INT_ESB, "H_INT_ESB" }, { XIVE_IRQ_FLAG_NO_EOI, "NO_EOI" }, }; diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c index 6c04ac1f3a1f..e91519c42463 100644 --- a/arch/powerpc/sysdev/xive/native.c +++ b/arch/powerpc/sysdev/xive/native.c @@ -64,8 +64,6 @@ int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data) data->flags |= XIVE_IRQ_FLAG_STORE_EOI; if (opal_flags & OPAL_XIVE_IRQ_LSI) data->flags |= XIVE_IRQ_FLAG_LSI; - if (opal_flags & OPAL_XIVE_IRQ_EOI_VIA_FW) - data->flags |= XIVE_IRQ_FLAG_EOI_FW; data->eoi_page = be64_to_cpu(eoi_page); data->trig_page = be64_to_cpu(trig_page); data->esb_shift = be32_to_cpu(esb_shift); @@ -380,15 +378,6 @@ static void xive_native_update_pending(struct xive_cpu *xc) } } -static void xive_native_eoi(u32 hw_irq) -{ - /* - * Not normally used except if specific interrupts need - * a workaround on EOI. - */ - opal_int_eoi(hw_irq); -} - static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc) { s64 rc; @@ -471,7 +460,6 @@ static const struct xive_ops xive_native_ops = { .match = xive_native_match, .shutdown = xive_native_shutdown, .update_pending = xive_native_update_pending, - .eoi = xive_native_eoi, .setup_cpu = xive_native_setup_cpu, .teardown_cpu = xive_native_teardown_cpu, .sync_source = xive_native_sync_source, diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c index 6610e5149d5a..01ccc0786ada 100644 --- a/arch/powerpc/sysdev/xive/spapr.c +++ b/arch/powerpc/sysdev/xive/spapr.c @@ -628,11 +628,6 @@ static void xive_spapr_update_pending(struct xive_cpu *xc) } } -static void xive_spapr_eoi(u32 hw_irq) -{ - /* Not used */; -} - static void xive_spapr_setup_cpu(unsigned int cpu, struct xive_cpu *xc) { /* Only some debug on the TIMA settings */ @@ -677,7 +672,6 @@ static const struct xive_ops xive_spapr_ops = { .match = xive_spapr_match, .shutdown = xive_spapr_shutdown, .update_pending = xive_spapr_update_pending, - .eoi = xive_spapr_eoi, .setup_cpu = xive_spapr_setup_cpu, .teardown_cpu = xive_spapr_teardown_cpu, .sync_source = xive_spapr_sync_source, diff --git a/arch/powerpc/sysdev/xive/xive-internal.h b/arch/powerpc/sysdev/xive/xive-internal.h index c07fadb9d264..9cf57c722faa 100644 --- a/arch/powerpc/sysdev/xive/xive-internal.h +++ b/arch/powerpc/sysdev/xive/xive-internal.h @@ -52,7 +52,6 @@ struct xive_ops { void (*shutdown)(void); void (*update_pending)(struct xive_cpu *xc); - void (*eoi)(u32 hw_irq); void (*sync_source)(u32 hw_irq); u64 (*esb_rw)(u32 hw_irq, u32 offset, u64 data, bool write); #ifdef CONFIG_SMP -- cgit v1.2.3 From dddc4ef92d1ce92987da1d6926cdfa99e8acb622 Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Thu, 10 Dec 2020 18:14:50 +0100 Subject: KVM: PPC: Book3S HV: XIVE: Add a comment regarding VP numbering When the XIVE resources are allocated at the HW level, the VP structures describing the vCPUs of a guest are distributed among the chips to optimize the PowerBUS usage. For best performance, the guest vCPUs can be pinned to match the VP structure distribution. Currently, the VP identifiers are deduced from the vCPU id using the kvmppc_pack_vcpu_id() routine which is not incorrect but not optimal either. 
If VSMT is used, the result is not contiguous and the constraints on HW resources described above cannot be met. Signed-off-by: Cédric Le Goater Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20201210171450.1933725-14-clg@kaod.org --- arch/powerpc/kvm/book3s_xive.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h index d5d4fee7ac94..86c24a4ad809 100644 --- a/arch/powerpc/kvm/book3s_xive.h +++ b/arch/powerpc/kvm/book3s_xive.h @@ -218,6 +218,17 @@ static inline struct kvmppc_xive_src_block *kvmppc_xive_find_source(struct kvmpp return xive->src_blocks[bid]; } +/* + * When the XIVE resources are allocated at the HW level, the VP + * structures describing the vCPUs of a guest are distributed among + * the chips to optimize the PowerBUS usage. For best performance, the + * guest vCPUs can be pinned to match the VP structure distribution. + * + * Currently, the VP identifiers are deduced from the vCPU id using + * the kvmppc_pack_vcpu_id() routine which is not incorrect but not + * optimal either. It VSMT is used, the result is not continuous and + * the constraints on HW resources described above can not be met. + */ static inline u32 kvmppc_xive_vp(struct kvmppc_xive *xive, u32 server) { return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server); -- cgit v1.2.3 From 13751f8747519fe3bdc738fa6d802fbd94a85ac4 Mon Sep 17 00:00:00 2001 From: Kaixu Xia Date: Sat, 7 Nov 2020 14:26:22 +0800 Subject: KVM: PPC: Book3S: Assign boolean values to a bool variable Fix the following coccinelle warnings: ./arch/powerpc/kvm/book3s_xics.c:476:3-15: WARNING: Assignment of 0/1 to bool variable ./arch/powerpc/kvm/book3s_xics.c:504:3-15: WARNING: Assignment of 0/1 to bool variable Reported-by: Tosk Robot Signed-off-by: Kaixu Xia Reviewed-by: Greg Kurz Acked-by: Paul Mackerras Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1604730382-5810-1-git-send-email-kaixuxia@tencent.com --- arch/powerpc/kvm/book3s_xics.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c index 5fee5a11550d..303e3cb096db 100644 --- a/arch/powerpc/kvm/book3s_xics.c +++ b/arch/powerpc/kvm/book3s_xics.c @@ -473,7 +473,7 @@ static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp, arch_spin_unlock(&ics->lock); local_irq_restore(flags); new_irq = reject; - check_resend = 0; + check_resend = false; goto again; } } else { @@ -501,7 +501,7 @@ static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp, state->resend = 0; arch_spin_unlock(&ics->lock); local_irq_restore(flags); - check_resend = 0; + check_resend = false; goto again; } } -- cgit v1.2.3 From a300bf8c5f24bdeaa84925d1e0ec6221cbdc7597 Mon Sep 17 00:00:00 2001 From: Kaixu Xia Date: Sat, 7 Nov 2020 23:49:38 +0800 Subject: KVM: PPC: fix comparison to bool warning Fix the following coccicheck warning: ./arch/powerpc/kvm/booke.c:503:6-16: WARNING: Comparison to bool ./arch/powerpc/kvm/booke.c:505:6-17: WARNING: Comparison to bool ./arch/powerpc/kvm/booke.c:507:6-16: WARNING: Comparison to bool Reported-by: Tosk Robot Signed-off-by: Kaixu Xia Acked-by: Paul Mackerras Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1604764178-8087-1-git-send-email-kaixuxia@tencent.com --- arch/powerpc/kvm/booke.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/powerpc/kvm') diff 
--git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index b1abcb816439..288a9820ec01 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -500,11 +500,11 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, vcpu->arch.regs.nip = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; - if (update_esr == true) + if (update_esr) kvmppc_set_esr(vcpu, vcpu->arch.queued_esr); - if (update_dear == true) + if (update_dear) kvmppc_set_dar(vcpu, vcpu->arch.queued_dear); - if (update_epr == true) { + if (update_epr) { if (vcpu->arch.epr_flags & KVMPPC_EPR_USER) kvm_make_request(KVM_REQ_EPR_EXIT, vcpu); else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) { -- cgit v1.2.3 From 87fb4978ef8f7e3d6f51ea8e259638c4e96f2fc0 Mon Sep 17 00:00:00 2001 From: Leonardo Bras Date: Tue, 8 Dec 2020 18:57:08 -0300 Subject: KVM: PPC: Book3S HV: Fix mask size for emulated msgsndp According to ISAv3.1 and ISAv3.0b, msgsndp is described as splitting RB into: msgtype <- (RB) 32:36 payload <- (RB) 37:63 t <- (RB) 57:63 The current way of getting 'msgtype' and 't' is missing their MSB: msgtype: ((arg >> 27) & 0xf) : Gets (RB) 33:36, missing bit 32 t: (arg &= 0x3f) : Gets (RB) 58:63, missing bit 57 Fix this by applying the correct masks. Signed-off-by: Leonardo Bras Acked-by: Paul Mackerras Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20201208215707.31149-1-leobras.c@gmail.com --- arch/powerpc/kvm/book3s_hv.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index cfaa91b27112..6f612d240392 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -1241,9 +1241,9 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu) switch (get_xop(inst)) { case OP_31_XOP_MSGSNDP: arg = kvmppc_get_gpr(vcpu, rb); - if (((arg >> 27) & 0xf) != PPC_DBELL_SERVER) + if (((arg >> 27) & 0x1f) != PPC_DBELL_SERVER) break; - arg &= 0x3f; + arg &= 0x7f; if (arg >= kvm->arch.emul_smt_mode) break; tvcpu = kvmppc_find_vcpu(kvm, vcpu->vcpu_id - thr + arg); @@ -1256,7 +1256,7 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu) break; case OP_31_XOP_MSGCLRP: arg = kvmppc_get_gpr(vcpu, rb); - if (((arg >> 27) & 0xf) != PPC_DBELL_SERVER) + if (((arg >> 27) & 0x1f) != PPC_DBELL_SERVER) break; vcpu->arch.vcore->dpdes = 0; vcpu->arch.doorbell_request = 0; -- cgit v1.2.3
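As a sanity check of the msgsndp masks in the last fix, a worked example (an illustrative helper, not kernel code): in IBM bit numbering, bit 0 is the MSB of the 64-bit register, so field (RB) i:j is (j - i + 1) bits wide and sits (63 - j) bits above the LSB.

    #include <stdint.h>

    /* Extract the IBM-numbered bit field i:j (bit 0 = MSB) of a 64-bit value. */
    static inline uint64_t ibm_field(uint64_t rb, int i, int j)
    {
            return (rb >> (63 - j)) & ((1ULL << (j - i + 1)) - 1);
    }

    /* msgtype = (RB) 32:36 = ibm_field(rb, 32, 36) = (rb >> 27) & 0x1f  (5 bits)
     * t       = (RB) 57:63 = ibm_field(rb, 57, 63) =  rb        & 0x7f  (7 bits)
     * The old masks 0xf and 0x3f kept only 4 and 6 bits, dropping the MSB of
     * each field (bit 32 and bit 57), which is exactly what the fix restores.
     */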