From fda7409a8fcfa457814f8186f2861a9f00008e75 Mon Sep 17 00:00:00 2001
From: Huacai Chen
Date: Mon, 8 Aug 2022 17:32:05 +0800
Subject: irqchip/loongson-pch-pic: Move find_pch_pic() into CONFIG_ACPI

MIPS doesn't declare find_pch_pic(), which causes a build warning:

>> drivers/irqchip/irq-loongson-pch-pic.c:51:5: warning: no previous prototype for function 'find_pch_pic' [-Wmissing-prototypes]
   int find_pch_pic(u32 gsi)
       ^
   drivers/irqchip/irq-loongson-pch-pic.c:51:1: note: declare 'static' if the function is not intended to be used outside of this translation unit
   int find_pch_pic(u32 gsi)
   ^
   static
   1 warning generated.

Move find_pch_pic(), which is only used by LoongArch, inside CONFIG_ACPI to fix the warning. Also remove the duplicated declaration of find_pch_pic() in irq.h.

Reported-by: kernel test robot
Signed-off-by: Huacai Chen
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20220808093205.3658485-1-chenhuacai@loongson.cn
---
 arch/loongarch/include/asm/irq.h | 1 -
 1 file changed, 1 deletion(-)

(limited to 'arch')

diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h
index 149b2123e7f4..093aee99167d 100644
--- a/arch/loongarch/include/asm/irq.h
+++ b/arch/loongarch/include/asm/irq.h
@@ -81,7 +81,6 @@ extern struct acpi_vector_group msi_group[MAX_IO_PICS];
 #define GSI_MIN_PCH_IRQ        LOONGSON_PCH_IRQ_BASE
 #define GSI_MAX_PCH_IRQ        (LOONGSON_PCH_IRQ_BASE + 256 - 1)
 
-extern int find_pch_pic(u32 gsi);
 extern int eiointc_get_node(int id);
 
 struct acpi_madt_lio_pic;
--
cgit v1.2.3

From 8924779df820c53875abaeb10c648e9cb75b46d4 Mon Sep 17 00:00:00 2001
From: Nadav Amit
Date: Sat, 13 Aug 2022 15:59:43 -0700
Subject: x86/kprobes: Fix JNG/JNLE emulation

When kprobes emulates JNG/JNLE instructions on x86, it uses the wrong condition. For JNG (opcode: 0F 8E), according to the Intel SDM, the jump is performed if (ZF == 1 or SF != OF). However, the kernel emulation currently uses 'and' instead of 'or'.

As a result, setting a kprobe on JNG/JNLE might cause the kernel to behave incorrectly whenever the kprobe is hit.

Fix by changing the 'and' to 'or'.
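As a minimal illustration of the corrected condition (an editorial sketch in plain C, not part of the patch itself):

    /*
     * JNG/JNLE is taken when ZF == 1 or SF != OF (Intel SDM).
     * The old emulation effectively computed (SF != OF) && ZF.
     */
    static bool jng_jnle_taken(bool zf, bool sf, bool of)
    {
            bool match = sf != of;          /* the JL/JNGE component */

            return match || zf;             /* was: match && zf -- the bug */
    }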
Fixes: 6256e668b7af ("x86/kprobes: Use int3 instead of debug trap for single-step") Signed-off-by: Nadav Amit Signed-off-by: Ingo Molnar Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20220813225943.143767-1-namit@vmware.com --- arch/x86/kernel/kprobes/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 74167dc5f55e..4c3c27b6aea3 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -505,7 +505,7 @@ static void kprobe_emulate_jcc(struct kprobe *p, struct pt_regs *regs) match = ((regs->flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^ ((regs->flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT); if (p->ainsn.jcc.type >= 0xe) - match = match && (regs->flags & X86_EFLAGS_ZF); + match = match || (regs->flags & X86_EFLAGS_ZF); } __kprobe_emulate_jmp(p, regs, (match && !invert) || (!match && invert)); } -- cgit v1.2.3 From 8d48562a2729742f767b0fdd994d6b2a56a49c63 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Mon, 15 Aug 2022 16:55:23 +1000 Subject: powerpc/pci: Fix get_phb_number() locking The recent change to get_phb_number() causes a DEBUG_ATOMIC_SLEEP warning on some systems: BUG: sleeping function called from invalid context at kernel/locking/mutex.c:580 in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 1, name: swapper preempt_count: 1, expected: 0 RCU nest depth: 0, expected: 0 1 lock held by swapper/1: #0: c157efb0 (hose_spinlock){+.+.}-{2:2}, at: pcibios_alloc_controller+0x64/0x220 Preemption disabled at: [<00000000>] 0x0 CPU: 0 PID: 1 Comm: swapper Not tainted 5.19.0-yocto-standard+ #1 Call Trace: [d101dc90] [c073b264] dump_stack_lvl+0x50/0x8c (unreliable) [d101dcb0] [c0093b70] __might_resched+0x258/0x2a8 [d101dcd0] [c0d3e634] __mutex_lock+0x6c/0x6ec [d101dd50] [c0a84174] of_alias_get_id+0x50/0xf4 [d101dd80] [c002ec78] pcibios_alloc_controller+0x1b8/0x220 [d101ddd0] [c140c9dc] pmac_pci_init+0x198/0x784 [d101de50] [c140852c] discover_phbs+0x30/0x4c [d101de60] [c0007fd4] do_one_initcall+0x94/0x344 [d101ded0] [c1403b40] kernel_init_freeable+0x1a8/0x22c [d101df10] [c00086e0] kernel_init+0x34/0x160 [d101df30] [c001b334] ret_from_kernel_thread+0x5c/0x64 This is because pcibios_alloc_controller() holds hose_spinlock but of_alias_get_id() takes of_mutex which can sleep. The hose_spinlock protects the phb_bitmap, and also the hose_list, but it doesn't need to be held while get_phb_number() calls the OF routines, because those are only looking up information in the device tree. So fix it by having get_phb_number() take the hose_spinlock itself, only where required, and then dropping the lock before returning. pcibios_alloc_controller() then needs to take the lock again before the list_add() but that's safe, the order of the list is not important. 
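For reference, the problematic pattern and its fix, reduced to a sketch (editorial illustration; the demo_* names are hypothetical, of_alias_get_id() is the real sleeping OF helper):

    static DEFINE_SPINLOCK(demo_lock);              /* stands in for hose_spinlock */

    /* Buggy shape: a sleeping call made while a spinlock is held. */
    static int demo_broken(struct device_node *dn)
    {
            int id;

            spin_lock(&demo_lock);                  /* atomic context from here on */
            id = of_alias_get_id(dn, "pci");        /* takes of_mutex -> may sleep: splat */
            spin_unlock(&demo_lock);
            return id;
    }

    /* Fixed shape: sleep-capable work first, lock only around shared state. */
    static int demo_fixed(struct device_node *dn)
    {
            int id = of_alias_get_id(dn, "pci");    /* device-tree lookup done unlocked */

            spin_lock(&demo_lock);
            /* ... claim the id in phb_bitmap, add to hose_list ... */
            spin_unlock(&demo_lock);
            return id;
    }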
Fixes: 0fe1e96fef0a ("powerpc/pci: Prefer PCI domain assignment via DT 'linux,pci-domain' and alias")
Reported-by: Guenter Roeck
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20220815065550.1303620-1-mpe@ellerman.id.au
---
 arch/powerpc/kernel/pci-common.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

(limited to 'arch')

diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index bdd3332200c5..31de91c8359c 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -68,10 +68,6 @@ void __init set_pci_dma_ops(const struct dma_map_ops *dma_ops)
        pci_dma_ops = dma_ops;
 }
 
-/*
- * This function should run under locking protection, specifically
- * hose_spinlock.
- */
 static int get_phb_number(struct device_node *dn)
 {
        int ret, phb_id = -1;
@@ -108,15 +104,20 @@ static int get_phb_number(struct device_node *dn)
        if (!ret)
                phb_id = (int)(prop & (MAX_PHBS - 1));
 
+       spin_lock(&hose_spinlock);
+
        /* We need to be sure to not use the same PHB number twice. */
        if ((phb_id >= 0) && !test_and_set_bit(phb_id, phb_bitmap))
-               return phb_id;
+               goto out_unlock;
 
        /* If everything fails then fallback to dynamic PHB numbering. */
        phb_id = find_first_zero_bit(phb_bitmap, MAX_PHBS);
        BUG_ON(phb_id >= MAX_PHBS);
        set_bit(phb_id, phb_bitmap);
 
+out_unlock:
+       spin_unlock(&hose_spinlock);
+
        return phb_id;
 }
 
@@ -127,10 +128,13 @@ struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
        phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
        if (phb == NULL)
                return NULL;
-       spin_lock(&hose_spinlock);
+
        phb->global_number = get_phb_number(dev);
+
+       spin_lock(&hose_spinlock);
        list_add_tail(&phb->list_node, &hose_list);
        spin_unlock(&hose_spinlock);
+
        phb->dn = dev;
        phb->is_dynamic = slab_is_available();
 #ifdef CONFIG_PPC64
--
cgit v1.2.3

From 72cbc8f04fe2fa93443c0fcccb7ad91dfea3d9ce Mon Sep 17 00:00:00 2001
From: Jan Beulich
Date: Thu, 28 Apr 2022 16:50:29 +0200
Subject: x86/PAT: Have pat_enabled() properly reflect state when running on Xen

After the commit referenced in the Fixes: tag, pat_enabled() returns false (because PAT initialization is suppressed when MTRRs are not announced as available). This has become a problem: the i915 driver now fails to initialize when running PV on Xen (i915_gem_object_pin_map() is where I located the induced failure), and its error handling is flaky enough to (at least sometimes) result in a hung system.

Yet even beyond that problem, keying the use of WC mappings to pat_enabled() (see arch_can_pci_mmap_wc()) means that graphics frame buffer accesses, in particular, would have been quite a bit less optimal than possible.

Arrange for the function to return true in such environments, without undermining the rest of the PAT MSR management logic, which continues to consider PAT disabled: specifically, no writes to the PAT MSR should occur.

For the new boolean to live in .init.data, init_cache_modes() also needs moving to .init.text (where it could/should have lived already before).

[ bp: This is the "small fix" variant for stable. It'll get replaced with a proper PAT and MTRR detection split upstream, but that is too involved for a stable backport.
  - additional touchups to commit msg. Use cpu_feature_enabled().
]

Fixes: bdd8b6c98239 ("drm/i915: replace X86_FEATURE_PAT with pat_enabled()")
Signed-off-by: Jan Beulich
Signed-off-by: Borislav Petkov
Acked-by: Ingo Molnar
Cc:
Cc: Juergen Gross
Cc: Lucas De Marchi
Link: https://lore.kernel.org/r/9385fa60-fa5d-f559-a137-6608408f88b0@suse.com
---
 arch/x86/mm/pat/memtype.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
index d5ef64ddd35e..66a209f7eb86 100644
--- a/arch/x86/mm/pat/memtype.c
+++ b/arch/x86/mm/pat/memtype.c
@@ -62,6 +62,7 @@ static bool __read_mostly pat_bp_initialized;
 static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT);
+static bool __initdata pat_force_disabled = !IS_ENABLED(CONFIG_X86_PAT);
 static bool __read_mostly pat_bp_enabled;
 static bool __read_mostly pat_cm_initialized;
@@ -86,6 +87,7 @@ void pat_disable(const char *msg_reason)
 static int __init nopat(char *str)
 {
        pat_disable("PAT support disabled via boot option.");
+       pat_force_disabled = true;
        return 0;
 }
 early_param("nopat", nopat);
@@ -272,7 +274,7 @@ static void pat_ap_init(u64 pat)
        wrmsrl(MSR_IA32_CR_PAT, pat);
 }
 
-void init_cache_modes(void)
+void __init init_cache_modes(void)
 {
        u64 pat = 0;
@@ -313,6 +315,12 @@ void init_cache_modes(void)
                 */
                pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
                      PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC);
+       } else if (!pat_force_disabled && cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) {
+               /*
+                * Clearly PAT is enabled underneath. Allow pat_enabled() to
+                * reflect this.
+                */
+               pat_bp_enabled = true;
        }
 
        __init_cache_modes(pat);
--
cgit v1.2.3

From 7b6670b03641ac308aaa6fa2e6f964ac993b5ea3 Mon Sep 17 00:00:00 2001
From: Juergen Gross
Date: Mon, 20 Jun 2022 11:45:34 +0200
Subject: s390/hypfs: avoid error message under KVM

When booting under KVM the following error messages are issued:

hypfs.7f5705: The hardware system does not support hypfs
hypfs.7a79f0: Initialization of hypfs failed with rc=-61

Demote the severity of the first message from "error" to "info" and issue the second message only in other error cases.

Signed-off-by: Juergen Gross
Acked-by: Heiko Carstens
Acked-by: Christian Borntraeger
Link: https://lore.kernel.org/r/20220620094534.18967-1-jgross@suse.com
[arch/s390/hypfs/hypfs_diag.c changed description]
Signed-off-by: Alexander Gordeev
---
 arch/s390/hypfs/hypfs_diag.c | 2 +-
 arch/s390/hypfs/inode.c      | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch')

diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index f0bc4dc3e9bf..6511d15ace45 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -437,7 +437,7 @@ __init int hypfs_diag_init(void)
        int rc;
 
        if (diag204_probe()) {
-               pr_err("The hardware system does not support hypfs\n");
+               pr_info("The hardware system does not support hypfs\n");
                return -ENODATA;
        }

diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 5c97f48cea91..ee919bfc8186 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -496,9 +496,9 @@ fail_hypfs_sprp_exit:
        hypfs_vm_exit();
 fail_hypfs_diag_exit:
        hypfs_diag_exit();
+       pr_err("Initialization of hypfs failed with rc=%i\n", rc);
 fail_dbfs_exit:
        hypfs_dbfs_exit();
-       pr_err("Initialization of hypfs failed with rc=%i\n", rc);
        return rc;
 }
 device_initcall(hypfs_init)
--
cgit v1.2.3

From 8535c239ac674f7ead0f2652932d35c52c4123b2 Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Mon, 8 Aug 2022 16:06:04 +0100
Subject: nios2: page fault et.al.
are *not* restartable syscalls... make sure that ->orig_r2 is negative for everything except the syscalls. Fixes: 82ed08dd1b0e ("nios2: Exception handling") Signed-off-by: Al Viro Signed-off-by: Dinh Nguyen --- arch/nios2/include/asm/entry.h | 3 ++- arch/nios2/kernel/entry.S | 4 +--- 2 files changed, 3 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/nios2/include/asm/entry.h b/arch/nios2/include/asm/entry.h index cf37f55efbc2..bafb7b2ca59f 100644 --- a/arch/nios2/include/asm/entry.h +++ b/arch/nios2/include/asm/entry.h @@ -50,7 +50,8 @@ stw r13, PT_R13(sp) stw r14, PT_R14(sp) stw r15, PT_R15(sp) - stw r2, PT_ORIG_R2(sp) + movi r24, -1 + stw r24, PT_ORIG_R2(sp) stw r7, PT_ORIG_R7(sp) stw ra, PT_RA(sp) diff --git a/arch/nios2/kernel/entry.S b/arch/nios2/kernel/entry.S index 0794cd7803df..fbd348bf103f 100644 --- a/arch/nios2/kernel/entry.S +++ b/arch/nios2/kernel/entry.S @@ -185,6 +185,7 @@ ENTRY(handle_system_call) ldw r5, PT_R5(sp) local_restart: + stw r2, PT_ORIG_R2(sp) /* Check that the requested system call is within limits */ movui r1, __NR_syscalls bgeu r2, r1, ret_invsyscall @@ -336,9 +337,6 @@ external_interrupt: /* skip if no interrupt is pending */ beq r12, r0, ret_from_interrupt - movi r24, -1 - stw r24, PT_ORIG_R2(sp) - /* * Process an external hardware interrupt. */ -- cgit v1.2.3 From 45ec746c65097c25e77d24eae8fee0def5b6cc5d Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 8 Aug 2022 16:06:46 +0100 Subject: nios2: don't leave NULLs in sys_call_table[] fill the gaps in there with sys_ni_syscall, as everyone does... Fixes: 82ed08dd1b0e ("nios2: Exception handling") Signed-off-by: Al Viro Signed-off-by: Dinh Nguyen --- arch/nios2/kernel/entry.S | 1 - arch/nios2/kernel/syscall_table.c | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/nios2/kernel/entry.S b/arch/nios2/kernel/entry.S index fbd348bf103f..8f41ef5b0399 100644 --- a/arch/nios2/kernel/entry.S +++ b/arch/nios2/kernel/entry.S @@ -193,7 +193,6 @@ local_restart: movhi r11, %hiadj(sys_call_table) add r1, r1, r11 ldw r1, %lo(sys_call_table)(r1) - beq r1, r0, ret_invsyscall /* Check if we are being traced */ GET_THREAD_INFO r11 diff --git a/arch/nios2/kernel/syscall_table.c b/arch/nios2/kernel/syscall_table.c index 6176d63023c1..c2875a6dd5a4 100644 --- a/arch/nios2/kernel/syscall_table.c +++ b/arch/nios2/kernel/syscall_table.c @@ -13,5 +13,6 @@ #define __SYSCALL(nr, call) [nr] = (call), void *sys_call_table[__NR_syscalls] = { + [0 ... __NR_syscalls-1] = sys_ni_syscall, #include }; -- cgit v1.2.3 From 25ba820ef36bdbaf9884adeac69b6e1821a7df76 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 8 Aug 2022 16:07:21 +0100 Subject: nios2: traced syscall does need to check the syscall number all checks done before letting the tracer modify the register state are worthless... Fixes: 82ed08dd1b0e ("nios2: Exception handling") Signed-off-by: Al Viro Signed-off-by: Dinh Nguyen --- arch/nios2/kernel/entry.S | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/nios2/kernel/entry.S b/arch/nios2/kernel/entry.S index 8f41ef5b0399..fd2449031d08 100644 --- a/arch/nios2/kernel/entry.S +++ b/arch/nios2/kernel/entry.S @@ -255,9 +255,9 @@ traced_system_call: ldw r6, PT_R6(sp) ldw r7, PT_R7(sp) - /* Fetch the syscall function, we don't need to check the boundaries - * since this is already done. - */ + /* Fetch the syscall function. 
*/
+       movui   r1, __NR_syscalls
+       bgeu    r2, r1, traced_invsyscall
        slli    r1, r2, 2
        movhi   r11,%hiadj(sys_call_table)
        add     r1, r1, r11
        ldw     r1, %lo(sys_call_table)(r1)
@@ -287,6 +287,11 @@ end_translate_rc_and_ret2:
        RESTORE_SWITCH_STACK
        br      ret_from_exception
 
+       /* If the syscall number was invalid return ENOSYS */
+traced_invsyscall:
+       movi    r2, -ENOSYS
+       br      translate_rc_and_ret2
+
 Luser_return:
        GET_THREAD_INFO r11                     /* get thread_info pointer */
        ldw     r10, TI_FLAGS(r11)              /* get thread_info->flags */
--
cgit v1.2.3

From 2d631bd58fe0ea3e3350212e23c9aba1fb606514 Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Mon, 8 Aug 2022 16:08:48 +0100
Subject: nios2: fix syscall restart checks

sys_foo() returns -512 (aka -ERESTARTSYS) => do_signal() sees 512 in r2 and 1 in r1.

sys_foo() returns 512 => do_signal() sees 512 in r2 and 0 in r1.

The former is restart-worthy; the latter obviously isn't.

Fixes: b53e906d255d ("nios2: Signal handling support")
Signed-off-by: Al Viro
Signed-off-by: Dinh Nguyen
---
 arch/nios2/kernel/signal.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c
index cb0b91589cf2..15e672d7150e 100644
--- a/arch/nios2/kernel/signal.c
+++ b/arch/nios2/kernel/signal.c
@@ -242,7 +242,7 @@ static int do_signal(struct pt_regs *regs)
        /*
         * If we were from a system call, check for system call restarting...
         */
-       if (regs->orig_r2 >= 0) {
+       if (regs->orig_r2 >= 0 && regs->r1) {
                continue_addr = regs->ea;
                restart_addr = continue_addr - 4;
                retval = regs->r2;
--
cgit v1.2.3

From 411a76b7219555c55867466c82d70ce928d6c9e1 Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Mon, 8 Aug 2022 16:09:16 +0100
Subject: nios2: restarts apply only to the first sigframe we build...

Fixes: b53e906d255d ("nios2: Signal handling support")
Signed-off-by: Al Viro
Signed-off-by: Dinh Nguyen
---
 arch/nios2/kernel/signal.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch')

diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c
index 15e672d7150e..a5b93a30c6eb 100644
--- a/arch/nios2/kernel/signal.c
+++ b/arch/nios2/kernel/signal.c
@@ -264,6 +264,7 @@ static int do_signal(struct pt_regs *regs)
                        regs->ea = restart_addr;
                        break;
                }
+               regs->orig_r2 = -1;
        }
 
        if (get_signal(&ksig)) {
--
cgit v1.2.3

From fd0c153daad135d0ec1a53c5dbe6936a724d6ae1 Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Mon, 8 Aug 2022 16:09:45 +0100
Subject: nios2: add force_successful_syscall_return()

If we use the ancient SysV syscall ABI, we'd better have a way to tell the kernel that a negative return value is actually a success. Use ->orig_r2 for that - it's inaccessible via ptrace, so it's fair game for changes, and it's normally[*] non-negative on return from a syscall. Set it to -1; the syscall is not going to be restart-worthy by definition, so we won't interfere with that use either.

[*] the only exception is rt_sigreturn(), where we skip the entire messing with r1/r2 anyway.
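To illustrate how the new helper is meant to be used (an editorial sketch; the demo syscall and compute_offset() are hypothetical, SYSCALL_DEFINE1 is the regular kernel macro):

    /* A syscall whose result may legitimately be negative without
     * being an error under the old SysV-style return convention.
     */
    SYSCALL_DEFINE1(demo_get_offset, int, fd)
    {
            long off = compute_offset(fd);  /* may be < 0 on success */

            /* Sets ->orig_r2 to -1 so the return path reports success
             * (r1 == 0) instead of treating the value as -errno.
             */
            force_successful_syscall_return();
            return off;
    }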
Fixes: 82ed08dd1b0e ("nios2: Exception handling") Signed-off-by: Al Viro Signed-off-by: Dinh Nguyen --- arch/nios2/include/asm/ptrace.h | 2 ++ arch/nios2/kernel/entry.S | 6 ++++++ 2 files changed, 8 insertions(+) (limited to 'arch') diff --git a/arch/nios2/include/asm/ptrace.h b/arch/nios2/include/asm/ptrace.h index 642462144872..9da34c3022a2 100644 --- a/arch/nios2/include/asm/ptrace.h +++ b/arch/nios2/include/asm/ptrace.h @@ -74,6 +74,8 @@ extern void show_regs(struct pt_regs *); ((struct pt_regs *)((unsigned long)current_thread_info() + THREAD_SIZE)\ - 1) +#define force_successful_syscall_return() (current_pt_regs()->orig_r2 = -1) + int do_syscall_trace_enter(void); void do_syscall_trace_exit(void); #endif /* __ASSEMBLY__ */ diff --git a/arch/nios2/kernel/entry.S b/arch/nios2/kernel/entry.S index fd2449031d08..99f0a65e6234 100644 --- a/arch/nios2/kernel/entry.S +++ b/arch/nios2/kernel/entry.S @@ -213,6 +213,9 @@ local_restart: translate_rc_and_ret: movi r1, 0 bge r2, zero, 3f + ldw r1, PT_ORIG_R2(sp) + addi r1, r1, 1 + beq r1, zero, 3f sub r2, zero, r2 movi r1, 1 3: @@ -276,6 +279,9 @@ traced_system_call: translate_rc_and_ret2: movi r1, 0 bge r2, zero, 4f + ldw r1, PT_ORIG_R2(sp) + addi r1, r1, 1 + beq r1, zero, 4f sub r2, zero, r2 movi r1, 1 4: -- cgit v1.2.3 From 9993a4f989c7ca5e227329b2878f65d05c9fc20f Mon Sep 17 00:00:00 2001 From: Michael S. Tsirkin Date: Tue, 16 Aug 2022 01:36:58 -0400 Subject: virtio: Revert "virtio: find_vqs() add arg sizes" This reverts commit a10fba0377145fccefea4dc4dd5915b7ed87e546: the proposed API isn't supported on all transports but no effort was made to address this. It might not be hard to fix if we want to: maybe just rename size to size_hint and make sure legacy transports ignore the hint. But it's not sure what the benefit is in any case, so let's drop it. Fixes: a10fba037714 ("virtio: find_vqs() add arg sizes") Signed-off-by: Michael S. 
Tsirkin Message-Id: <20220816053602.173815-8-mst@redhat.com> --- arch/um/drivers/virtio_uml.c | 2 +- drivers/platform/mellanox/mlxbf-tmfifo.c | 1 - drivers/remoteproc/remoteproc_virtio.c | 1 - drivers/s390/virtio/virtio_ccw.c | 1 - drivers/virtio/virtio_mmio.c | 1 - drivers/virtio/virtio_pci_common.c | 2 +- drivers/virtio/virtio_pci_common.h | 2 +- drivers/virtio/virtio_pci_modern.c | 7 ++----- drivers/virtio/virtio_vdpa.c | 1 - include/linux/virtio_config.h | 14 +++++--------- 10 files changed, 10 insertions(+), 22 deletions(-) (limited to 'arch') diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c index 79e38afd4b91..e719af8bdf56 100644 --- a/arch/um/drivers/virtio_uml.c +++ b/arch/um/drivers/virtio_uml.c @@ -1011,7 +1011,7 @@ error_kzalloc: static int vu_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[], u32 sizes[], const bool *ctx, + const char * const names[], const bool *ctx, struct irq_affinity *desc) { struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev); diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c index 8be13d416f48..1ae3c56b66b0 100644 --- a/drivers/platform/mellanox/mlxbf-tmfifo.c +++ b/drivers/platform/mellanox/mlxbf-tmfifo.c @@ -928,7 +928,6 @@ static int mlxbf_tmfifo_virtio_find_vqs(struct virtio_device *vdev, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char * const names[], - u32 sizes[], const bool *ctx, struct irq_affinity *desc) { diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c index 81c4f5776109..0f7706e23eb9 100644 --- a/drivers/remoteproc/remoteproc_virtio.c +++ b/drivers/remoteproc/remoteproc_virtio.c @@ -158,7 +158,6 @@ static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char * const names[], - u32 sizes[], const bool * ctx, struct irq_affinity *desc) { diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index 896896e32664..a10dbe632ef9 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -637,7 +637,6 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char * const names[], - u32 sizes[], const bool *ctx, struct irq_affinity *desc) { diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index dfcecfd7aba1..3ff746e3f24a 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -474,7 +474,6 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned int nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char * const names[], - u32 sizes[], const bool *ctx, struct irq_affinity *desc) { diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index 7ad734584823..ad258a9d3b9f 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -396,7 +396,7 @@ out_del_vqs: /* the config->find_vqs() implementation */ int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[], u32 sizes[], const bool *ctx, + const char * const names[], const bool *ctx, struct irq_affinity *desc) { int err; diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h index a5ff838b85a5..23112d84218f 100644 --- 
a/drivers/virtio/virtio_pci_common.h +++ b/drivers/virtio/virtio_pci_common.h @@ -110,7 +110,7 @@ void vp_del_vqs(struct virtio_device *vdev); /* the config->find_vqs() implementation */ int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[], u32 sizes[], const bool *ctx, + const char * const names[], const bool *ctx, struct irq_affinity *desc); const char *vp_bus_name(struct virtio_device *vdev); diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c index be51ec849252..c3b9f2761849 100644 --- a/drivers/virtio/virtio_pci_modern.c +++ b/drivers/virtio/virtio_pci_modern.c @@ -347,15 +347,12 @@ err: static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned int nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[], - u32 sizes[], - const bool *ctx, + const char * const names[], const bool *ctx, struct irq_affinity *desc) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtqueue *vq; - int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, sizes, ctx, - desc); + int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, ctx, desc); if (rc) return rc; diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c index 832d2c5b1b19..9670cc79371d 100644 --- a/drivers/virtio/virtio_vdpa.c +++ b/drivers/virtio/virtio_vdpa.c @@ -269,7 +269,6 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char * const names[], - u32 sizes[], const bool *ctx, struct irq_affinity *desc) { diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 888f7e96f0c7..36ec7be1f480 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -55,7 +55,6 @@ struct virtio_shm_region { * include a NULL entry for vqs that do not need a callback * names: array of virtqueue names (mainly for debugging) * include a NULL entry for vqs unused by driver - * sizes: array of virtqueue sizes * Returns 0 on success or error status * @del_vqs: free virtqueues found by find_vqs(). 
* @synchronize_cbs: synchronize with the virtqueue callbacks (optional) @@ -104,9 +103,7 @@ struct virtio_config_ops { void (*reset)(struct virtio_device *vdev); int (*find_vqs)(struct virtio_device *, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[], - u32 sizes[], - const bool *ctx, + const char * const names[], const bool *ctx, struct irq_affinity *desc); void (*del_vqs)(struct virtio_device *); void (*synchronize_cbs)(struct virtio_device *); @@ -215,7 +212,7 @@ struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev, const char *names[] = { n }; struct virtqueue *vq; int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names, NULL, - NULL, NULL); + NULL); if (err < 0) return ERR_PTR(err); return vq; @@ -227,8 +224,7 @@ int virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs, const char * const names[], struct irq_affinity *desc) { - return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL, - NULL, desc); + return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL, desc); } static inline @@ -237,8 +233,8 @@ int virtio_find_vqs_ctx(struct virtio_device *vdev, unsigned nvqs, const char * const names[], const bool *ctx, struct irq_affinity *desc) { - return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL, - ctx, desc); + return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, ctx, + desc); } /** -- cgit v1.2.3 From 5b9f0c4df1c1152403c738373fb063e9ffdac0a1 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Tue, 16 Aug 2022 09:11:37 +0200 Subject: x86/entry: Fix entry_INT80_compat for Xen PV guests Commit c89191ce67ef ("x86/entry: Convert SWAPGS to swapgs and remove the definition of SWAPGS") missed one use case of SWAPGS in entry_INT80_compat(). Removing of the SWAPGS macro led to asm just using "swapgs", as it is accepting instructions in capital letters, too. This in turn leads to splats in Xen PV guests like: [ 36.145223] general protection fault, maybe for address 0x2d: 0000 [#1] PREEMPT SMP NOPTI [ 36.145794] CPU: 2 PID: 1847 Comm: ld-linux.so.2 Not tainted 5.19.1-1-default #1 \ openSUSE Tumbleweed f3b44bfb672cdb9f235aff53b57724eba8b9411b [ 36.146608] Hardware name: HP ProLiant ML350p Gen8, BIOS P72 11/14/2013 [ 36.148126] RIP: e030:entry_INT80_compat+0x3/0xa3 Fix that by open coding this single instance of the SWAPGS macro. Fixes: c89191ce67ef ("x86/entry: Convert SWAPGS to swapgs and remove the definition of SWAPGS") Signed-off-by: Juergen Gross Signed-off-by: Borislav Petkov Reviewed-by: Jan Beulich Cc: # 5.19 Link: https://lore.kernel.org/r/20220816071137.4893-1-jgross@suse.com --- arch/x86/entry/entry_64_compat.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index 682338e7e2a3..4dd19819053a 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -311,7 +311,7 @@ SYM_CODE_START(entry_INT80_compat) * Interrupts are off on entry. 
*/ ASM_CLAC /* Do this early to minimize exposure */ - SWAPGS + ALTERNATIVE "swapgs", "", X86_FEATURE_XENPV /* * User tracing code (ptrace or signal handlers) might assume that -- cgit v1.2.3 From c4e34dd99f2e3fdfc63584078ce0fed20f4e7386 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 14 Aug 2022 14:16:13 -0700 Subject: x86: simplify load_unaligned_zeropad() implementation The exception for the "unaligned access at the end of the page, next page not mapped" never happens, but the fixup code ends up causing trouble for compilers to optimize well. clang in particular ends up seeing it being in the middle of a loop, and tries desperately to optimize the exception fixup code that is never really reached. The simple solution is to just move all the fixups into the exception handler itself, which moves it all out of the hot case code, and means that the compiler never sees it or needs to worry about it. Acked-by: Peter Zijlstra Signed-off-by: Linus Torvalds --- arch/x86/include/asm/extable_fixup_types.h | 2 ++ arch/x86/include/asm/word-at-a-time.h | 46 ++----------------------- arch/x86/mm/extable.c | 55 ++++++++++++++++++++++++++++++ 3 files changed, 60 insertions(+), 43 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/extable_fixup_types.h b/arch/x86/include/asm/extable_fixup_types.h index 503622627400..991e31cfde94 100644 --- a/arch/x86/include/asm/extable_fixup_types.h +++ b/arch/x86/include/asm/extable_fixup_types.h @@ -64,4 +64,6 @@ #define EX_TYPE_UCOPY_LEN4 (EX_TYPE_UCOPY_LEN | EX_DATA_IMM(4)) #define EX_TYPE_UCOPY_LEN8 (EX_TYPE_UCOPY_LEN | EX_DATA_IMM(8)) +#define EX_TYPE_ZEROPAD 20 /* longword load with zeropad on fault */ + #endif diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h index 8338b0432b50..46b4f1f7f354 100644 --- a/arch/x86/include/asm/word-at-a-time.h +++ b/arch/x86/include/asm/word-at-a-time.h @@ -77,58 +77,18 @@ static inline unsigned long find_zero(unsigned long mask) * and the next page not being mapped, take the exception and * return zeroes in the non-existing part. 
*/ -#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT - static inline unsigned long load_unaligned_zeropad(const void *addr) { - unsigned long offset, data; unsigned long ret; - asm_volatile_goto( + asm volatile( "1: mov %[mem], %[ret]\n" - - _ASM_EXTABLE(1b, %l[do_exception]) - - : [ret] "=r" (ret) - : [mem] "m" (*(unsigned long *)addr) - : : do_exception); - - return ret; - -do_exception: - offset = (unsigned long)addr & (sizeof(long) - 1); - addr = (void *)((unsigned long)addr & ~(sizeof(long) - 1)); - data = *(unsigned long *)addr; - ret = data >> offset * 8; - - return ret; -} - -#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */ - -static inline unsigned long load_unaligned_zeropad(const void *addr) -{ - unsigned long offset, data; - unsigned long ret, err = 0; - - asm( "1: mov %[mem], %[ret]\n" "2:\n" - - _ASM_EXTABLE_FAULT(1b, 2b) - - : [ret] "=&r" (ret), "+a" (err) + _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_ZEROPAD) + : [ret] "=r" (ret) : [mem] "m" (*(unsigned long *)addr)); - if (unlikely(err)) { - offset = (unsigned long)addr & (sizeof(long) - 1); - addr = (void *)((unsigned long)addr & ~(sizeof(long) - 1)); - data = *(unsigned long *)addr; - ret = data >> offset * 8; - } - return ret; } -#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */ - #endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c index 331310c29349..60814e110a54 100644 --- a/arch/x86/mm/extable.c +++ b/arch/x86/mm/extable.c @@ -41,6 +41,59 @@ static bool ex_handler_default(const struct exception_table_entry *e, return true; } +/* + * This is the *very* rare case where we do a "load_unaligned_zeropad()" + * and it's a page crosser into a non-existent page. + * + * This happens when we optimistically load a pathname a word-at-a-time + * and the name is less than the full word and the next page is not + * mapped. Typically that only happens for CONFIG_DEBUG_PAGEALLOC. + * + * NOTE! The faulting address is always a 'mov mem,reg' type instruction + * of size 'long', and the exception fixup must always point to right + * after the instruction. 
+ */
+static bool ex_handler_zeropad(const struct exception_table_entry *e,
+                              struct pt_regs *regs,
+                              unsigned long fault_addr)
+{
+       struct insn insn;
+       const unsigned long mask = sizeof(long) - 1;
+       unsigned long offset, addr, next_ip, len;
+       unsigned long *reg;
+
+       next_ip = ex_fixup_addr(e);
+       len = next_ip - regs->ip;
+       if (len > MAX_INSN_SIZE)
+               return false;
+
+       if (insn_decode(&insn, (void *) regs->ip, len, INSN_MODE_KERN))
+               return false;
+       if (insn.length != len)
+               return false;
+
+       if (insn.opcode.bytes[0] != 0x8b)
+               return false;
+       if (insn.opnd_bytes != sizeof(long))
+               return false;
+
+       addr = (unsigned long) insn_get_addr_ref(&insn, regs);
+       if (addr == ~0ul)
+               return false;
+
+       offset = addr & mask;
+       addr = addr & ~mask;
+       if (fault_addr != addr + sizeof(long))
+               return false;
+
+       reg = insn_get_modrm_reg_ptr(&insn, regs);
+       if (!reg)
+               return false;
+
+       *reg = *(unsigned long *)addr >> (offset * 8);
+       return ex_handler_default(e, regs);
+}
+
 static bool ex_handler_fault(const struct exception_table_entry *fixup,
                             struct pt_regs *regs, int trapnr)
 {
@@ -217,6 +270,8 @@ int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code,
                return ex_handler_sgx(e, regs, trapnr);
        case EX_TYPE_UCOPY_LEN:
                return ex_handler_ucopy_len(e, regs, trapnr, reg, imm);
+       case EX_TYPE_ZEROPAD:
+               return ex_handler_zeropad(e, regs, fault_addr);
        }
        BUG();
 }
--
cgit v1.2.3

From eb6354e116305afbfde196be5120bfa8669fdc6a Mon Sep 17 00:00:00 2001
From: Andrew Jones
Date: Tue, 16 Aug 2022 18:30:58 +0200
Subject: riscv: Ensure isa-ext static keys are writable

riscv_isa_ext_keys[] is an array of static keys used in the unified ISA extension framework. The keys added to this array may be used anywhere, including in modules. Ensure the keys remain writable by placing them in the data section.

The need to change riscv_isa_ext_keys[]'s section was found when the kvm module started failing to load. Commit 8eb060e10185 ("arch/riscv: add Zihintpause support") adds a static branch check for a newly added isa-ext key to cpu_relax(), which kvm uses.

Fixes: c360cbec3511 ("riscv: introduce unified static key mechanism for ISA extensions")
Signed-off-by: Andrew Jones
Cc: stable@vger.kernel.org
Reported-by: Ron Economos
Reported-by: Anup Patel
Reported-by: Conor Dooley
Tested-by: Atish Patra
Link: https://lore.kernel.org/r/20220816163058.3004536-1-ajones@ventanamicro.com
Signed-off-by: Palmer Dabbelt
---
 arch/riscv/kernel/cpufeature.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index 553d755483ed..3b5583db9d80 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -28,7 +28,7 @@ unsigned long elf_hwcap __read_mostly;
 /* Host ISA bitmap */
 static DECLARE_BITMAP(riscv_isa, RISCV_ISA_EXT_MAX) __read_mostly;
 
-__ro_after_init DEFINE_STATIC_KEY_ARRAY_FALSE(riscv_isa_ext_keys, RISCV_ISA_EXT_KEY_MAX);
+DEFINE_STATIC_KEY_ARRAY_FALSE(riscv_isa_ext_keys, RISCV_ISA_EXT_KEY_MAX);
 EXPORT_SYMBOL(riscv_isa_ext_keys);
 
 /**
--
cgit v1.2.3

From 5e1e087457c94ad7fafbe1cf6f774c6999ee29d4 Mon Sep 17 00:00:00 2001
From: Zenghui Yu
Date: Tue, 9 Aug 2022 12:38:48 +0800
Subject: arm64: Fix match_list for erratum 1286807 on Arm Cortex-A76

Since commit 51f559d66527 ("arm64: Enable repeat tlbi workaround on KRYO4XX gold CPUs"), we failed to detect erratum 1286807 on Cortex-A76 because its entry in arm64_repeat_tlbi_list[] was accidentally corrupted by this commit.
Fix this issue by creating a separate entry for Kryo4xx Gold. Fixes: 51f559d66527 ("arm64: Enable repeat tlbi workaround on KRYO4XX gold CPUs") Cc: Shreyas K K Signed-off-by: Zenghui Yu Acked-by: Marc Zyngier Link: https://lore.kernel.org/r/20220809043848.969-1-yuzenghui@huawei.com Signed-off-by: Will Deacon --- arch/arm64/kernel/cpu_errata.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 7e6289e709fc..0f7e9087d900 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -208,6 +208,8 @@ static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = { #ifdef CONFIG_ARM64_ERRATUM_1286807 { ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0), + }, + { /* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */ ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe), }, -- cgit v1.2.3 From ff5900092227ade3e31fe25f97faf406cde902e6 Mon Sep 17 00:00:00 2001 From: Lukas Bulwahn Date: Thu, 21 Jul 2022 12:04:33 +0200 Subject: arm64: adjust KASLR relocation after ARCH_RANDOM removal Commit aacd149b6238 ("arm64: head: avoid relocating the kernel twice for KASLR") adds the new file arch/arm64/kernel/pi/kaslr_early.c with a small code part guarded by '#ifdef CONFIG_ARCH_RANDOM'. Concurrently, commit 9592eef7c16e ("random: remove CONFIG_ARCH_RANDOM") removes the config CONFIG_ARCH_RANDOM and turns all '#ifdef CONFIG_ARCH_RANDOM' code parts into unconditional code parts, which is generally safe to do. Remove a needless ifdef guard after the ARCH_RANDOM removal. Signed-off-by: Lukas Bulwahn Acked-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20220721100433.18286-1-lukas.bulwahn@gmail.com Signed-off-by: Will Deacon --- arch/arm64/kernel/pi/kaslr_early.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/pi/kaslr_early.c b/arch/arm64/kernel/pi/kaslr_early.c index 6c3855e69395..17bff6e399e4 100644 --- a/arch/arm64/kernel/pi/kaslr_early.c +++ b/arch/arm64/kernel/pi/kaslr_early.c @@ -94,11 +94,9 @@ asmlinkage u64 kaslr_early_init(void *fdt) seed = get_kaslr_seed(fdt); if (!seed) { -#ifdef CONFIG_ARCH_RANDOM - if (!__early_cpu_has_rndr() || - !__arm64_rndr((unsigned long *)&seed)) -#endif - return 0; + if (!__early_cpu_has_rndr() || + !__arm64_rndr((unsigned long *)&seed)) + return 0; } /* -- cgit v1.2.3 From 34fc9cc3aebe8b9e27d3bc821543dd482dc686ca Mon Sep 17 00:00:00 2001 From: Heinrich Schuchardt Date: Wed, 17 Aug 2022 15:25:21 +0200 Subject: riscv: dts: microchip: correct L2 cache interrupts The "PolarFire SoC MSS Technical Reference Manual" documents the following PLIC interrupts: 1 - L2 Cache Controller Signals when a metadata correction event occurs 2 - L2 Cache Controller Signals when an uncorrectable metadata event occurs 3 - L2 Cache Controller Signals when a data correction event occurs 4 - L2 Cache Controller Signals when an uncorrectable data event occurs This differs from the SiFive FU540 which only has three L2 cache related interrupts. The sequence in the device tree is defined by an enum: enum {         DIR_CORR = 0,         DATA_CORR,         DATA_UNCORR,         DIR_UNCORR, }; So the correct sequence of the L2 cache interrupts is interrupts = <1>, <3>, <4>, <2>; [Conor] This manifests as an unusable system if the l2-cache driver is enabled, as the wrong interrupt gets cleared & the handler prints errors to the console ad infinitum. 
Fixes: 0fa6107eca41 ("RISC-V: Initial DTS for Microchip ICICLE board") CC: stable@vger.kernel.org # 5.15: e35b07a7df9b: riscv: dts: microchip: mpfs: Group tuples in interrupt properties Signed-off-by: Heinrich Schuchardt Signed-off-by: Conor Dooley --- arch/riscv/boot/dts/microchip/mpfs.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/riscv/boot/dts/microchip/mpfs.dtsi b/arch/riscv/boot/dts/microchip/mpfs.dtsi index 499c2e63ad35..0a6ad5b9ff8d 100644 --- a/arch/riscv/boot/dts/microchip/mpfs.dtsi +++ b/arch/riscv/boot/dts/microchip/mpfs.dtsi @@ -193,7 +193,7 @@ cache-size = <2097152>; cache-unified; interrupt-parent = <&plic>; - interrupts = <1>, <2>, <3>; + interrupts = <1>, <3>, <4>, <2>; }; clint: clint@2000000 { -- cgit v1.2.3 From 7df548840c496b0141fb2404b889c346380c2b22 Mon Sep 17 00:00:00 2001 From: Pawan Gupta Date: Wed, 3 Aug 2022 14:41:32 -0700 Subject: x86/bugs: Add "unknown" reporting for MMIO Stale Data Older Intel CPUs that are not in the affected processor list for MMIO Stale Data vulnerabilities currently report "Not affected" in sysfs, which may not be correct. Vulnerability status for these older CPUs is unknown. Add known-not-affected CPUs to the whitelist. Report "unknown" mitigation status for CPUs that are not in blacklist, whitelist and also don't enumerate MSR ARCH_CAPABILITIES bits that reflect hardware immunity to MMIO Stale Data vulnerabilities. Mitigation is not deployed when the status is unknown. [ bp: Massage, fixup. ] Fixes: 8d50cdf8b834 ("x86/speculation/mmio: Add sysfs reporting for Processor MMIO Stale Data") Suggested-by: Andrew Cooper Suggested-by: Tony Luck Signed-off-by: Pawan Gupta Signed-off-by: Borislav Petkov Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/a932c154772f2121794a5f2eded1a11013114711.1657846269.git.pawan.kumar.gupta@linux.intel.com --- .../hw-vuln/processor_mmio_stale_data.rst | 14 ++++++++ arch/x86/include/asm/cpufeatures.h | 5 +-- arch/x86/kernel/cpu/bugs.c | 14 ++++++-- arch/x86/kernel/cpu/common.c | 42 ++++++++++++++-------- 4 files changed, 56 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst index 9393c50b5afc..c98fd11907cc 100644 --- a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst +++ b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst @@ -230,6 +230,20 @@ The possible values in this file are: * - 'Mitigation: Clear CPU buffers' - The processor is vulnerable and the CPU buffer clearing mitigation is enabled. + * - 'Unknown: No mitigations' + - The processor vulnerability status is unknown because it is + out of Servicing period. Mitigation is not attempted. + +Definitions: +------------ + +Servicing period: The process of providing functional and security updates to +Intel processors or platforms, utilizing the Intel Platform Update (IPU) +process or other similar mechanisms. + +End of Servicing Updates (ESU): ESU is the date at which Intel will no +longer provide Servicing, such as through IPU or other similar update +processes. ESU dates will typically be aligned to end of quarter. 
If the processor is vulnerable then the following information is appended to the above information: diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 235dc85c91c3..ef4775c6db01 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -457,7 +457,8 @@ #define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */ #define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */ #define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */ -#define X86_BUG_RETBLEED X86_BUG(26) /* CPU is affected by RETBleed */ -#define X86_BUG_EIBRS_PBRSB X86_BUG(27) /* EIBRS is vulnerable to Post Barrier RSB Predictions */ +#define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */ +#define X86_BUG_RETBLEED X86_BUG(27) /* CPU is affected by RETBleed */ +#define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 510d85261132..da7c361f47e0 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -433,7 +433,8 @@ static void __init mmio_select_mitigation(void) u64 ia32_cap; if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) || - cpu_mitigations_off()) { + boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) || + cpu_mitigations_off()) { mmio_mitigation = MMIO_MITIGATION_OFF; return; } @@ -538,6 +539,8 @@ out: pr_info("TAA: %s\n", taa_strings[taa_mitigation]); if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]); + else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) + pr_info("MMIO Stale Data: Unknown: No mitigations\n"); } static void __init md_clear_select_mitigation(void) @@ -2275,6 +2278,9 @@ static ssize_t tsx_async_abort_show_state(char *buf) static ssize_t mmio_stale_data_show_state(char *buf) { + if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) + return sysfs_emit(buf, "Unknown: No mitigations\n"); + if (mmio_mitigation == MMIO_MITIGATION_OFF) return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]); @@ -2421,6 +2427,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr return srbds_show_state(buf); case X86_BUG_MMIO_STALE_DATA: + case X86_BUG_MMIO_UNKNOWN: return mmio_stale_data_show_state(buf); case X86_BUG_RETBLEED: @@ -2480,7 +2487,10 @@ ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char * ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf) { - return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA); + if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) + return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN); + else + return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA); } ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 64a73f415f03..3e508f239098 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1135,7 +1135,8 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) #define NO_SWAPGS BIT(6) #define NO_ITLB_MULTIHIT BIT(7) #define NO_SPECTRE_V2 BIT(8) -#define NO_EIBRS_PBRSB BIT(9) +#define NO_MMIO BIT(9) +#define NO_EIBRS_PBRSB BIT(10) #define VULNWL(vendor, family, model, whitelist) \ 
X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist) @@ -1158,6 +1159,11 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { VULNWL(VORTEX, 6, X86_MODEL_ANY, NO_SPECULATION), /* Intel Family 6 */ + VULNWL_INTEL(TIGERLAKE, NO_MMIO), + VULNWL_INTEL(TIGERLAKE_L, NO_MMIO), + VULNWL_INTEL(ALDERLAKE, NO_MMIO), + VULNWL_INTEL(ALDERLAKE_L, NO_MMIO), + VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION | NO_ITLB_MULTIHIT), VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT), VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT), @@ -1176,9 +1182,9 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), VULNWL_INTEL(ATOM_AIRMONT_NP, NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT), - VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT), - VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT), - VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB), + VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), + VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), + VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB), /* * Technically, swapgs isn't serializing on AMD (despite it previously @@ -1193,18 +1199,18 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB), /* AMD Family 0xf - 0x12 */ - VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), - VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), - VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), - VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), + VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), + VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), + VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), + VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */ - VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), - VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), + VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), + VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), /* Zhaoxin Family 7 */ - VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS), - VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS), + VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO), + VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO), {} }; @@ -1358,10 +1364,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) * Affected CPU list is generally enough to enumerate the vulnerability, * but for virtualization case check for ARCH_CAP MSR bits also, VMM may * not want the guest to enumerate the bug. 
+ * + * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist, + * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits. */ - if (cpu_matches(cpu_vuln_blacklist, MMIO) && - !arch_cap_mmio_immune(ia32_cap)) - setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA); + if (!arch_cap_mmio_immune(ia32_cap)) { + if (cpu_matches(cpu_vuln_blacklist, MMIO)) + setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA); + else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO)) + setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN); + } if (!cpu_has(c, X86_FEATURE_BTC_NO)) { if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA)) -- cgit v1.2.3 From b5c3aca86d2698c4850b6ee8b341938025d2780c Mon Sep 17 00:00:00 2001 From: Conor Dooley Date: Sun, 14 Aug 2022 15:12:37 +0100 Subject: riscv: signal: fix missing prototype warning Fix the warning: arch/riscv/kernel/signal.c:316:27: warning: no previous prototype for function 'do_notify_resume' [-Wmissing-prototypes] asmlinkage __visible void do_notify_resume(struct pt_regs *regs, All other functions in the file are static & none of the existing headers stood out as an obvious location. Create signal.h to hold the declaration. Fixes: e2c0cdfba7f6 ("RISC-V: User-facing API") Signed-off-by: Conor Dooley Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20220814141237.493457-4-mail@conchuod.ie Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/signal.h | 12 ++++++++++++ arch/riscv/kernel/signal.c | 1 + 2 files changed, 13 insertions(+) create mode 100644 arch/riscv/include/asm/signal.h (limited to 'arch') diff --git a/arch/riscv/include/asm/signal.h b/arch/riscv/include/asm/signal.h new file mode 100644 index 000000000000..532c29ef0376 --- /dev/null +++ b/arch/riscv/include/asm/signal.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ASM_SIGNAL_H +#define __ASM_SIGNAL_H + +#include +#include + +asmlinkage __visible +void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags); + +#endif diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c index 38b05ca6fe66..5a2de6b6f882 100644 --- a/arch/riscv/kernel/signal.c +++ b/arch/riscv/kernel/signal.c @@ -15,6 +15,7 @@ #include #include +#include #include #include #include -- cgit v1.2.3 From d951b20b9def73dcc39a5379831525d0d2a537e9 Mon Sep 17 00:00:00 2001 From: Conor Dooley Date: Sun, 14 Aug 2022 15:12:38 +0100 Subject: riscv: traps: add missing prototype Sparse complains: arch/riscv/kernel/traps.c:213:6: warning: symbol 'shadow_stack' was not declared. Should it be static? The variable is used in entry.S, so declare shadow_stack there alongside SHADOW_OVERFLOW_STACK_SIZE. 
Fixes: 31da94c25aea ("riscv: add VMAP_STACK overflow detection") Signed-off-by: Conor Dooley Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20220814141237.493457-5-mail@conchuod.ie Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/thread_info.h | 2 ++ arch/riscv/kernel/traps.c | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h index 78933ac04995..67322f878e0d 100644 --- a/arch/riscv/include/asm/thread_info.h +++ b/arch/riscv/include/asm/thread_info.h @@ -42,6 +42,8 @@ #ifndef __ASSEMBLY__ +extern long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE / sizeof(long)]; + #include #include diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index 39d0f8bba4b4..635e6ec26938 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -20,9 +20,10 @@ #include #include +#include #include #include -#include +#include int show_unhandled_signals = 1; -- cgit v1.2.3 From 4e3aa9238277597c6c7624f302d81a7b568b6f2d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 16 Aug 2022 14:28:36 +0200 Subject: x86/nospec: Unwreck the RSB stuffing Commit 2b1299322016 ("x86/speculation: Add RSB VM Exit protections") made a right mess of the RSB stuffing, rewrite the whole thing to not suck. Thanks to Andrew for the enlightening comment about Post-Barrier RSB things so we can make this code less magical. Cc: stable@vger.kernel.org Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/YvuNdDWoUZSBjYcm@worktop.programming.kicks-ass.net --- arch/x86/include/asm/nospec-branch.h | 80 ++++++++++++++++++------------------ 1 file changed, 39 insertions(+), 41 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index e64fd20778b6..10731ccfed37 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -35,33 +35,44 @@ #define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */ /* + * Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN. + */ +#define __FILL_RETURN_SLOT \ + ANNOTATE_INTRA_FUNCTION_CALL; \ + call 772f; \ + int3; \ +772: + +/* + * Stuff the entire RSB. + * * Google experimented with loop-unrolling and this turned out to be * the optimal version - two calls, each with their own speculation * trap should their return address end up getting used, in a loop. */ -#define __FILL_RETURN_BUFFER(reg, nr, sp) \ - mov $(nr/2), reg; \ -771: \ - ANNOTATE_INTRA_FUNCTION_CALL; \ - call 772f; \ -773: /* speculation trap */ \ - UNWIND_HINT_EMPTY; \ - pause; \ - lfence; \ - jmp 773b; \ -772: \ - ANNOTATE_INTRA_FUNCTION_CALL; \ - call 774f; \ -775: /* speculation trap */ \ - UNWIND_HINT_EMPTY; \ - pause; \ - lfence; \ - jmp 775b; \ -774: \ - add $(BITS_PER_LONG/8) * 2, sp; \ - dec reg; \ - jnz 771b; \ - /* barrier for jnz misprediction */ \ +#define __FILL_RETURN_BUFFER(reg, nr) \ + mov $(nr/2), reg; \ +771: \ + __FILL_RETURN_SLOT \ + __FILL_RETURN_SLOT \ + add $(BITS_PER_LONG/8) * 2, %_ASM_SP; \ + dec reg; \ + jnz 771b; \ + /* barrier for jnz misprediction */ \ + lfence; + +/* + * Stuff a single RSB slot. + * + * To mitigate Post-Barrier RSB speculation, one CALL instruction must be + * forced to retire before letting a RET instruction execute. + * + * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed + * before this point. 
+ */ +#define __FILL_ONE_RETURN \ + __FILL_RETURN_SLOT \ + add $(BITS_PER_LONG/8), %_ASM_SP; \ lfence; #ifdef __ASSEMBLY__ @@ -132,28 +143,15 @@ #endif .endm -.macro ISSUE_UNBALANCED_RET_GUARD - ANNOTATE_INTRA_FUNCTION_CALL - call .Lunbalanced_ret_guard_\@ - int3 -.Lunbalanced_ret_guard_\@: - add $(BITS_PER_LONG/8), %_ASM_SP - lfence -.endm - /* * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP * monstrosity above, manually. */ -.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2 -.ifb \ftr2 - ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr -.else - ALTERNATIVE_2 "jmp .Lskip_rsb_\@", "", \ftr, "jmp .Lunbalanced_\@", \ftr2 -.endif - __FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP) -.Lunbalanced_\@: - ISSUE_UNBALANCED_RET_GUARD +.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2=ALT_NOT(X86_FEATURE_ALWAYS) + ALTERNATIVE_2 "jmp .Lskip_rsb_\@", \ + __stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr, \ + __stringify(__FILL_ONE_RETURN), \ftr2 + .Lskip_rsb_\@: .endm -- cgit v1.2.3 From 332924973725e8cdcc783c175f68cf7e162cb9e5 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 19 Aug 2022 13:01:35 +0200 Subject: x86/nospec: Fix i386 RSB stuffing Turns out that i386 doesn't unconditionally have LFENCE, as such the loop in __FILL_RETURN_BUFFER isn't actually speculation safe on such chips. Fixes: ba6e31af2be9 ("x86/speculation: Add LFENCE to RSB fill sequence") Reported-by: Ben Hutchings Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/Yv9tj9vbQ9nNlXoY@worktop.programming.kicks-ass.net --- arch/x86/include/asm/nospec-branch.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 10731ccfed37..c936ce9f0c47 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -50,6 +50,7 @@ * the optimal version - two calls, each with their own speculation * trap should their return address end up getting used, in a loop. */ +#ifdef CONFIG_X86_64 #define __FILL_RETURN_BUFFER(reg, nr) \ mov $(nr/2), reg; \ 771: \ @@ -60,6 +61,17 @@ jnz 771b; \ /* barrier for jnz misprediction */ \ lfence; +#else +/* + * i386 doesn't unconditionally have LFENCE, as such it can't + * do a loop. + */ +#define __FILL_RETURN_BUFFER(reg, nr) \ + .rept nr; \ + __FILL_RETURN_SLOT; \ + .endr; \ + add $(BITS_PER_LONG/8) * nr, %_ASM_SP; +#endif /* * Stuff a single RSB slot. -- cgit v1.2.3 From 88e0a74902f894fbbc55ad3ad2cb23b4bfba555c Mon Sep 17 00:00:00 2001 From: Aaron Lu Date: Fri, 19 Aug 2022 10:30:01 +0800 Subject: x86/mm: Use proper mask when setting PUD mapping Commit c164fbb40c43f("x86/mm: thread pgprot_t through init_memory_mapping()") mistakenly used __pgprot() which doesn't respect __default_kernel_pte_mask when setting PUD mapping. Fix it by only setting the one bit we actually need (PSE) and leaving the other bits (that have been properly masked) alone. 
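The distinction, reduced to a sketch (editorial illustration mirroring the one-line change below):

    /*
     * Buggy: __PAGE_KERNEL_LARGE is an unfiltered pgprot constant, so
     * OR-ing it in can re-set bits (e.g. _PAGE_GLOBAL) that boot code
     * deliberately cleared from 'prot' via __default_kernel_pte_mask:
     *
     *      prot = __pgprot(pgprot_val(prot) | __PAGE_KERNEL_LARGE);
     *
     * Fixed: set only the single bit this path actually needs, leaving
     * the already-masked bits untouched:
     */
    prot = __pgprot(pgprot_val(prot) | _PAGE_PSE);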
Fixes: c164fbb40c43 ("x86/mm: thread pgprot_t through init_memory_mapping()") Signed-off-by: Aaron Lu Cc: stable@kernel.org Signed-off-by: Linus Torvalds --- arch/x86/mm/init_64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 39c5246964a9..0fe690ebc269 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -645,7 +645,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end, pages++; spin_lock(&init_mm.page_table_lock); - prot = __pgprot(pgprot_val(prot) | __PAGE_KERNEL_LARGE); + prot = __pgprot(pgprot_val(prot) | _PAGE_PSE); set_pte_init((pte_t *)pud, pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT, -- cgit v1.2.3 From 32ba156df1b1c8804a4e5be5339616945eafea22 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Tue, 16 Aug 2022 05:56:11 -0700 Subject: perf/x86/lbr: Enable the branch type for the Arch LBR by default On the platform with Arch LBR, the HW raw branch type encoding may leak to the perf tool when the SAVE_TYPE option is not set. In the intel_pmu_store_lbr(), the HW raw branch type is stored in lbr_entries[].type. If the SAVE_TYPE option is set, the lbr_entries[].type will be converted into the generic PERF_BR_* type in the intel_pmu_lbr_filter() and exposed to the user tools. But if the SAVE_TYPE option is NOT set by the user, the current perf kernel doesn't clear the field. The HW raw branch type leaks. There are two solutions to fix the issue for the Arch LBR. One is to clear the field if the SAVE_TYPE option is NOT set. The other solution is to unconditionally convert the branch type and expose the generic type to the user tools. The latter is implemented here, because - The branch type is valuable information. I don't see a case where you would not benefit from the branch type. (Stephane Eranian) - Not having the branch type DOES NOT save any space in the branch record (Stephane Eranian) - The Arch LBR HW can retrieve the common branch types from the LBR_INFO. It doesn't require the high overhead SW disassemble. Fixes: 47125db27e47 ("perf/x86/intel/lbr: Support Architectural LBR") Reported-by: Stephane Eranian Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20220816125612.2042397-1-kan.liang@linux.intel.com --- arch/x86/events/intel/lbr.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch') diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index 4f70fb6c2c1e..47fca6a7a8bc 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -1097,6 +1097,14 @@ static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event) if (static_cpu_has(X86_FEATURE_ARCH_LBR)) { reg->config = mask; + + /* + * The Arch LBR HW can retrieve the common branch types + * from the LBR_INFO. It doesn't require the high overhead + * SW disassemble. + * Enable the branch type by default for the Arch LBR. + */ + reg->reg |= X86_BR_TYPE_SAVE; return 0; } -- cgit v1.2.3 From 7d3598868aaee05eb738d1c3115616b867e7530a Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 16 Aug 2022 19:40:57 +0800 Subject: perf/x86/core: Set pebs_capable and PMU_FL_PEBS_ALL for the Baseline The SDM explicitly states that PEBS Baseline implies Extended PEBS. For cpu model forward compatibility (e.g. on ICX, SPR, ADL), it's safe to stop doing FMS table thing such as setting pebs_capable and PMU_FL_PEBS_ALL since it's already set in the intel_ds_init(). 
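In sketch form, the resulting pattern is to key the capability off the feature that implies it, once, rather than repeating it per FMS model (simplified rendering of the intel_ds_init() hunk below):

	/* in intel_ds_init(), when Baseline is detected */
	if (x86_pmu.intel_cap.pebs_baseline) {
		x86_pmu.flags |= PMU_FL_PEBS_ALL;	/* Baseline implies Extended PEBS */
		x86_pmu.pebs_capable = ~0ULL;
	}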
The Goldmont Plus is the only platform which supports extended PEBS but doesn't have Baseline. Keep the status quo. Reported-by: Like Xu Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Kan Liang Link: https://lkml.kernel.org/r/20220816114057.51307-1-likexu@tencent.com --- arch/x86/events/intel/core.c | 4 ---- arch/x86/events/intel/ds.c | 1 + 2 files changed, 1 insertion(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 2db93498ff71..cb98a05ee743 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -6291,10 +6291,8 @@ __init int intel_pmu_init(void) x86_pmu.pebs_aliases = NULL; x86_pmu.pebs_prec_dist = true; x86_pmu.pebs_block = true; - x86_pmu.pebs_capable = ~0ULL; x86_pmu.flags |= PMU_FL_HAS_RSP_1; x86_pmu.flags |= PMU_FL_NO_HT_SHARING; - x86_pmu.flags |= PMU_FL_PEBS_ALL; x86_pmu.flags |= PMU_FL_INSTR_LATENCY; x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX; @@ -6337,10 +6335,8 @@ __init int intel_pmu_init(void) x86_pmu.pebs_aliases = NULL; x86_pmu.pebs_prec_dist = true; x86_pmu.pebs_block = true; - x86_pmu.pebs_capable = ~0ULL; x86_pmu.flags |= PMU_FL_HAS_RSP_1; x86_pmu.flags |= PMU_FL_NO_HT_SHARING; - x86_pmu.flags |= PMU_FL_PEBS_ALL; x86_pmu.flags |= PMU_FL_INSTR_LATENCY; x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX; x86_pmu.lbr_pt_coexist = true; diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index ba60427caa6d..ac6dd4c96dbc 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -2262,6 +2262,7 @@ void __init intel_ds_init(void) PERF_SAMPLE_BRANCH_STACK | PERF_SAMPLE_TIME; x86_pmu.flags |= PMU_FL_PEBS_ALL; + x86_pmu.pebs_capable = ~0ULL; pebs_qual = "-baseline"; x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_EXTENDED_REGS; } else { -- cgit v1.2.3 From d4bdb0bebc5ba3299d74f123c782d99cd4e25c49 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Wed, 17 Aug 2022 22:46:13 -0700 Subject: perf/x86/intel/ds: Fix precise store latency handling With the existing code in store_latency_data(), the memory operation (mem_op) returned to the user is always OP_LOAD where in fact, it should be OP_STORE. This comes from the fact that the function is simply grabbing the information from a data source map which covers only load accesses. Intel 12th gen CPU offers precise store sampling that captures both the data source and latency. Therefore it can use the data source mapping table but must override the memory operation to reflect stores instead of loads. 
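The uapi union makes such an override a one-field store. A minimal userspace sketch, assuming the layout from the uapi <linux/perf_event.h>:

	#include <stdio.h>
	#include <linux/perf_event.h>

	int main(void)
	{
		union perf_mem_data_src src = { .val = 0 };

		src.mem_op = PERF_MEM_OP_LOAD;	/* what a load-only table yields */
		src.mem_op = PERF_MEM_OP_STORE;	/* overridden for precise stores */
		printf("mem_op = %u\n", (unsigned int)src.mem_op);
		return 0;
	}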
Fixes: 61b985e3e775 ("perf/x86/intel: Add perf core PMU support for Sapphire Rapids") Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20220818054613.1548130-1-eranian@google.com --- arch/x86/events/intel/ds.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index ac6dd4c96dbc..e5b587499122 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -291,6 +291,7 @@ static u64 load_latency_data(struct perf_event *event, u64 status) static u64 store_latency_data(struct perf_event *event, u64 status) { union intel_x86_pebs_dse dse; + union perf_mem_data_src src; u64 val; dse.val = status; @@ -304,7 +305,14 @@ static u64 store_latency_data(struct perf_event *event, u64 status) val |= P(BLK, NA); - return val; + /* + * the pebs_data_source table is only for loads + * so override the mem_op to say STORE instead + */ + src.val = val; + src.mem_op = P(OP,STORE); + + return src.val; } struct pebs_record_core { -- cgit v1.2.3 From cde643ff75bc20c538dfae787ca3b587bab16b50 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Thu, 18 Aug 2022 11:44:29 -0700 Subject: perf/x86/intel: Fix pebs event constraints for ADL According to the latest event list, the LOAD_LATENCY PEBS event only works on the GP counter 0 and 1 for ADL and RPL. Update the pebs event constraints table. Fixes: f83d2f91d259 ("perf/x86/intel: Add Alder Lake Hybrid support") Reported-by: Ammy Yi Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20220818184429.2355857-1-kan.liang@linux.intel.com --- arch/x86/events/intel/ds.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index e5b587499122..de1f55d51784 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -830,7 +830,7 @@ struct event_constraint intel_glm_pebs_event_constraints[] = { struct event_constraint intel_grt_pebs_event_constraints[] = { /* Allow all events as PEBS with no flags */ - INTEL_HYBRID_LAT_CONSTRAINT(0x5d0, 0xf), + INTEL_HYBRID_LAT_CONSTRAINT(0x5d0, 0x3), INTEL_HYBRID_LAT_CONSTRAINT(0x6d0, 0xf), EVENT_CONSTRAINT_END }; -- cgit v1.2.3 From 7ae1f5508d9a33fd58ed3059bd2d569961e3b8bd Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Sat, 20 Aug 2022 17:59:17 +0200 Subject: parisc: Fix exception handler for fldw and fstw instructions The exception handler is broken for unaligned memory acceses with fldw and fstw instructions, because it trashes or uses randomly some other floating point register than the one specified in the instruction word on loads and stores. The instruction "fldw 0(addr),%fr22L" (and the other fldw/fstw instructions) encode the target register (%fr22) in the rightmost 5 bits of the instruction word. The 7th rightmost bit of the instruction word defines if the left or right half of %fr22 should be used. While processing unaligned address accesses, the FR3() define is used to extract the offset into the local floating-point register set. But the calculation in FR3() was buggy, so that for example instead of %fr22, register %fr12 [((22 * 2) & 0x1f) = 12] was used. This bug has been since forever in the parisc kernel and I wonder why it wasn't detected earlier. Interestingly I noticed this bug just because the libime debian package failed to build on *native* hardware, while it successfully built in qemu. 
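The miscalculation is easy to see in isolation. A small userspace sketch comparing the old and corrected FR3() for an instruction word whose low five bits select %fr22, with the half-select bit clear:

	#include <stdio.h>

	#define FR3_OLD(i) ((((i)<<1)&0x1f)|(((i)>>6)&1))
	#define FR3_NEW(i) ((((i)&0x1f)<<1)|(((i)>>6)&1))

	int main(void)
	{
		unsigned int insn = 22;	/* target %fr22, left half */

		printf("old: %d\n", FR3_OLD(insn));	/* 12: the wrong slot */
		printf("new: %d\n", FR3_NEW(insn));	/* 44: %fr22's slot (22 * 2) */
		return 0;
	}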
This patch corrects the bitshift and masking calculation in FR3(). Signed-off-by: Helge Deller Cc: --- arch/parisc/kernel/unaligned.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c index bac581b5ecfc..e8a4d77cff53 100644 --- a/arch/parisc/kernel/unaligned.c +++ b/arch/parisc/kernel/unaligned.c @@ -93,7 +93,7 @@ #define R1(i) (((i)>>21)&0x1f) #define R2(i) (((i)>>16)&0x1f) #define R3(i) ((i)&0x1f) -#define FR3(i) ((((i)<<1)&0x1f)|(((i)>>6)&1)) +#define FR3(i) ((((i)&0x1f)<<1)|(((i)>>6)&1)) #define IM(i,n) (((i)>>1&((1<<(n-1))-1))|((i)&1?((0-1L)<<(n-1)):0)) #define IM5_2(i) IM((i)>>16,5) #define IM5_3(i) IM((i),5) -- cgit v1.2.3 From fc2e426b1161761561624ebd43ce8c8d2fa058da Mon Sep 17 00:00:00 2001 From: Chen Zhongjin Date: Fri, 19 Aug 2022 16:43:34 +0800 Subject: x86/unwind/orc: Unwind ftrace trampolines with correct ORC entry When meeting ftrace trampolines in ORC unwinding, unwinder uses address of ftrace_{regs_}call address to find the ORC entry, which gets next frame at sp+176. If there is an IRQ hitting at sub $0xa8,%rsp, the next frame should be sp+8 instead of 176. It makes unwinder skip correct frame and throw warnings such as "wrong direction" or "can't access registers", etc, depending on the content of the incorrect frame address. By adding the base address ftrace_{regs_}caller with the offset *ip - ops->trampoline*, we can get the correct address to find the ORC entry. Also change "caller" to "tramp_addr" to make variable name conform to its content. [ mingo: Clarified the changelog a bit. ] Fixes: 6be7fa3c74d1 ("ftrace, orc, x86: Handle ftrace dynamically allocated trampolines") Signed-off-by: Chen Zhongjin Signed-off-by: Ingo Molnar Reviewed-by: Steven Rostedt (Google) Cc: Link: https://lore.kernel.org/r/20220819084334.244016-1-chenzhongjin@huawei.com --- arch/x86/kernel/unwind_orc.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index 38185aedf7d1..0ea57da92940 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -93,22 +93,27 @@ static struct orc_entry *orc_find(unsigned long ip); static struct orc_entry *orc_ftrace_find(unsigned long ip) { struct ftrace_ops *ops; - unsigned long caller; + unsigned long tramp_addr, offset; ops = ftrace_ops_trampoline(ip); if (!ops) return NULL; + /* Set tramp_addr to the start of the code copied by the trampoline */ if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) - caller = (unsigned long)ftrace_regs_call; + tramp_addr = (unsigned long)ftrace_regs_caller; else - caller = (unsigned long)ftrace_call; + tramp_addr = (unsigned long)ftrace_caller; + + /* Now place tramp_addr to the location within the trampoline ip is at */ + offset = ip - ops->trampoline; + tramp_addr += offset; /* Prevent unlikely recursion */ - if (ip == caller) + if (ip == tramp_addr) return NULL; - return orc_find(caller); + return orc_find(tramp_addr); } #else static struct orc_entry *orc_ftrace_find(unsigned long ip) -- cgit v1.2.3 From a0a12c3ed057af57552bf6c0aeaca6835693df04 Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Fri, 19 Aug 2022 12:06:40 -0700 Subject: asm goto: eradicate CC_HAS_ASM_GOTO GCC has supported asm goto since 4.5, and Clang has since version 9.0.0. The minimum supported versions of these tools for the build according to Documentation/process/changes.rst are 5.1 and 11.0.0 respectively. 
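For reference, the construct in question; a minimal example that any compiler meeting those minimum versions accepts (illustrative, in the shape jump labels use):

	#include <stdio.h>

	static int key_enabled(void)
	{
		asm goto("" : : : : yes);	/* a patchable jump site in real use */
		return 0;
	yes:
		return 1;
	}

	int main(void)
	{
		printf("%d\n", key_enabled());	/* prints 0: the fall-through path */
		return 0;
	}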
Remove the feature detection script, Kconfig option, and clean up some fallback code that is no longer supported. The removed script was also testing for a GCC specific bug that was fixed in the 4.7 release. Also remove workarounds for bpftrace using clang older than 9.0.0, since other BPF backend fixes are required at this point. Link: https://lore.kernel.org/lkml/CAK7LNATSr=BXKfkdW8f-H5VT_w=xBpT2ZQcZ7rm6JfkdE+QnmA@mail.gmail.com/ Link: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=48637 Acked-by: Borislav Petkov Suggested-by: Masahiro Yamada Suggested-by: Alexei Starovoitov Signed-off-by: Nick Desaulniers Reviewed-by: Ingo Molnar Reviewed-by: Nathan Chancellor Reviewed-by: Alexandre Belloni Signed-off-by: Linus Torvalds --- Documentation/kbuild/kconfig-language.rst | 4 ++-- arch/Kconfig | 3 +-- arch/um/include/asm/cpufeature.h | 15 --------------- arch/x86/Makefile | 4 ---- arch/x86/include/asm/cpufeature.h | 15 --------------- arch/x86/include/asm/rmwcc.h | 6 +++--- arch/x86/kvm/emulate.c | 2 +- init/Kconfig | 4 ---- scripts/gcc-goto.sh | 22 ---------------------- tools/arch/x86/include/asm/rmwcc.h | 21 --------------------- 10 files changed, 7 insertions(+), 89 deletions(-) delete mode 100755 scripts/gcc-goto.sh (limited to 'arch') diff --git a/Documentation/kbuild/kconfig-language.rst b/Documentation/kbuild/kconfig-language.rst index 7fb398649f51..858ed5d80def 100644 --- a/Documentation/kbuild/kconfig-language.rst +++ b/Documentation/kbuild/kconfig-language.rst @@ -525,8 +525,8 @@ followed by a test macro:: If you need to expose a compiler capability to makefiles and/or C source files, `CC_HAS_` is the recommended prefix for the config option:: - config CC_HAS_ASM_GOTO - def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC)) + config CC_HAS_FOO + def_bool $(success,$(srctree)/scripts/cc-check-foo.sh $(CC)) Build as module only ~~~~~~~~~~~~~~~~~~~~ diff --git a/arch/Kconfig b/arch/Kconfig index f330410da63a..5dbf11a5ba4e 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -53,7 +53,6 @@ config KPROBES config JUMP_LABEL bool "Optimize very unlikely/likely branches" depends on HAVE_ARCH_JUMP_LABEL - depends on CC_HAS_ASM_GOTO select OBJTOOL if HAVE_JUMP_LABEL_HACK help This option enables a transparent branch optimization that @@ -1361,7 +1360,7 @@ config HAVE_PREEMPT_DYNAMIC_CALL config HAVE_PREEMPT_DYNAMIC_KEY bool - depends on HAVE_ARCH_JUMP_LABEL && CC_HAS_ASM_GOTO + depends on HAVE_ARCH_JUMP_LABEL select HAVE_PREEMPT_DYNAMIC help An architecture should select this if it can handle the preemption diff --git a/arch/um/include/asm/cpufeature.h b/arch/um/include/asm/cpufeature.h index 19cd7ed6ec3c..4b6d1b526bc1 100644 --- a/arch/um/include/asm/cpufeature.h +++ b/arch/um/include/asm/cpufeature.h @@ -65,20 +65,6 @@ extern void setup_clear_cpu_cap(unsigned int bit); #define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit) -#if defined(__clang__) && !defined(CONFIG_CC_HAS_ASM_GOTO) - -/* - * Workaround for the sake of BPF compilation which utilizes kernel - * headers, but clang does not support ASM GOTO and fails the build. - */ -#ifndef __BPF_TRACING__ -#warning "Compiler lacks ASM_GOTO support. Add -D __BPF_TRACING__ to your compiler arguments" -#endif - -#define static_cpu_has(bit) boot_cpu_has(bit) - -#else - /* * Static testing of CPU features. Used the same as boot_cpu_has(). It * statically patches the target code for additional performance. 
Use @@ -137,7 +123,6 @@ t_no: boot_cpu_has(bit) : \ _static_cpu_has(bit) \ ) -#endif #define cpu_has_bug(c, bit) cpu_has(c, (bit)) #define set_cpu_bug(c, bit) set_cpu_cap(c, (bit)) diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 7854685c5f25..bafbd905e6e7 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -286,10 +286,6 @@ vdso_install: archprepare: checkbin checkbin: -ifndef CONFIG_CC_HAS_ASM_GOTO - @echo Compiler lacks asm-goto support. - @exit 1 -endif ifdef CONFIG_RETPOLINE ifeq ($(RETPOLINE_CFLAGS),) @echo "You are building kernel with non-retpoline compiler." >&2 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index ea34cc31b047..1a85e1fb0922 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -155,20 +155,6 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit); #define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit) -#if defined(__clang__) && !defined(CONFIG_CC_HAS_ASM_GOTO) - -/* - * Workaround for the sake of BPF compilation which utilizes kernel - * headers, but clang does not support ASM GOTO and fails the build. - */ -#ifndef __BPF_TRACING__ -#warning "Compiler lacks ASM_GOTO support. Add -D __BPF_TRACING__ to your compiler arguments" -#endif - -#define static_cpu_has(bit) boot_cpu_has(bit) - -#else - /* * Static testing of CPU features. Used the same as boot_cpu_has(). It * statically patches the target code for additional performance. Use @@ -208,7 +194,6 @@ t_no: boot_cpu_has(bit) : \ _static_cpu_has(bit) \ ) -#endif #define cpu_has_bug(c, bit) cpu_has(c, (bit)) #define set_cpu_bug(c, bit) set_cpu_cap(c, (bit)) diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h index 8a9eba191516..7fa611216417 100644 --- a/arch/x86/include/asm/rmwcc.h +++ b/arch/x86/include/asm/rmwcc.h @@ -11,7 +11,7 @@ #define __CLOBBERS_MEM(clb...) "memory", ## clb -#if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CONFIG_CC_HAS_ASM_GOTO) +#ifndef __GCC_ASM_FLAG_OUTPUTS__ /* Use asm goto */ @@ -27,7 +27,7 @@ cc_label: c = true; \ c; \ }) -#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CONFIG_CC_HAS_ASM_GOTO) */ +#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) */ /* Use flags output or a set instruction */ @@ -40,7 +40,7 @@ cc_label: c = true; \ c; \ }) -#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CONFIG_CC_HAS_ASM_GOTO) */ +#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) */ #define GEN_UNARY_RMWcc_4(op, var, cc, arg0) \ __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM()) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index f092c54d1a2f..d5ec3a2ed5a4 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -479,7 +479,7 @@ FOP_END; /* * XXX: inoutclob user must know where the argument is being expanded. - * Relying on CONFIG_CC_HAS_ASM_GOTO would allow us to remove _fault. + * Using asm goto would allow us to remove _fault. */ #define asm_safe(insn, inoutclob...) 
\ ({ \ diff --git a/init/Kconfig b/init/Kconfig index 80fe60fa77fb..532362fcfe31 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -70,11 +70,7 @@ config CC_CAN_LINK_STATIC default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(USERCFLAGS) $(USERLDFLAGS) $(m64-flag) -static) if 64BIT default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(USERCFLAGS) $(USERLDFLAGS) $(m32-flag) -static) -config CC_HAS_ASM_GOTO - def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC)) - config CC_HAS_ASM_GOTO_OUTPUT - depends on CC_HAS_ASM_GOTO def_bool $(success,echo 'int foo(int x) { asm goto ("": "=r"(x) ::: bar); return x; bar: return 0; }' | $(CC) -x c - -c -o /dev/null) config CC_HAS_ASM_GOTO_TIED_OUTPUT diff --git a/scripts/gcc-goto.sh b/scripts/gcc-goto.sh deleted file mode 100755 index 8b980fb2270a..000000000000 --- a/scripts/gcc-goto.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 -# Test for gcc 'asm goto' support -# Copyright (C) 2010, Jason Baron - -cat << "END" | $@ -x c - -fno-PIE -c -o /dev/null -int main(void) -{ -#if defined(__arm__) || defined(__aarch64__) - /* - * Not related to asm goto, but used by jump label - * and broken on some ARM GCC versions (see GCC Bug 48637). - */ - static struct { int dummy; int state; } tp; - asm (".long %c0" :: "i" (&tp.state)); -#endif - -entry: - asm goto ("" :::: entry); - return 0; -} -END diff --git a/tools/arch/x86/include/asm/rmwcc.h b/tools/arch/x86/include/asm/rmwcc.h index fee7983a90b4..11ff975242ca 100644 --- a/tools/arch/x86/include/asm/rmwcc.h +++ b/tools/arch/x86/include/asm/rmwcc.h @@ -2,8 +2,6 @@ #ifndef _TOOLS_LINUX_ASM_X86_RMWcc #define _TOOLS_LINUX_ASM_X86_RMWcc -#ifdef CONFIG_CC_HAS_ASM_GOTO - #define __GEN_RMWcc(fullop, var, cc, ...) \ do { \ asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \ @@ -20,23 +18,4 @@ cc_label: \ #define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \ __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val)) -#else /* !CONFIG_CC_HAS_ASM_GOTO */ - -#define __GEN_RMWcc(fullop, var, cc, ...) \ -do { \ - char c; \ - asm volatile (fullop "; set" cc " %1" \ - : "+m" (var), "=qm" (c) \ - : __VA_ARGS__ : "memory"); \ - return c != 0; \ -} while (0) - -#define GEN_UNARY_RMWcc(op, var, arg0, cc) \ - __GEN_RMWcc(op " " arg0, var, cc) - -#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \ - __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val)) - -#endif /* CONFIG_CC_HAS_ASM_GOTO */ - #endif /* _TOOLS_LINUX_ASM_X86_RMWcc */ -- cgit v1.2.3 From 3dcfb729b5f4a0c9b50742865cd5e6c4dbcc80dc Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Fri, 19 Aug 2022 19:30:50 +0200 Subject: parisc: Make CONFIG_64BIT available for ARCH=parisc64 only With this patch the ARCH= parameter decides if the CONFIG_64BIT option will be set or not. This means, the ARCH= parameter will give: ARCH=parisc -> 32-bit kernel ARCH=parisc64 -> 64-bit kernel This simplifies the usage of the other config options like randconfig, allmodconfig and allyesconfig a lot and produces the output which is expected for parisc64 (64-bit) vs. parisc (32-bit). 
Suggested-by: Masahiro Yamada Signed-off-by: Helge Deller Tested-by: Randy Dunlap Reviewed-by: Randy Dunlap Cc: # 5.15+ --- arch/parisc/Kconfig | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 7f059cd1196a..9aede2447011 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -146,10 +146,10 @@ menu "Processor type and features" choice prompt "Processor type" - default PA7000 + default PA7000 if "$(ARCH)" = "parisc" config PA7000 - bool "PA7000/PA7100" + bool "PA7000/PA7100" if "$(ARCH)" = "parisc" help This is the processor type of your CPU. This information is used for optimizing purposes. In order to compile a kernel @@ -160,21 +160,21 @@ config PA7000 which is required on some machines. config PA7100LC - bool "PA7100LC" + bool "PA7100LC" if "$(ARCH)" = "parisc" help Select this option for the PCX-L processor, as used in the 712, 715/64, 715/80, 715/100, 715/100XC, 725/100, 743, 748, D200, D210, D300, D310 and E-class config PA7200 - bool "PA7200" + bool "PA7200" if "$(ARCH)" = "parisc" help Select this option for the PCX-T' processor, as used in the C100, C110, J100, J110, J210XC, D250, D260, D350, D360, K100, K200, K210, K220, K400, K410 and K420 config PA7300LC - bool "PA7300LC" + bool "PA7300LC" if "$(ARCH)" = "parisc" help Select this option for the PCX-L2 processor, as used in the 744, A180, B132L, B160L, B180L, C132L, C160L, C180L, @@ -224,17 +224,8 @@ config MLONGCALLS Enabling this option will probably slow down your kernel. config 64BIT - bool "64-bit kernel" + def_bool "$(ARCH)" = "parisc64" depends on PA8X00 - help - Enable this if you want to support 64bit kernel on PA-RISC platform. - - At the moment, only people willing to use more than 2GB of RAM, - or having a 64bit-only capable PA-RISC machine should say Y here. - - Since there is no 64bit userland on PA-RISC, there is no point to - enable this option otherwise. The 64bit kernel is significantly bigger - and slower than the 32bit one. choice prompt "Kernel page size" -- cgit v1.2.3 From b4b18f47f4f9682fbf5827682645da7c8dde8f80 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Sun, 21 Aug 2022 08:12:19 +0200 Subject: Revert "parisc: Show error if wrong 32/64-bit compiler is being used" This reverts commit b160628e9ebcdc85d0db9d7f423c26b3c7c179d0. There is no need any longer to have this sanity check, because the previous commit ("parisc: Make CONFIG_64BIT available for ARCH=parisc64 only") prevents that CONFIG_64BIT is set if ARCH==parisc. Signed-off-by: Helge Deller --- arch/parisc/include/asm/bitops.h | 8 -------- 1 file changed, 8 deletions(-) (limited to 'arch') diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h index 56ffd260c669..0ec9cfc5131f 100644 --- a/arch/parisc/include/asm/bitops.h +++ b/arch/parisc/include/asm/bitops.h @@ -12,14 +12,6 @@ #include #include -/* compiler build environment sanity checks: */ -#if !defined(CONFIG_64BIT) && defined(__LP64__) -#error "Please use 'ARCH=parisc' to build the 32-bit kernel." -#endif -#if defined(CONFIG_64BIT) && !defined(__LP64__) -#error "Please use 'ARCH=parisc64' to build the 64-bit kernel." -#endif - /* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion * on use of volatile and __*_bit() (set/clear/change): * *_bit() want use of volatile. 
-- cgit v1.2.3 From 591d2108f3abc4db9f9073cae37cf3591fd250d6 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Sun, 21 Aug 2022 14:49:58 +0200 Subject: parisc: Add runtime check to prevent PA2.0 kernels on PA1.x machines If a 32-bit kernel was compiled for PA2.0 CPUs, it won't be able to run on machines with PA1.x CPUs. Add a check and bail out early if a PA1.x machine is detected. Signed-off-by: Helge Deller --- arch/parisc/kernel/head.S | 43 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 42 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S index e0a9e9657622..fd15fd4bbb61 100644 --- a/arch/parisc/kernel/head.S +++ b/arch/parisc/kernel/head.S @@ -22,7 +22,7 @@ #include #include - .level PA_ASM_LEVEL + .level 1.1 __INITDATA ENTRY(boot_args) @@ -70,6 +70,47 @@ $bss_loop: stw,ma %arg2,4(%r1) stw,ma %arg3,4(%r1) +#if !defined(CONFIG_64BIT) && defined(CONFIG_PA20) + /* This 32-bit kernel was compiled for PA2.0 CPUs. Check current CPU + * and halt kernel if we detect a PA1.x CPU. */ + ldi 32,%r10 + mtctl %r10,%cr11 + .level 2.0 + mfctl,w %cr11,%r10 + .level 1.1 + comib,<>,n 0,%r10,$cpu_ok + + load32 PA(msg1),%arg0 + ldi msg1_end-msg1,%arg1 +$iodc_panic: + copy %arg0, %r10 + copy %arg1, %r11 + load32 PA(init_stack),%sp +#define MEM_CONS 0x3A0 + ldw MEM_CONS+32(%r0),%arg0 // HPA + ldi ENTRY_IO_COUT,%arg1 + ldw MEM_CONS+36(%r0),%arg2 // SPA + ldw MEM_CONS+8(%r0),%arg3 // layers + load32 PA(__bss_start),%r1 + stw %r1,-52(%sp) // arg4 + stw %r0,-56(%sp) // arg5 + stw %r10,-60(%sp) // arg6 = ptr to text + stw %r11,-64(%sp) // arg7 = len + stw %r0,-68(%sp) // arg8 + load32 PA(.iodc_panic_ret), %rp + ldw MEM_CONS+40(%r0),%r1 // ENTRY_IODC + bv,n (%r1) +.iodc_panic_ret: + b . /* wait endless with ... */ + or %r10,%r10,%r10 /* qemu idle sleep */ +msg1: .ascii "Can't boot kernel which was built for PA8x00 CPUs on this machine.\r\n" +msg1_end: + +$cpu_ok: +#endif + + .level PA_ASM_LEVEL + /* Initialize startup VM. Just map first 16/32 MB of memory */ load32 PA(swapper_pg_dir),%r4 mtctl %r4,%cr24 /* Initialize kernel root pointer */ -- cgit v1.2.3 From 729a916599eaf13df66ae5404f5489f38518ea8c Mon Sep 17 00:00:00 2001 From: Kuan-Ying Lee Date: Thu, 21 Jul 2022 11:05:31 +0800 Subject: arm64: Fix comment typo Replace wrong 'FIQ EL1h' comment with 'FIQ EL1t'. 
Signed-off-by: Kuan-Ying Lee Link: https://lore.kernel.org/r/20220721030531.21234-1-Kuan-Ying.Lee@mediatek.com Signed-off-by: Will Deacon --- arch/arm64/kernel/entry.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 254fe31c03a0..2d73b3e793b2 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -502,7 +502,7 @@ tsk .req x28 // current thread_info SYM_CODE_START(vectors) kernel_ventry 1, t, 64, sync // Synchronous EL1t kernel_ventry 1, t, 64, irq // IRQ EL1t - kernel_ventry 1, t, 64, fiq // FIQ EL1h + kernel_ventry 1, t, 64, fiq // FIQ EL1t kernel_ventry 1, t, 64, error // Error EL1t kernel_ventry 1, h, 64, sync // Synchronous EL1h -- cgit v1.2.3 From 2e8cff0a0eee87b27f0cf87ad8310eb41b5886ab Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 17 Aug 2022 16:40:22 +0100 Subject: arm64: fix rodata=full On arm64, "rodata=full" has been suppored (but not documented) since commit: c55191e96caa9d78 ("arm64: mm: apply r/o permissions of VM areas to its linear alias as well") As it's necessary to determine the rodata configuration early during boot, arm64 has an early_param() handler for this, whereas init/main.c has a __setup() handler which is run later. Unfortunately, this split meant that since commit: f9a40b0890658330 ("init/main.c: return 1 from handled __setup() functions") ... passing "rodata=full" would result in a spurious warning from the __setup() handler (though RO permissions would be configured appropriately). Further, "rodata=full" has been broken since commit: 0d6ea3ac94ca77c5 ("lib/kstrtox.c: add "false"/"true" support to kstrtobool()") ... which caused strtobool() to parse "full" as false (in addition to many other values not documented for the "rodata=" kernel parameter. This patch fixes this breakage by: * Moving the core parameter parser to an __early_param(), such that it is available early. * Adding an (optional) arch hook which arm64 can use to parse "full". * Updating the documentation to mention that "full" is valid for arm64. * Having the core parameter parser handle "on" and "off" explicitly, such that any undocumented values (e.g. typos such as "ful") are reported as errors rather than being silently accepted. Note that __setup() and early_param() have opposite conventions for their return values, where __setup() uses 1 to indicate a parameter was handled and early_param() uses 0 to indicate a parameter was handled. 
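A small userspace model of the reworked parser (illustrative only; the arch hook here is a stub standing in for the arm64 one, which also sets rodata_full):

	#include <stdio.h>
	#include <string.h>

	static int arch_parse_debug_rodata(const char *s)
	{
		return s && !strcmp(s, "full");	/* arm64 accepts "full" */
	}

	static int parse_rodata(const char *str, int *enabled)
	{
		if (arch_parse_debug_rodata(str)) {
			*enabled = 1;
			return 0;
		}
		if (str && !strcmp(str, "on"))
			*enabled = 1;
		else if (str && !strcmp(str, "off"))
			*enabled = 0;
		else
			return -1;	/* the kernel warns on typos like "ful" */
		return 0;
	}

	int main(void)
	{
		int on = 1;
		printf("full -> %d, ful -> %d\n",
		       parse_rodata("full", &on), parse_rodata("ful", &on));
		return 0;
	}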
Fixes: f9a40b089065 ("init/main.c: return 1 from handled __setup() functions") Fixes: 0d6ea3ac94ca ("lib/kstrtox.c: add "false"/"true" support to kstrtobool()") Signed-off-by: Mark Rutland Cc: Andy Shevchenko Cc: Ard Biesheuvel Cc: Catalin Marinas Cc: Jagdish Gediya Cc: Matthew Wilcox Cc: Randy Dunlap Cc: Will Deacon Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20220817154022.3974645-1-mark.rutland@arm.com Signed-off-by: Will Deacon --- Documentation/admin-guide/kernel-parameters.txt | 2 ++ arch/arm64/include/asm/setup.h | 17 +++++++++++++++++ arch/arm64/mm/mmu.c | 18 ------------------ init/main.c | 18 +++++++++++++++--- 4 files changed, 34 insertions(+), 21 deletions(-) (limited to 'arch') diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index d7f30902fda0..426fa892d311 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -5331,6 +5331,8 @@ rodata= [KNL] on Mark read-only kernel memory as read-only (default). off Leave read-only kernel memory writable for debugging. + full Mark read-only kernel memory and aliases as read-only + [arm64] rockchip.usb_uart Enable the uart passthrough on the designated usb port diff --git a/arch/arm64/include/asm/setup.h b/arch/arm64/include/asm/setup.h index 6437df661700..f4af547ef54c 100644 --- a/arch/arm64/include/asm/setup.h +++ b/arch/arm64/include/asm/setup.h @@ -3,6 +3,8 @@ #ifndef __ARM64_ASM_SETUP_H #define __ARM64_ASM_SETUP_H +#include + #include void *get_early_fdt_ptr(void); @@ -14,4 +16,19 @@ void early_fdt_map(u64 dt_phys); extern phys_addr_t __fdt_pointer __initdata; extern u64 __cacheline_aligned boot_args[4]; +static inline bool arch_parse_debug_rodata(char *arg) +{ + extern bool rodata_enabled; + extern bool rodata_full; + + if (arg && !strcmp(arg, "full")) { + rodata_enabled = true; + rodata_full = true; + return true; + } + + return false; +} +#define arch_parse_debug_rodata arch_parse_debug_rodata + #endif diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index db7c4e6ae57b..e7ad44585f40 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -642,24 +642,6 @@ static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end, vm_area_add_early(vma); } -static int __init parse_rodata(char *arg) -{ - int ret = strtobool(arg, &rodata_enabled); - if (!ret) { - rodata_full = false; - return 0; - } - - /* permit 'full' in addition to boolean options */ - if (strcmp(arg, "full")) - return -EINVAL; - - rodata_enabled = true; - rodata_full = true; - return 0; -} -early_param("rodata", parse_rodata); - #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 static int __init map_entry_trampoline(void) { diff --git a/init/main.c b/init/main.c index 91642a4e69be..1fe7942f5d4a 100644 --- a/init/main.c +++ b/init/main.c @@ -1446,13 +1446,25 @@ static noinline void __init kernel_init_freeable(void); #if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX) bool rodata_enabled __ro_after_init = true; + +#ifndef arch_parse_debug_rodata +static inline bool arch_parse_debug_rodata(char *str) { return false; } +#endif + static int __init set_debug_rodata(char *str) { - if (strtobool(str, &rodata_enabled)) + if (arch_parse_debug_rodata(str)) + return 0; + + if (str && !strcmp(str, "on")) + rodata_enabled = true; + else if (str && !strcmp(str, "off")) + rodata_enabled = false; + else pr_warn("Invalid option string for rodata: '%s'\n", str); - return 1; + return 0; } -__setup("rodata=", 
set_debug_rodata); +early_param("rodata", set_debug_rodata); #endif #ifdef CONFIG_STRICT_KERNEL_RWX -- cgit v1.2.3 From e89d120c4b720e232cc6a94f0fcbd59c15d41489 Mon Sep 17 00:00:00 2001 From: Ionela Voinescu Date: Fri, 19 Aug 2022 11:30:50 +0100 Subject: arm64: errata: add detection for AMEVCNTR01 incrementing incorrectly The AMU counter AMEVCNTR01 (constant counter) should increment at the same rate as the system counter. On affected Cortex-A510 cores, AMEVCNTR01 increments incorrectly giving a significantly higher output value. This results in inaccurate task scheduler utilization tracking and incorrect feedback on CPU frequency. Work around this problem by returning 0 when reading the affected counter in key locations that results in disabling all users of this counter from using it either for frequency invariance or as FFH reference counter. This effect is the same to firmware disabling affected counters. Details on how the two features are affected by this erratum: - AMU counters will not be used for frequency invariance for affected CPUs and CPUs in the same cpufreq policy. AMUs can still be used for frequency invariance for unaffected CPUs in the system. Although unlikely, if no alternative method can be found to support frequency invariance for affected CPUs (cpufreq based or solution based on platform counters) frequency invariance will be disabled. Please check the chapter on frequency invariance at Documentation/scheduler/sched-capacity.rst for details of its effect. - Given that FFH can be used to fetch either the core or constant counter values, restrictions are lifted regarding any of these counters returning a valid (!0) value. Therefore FFH is considered supported if there is a least one CPU that support AMUs, independent of any counters being disabled or affected by this erratum. Clarifying comments are now added to the cpc_ffh_supported(), cpu_read_constcnt() and cpu_read_corecnt() functions. The above is achieved through adding a new erratum: ARM64_ERRATUM_2457168. Signed-off-by: Ionela Voinescu Reviewed-by: Catalin Marinas Cc: Catalin Marinas Cc: Will Deacon Cc: James Morse Link: https://lore.kernel.org/r/20220819103050.24211-1-ionela.voinescu@arm.com Signed-off-by: Will Deacon --- Documentation/arm64/silicon-errata.rst | 2 ++ arch/arm64/Kconfig | 17 +++++++++++++++++ arch/arm64/kernel/cpu_errata.c | 10 ++++++++++ arch/arm64/kernel/cpufeature.c | 5 ++++- arch/arm64/kernel/topology.c | 32 ++++++++++++++++++++++++++++++-- arch/arm64/tools/cpucaps | 1 + 6 files changed, 64 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst index 33b04db8408f..fda97b3fcf01 100644 --- a/Documentation/arm64/silicon-errata.rst +++ b/Documentation/arm64/silicon-errata.rst @@ -52,6 +52,8 @@ stable kernels. 
| Allwinner | A64/R18 | UNKNOWN1 | SUN50I_ERRATUM_UNKNOWN1 | +----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+ +| ARM | Cortex-A510 | #2457168 | ARM64_ERRATUM_2457168 | ++----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A510 | #2064142 | ARM64_ERRATUM_2064142 | +----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A510 | #2038923 | ARM64_ERRATUM_2038923 | diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 571cc234d0b3..9fb9fff08c94 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -917,6 +917,23 @@ config ARM64_ERRATUM_1902691 If unsure, say Y. +config ARM64_ERRATUM_2457168 + bool "Cortex-A510: 2457168: workaround for AMEVCNTR01 incrementing incorrectly" + depends on ARM64_AMU_EXTN + default y + help + This option adds the workaround for ARM Cortex-A510 erratum 2457168. + + The AMU counter AMEVCNTR01 (constant counter) should increment at the same rate + as the system counter. On affected Cortex-A510 cores AMEVCNTR01 increments + incorrectly giving a significantly higher output value. + + Work around this problem by returning 0 when reading the affected counter in + key locations that results in disabling all users of this counter. This effect + is the same to firmware disabling affected counters. + + If unsure, say Y. + config CAVIUM_ERRATUM_22375 bool "Cavium erratum 22375, 24313" default y diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 0f7e9087d900..53b973b6059f 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -656,6 +656,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = { ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2) }, #endif +#ifdef CONFIG_ARM64_ERRATUM_2457168 + { + .desc = "ARM erratum 2457168", + .capability = ARM64_WORKAROUND_2457168, + .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, + + /* Cortex-A510 r0p0-r1p1 */ + CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1) + }, +#endif #ifdef CONFIG_ARM64_ERRATUM_2038923 { .desc = "ARM erratum 2038923", diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 907401e4fffb..af4de817d712 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1870,7 +1870,10 @@ static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap) pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n", smp_processor_id()); cpumask_set_cpu(smp_processor_id(), &amu_cpus); - update_freq_counters_refs(); + + /* 0 reference values signal broken/disabled counters */ + if (!this_cpu_has_cap(ARM64_WORKAROUND_2457168)) + update_freq_counters_refs(); } } diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 869ffc4d4484..ad2bfc794257 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -296,12 +296,25 @@ core_initcall(init_amu_fie); static void cpu_read_corecnt(void *val) { + /* + * A value of 0 can be returned if the current CPU does not support AMUs + * or if the counter is disabled for this CPU. A return value of 0 at + * counter read is properly handled as an error case by the users of the + * counter. + */ *(u64 *)val = read_corecnt(); } static void cpu_read_constcnt(void *val) { - *(u64 *)val = read_constcnt(); + /* + * Return 0 if the current CPU is affected by erratum 2457168. 
A value + * of 0 is also returned if the current CPU does not support AMUs or if + * the counter is disabled. A return value of 0 at counter read is + * properly handled as an error case by the users of the counter. + */ + *(u64 *)val = this_cpu_has_cap(ARM64_WORKAROUND_2457168) ? + 0UL : read_constcnt(); } static inline @@ -328,7 +341,22 @@ int counters_read_on_cpu(int cpu, smp_call_func_t func, u64 *val) */ bool cpc_ffh_supported(void) { - return freq_counters_valid(get_cpu_with_amu_feat()); + int cpu = get_cpu_with_amu_feat(); + + /* + * FFH is considered supported if there is at least one present CPU that + * supports AMUs. Using FFH to read core and reference counters for CPUs + * that do not support AMUs, have counters disabled or that are affected + * by errata, will result in a return value of 0. + * + * This is done to allow any enabled and valid counters to be read + * through FFH, knowing that potentially returning 0 as counter value is + * properly handled by the users of these counters. + */ + if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask)) + return false; + + return true; } int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val) diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps index 779653771507..63b2484ce6c3 100644 --- a/arch/arm64/tools/cpucaps +++ b/arch/arm64/tools/cpucaps @@ -67,6 +67,7 @@ WORKAROUND_1902691 WORKAROUND_2038923 WORKAROUND_2064142 WORKAROUND_2077057 +WORKAROUND_2457168 WORKAROUND_TRBE_OVERWRITE_FILL_MODE WORKAROUND_TSB_FLUSH_FAILURE WORKAROUND_TRBE_WRITE_OUT_OF_RANGE -- cgit v1.2.3 From e75d18cecbb3805895d8ed64da4f78575ec96043 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Mon, 8 Aug 2022 09:46:40 +0100 Subject: arm64: cacheinfo: Fix incorrect assignment of signed error value to unsigned fw_level Though acpi_find_last_cache_level() always returned signed value and the document states it will return any errors caused by lack of a PPTT table, it never returned negative values before. Commit 0c80f9e165f8 ("ACPI: PPTT: Leave the table mapped for the runtime usage") however changed it by returning -ENOENT if no PPTT was found. The value returned from acpi_find_last_cache_level() is then assigned to unsigned fw_level. It will result in the number of cache leaves calculated incorrectly as a huge value which will then cause the following warning from __alloc_pages as the order would be great than MAX_ORDER because of incorrect and huge cache leaves value. | WARNING: CPU: 0 PID: 1 at mm/page_alloc.c:5407 __alloc_pages+0x74/0x314 | Modules linked in: | CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.19.0-10393-g7c2a8d3ac4c0 #73 | pstate: 20000005 (nzCv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--) | pc : __alloc_pages+0x74/0x314 | lr : alloc_pages+0xe8/0x318 | Call trace: | __alloc_pages+0x74/0x314 | alloc_pages+0xe8/0x318 | kmalloc_order_trace+0x68/0x1dc | __kmalloc+0x240/0x338 | detect_cache_attributes+0xe0/0x56c | update_siblings_masks+0x38/0x284 | store_cpu_topology+0x78/0x84 | smp_prepare_cpus+0x48/0x134 | kernel_init_freeable+0xc4/0x14c | kernel_init+0x2c/0x1b4 | ret_from_fork+0x10/0x20 Fix the same by changing fw_level to be signed integer and return the error from init_cache_level() early in case of error. 
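The failure mode reproduces in a few lines of userspace C (illustrative values):

	#include <stdio.h>

	int main(void)
	{
		unsigned int fw_level = -2;	/* -ENOENT stored in an unsigned */
		unsigned int level = 2, leaves = 2;

		if (level < fw_level)		/* 2 < 4294967294: always true */
			leaves += fw_level - level;	/* huge bogus leaf count */
		printf("leaves = %u\n", leaves);
		return 0;
	}

With a leaf count that large, the order of the later allocation exceeds MAX_ORDER, hence the __alloc_pages warning above.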
Reported-and-Tested-by: Bruno Goncalves Signed-off-by: Sudeep Holla Link: https://lore.kernel.org/r/20220808084640.3165368-1-sudeep.holla@arm.com Signed-off-by: Will Deacon --- arch/arm64/kernel/cacheinfo.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c index 587543c6c51c..97c42be71338 100644 --- a/arch/arm64/kernel/cacheinfo.c +++ b/arch/arm64/kernel/cacheinfo.c @@ -45,7 +45,8 @@ static void ci_leaf_init(struct cacheinfo *this_leaf, int init_cache_level(unsigned int cpu) { - unsigned int ctype, level, leaves, fw_level; + unsigned int ctype, level, leaves; + int fw_level; struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) { @@ -63,6 +64,9 @@ int init_cache_level(unsigned int cpu) else fw_level = acpi_find_last_cache_level(cpu); + if (fw_level < 0) + return fw_level; + if (level < fw_level) { /* * some external caches not specified in CLIDR_EL1 -- cgit v1.2.3 From 02e483f8d414cc4f467389843d61b63bbbbcd892 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 18 Aug 2022 22:36:11 +0100 Subject: arm64/sysreg: Directly include bitfield.h The SYS_FIELD_ macros in sysreg.h use definitions from bitfield.h but there is no direct inclusion of it, add one to ensure that sysreg.h is directly usable. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220818213613.733091-2-broonie@kernel.org Signed-off-by: Will Deacon --- arch/arm64/include/asm/sysreg.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 7c71358d44c4..c67002677015 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -1116,6 +1116,7 @@ #else +#include #include #include #include -- cgit v1.2.3 From a10edea4efbba4e8756f493781e953dd3592f24a Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 18 Aug 2022 22:36:12 +0100 Subject: arm64/sysreg: Guard SYS_FIELD_ macros for asm The SYS_FIELD_ macros are not safe for assembly contexts, move them inside the guarded section. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220818213613.733091-3-broonie@kernel.org Signed-off-by: Will Deacon --- arch/arm64/include/asm/sysreg.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index c67002677015..818df938a7ad 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -1210,8 +1210,6 @@ par; \ }) -#endif - #define SYS_FIELD_GET(reg, field, val) \ FIELD_GET(reg##_##field##_MASK, val) @@ -1221,4 +1219,6 @@ #define SYS_FIELD_PREP_ENUM(reg, field, val) \ FIELD_PREP(reg##_##field##_MASK, reg##_##field##_##val) +#endif + #endif /* __ASM_SYSREG_H */ -- cgit v1.2.3 From 53d2d84a1f6d68e036adab71df8e0f37cd2724f6 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 18 Aug 2022 22:36:13 +0100 Subject: arm64/cache: Fix cache_type_cwg() for register generation Ard noticed that since we converted CTR_EL0 to automatic generation we have been seeing errors on some systems handling the value of cache_type_cwg() such as CPU features: No Cache Writeback Granule information, assuming 128 This is because the manual definition of CTR_EL0_CWG_MASK was done without a shift while our convention is to define the mask after shifting. 
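The two conventions, side by side (illustrative, with the field at bits [27:24] as CWG is in CTR_EL0):

	#include <stdio.h>

	#define CWG_SHIFT		24
	#define CWG_MASK_UNSHIFTED	0xfUL			/* pairs with >> CWG_SHIFT */
	#define CWG_MASK_SHIFTED	(0xfUL << CWG_SHIFT)	/* pairs with FIELD_GET() */

	int main(void)
	{
		unsigned long ctr = 0x4UL << CWG_SHIFT;	/* CWG = 4 */

		/* mixing the two conventions loses the field entirely: */
		printf("%#lx\n", (ctr >> CWG_SHIFT) & CWG_MASK_SHIFTED);
		/* a consistently pre-shifted mask recovers it: */
		printf("%#lx\n", (ctr & CWG_MASK_SHIFTED) >> CWG_SHIFT);
		return 0;
	}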
This means that the user in cache_type_cwg() was broken as it was written for the manually written shift then mask. Fix this by converting to use SYS_FIELD_GET(). The only other field where the _MASK for this register is used is IminLine which is at offset 0 so unaffected. Fixes: 9a3634d02301 ("arm64/sysreg: Convert CTR_EL0 to automatic generation") Reported-by: Ard Biesheuvel Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220818213613.733091-4-broonie@kernel.org Signed-off-by: Will Deacon --- arch/arm64/include/asm/cache.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index ca9b487112cc..34256bda0da9 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h @@ -71,7 +71,7 @@ static __always_inline int icache_is_vpipt(void) static inline u32 cache_type_cwg(void) { - return (read_cpuid_cachetype() >> CTR_EL0_CWG_SHIFT) & CTR_EL0_CWG_MASK; + return SYS_FIELD_GET(CTR_EL0, CWG, read_cpuid_cachetype()); } #define __read_mostly __section(".data..read_mostly") -- cgit v1.2.3 From 7ddcaf78e93c9282b4d92184f511b4d5bee75355 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 17 Aug 2022 19:23:21 +0100 Subject: arm64/signal: Raise limit on stack frames The signal code has a limit of 64K on the size of a stack frame that it will generate, if this limit is exceeded then a process will be killed if it receives a signal. Unfortunately with the advent of SME this limit is too small - the maximum possible size of the ZA register alone is 64K. This is not an issue for practical systems at present but is easily seen using virtual platforms. Raise the limit to 256K, this is substantially more than could be used by any current architecture extension. Signed-off-by: Mark Brown Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20220817182324.638214-2-broonie@kernel.org Signed-off-by: Will Deacon --- arch/arm64/kernel/signal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 3e6d0352d7d3..0685b8784927 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -91,7 +91,7 @@ static size_t sigframe_size(struct rt_sigframe_user_layout const *user) * not taken into account. This limit is not a guarantee and is * NOT ABI. */ -#define SIGFRAME_MAXSZ SZ_64K +#define SIGFRAME_MAXSZ SZ_256K static int __sigframe_alloc(struct rt_sigframe_user_layout *user, unsigned long *offset, size_t size, bool extend) -- cgit v1.2.3 From ea64baacbc36a0d552aec0d87107182f40211131 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 17 Aug 2022 19:23:22 +0100 Subject: arm64/signal: Flush FPSIMD register state when disabling streaming mode When handling a signal delivered to a context with streaming mode enabled we will disable streaming mode for the signal handler, when doing so we should also flush the saved FPSIMD register state like exiting streaming mode in the hardware would do so that if that state is reloaded we get the same behaviour. Without this we will reload whatever the last FPSIMD state that was saved for the task was. 
Fixes: 40a8e87bb328 ("arm64/sme: Disable ZA and streaming mode when handling signals") Signed-off-by: Mark Brown Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20220817182324.638214-3-broonie@kernel.org Signed-off-by: Will Deacon --- arch/arm64/kernel/signal.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'arch') diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 0685b8784927..0d28a783a69a 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -926,6 +926,16 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka, /* Signal handlers are invoked with ZA and streaming mode disabled */ if (system_supports_sme()) { + /* + * If we were in streaming mode the saved register + * state was SVE but we will exit SM and use the + * FPSIMD register state - flush the saved FPSIMD + * register state in case it gets loaded. + */ + if (current->thread.svcr & SVCR_SM_MASK) + memset(¤t->thread.uw.fpsimd_state, 0, + sizeof(current->thread.uw.fpsimd_state)); + current->thread.svcr &= ~(SVCR_ZA_MASK | SVCR_SM_MASK); sme_smstop(); -- cgit v1.2.3 From 826a4fdd2ada9e5923c58bdd168f31a42e958ffc Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 17 Aug 2022 19:23:23 +0100 Subject: arm64/sme: Don't flush SVE register state when allocating SME storage Currently when taking a SME access trap we allocate storage for the SVE register state in order to be able to handle storage of streaming mode SVE. Due to the original usage in a purely SVE context the SVE register state allocation this also flushes the register state for SVE if storage was already allocated but in the SME context this is not desirable. For a SME access trap to be taken the task must not be in streaming mode so either there already is SVE register state present for regular SVE mode which would be corrupted or the task does not have TIF_SVE and the flush is redundant. Fix this by adding a flag to sve_alloc() indicating if we are in a SVE context and need to flush the state. Freshly allocated storage is always zeroed either way. Fixes: 8bd7f91c03d8 ("arm64/sme: Implement traps and syscall handling for SME") Signed-off-by: Mark Brown Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20220817182324.638214-4-broonie@kernel.org Signed-off-by: Will Deacon --- arch/arm64/include/asm/fpsimd.h | 4 ++-- arch/arm64/kernel/fpsimd.c | 10 ++++++---- arch/arm64/kernel/ptrace.c | 6 +++--- arch/arm64/kernel/signal.c | 2 +- 4 files changed, 12 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 9bb1873f5295..6f86b7ab6c28 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -153,7 +153,7 @@ struct vl_info { #ifdef CONFIG_ARM64_SVE -extern void sve_alloc(struct task_struct *task); +extern void sve_alloc(struct task_struct *task, bool flush); extern void fpsimd_release_task(struct task_struct *task); extern void fpsimd_sync_to_sve(struct task_struct *task); extern void fpsimd_force_sync_to_sve(struct task_struct *task); @@ -256,7 +256,7 @@ size_t sve_state_size(struct task_struct const *task); #else /* ! 
CONFIG_ARM64_SVE */ -static inline void sve_alloc(struct task_struct *task) { } +static inline void sve_alloc(struct task_struct *task, bool flush) { } static inline void fpsimd_release_task(struct task_struct *task) { } static inline void sve_sync_to_fpsimd(struct task_struct *task) { } static inline void sve_sync_from_fpsimd_zeropad(struct task_struct *task) { } diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index dd63ffc3a2fa..b9ae9827e6e8 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -715,10 +715,12 @@ size_t sve_state_size(struct task_struct const *task) * do_sve_acc() case, there is no ABI requirement to hide stale data * written previously be task. */ -void sve_alloc(struct task_struct *task) +void sve_alloc(struct task_struct *task, bool flush) { if (task->thread.sve_state) { - memset(task->thread.sve_state, 0, sve_state_size(task)); + if (flush) + memset(task->thread.sve_state, 0, + sve_state_size(task)); return; } @@ -1388,7 +1390,7 @@ void do_sve_acc(unsigned long esr, struct pt_regs *regs) return; } - sve_alloc(current); + sve_alloc(current, true); if (!current->thread.sve_state) { force_sig(SIGKILL); return; @@ -1439,7 +1441,7 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs) return; } - sve_alloc(current); + sve_alloc(current, false); sme_alloc(current); if (!current->thread.sve_state || !current->thread.za_state) { force_sig(SIGKILL); diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 21da83187a60..eb7c08dfb834 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -882,7 +882,7 @@ static int sve_set_common(struct task_struct *target, * state and ensure there's storage. */ if (target->thread.svcr != old_svcr) - sve_alloc(target); + sve_alloc(target, true); } /* Registers: FPSIMD-only case */ @@ -912,7 +912,7 @@ static int sve_set_common(struct task_struct *target, goto out; } - sve_alloc(target); + sve_alloc(target, true); if (!target->thread.sve_state) { ret = -ENOMEM; clear_tsk_thread_flag(target, TIF_SVE); @@ -1082,7 +1082,7 @@ static int za_set(struct task_struct *target, /* Ensure there is some SVE storage for streaming mode */ if (!target->thread.sve_state) { - sve_alloc(target); + sve_alloc(target, false); if (!target->thread.sve_state) { clear_thread_flag(TIF_SME); ret = -ENOMEM; diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 0d28a783a69a..9ad911f1647c 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -310,7 +310,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user) fpsimd_flush_task_state(current); /* From now, fpsimd_thread_switch() won't touch thread.sve_state */ - sve_alloc(current); + sve_alloc(current, true); if (!current->thread.sve_state) { clear_thread_flag(TIF_SVE); return -ENOMEM; -- cgit v1.2.3 From 714f3cbd70a4db9f9b7fe5b8a032896ed33fb824 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 17 Aug 2022 19:23:24 +0100 Subject: arm64/sme: Don't flush SVE register state when handling SME traps Currently as part of handling a SME access trap we flush the SVE register state. This is not needed and would corrupt register state if the task has access to the SVE registers already. For non-streaming mode accesses the required flushing will be done in the SVE access trap. For streaming mode SVE register accesses the architecture guarantees that the register state will be flushed when streaming mode is entered or exited so there is no need for us to do so. 
Simply remove the register initialisation. Fixes: 8bd7f91c03d8 ("arm64/sme: Implement traps and syscall handling for SME") Signed-off-by: Mark Brown Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20220817182324.638214-5-broonie@kernel.org Signed-off-by: Will Deacon --- arch/arm64/kernel/fpsimd.c | 11 ----------- 1 file changed, 11 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index b9ae9827e6e8..23834d96d1e7 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -1462,17 +1462,6 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs) fpsimd_bind_task_to_cpu(); } - /* - * If SVE was not already active initialise the SVE registers, - * any non-shared state between the streaming and regular SVE - * registers is architecturally guaranteed to be zeroed when - * we enter streaming mode. We do not need to initialize ZA - * since ZA must be disabled at this point and enabling ZA is - * architecturally defined to zero ZA. - */ - if (system_supports_sve() && !test_thread_flag(TIF_SVE)) - sve_init_regs(); - put_cpu_fpsimd_context(); } -- cgit v1.2.3 From ea902bcc1943f7539200ec464de3f54335588774 Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Tue, 23 Aug 2022 10:48:19 -0700 Subject: x86/cpu: Add new Raptor Lake CPU model number Note1: Model 0xB7 already claimed the "no suffix" #define for a regular client part, so add (yet another) suffix "S" to distinguish this new part from the earlier one. Note2: the RAPTORLAKE* and ALDERLAKE* processors are very similar from a software enabling point of view. There are no known features that have model-specific enabling and also differ between the two. In other words, every single place that lists *one* or more RAPTORLAKE* or ALDERLAKE* processors should list all of them. Note3: This is being merged before there is an in-tree user. Merging this provides an "anchor" so that the different folks can update their subsystems (like perf) in parallel to use this define and test it. [ dhansen: add a note about why this has no in-tree users yet ] Signed-off-by: Tony Luck Signed-off-by: Dave Hansen Link: https://lkml.kernel.org/r/20220823174819.223941-1-tony.luck@intel.com --- arch/x86/include/asm/intel-family.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h index def6ca121111..aeb38023a703 100644 --- a/arch/x86/include/asm/intel-family.h +++ b/arch/x86/include/asm/intel-family.h @@ -27,6 +27,7 @@ * _X - regular server parts * _D - micro server parts * _N,_P - other mobile parts + * _S - other client parts * * Historical OPTDIFFs: * @@ -112,6 +113,7 @@ #define INTEL_FAM6_RAPTORLAKE 0xB7 #define INTEL_FAM6_RAPTORLAKE_P 0xBA +#define INTEL_FAM6_RAPTORLAKE_S 0xBF /* "Small Core" Processors (Atom) */ -- cgit v1.2.3 From 3f67e69976035352db110443916bcce32c7f64ac Mon Sep 17 00:00:00 2001 From: Conor Dooley Date: Sat, 20 Aug 2022 00:14:13 +0100 Subject: riscv: dts: microchip: mpfs: fix incorrect pcie child node name Recent versions of dt-schema complain about the PCIe controller's child node name: arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dtb: pcie@2000000000: Unevaluated properties are not allowed ('clock-names', 'clocks', 'legacy-interrupt-controller', 'microchip,axi-m-atr0' were unexpected) From schema: Documentation/devicetree/bindings/pci/microchip,pcie-host.yaml Make the dts match the child node name expected by the binding.
Fixes: 528a5b1f2556 ("riscv: dts: microchip: add new peripherals to icicle kit device tree") Signed-off-by: Conor Dooley --- arch/riscv/boot/dts/microchip/mpfs.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/riscv/boot/dts/microchip/mpfs.dtsi b/arch/riscv/boot/dts/microchip/mpfs.dtsi index 0a6ad5b9ff8d..b26fc7886e5d 100644 --- a/arch/riscv/boot/dts/microchip/mpfs.dtsi +++ b/arch/riscv/boot/dts/microchip/mpfs.dtsi @@ -487,7 +487,7 @@ msi-controller; microchip,axi-m-atr0 = <0x10 0x0>; status = "disabled"; - pcie_intc: legacy-interrupt-controller { + pcie_intc: interrupt-controller { #address-cells = <0>; #interrupt-cells = <1>; interrupt-controller; -- cgit v1.2.3 From 72a05748cbd285567d69f173f8694e3471b79f20 Mon Sep 17 00:00:00 2001 From: Conor Dooley Date: Sat, 20 Aug 2022 00:14:14 +0100 Subject: riscv: dts: microchip: mpfs: remove ti,fifo-depth property Recent versions of dt-schema warn about a previously undetected undocumented property on the icicle & polarberry devicetrees: arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dtb: ethernet@20112000: ethernet-phy@8: Unevaluated properties are not allowed ('ti,fifo-depth' was unexpected) From schema: Documentation/devicetree/bindings/net/cdns,macb.yaml I know what you're thinking, the binding doesn't look to be the problem and I agree. I am not sure why a TI vendor property was ever actually added since it has no meaning... just get rid of it. Fixes: bc47b2217f24 ("riscv: dts: microchip: add the sundance polarberry") Fixes: 0fa6107eca41 ("RISC-V: Initial DTS for Microchip ICICLE board") Signed-off-by: Conor Dooley --- arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts | 2 -- arch/riscv/boot/dts/microchip/mpfs-polarberry.dts | 2 -- 2 files changed, 4 deletions(-) (limited to 'arch') diff --git a/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts index 044982a11df5..ee548ab61a2a 100644 --- a/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts +++ b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts @@ -84,12 +84,10 @@ phy1: ethernet-phy@9 { reg = <9>; - ti,fifo-depth = <0x1>; }; phy0: ethernet-phy@8 { reg = <8>; - ti,fifo-depth = <0x1>; }; }; diff --git a/arch/riscv/boot/dts/microchip/mpfs-polarberry.dts b/arch/riscv/boot/dts/microchip/mpfs-polarberry.dts index 82c93c8f5c17..dc11bb8fc833 100644 --- a/arch/riscv/boot/dts/microchip/mpfs-polarberry.dts +++ b/arch/riscv/boot/dts/microchip/mpfs-polarberry.dts @@ -54,12 +54,10 @@ phy1: ethernet-phy@5 { reg = <5>; - ti,fifo-depth = <0x01>; }; phy0: ethernet-phy@4 { reg = <4>; - ti,fifo-depth = <0x01>; }; }; -- cgit v1.2.3 From 2b55915d27dcaa35f54bad7925af0a76001079bc Mon Sep 17 00:00:00 2001 From: Conor Dooley Date: Sat, 20 Aug 2022 00:14:15 +0100 Subject: riscv: dts: microchip: mpfs: remove bogus card-detect-delay Recent versions of dt-schema warn about a previously undetected undocumented property: arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dtb: mmc@20008000: Unevaluated properties are not allowed ('card-detect-delay' was unexpected) From schema: Documentation/devicetree/bindings/mmc/cdns,sdhci.yaml There are no GPIOs connected to MSSIO6B4 pin K3 so adding the common cd-debounce-delay-ms property makes no sense. The Cadence IP has a register that sets the card detect delay as "DP * tclk". On MPFS, this clock frequency is not configurable (it must be 200 MHz) & the FPGA comes out of reset with this register already set.
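For a sense of scale, the delay that register encodes is just a cycle count (a worked example with an arbitrary illustrative value of DP, not one read from the hardware):

/*
 * delay = DP * (1 / tclk); with tclk fixed at 200 MHz on MPFS,
 * e.g. DP = 40000 cycles -> 40000 / 200000000 Hz = 200 us of debounce.
 */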
Fixes: bc47b2217f24 ("riscv: dts: microchip: add the sundance polarberry") Fixes: 0fa6107eca41 ("RISC-V: Initial DTS for Microchip ICICLE board") Signed-off-by: Conor Dooley --- arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts | 1 - arch/riscv/boot/dts/microchip/mpfs-polarberry.dts | 1 - 2 files changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts index ee548ab61a2a..f3f87ed2007f 100644 --- a/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts +++ b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts @@ -100,7 +100,6 @@ disable-wp; cap-sd-highspeed; cap-mmc-highspeed; - card-detect-delay = <200>; mmc-ddr-1_8v; mmc-hs200-1_8v; sd-uhs-sdr12; diff --git a/arch/riscv/boot/dts/microchip/mpfs-polarberry.dts b/arch/riscv/boot/dts/microchip/mpfs-polarberry.dts index dc11bb8fc833..c87cc2d8fe29 100644 --- a/arch/riscv/boot/dts/microchip/mpfs-polarberry.dts +++ b/arch/riscv/boot/dts/microchip/mpfs-polarberry.dts @@ -70,7 +70,6 @@ disable-wp; cap-sd-highspeed; cap-mmc-highspeed; - card-detect-delay = <200>; mmc-ddr-1_8v; mmc-hs200-1_8v; sd-uhs-sdr12; -- cgit v1.2.3 From e4009c5fa77b4356aa37ce002e9f9952dfd7a615 Mon Sep 17 00:00:00 2001 From: Conor Dooley Date: Sat, 20 Aug 2022 00:14:16 +0100 Subject: riscv: dts: microchip: mpfs: remove pci axi address translation property An AXI master address translation table property was inadvertently added to the device tree & this was not caught by dtbs_check at the time. Remove the property - it should not be in mpfs.dtsi anyway as it would be more suitable in -fabric.dtsi nor does it actually apply to the version of the reference design we are using for upstream. Link: https://www.microsemi.com/document-portal/doc_download/1245812-polarfire-fpga-and-polarfire-soc-fpga-pci-express-user-guide # Section 1.3.3 Fixes: 528a5b1f2556 ("riscv: dts: microchip: add new peripherals to icicle kit device tree") Signed-off-by: Conor Dooley --- arch/riscv/boot/dts/microchip/mpfs.dtsi | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/riscv/boot/dts/microchip/mpfs.dtsi b/arch/riscv/boot/dts/microchip/mpfs.dtsi index b26fc7886e5d..74493344ea41 100644 --- a/arch/riscv/boot/dts/microchip/mpfs.dtsi +++ b/arch/riscv/boot/dts/microchip/mpfs.dtsi @@ -485,7 +485,6 @@ ranges = <0x3000000 0x0 0x8000000 0x20 0x8000000 0x0 0x80000000>; msi-parent = <&pcie>; msi-controller; - microchip,axi-m-atr0 = <0x10 0x0>; status = "disabled"; pcie_intc: interrupt-controller { #address-cells = <0>; -- cgit v1.2.3 From 4b1c742407571eff58b6de9881889f7ca7c4b4dc Mon Sep 17 00:00:00 2001 From: Michael Roth Date: Tue, 23 Aug 2022 11:07:34 -0500 Subject: x86/boot: Don't propagate uninitialized boot_params->cc_blob_address In some cases, bootloaders will leave boot_params->cc_blob_address uninitialized rather than zeroing it out. This field is only meant to be set by the boot/compressed kernel in order to pass information to the uncompressed kernel when SEV-SNP support is enabled. Therefore, there are no cases where the bootloader-provided values should be treated as anything other than garbage. Otherwise, the uncompressed kernel may attempt to access this bogus address, leading to a crash during early boot. Normally, sanitize_boot_params() would be used to clear out such fields but that happens too late: sev_enable() may have already initialized it to a valid value that should not be zeroed out. Instead, have sev_enable() zero it out unconditionally beforehand. 
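The ordering constraint described above can be sketched as follows (simplified; not the literal call sites):

/* sketch of the early boot flow */
void boot_flow_sketch(struct boot_params *bp)
{
	sev_enable(bp);           /* clears, then may legitimately set,
	                           * bp->cc_blob_address */
	sanitize_boot_params(bp); /* zeroing the field here would wipe the
	                           * value sev_enable() just published */
}

Hence the unconditional clear lives at the top of sev_enable() itself, as the hunks below show.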
Ensure this also happens for !CONFIG_AMD_MEM_ENCRYPT by including the same handling in the sev_enable() stub function. [ bp: Massage commit message and comments. ] Fixes: b190a043c49a ("x86/sev: Add SEV-SNP feature detection/setup") Reported-by: Jeremi Piotrowski Reported-by: watnuss@gmx.de Signed-off-by: Michael Roth Signed-off-by: Borislav Petkov Cc: stable@vger.kernel.org Link: https://bugzilla.kernel.org/show_bug.cgi?id=216387 Link: https://lore.kernel.org/r/20220823160734.89036-1-michael.roth@amd.com --- arch/x86/boot/compressed/misc.h | 12 +++++++++++- arch/x86/boot/compressed/sev.c | 8 ++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index 4910bf230d7b..62208ec04ca4 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h @@ -132,7 +132,17 @@ void snp_set_page_private(unsigned long paddr); void snp_set_page_shared(unsigned long paddr); void sev_prep_identity_maps(unsigned long top_level_pgt); #else -static inline void sev_enable(struct boot_params *bp) { } +static inline void sev_enable(struct boot_params *bp) +{ + /* + * bp->cc_blob_address should only be set by boot/compressed kernel. + * Initialize it to 0 unconditionally (thus here in this stub too) to + * ensure that uninitialized values from buggy bootloaders aren't + * propagated. + */ + if (bp) + bp->cc_blob_address = 0; +} static inline void sev_es_shutdown_ghcb(void) { } static inline bool sev_es_check_ghcb_fault(unsigned long address) { diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c index 52f989f6acc2..c93930d5ccbd 100644 --- a/arch/x86/boot/compressed/sev.c +++ b/arch/x86/boot/compressed/sev.c @@ -276,6 +276,14 @@ void sev_enable(struct boot_params *bp) struct msr m; bool snp; + /* + * bp->cc_blob_address should only be set by boot/compressed kernel. + * Initialize it to 0 to ensure that uninitialized values from + * buggy bootloaders aren't propagated. + */ + if (bp) + bp->cc_blob_address = 0; + /* * Setup/preliminary detection of SNP. This will be sanity-checked * against CPUID/MSR values later. -- cgit v1.2.3 From cdaa0a407f1acd3a44861e3aea6e3c7349e668f1 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Tue, 23 Aug 2022 16:55:51 -0500 Subject: x86/sev: Don't use cc_platform_has() for early SEV-SNP calls When running identity-mapped and depending on the kernel configuration, it is possible that the compiler uses jump tables when generating code for cc_platform_has(). This causes a boot failure because the jump table uses un-mapped kernel virtual addresses, not identity-mapped addresses. This has been seen with CONFIG_RETPOLINE=n. Similar to sme_encrypt_kernel(), use an open-coded direct check for the status of SNP rather than trying to eliminate the jump table. This preserves any code optimization in cc_platform_has() that can be useful post boot. It also limits the changes to SEV-specific files so that future compiler features won't necessarily require possible build changes just because they are not compatible with running identity-mapped. [ bp: Massage commit message.
] Fixes: 5e5ccff60a29 ("x86/sev: Add helper for validating pages in early enc attribute changes") Reported-by: Sean Christopherson Suggested-by: Sean Christopherson Signed-off-by: Tom Lendacky Signed-off-by: Borislav Petkov Cc: # 5.19.x Link: https://lore.kernel.org/all/YqfabnTRxFSM+LoX@google.com/ --- arch/x86/kernel/sev.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c index 63dc626627a0..4f84c3f11af5 100644 --- a/arch/x86/kernel/sev.c +++ b/arch/x86/kernel/sev.c @@ -701,7 +701,13 @@ e_term: void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr, unsigned int npages) { - if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) + /* + * This can be invoked in early boot while running identity mapped, so + * use an open coded check for SNP instead of using cc_platform_has(). + * This eliminates worries about jump tables or checking boot_cpu_data + * in the cc_platform_has() function. + */ + if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED)) return; /* @@ -717,7 +723,13 @@ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long padd void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, unsigned int npages) { - if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) + /* + * This can be invoked in early boot while running identity mapped, so + * use an open coded check for SNP instead of using cc_platform_has(). + * This eliminates worries about jump tables or checking boot_cpu_data + * in the cc_platform_has() function. + */ + if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED)) return; /* Invalidate the memory pages before they are marked shared in the RMP table. */ -- cgit v1.2.3 From 7c12bb8f59d36507bf70c35dae083dfe9cafc815 Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Thu, 25 Aug 2022 19:34:59 +0800 Subject: LoongArch: Select PCI_QUIRKS to avoid build error PCI_LOONGSON is mandatory for LoongArch and is selected unconditionally in Kconfig, but its dependency PCI_QUIRKS is missing, which may cause a build error with "make randconfig": arch/loongarch/pci/acpi.c: In function 'pci_acpi_setup_ecam_mapping': >> arch/loongarch/pci/acpi.c:103:29: error: 'loongson_pci_ecam_ops' undeclared (first use in this function) 103 | ecam_ops = &loongson_pci_ecam_ops; | ^~~~~~~~~~~~~~~~~~~~~ arch/loongarch/pci/acpi.c:103:29: note: each undeclared identifier is reported only once for each function it appears in Kconfig warnings: (for reference only) WARNING: unmet direct dependencies detected for PCI_LOONGSON Depends on [n]: PCI [=y] && (MACH_LOONGSON64 [=y] || COMPILE_TEST [=y]) && (OF [=y] || ACPI [=y]) && PCI_QUIRKS [=n] Selected by [y]: - LOONGARCH [=y] Fix it by selecting PCI_QUIRKS unconditionally, too.
Reported-by: kernel test robot Tested-by: Randy Dunlap Signed-off-by: Huacai Chen --- arch/loongarch/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index 4abc9a28aba4..26aeb1408e56 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -111,6 +111,7 @@ config LOONGARCH select PCI_ECAM if ACPI select PCI_LOONGSON select PCI_MSI_ARCH_FALLBACKS + select PCI_QUIRKS select PERF_USE_VMALLOC select RTC_LIB select SMP -- cgit v1.2.3 From 84e762060147582912581fd99cb25f3559ec8c22 Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Thu, 25 Aug 2022 19:34:59 +0800 Subject: LoongArch: Fix build warnings in VDSO Fix build warnings in VDSO as below: arch/loongarch/vdso/vgettimeofday.c:9:5: warning: no previous prototype for '__vdso_clock_gettime' [-Wmissing-prototypes] 9 | int __vdso_clock_gettime(clockid_t clock, | ^~~~~~~~~~~~~~~~~~~~ arch/loongarch/vdso/vgettimeofday.c:15:5: warning: no previous prototype for '__vdso_gettimeofday' [-Wmissing-prototypes] 15 | int __vdso_gettimeofday(struct __kernel_old_timeval *tv, | ^~~~~~~~~~~~~~~~~~~ arch/loongarch/vdso/vgettimeofday.c:21:5: warning: no previous prototype for '__vdso_clock_getres' [-Wmissing-prototypes] 21 | int __vdso_clock_getres(clockid_t clock_id, | ^~~~~~~~~~~~~~~~~~~ arch/loongarch/vdso/vgetcpu.c:27:5: warning: no previous prototype for '__vdso_getcpu' [-Wmissing-prototypes] 27 | int __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused) Reported-by: kernel test robot Signed-off-by: Huacai Chen --- arch/loongarch/vdso/vgetcpu.c | 2 ++ arch/loongarch/vdso/vgettimeofday.c | 15 +++++++++------ 2 files changed, 11 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/loongarch/vdso/vgetcpu.c b/arch/loongarch/vdso/vgetcpu.c index 43a0078e4418..e02e775f5360 100644 --- a/arch/loongarch/vdso/vgetcpu.c +++ b/arch/loongarch/vdso/vgetcpu.c @@ -24,6 +24,8 @@ static __always_inline const struct vdso_pcpu_data *get_pcpu_data(void) return (struct vdso_pcpu_data *)(get_vdso_base() - VDSO_DATA_SIZE); } +extern +int __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused); int __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused) { int cpu_id; diff --git a/arch/loongarch/vdso/vgettimeofday.c b/arch/loongarch/vdso/vgettimeofday.c index b1f4548dae92..8f22863bd7ea 100644 --- a/arch/loongarch/vdso/vgettimeofday.c +++ b/arch/loongarch/vdso/vgettimeofday.c @@ -6,20 +6,23 @@ */ #include -int __vdso_clock_gettime(clockid_t clock, - struct __kernel_timespec *ts) +extern +int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts); +int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts) { return __cvdso_clock_gettime(clock, ts); } -int __vdso_gettimeofday(struct __kernel_old_timeval *tv, - struct timezone *tz) +extern +int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz); +int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) { return __cvdso_gettimeofday(tv, tz); } -int __vdso_clock_getres(clockid_t clock_id, - struct __kernel_timespec *res) +extern +int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res); +int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res) { return __cvdso_clock_getres(clock_id, res); } -- cgit v1.2.3 From da48b67cfb6b4f115ae652dd5995c56fe2a2cf9b Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Thu, 25 Aug 2022 19:34:59 +0800 Subject: LoongArch: Cleanup 
reset routines with new API Clean up the reset routines by using the new do_kernel_power_off() instead of the old pm_power_off(), and then simplify the whole file (reset.c) organization by inlining some functions. This cleanup also fixes a poweroff error if EFI runtime is disabled. Signed-off-by: Huacai Chen --- arch/loongarch/include/asm/reboot.h | 10 ------ arch/loongarch/kernel/reset.c | 69 +++++++++++-------------------------- 2 files changed, 21 insertions(+), 58 deletions(-) delete mode 100644 arch/loongarch/include/asm/reboot.h (limited to 'arch') diff --git a/arch/loongarch/include/asm/reboot.h b/arch/loongarch/include/asm/reboot.h deleted file mode 100644 index 51151749d8f0..000000000000 --- a/arch/loongarch/include/asm/reboot.h +++ /dev/null @@ -1,10 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2020-2022 Loongson Technology Corporation Limited - */ -#ifndef _ASM_REBOOT_H -#define _ASM_REBOOT_H - -extern void (*pm_restart)(void); - -#endif /* _ASM_REBOOT_H */ diff --git a/arch/loongarch/kernel/reset.c b/arch/loongarch/kernel/reset.c index 800c965a17ea..8c82021eb2f4 100644 --- a/arch/loongarch/kernel/reset.c +++ b/arch/loongarch/kernel/reset.c @@ -15,10 +15,16 @@ #include #include #include -#include -static void default_halt(void) +void (*pm_power_off)(void); +EXPORT_SYMBOL(pm_power_off); + +void machine_halt(void) { +#ifdef CONFIG_SMP + preempt_disable(); + smp_send_stop(); +#endif local_irq_disable(); clear_csr_ecfg(ECFG0_IM); @@ -30,18 +36,29 @@ static void default_halt(void) } } -static void default_poweroff(void) +void machine_power_off(void) { +#ifdef CONFIG_SMP + preempt_disable(); + smp_send_stop(); +#endif + do_kernel_power_off(); #ifdef CONFIG_EFI efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL); #endif + while (true) { __arch_cpu_idle(); } } -static void default_restart(void) +void machine_restart(char *command) { +#ifdef CONFIG_SMP + preempt_disable(); + smp_send_stop(); +#endif + do_kernel_restart(command); #ifdef CONFIG_EFI if (efi_capsule_pending(NULL)) efi_reboot(REBOOT_WARM, NULL); @@ -55,47 +72,3 @@ static void default_restart(void) __arch_cpu_idle(); } } - -void (*pm_restart)(void); -EXPORT_SYMBOL(pm_restart); - -void (*pm_power_off)(void); -EXPORT_SYMBOL(pm_power_off); - -void machine_halt(void) -{ -#ifdef CONFIG_SMP - preempt_disable(); - smp_send_stop(); -#endif - default_halt(); -} - -void machine_power_off(void) -{ -#ifdef CONFIG_SMP - preempt_disable(); - smp_send_stop(); -#endif - pm_power_off(); -} - -void machine_restart(char *command) -{ -#ifdef CONFIG_SMP - preempt_disable(); - smp_send_stop(); -#endif - do_kernel_restart(command); - pm_restart(); -} - -static int __init loongarch_reboot_setup(void) -{ - pm_restart = default_restart; - pm_power_off = default_poweroff; - - return 0; -} - -arch_initcall(loongarch_reboot_setup); -- cgit v1.2.3 From 092e9ebe52a664f9f58e2d24136ae791fe71c6db Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Thu, 25 Aug 2022 19:34:59 +0800 Subject: LoongArch: Cleanup headers to avoid circular dependency When GENERIC_IOREMAP is enabled, there is a circular dependency that causes build errors. The root cause is that pgtable.h shouldn't include io.h, but pgtable.h needs some macros defined in io.h. So clean up those macros and remove the unnecessary inclusions, as other architectures do.
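The cycle being broken looks roughly like this (a sketch based on the description above):

/*
 * pgtable.h --includes--> io.h       (only for virt_to_phys() & friends)
 * io.h      --includes--> pgtable.h  (indirectly, once GENERIC_IOREMAP
 *                                     is enabled)
 *
 * After the patch, pgtable.h computes physical addresses with the
 * PHYSADDR() macro from addrspace.h instead, e.g.:
 */
#define pmd_phys(pmd)	PHYSADDR(pmd_val(pmd))	/* was virt_to_phys((void *)pmd_val(pmd)) */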
Signed-off-by: Huacai Chen --- arch/loongarch/include/asm/addrspace.h | 16 ++++++++++++++++ arch/loongarch/include/asm/io.h | 19 ------------------- arch/loongarch/include/asm/page.h | 2 +- arch/loongarch/include/asm/pgtable.h | 7 +++---- arch/loongarch/mm/mmap.c | 11 ++--------- 5 files changed, 22 insertions(+), 33 deletions(-) (limited to 'arch') diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/include/asm/addrspace.h index b91e0733b2e5..d342935e5a72 100644 --- a/arch/loongarch/include/asm/addrspace.h +++ b/arch/loongarch/include/asm/addrspace.h @@ -109,4 +109,20 @@ extern unsigned long vm_map_base; */ #define PHYSADDR(a) ((_ACAST64_(a)) & TO_PHYS_MASK) +/* + * On LoongArch, I/O ports mappring is following: + * + * | .... | + * |-----------------------| + * | pci io ports(16K~32M) | + * |-----------------------| + * | isa io ports(0 ~16K) | + * PCI_IOBASE ->|-----------------------| + * | .... | + */ +#define PCI_IOBASE ((void __iomem *)(vm_map_base + (2 * PAGE_SIZE))) +#define PCI_IOSIZE SZ_32M +#define ISA_IOSIZE SZ_16K +#define IO_SPACE_LIMIT (PCI_IOSIZE - 1) + #endif /* _ASM_ADDRSPACE_H */ diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h index 884599739b36..999944ea1cea 100644 --- a/arch/loongarch/include/asm/io.h +++ b/arch/loongarch/include/asm/io.h @@ -7,34 +7,15 @@ #define ARCH_HAS_IOREMAP_WC -#include #include #include #include -#include -#include #include #include #include #include -/* - * On LoongArch, I/O ports mappring is following: - * - * | .... | - * |-----------------------| - * | pci io ports(64K~32M) | - * |-----------------------| - * | isa io ports(0 ~16K) | - * PCI_IOBASE ->|-----------------------| - * | .... | - */ -#define PCI_IOBASE ((void __iomem *)(vm_map_base + (2 * PAGE_SIZE))) -#define PCI_IOSIZE SZ_32M -#define ISA_IOSIZE SZ_16K -#define IO_SPACE_LIMIT (PCI_IOSIZE - 1) - /* * Change "struct page" to physical address. 
*/ diff --git a/arch/loongarch/include/asm/page.h b/arch/loongarch/include/asm/page.h index a37324ac460b..53f284a96182 100644 --- a/arch/loongarch/include/asm/page.h +++ b/arch/loongarch/include/asm/page.h @@ -95,7 +95,7 @@ static inline int pfn_valid(unsigned long pfn) #endif -#define virt_to_pfn(kaddr) PFN_DOWN(virt_to_phys((void *)(kaddr))) +#define virt_to_pfn(kaddr) PFN_DOWN(PHYSADDR(kaddr)) #define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr)) extern int __virt_addr_valid(volatile void *kaddr); diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h index e03443abaf7d..8ea57e2f0e04 100644 --- a/arch/loongarch/include/asm/pgtable.h +++ b/arch/loongarch/include/asm/pgtable.h @@ -59,7 +59,6 @@ #include #include #include -#include struct mm_struct; struct vm_area_struct; @@ -145,7 +144,7 @@ static inline void set_p4d(p4d_t *p4d, p4d_t p4dval) *p4d = p4dval; } -#define p4d_phys(p4d) virt_to_phys((void *)p4d_val(p4d)) +#define p4d_phys(p4d) PHYSADDR(p4d_val(p4d)) #define p4d_page(p4d) (pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT)) #endif @@ -188,7 +187,7 @@ static inline pmd_t *pud_pgtable(pud_t pud) #define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0) -#define pud_phys(pud) virt_to_phys((void *)pud_val(pud)) +#define pud_phys(pud) PHYSADDR(pud_val(pud)) #define pud_page(pud) (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT)) #endif @@ -221,7 +220,7 @@ static inline void pmd_clear(pmd_t *pmdp) #define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0) -#define pmd_phys(pmd) virt_to_phys((void *)pmd_val(pmd)) +#define pmd_phys(pmd) PHYSADDR(pmd_val(pmd)) #ifndef CONFIG_TRANSPARENT_HUGEPAGE #define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT)) diff --git a/arch/loongarch/mm/mmap.c b/arch/loongarch/mm/mmap.c index 52e40f0ba732..381a569635a9 100644 --- a/arch/loongarch/mm/mmap.c +++ b/arch/loongarch/mm/mmap.c @@ -2,16 +2,9 @@ /* * Copyright (C) 2020-2022 Loongson Technology Corporation Limited */ -#include -#include -#include +#include #include #include -#include -#include -#include -#include -#include unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ EXPORT_SYMBOL(shm_align_mask); @@ -120,6 +113,6 @@ int __virt_addr_valid(volatile void *kaddr) if ((vaddr < PAGE_OFFSET) || (vaddr >= vm_map_base)) return 0; - return pfn_valid(PFN_DOWN(virt_to_phys(kaddr))); + return pfn_valid(PFN_DOWN(PHYSADDR(kaddr))); } EXPORT_SYMBOL_GPL(__virt_addr_valid); -- cgit v1.2.3 From 720dc7ab252bbdf404cab7b909e26b31e602bf7e Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Thu, 25 Aug 2022 19:34:59 +0800 Subject: LoongArch: Add subword xchg/cmpxchg emulation LoongArch natively supports only 32-bit/64-bit xchg/cmpxchg. But percpu operations, qspinlock and some drivers need 8-bit/16-bit xchg/cmpxchg. We add subword xchg/cmpxchg emulation in this patch because the emulation performs better than the generic implementation (on NUMA systems), and it also fixes some build errors [1]. LoongArch's guarantee for forward progress (avoiding many ll/sc happening at the same time with no one succeeding): we have the "exclusive access (with timeout) of ll" feature to avoid simultaneous ll (which also blocks other memory load/store on the same address), and the "random delay of sc" feature to avoid simultaneous sc. Implementing such features is mandatory for multi-core LoongArch processors, the only exceptions being single-core and dual-core processors (which also don't support multi-chip interconnection).
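As an aside, the mask/shift arithmetic the new helpers implement with ll.w/sc.w can be illustrated in portable C (a sketch only, using a GCC builtin in place of the LoongArch loop, for naturally aligned 8-bit/16-bit values):

static unsigned int subword_xchg_sketch(volatile void *ptr,
					unsigned int val, unsigned int size)
{
	unsigned int shift = ((unsigned long)ptr & 0x3) * 8;
	unsigned int mask = ((1u << (size * 8)) - 1) << shift;
	volatile unsigned int *p32 =
		(volatile unsigned int *)((unsigned long)ptr & ~0x3UL);
	unsigned int old, new;

	do {
		old = *p32;
		/* splice the new subword into the containing 32-bit word */
		new = (old & ~mask) | ((val << shift) & mask);
	} while (!__sync_bool_compare_and_swap(p32, old, new));

	return (old & mask) >> shift;
}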
Feature bits are introduced in CPUCFG3, bit 3 and bit 4 [2]. [1] https://lore.kernel.org/loongarch/CAAhV-H6vvkuOzy8OemWdYK3taj5Jn3bFX0ZTwE=twM8ywpBUYA@mail.gmail.com/T/#t [2] https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html#_cpucfg Reported-by: Sudip Mukherjee (Codethink) Suggested-by: Linus Torvalds Signed-off-by: Rui Wang Signed-off-by: Huacai Chen --- arch/loongarch/include/asm/cmpxchg.h | 98 +++++++++++++++++++++++++++++++++++- arch/loongarch/include/asm/percpu.h | 8 +++ 2 files changed, 105 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/loongarch/include/asm/cmpxchg.h b/arch/loongarch/include/asm/cmpxchg.h index 0a9b0fac1eee..ae19e33c7754 100644 --- a/arch/loongarch/include/asm/cmpxchg.h +++ b/arch/loongarch/include/asm/cmpxchg.h @@ -5,8 +5,9 @@ #ifndef __ASM_CMPXCHG_H #define __ASM_CMPXCHG_H -#include +#include #include +#include #define __xchg_asm(amswap_db, m, val) \ ({ \ @@ -21,10 +22,53 @@ __ret; \ }) +static inline unsigned int __xchg_small(volatile void *ptr, unsigned int val, + unsigned int size) +{ + unsigned int shift; + u32 old32, mask, temp; + volatile u32 *ptr32; + + /* Mask value to the correct size. */ + mask = GENMASK((size * BITS_PER_BYTE) - 1, 0); + val &= mask; + + /* + * Calculate a shift & mask that correspond to the value we wish to + * exchange within the naturally aligned 4 byte integer that includes + * it. + */ + shift = (unsigned long)ptr & 0x3; + shift *= BITS_PER_BYTE; + mask <<= shift; + + /* + * Calculate a pointer to the naturally aligned 4 byte integer that + * includes our byte of interest, and load its value. + */ + ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3); + + asm volatile ( + "1: ll.w %0, %3 \n" + " andn %1, %0, %z4 \n" + " or %1, %1, %z5 \n" + " sc.w %1, %2 \n" + " beqz %1, 1b \n" + : "=&r" (old32), "=&r" (temp), "=ZC" (*ptr32) + : "ZC" (*ptr32), "Jr" (mask), "Jr" (val << shift) + : "memory"); + + return (old32 & mask) >> shift; +} + static inline unsigned long __xchg(volatile void *ptr, unsigned long x, int size) { switch (size) { + case 1: + case 2: + return __xchg_small(ptr, x, size); + case 4: return __xchg_asm("amswap_db.w", (volatile u32 *)ptr, (u32)x); @@ -67,10 +111,62 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x, __ret; \ }) +static inline unsigned int __cmpxchg_small(volatile void *ptr, unsigned int old, + unsigned int new, unsigned int size) +{ + unsigned int shift; + u32 old32, mask, temp; + volatile u32 *ptr32; + + /* Mask inputs to the correct size. */ + mask = GENMASK((size * BITS_PER_BYTE) - 1, 0); + old &= mask; + new &= mask; + + /* + * Calculate a shift & mask that correspond to the value we wish to + * compare & exchange within the naturally aligned 4 byte integer + * that includes it. + */ + shift = (unsigned long)ptr & 0x3; + shift *= BITS_PER_BYTE; + old <<= shift; + new <<= shift; + mask <<= shift; + + /* + * Calculate a pointer to the naturally aligned 4 byte integer that + * includes our byte of interest, and load its value.
+ */ + ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3); + + asm volatile ( + "1: ll.w %0, %3 \n" + " and %1, %0, %z4 \n" + " bne %1, %z5, 2f \n" + " andn %1, %0, %z4 \n" + " or %1, %1, %z6 \n" + " sc.w %1, %2 \n" + " beqz %1, 1b \n" + " b 3f \n" + "2: \n" + __WEAK_LLSC_MB + "3: \n" + : "=&r" (old32), "=&r" (temp), "=ZC" (*ptr32) + : "ZC" (*ptr32), "Jr" (mask), "Jr" (old), "Jr" (new) + : "memory"); + + return (old32 & mask) >> shift; +} + static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, unsigned int size) { switch (size) { + case 1: + case 2: + return __cmpxchg_small(ptr, old, new, size); + case 4: return __cmpxchg_asm("ll.w", "sc.w", (volatile u32 *)ptr, (u32)old, new); diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h index e6569f18c6dd..0bd6b0110198 100644 --- a/arch/loongarch/include/asm/percpu.h +++ b/arch/loongarch/include/asm/percpu.h @@ -123,6 +123,10 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val, int size) { switch (size) { + case 1: + case 2: + return __xchg_small((volatile void *)ptr, val, size); + case 4: return __xchg_asm("amswap.w", (volatile u32 *)ptr, (u32)val); @@ -204,9 +208,13 @@ do { \ #define this_cpu_write_4(pcp, val) _percpu_write(pcp, val) #define this_cpu_write_8(pcp, val) _percpu_write(pcp, val) +#define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val) +#define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val) #define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val) #define this_cpu_xchg_8(pcp, val) _percpu_xchg(pcp, val) +#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) +#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) #define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) #define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) -- cgit v1.2.3 From b83699ea1e62951857c2d8648bd93a4744899eb7 Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Thu, 25 Aug 2022 19:34:59 +0800 Subject: LoongArch: mm: Avoid unnecessary page fault retires on shared memory types Commit d92725256b4f22d0 ("mm: avoid unnecessary page fault retires on shared memory types") modifies do_page_fault() to handle the VM_FAULT_COMPLETED case, but forgot to make the same change for LoongArch, so fix it as the other architectures do. Fixes: d92725256b4f22d0 ("mm: avoid unnecessary page fault retires on shared memory types") Reviewed-by: Guo Ren Signed-off-by: Huacai Chen --- arch/loongarch/mm/fault.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c index 605579b19a00..1ccd53655cab 100644 --- a/arch/loongarch/mm/fault.c +++ b/arch/loongarch/mm/fault.c @@ -216,6 +216,10 @@ good_area: return; } + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) + return; + if (unlikely(fault & VM_FAULT_RETRY)) { flags |= FAULT_FLAG_TRIED; -- cgit v1.2.3 From ab0af755d40f2ea4ef01503e7bebf083f65c9062 Mon Sep 17 00:00:00 2001 From: Lukas Bulwahn Date: Wed, 17 Aug 2022 06:43:33 +0200 Subject: xen: x86: remove setting the obsolete config XEN_MAX_DOMAIN_MEMORY Commit c70727a5bc18 ("xen: allow more than 512 GB of RAM for 64 bit pv-domains") from July 2015 replaces the config XEN_MAX_DOMAIN_MEMORY with a new config XEN_512GB, but missed adjusting arch/x86/configs/xen.config. As XEN_512GB defaults to yes, there is no need to explicitly set any config in xen.config. Just remove setting the obsolete config XEN_MAX_DOMAIN_MEMORY.
Signed-off-by: Lukas Bulwahn Reviewed-by: Juergen Gross Link: https://lore.kernel.org/r/20220817044333.22310-1-lukas.bulwahn@gmail.com Signed-off-by: Juergen Gross --- arch/x86/configs/xen.config | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/configs/xen.config b/arch/x86/configs/xen.config index d9fc7139fd46..581296255b39 100644 --- a/arch/x86/configs/xen.config +++ b/arch/x86/configs/xen.config @@ -14,7 +14,6 @@ CONFIG_CPU_FREQ=y # x86 xen specific config options CONFIG_XEN_PVH=y -CONFIG_XEN_MAX_DOMAIN_MEMORY=500 CONFIG_XEN_SAVE_RESTORE=y # CONFIG_XEN_DEBUG_FS is not set CONFIG_XEN_MCE_LOG=y -- cgit v1.2.3 From 13cccafe0edcd03bf1c841de8ab8a1c8e34f77d9 Mon Sep 17 00:00:00 2001 From: Brian Foster Date: Tue, 16 Aug 2022 11:54:07 -0400 Subject: s390: fix double free of GS and RI CBs on fork() failure The pointers for guarded storage and runtime instrumentation control blocks are stored in the thread_struct of the associated task. These pointers are initially copied on fork() via arch_dup_task_struct() and then cleared via copy_thread() before fork() returns. If fork() happens to fail after the initial task dup and before copy_thread(), the newly allocated task and associated thread_struct memory are freed via free_task() -> arch_release_task_struct(). This results in a double free of the guarded storage and runtime info structs because the fields in the failed task still refer to memory associated with the source task. This problem can manifest as a BUG_ON() in set_freepointer() (with CONFIG_SLAB_FREELIST_HARDENED enabled) or KASAN splat (if enabled) when running trinity syscall fuzz tests on s390x. To avoid this problem, clear the associated pointer fields in arch_dup_task_struct() immediately after the new task is copied. Note that the RI flag is still cleared in copy_thread() because it resides in thread stack memory and that is where stack info is copied. Signed-off-by: Brian Foster Fixes: 8d9047f8b967c ("s390/runtime instrumentation: simplify task exit handling") Fixes: 7b83c6297d2fc ("s390/guarded storage: simplify task exit handling") Cc: # 4.15 Reviewed-by: Gerald Schaefer Reviewed-by: Heiko Carstens Link: https://lore.kernel.org/r/20220816155407.537372-1-bfoster@redhat.com Signed-off-by: Vasily Gorbik --- arch/s390/kernel/process.c | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 89949b9f3cf8..d5119e039d85 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -91,6 +91,18 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) memcpy(dst, src, arch_task_struct_size); dst->thread.fpu.regs = dst->thread.fpu.fprs; + + /* + * Don't transfer over the runtime instrumentation or the guarded + * storage control block pointers. These fields are cleared here instead + * of in copy_thread() to avoid premature freeing of associated memory + * on fork() failure. Wait to clear the RI flag because ->stack still + * refers to the source thread. + */ + dst->thread.ri_cb = NULL; + dst->thread.gs_cb = NULL; + dst->thread.gs_bc_cb = NULL; + return 0; } @@ -150,13 +162,11 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) frame->childregs.flags = 0; if (new_stackp) frame->childregs.gprs[15] = new_stackp; - - /* Don't copy runtime instrumentation info */ - p->thread.ri_cb = NULL; + /* + * Clear the runtime instrumentation flag after the above childregs + * copy. 
The CB pointer was already cleared in arch_dup_task_struct(). + */ frame->childregs.psw.mask &= ~PSW_MASK_RI; - /* Don't copy guarded storage control block */ - p->thread.gs_cb = NULL; - p->thread.gs_bc_cb = NULL; /* Set a new TLS ? */ if (clone_flags & CLONE_SETTLS) { -- cgit v1.2.3 From 41ac42f137080bc230b5882e3c88c392ab7f2d32 Mon Sep 17 00:00:00 2001 From: Gerald Schaefer Date: Wed, 17 Aug 2022 15:26:03 +0200 Subject: s390/mm: do not trigger write fault when vma does not allow VM_WRITE For non-protection pXd_none() page faults in do_dat_exception(), we call do_exception() with access == (VM_READ | VM_WRITE | VM_EXEC). In do_exception(), vma->vm_flags is checked against that before calling handle_mm_fault(). Since commit 92f842eac7ee3 ("[S390] store indication fault optimization"), we call handle_mm_fault() with FAULT_FLAG_WRITE, when recognizing that it was a write access. However, the vma flags check is still only checking against (VM_READ | VM_WRITE | VM_EXEC), and therefore also calling handle_mm_fault() with FAULT_FLAG_WRITE in cases where the vma does not allow VM_WRITE. Fix this by changing access check in do_exception() to VM_WRITE only, when recognizing write access. Link: https://lkml.kernel.org/r/20220811103435.188481-3-david@redhat.com Fixes: 92f842eac7ee3 ("[S390] store indication fault optimization") Cc: Reported-by: David Hildenbrand Reviewed-by: Heiko Carstens Signed-off-by: Gerald Schaefer Signed-off-by: Vasily Gorbik --- arch/s390/mm/fault.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 13449941516c..09b6e756d521 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -379,7 +379,9 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access) flags = FAULT_FLAG_DEFAULT; if (user_mode(regs)) flags |= FAULT_FLAG_USER; - if (access == VM_WRITE || is_write) + if (is_write) + access = VM_WRITE; + if (access == VM_WRITE) flags |= FAULT_FLAG_WRITE; mmap_read_lock(mm); -- cgit v1.2.3 From c93c296fff6b369a7115916145047c8a3db6e27f Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Wed, 24 Aug 2022 17:13:26 +0200 Subject: x86/sev: Mark snp_abort() noreturn Mark both the function prototype and definition as noreturn in order to prevent the compiler from doing transformations which confuse objtool like so: vmlinux.o: warning: objtool: sme_enable+0x71: unreachable instruction This triggers with gcc-12. Add it and sev_es_terminate() to the objtool noreturn tracking array too. Sort it while at it. 
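In miniature, the annotation has to appear on both the prototype and the definition so every translation unit agrees (a sketch of the pattern the hunks below apply):

void __noreturn snp_abort(void);	/* declaration: callers know it never returns */

void __init __noreturn snp_abort(void)
{
	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
}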
Suggested-by: Michael Matz Signed-off-by: Borislav Petkov Acked-by: Peter Zijlstra Link: https://lore.kernel.org/r/20220824152420.20547-1-bp@alien8.de --- arch/x86/include/asm/sev.h | 2 +- arch/x86/kernel/sev.c | 2 +- tools/objtool/check.c | 34 ++++++++++++++++++---------------- 3 files changed, 20 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h index 4a23e52fe0ee..ebc271bb6d8e 100644 --- a/arch/x86/include/asm/sev.h +++ b/arch/x86/include/asm/sev.h @@ -195,7 +195,7 @@ void snp_set_memory_shared(unsigned long vaddr, unsigned int npages); void snp_set_memory_private(unsigned long vaddr, unsigned int npages); void snp_set_wakeup_secondary_cpu(void); bool snp_init(struct boot_params *bp); -void snp_abort(void); +void __init __noreturn snp_abort(void); int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned long *fw_err); #else static inline void sev_es_ist_enter(struct pt_regs *regs) { } diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c index 4f84c3f11af5..a428c62330d3 100644 --- a/arch/x86/kernel/sev.c +++ b/arch/x86/kernel/sev.c @@ -2112,7 +2112,7 @@ bool __init snp_init(struct boot_params *bp) return true; } -void __init snp_abort(void) +void __init __noreturn snp_abort(void) { sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED); } diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 0cec74da7ffe..ad51689dfb41 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -162,32 +162,34 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func, /* * Unfortunately these have to be hard coded because the noreturn - * attribute isn't provided in ELF data. + * attribute isn't provided in ELF data. Keep 'em sorted. */ static const char * const global_noreturns[] = { + "__invalid_creds", + "__module_put_and_kthread_exit", + "__reiserfs_panic", "__stack_chk_fail", - "panic", + "__ubsan_handle_builtin_unreachable", + "cpu_bringup_and_idle", + "cpu_startup_entry", "do_exit", + "do_group_exit", "do_task_dead", - "kthread_exit", - "make_task_dead", - "__module_put_and_kthread_exit", + "ex_handler_msr_mce", + "fortify_panic", "kthread_complete_and_exit", - "__reiserfs_panic", + "kthread_exit", + "kunit_try_catch_throw", "lbug_with_loc", - "fortify_panic", - "usercopy_abort", "machine_real_restart", + "make_task_dead", + "panic", "rewind_stack_and_make_dead", - "kunit_try_catch_throw", - "xen_start_kernel", - "cpu_bringup_and_idle", - "do_group_exit", + "sev_es_terminate", + "snp_abort", "stop_this_cpu", - "__invalid_creds", - "cpu_startup_entry", - "__ubsan_handle_builtin_unreachable", - "ex_handler_msr_mce", + "usercopy_abort", + "xen_start_kernel", }; if (!func) -- cgit v1.2.3 From 8238b4579866b7c1bb99883cfe102a43db5506ff Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Fri, 26 Aug 2022 09:17:08 -0400 Subject: wait_on_bit: add an acquire memory barrier There are several places in the kernel where wait_on_bit is not followed by a memory barrier (for example, in drivers/md/dm-bufio.c:new_read). On architectures with weak memory ordering, it may happen that memory accesses that follow wait_on_bit are reordered before wait_on_bit and they may return invalid data. Fix this class of bugs by introducing a new function "test_bit_acquire" that works like test_bit, but has acquire memory ordering semantics. 
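The bug class looks like this (a sketch modelled on the drivers/md/dm-bufio.c example mentioned above; names simplified):

/* writer: publish the buffer, then release the bit */
b->data = fill_buffer(b);
clear_bit_unlock(B_READING, &b->state);
wake_up_bit(&b->state, B_READING);

/*
 * reader: on weakly ordered CPUs the load of b->data could previously
 * be reordered before the (plain) bit test inside wait_on_bit(); with
 * test_bit_acquire() that reordering is forbidden.
 */
wait_on_bit(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
consume(b->data);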
Signed-off-by: Mikulas Patocka Acked-by: Will Deacon Cc: stable@vger.kernel.org Signed-off-by: Linus Torvalds --- Documentation/atomic_bitops.txt | 10 ++++------ arch/x86/include/asm/bitops.h | 21 +++++++++++++++++++++ include/asm-generic/bitops/generic-non-atomic.h | 14 ++++++++++++++ .../asm-generic/bitops/instrumented-non-atomic.h | 12 ++++++++++++ include/asm-generic/bitops/non-atomic.h | 1 + .../bitops/non-instrumented-non-atomic.h | 1 + include/linux/bitops.h | 1 + include/linux/buffer_head.h | 2 +- include/linux/wait_bit.h | 8 ++++---- kernel/sched/wait_bit.c | 2 +- 10 files changed, 60 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/Documentation/atomic_bitops.txt b/Documentation/atomic_bitops.txt index d8b101c97031..edea4656c5c0 100644 --- a/Documentation/atomic_bitops.txt +++ b/Documentation/atomic_bitops.txt @@ -58,13 +58,11 @@ Like with atomic_t, the rule of thumb is: - RMW operations that have a return value are fully ordered. - - RMW operations that are conditional are unordered on FAILURE, - otherwise the above rules apply. In the case of test_and_set_bit_lock(), - if the bit in memory is unchanged by the operation then it is deemed to have - failed. + - RMW operations that are conditional are fully ordered. -Except for a successful test_and_set_bit_lock() which has ACQUIRE semantics and -clear_bit_unlock() which has RELEASE semantics. +Except for a successful test_and_set_bit_lock() which has ACQUIRE semantics, +clear_bit_unlock() which has RELEASE semantics and test_bit_acquire which has +ACQUIRE semantics. Since a platform only has a single means of achieving atomic operations the same barriers as for atomic_t are used, see atomic_t.txt. diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index 973c6bd17f98..0fe9de58af31 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h @@ -207,6 +207,20 @@ static __always_inline bool constant_test_bit(long nr, const volatile unsigned l (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; } +static __always_inline bool constant_test_bit_acquire(long nr, const volatile unsigned long *addr) +{ + bool oldbit; + + asm volatile("testb %2,%1" + CC_SET(nz) + : CC_OUT(nz) (oldbit) + : "m" (((unsigned char *)addr)[nr >> 3]), + "i" (1 << (nr & 7)) + :"memory"); + + return oldbit; +} + static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr) { bool oldbit; @@ -226,6 +240,13 @@ arch_test_bit(unsigned long nr, const volatile unsigned long *addr) variable_test_bit(nr, addr); } +static __always_inline bool +arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr) +{ + return __builtin_constant_p(nr) ? 
constant_test_bit_acquire(nr, addr) : + variable_test_bit(nr, addr); +} + /** * __ffs - find first set bit in word * @word: The word to search diff --git a/include/asm-generic/bitops/generic-non-atomic.h b/include/asm-generic/bitops/generic-non-atomic.h index 3d5ebd24652b..564a8c675d85 100644 --- a/include/asm-generic/bitops/generic-non-atomic.h +++ b/include/asm-generic/bitops/generic-non-atomic.h @@ -4,6 +4,7 @@ #define __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H #include +#include #ifndef _LINUX_BITOPS_H #error only can be included directly @@ -127,6 +128,18 @@ generic_test_bit(unsigned long nr, const volatile unsigned long *addr) return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); } +/** + * generic_test_bit_acquire - Determine, with acquire semantics, whether a bit is set + * @nr: bit number to test + * @addr: Address to start counting from + */ +static __always_inline bool +generic_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr) +{ + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1))); +} + /* * const_*() definitions provide good compile-time optimizations when * the passed arguments can be resolved at compile time. @@ -137,6 +150,7 @@ generic_test_bit(unsigned long nr, const volatile unsigned long *addr) #define const___test_and_set_bit generic___test_and_set_bit #define const___test_and_clear_bit generic___test_and_clear_bit #define const___test_and_change_bit generic___test_and_change_bit +#define const_test_bit_acquire generic_test_bit_acquire /** * const_test_bit - Determine whether a bit is set diff --git a/include/asm-generic/bitops/instrumented-non-atomic.h b/include/asm-generic/bitops/instrumented-non-atomic.h index 988a3bbfba34..2b238b161a62 100644 --- a/include/asm-generic/bitops/instrumented-non-atomic.h +++ b/include/asm-generic/bitops/instrumented-non-atomic.h @@ -142,4 +142,16 @@ _test_bit(unsigned long nr, const volatile unsigned long *addr) return arch_test_bit(nr, addr); } +/** + * _test_bit_acquire - Determine, with acquire semantics, whether a bit is set + * @nr: bit number to test + * @addr: Address to start counting from + */ +static __always_inline bool +_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr) +{ + instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long)); + return arch_test_bit_acquire(nr, addr); +} + #endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */ diff --git a/include/asm-generic/bitops/non-atomic.h b/include/asm-generic/bitops/non-atomic.h index 5c37ced343ae..71f8d54a5195 100644 --- a/include/asm-generic/bitops/non-atomic.h +++ b/include/asm-generic/bitops/non-atomic.h @@ -13,6 +13,7 @@ #define arch___test_and_change_bit generic___test_and_change_bit #define arch_test_bit generic_test_bit +#define arch_test_bit_acquire generic_test_bit_acquire #include diff --git a/include/asm-generic/bitops/non-instrumented-non-atomic.h b/include/asm-generic/bitops/non-instrumented-non-atomic.h index bdb9b1ffaee9..0ddc78dfc358 100644 --- a/include/asm-generic/bitops/non-instrumented-non-atomic.h +++ b/include/asm-generic/bitops/non-instrumented-non-atomic.h @@ -12,5 +12,6 @@ #define ___test_and_change_bit arch___test_and_change_bit #define _test_bit arch_test_bit +#define _test_bit_acquire arch_test_bit_acquire #endif /* __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H */ diff --git a/include/linux/bitops.h b/include/linux/bitops.h index cf9bf65039f2..3b89c64bcfd8 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h 
@@ -59,6 +59,7 @@ extern unsigned long __sw_hweight64(__u64 w); #define __test_and_clear_bit(nr, addr) bitop(___test_and_clear_bit, nr, addr) #define __test_and_change_bit(nr, addr) bitop(___test_and_change_bit, nr, addr) #define test_bit(nr, addr) bitop(_test_bit, nr, addr) +#define test_bit_acquire(nr, addr) bitop(_test_bit_acquire, nr, addr) /* * Include this here because some architectures need generic_ffs/fls in diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index def8b8d30ccc..089c9ade4325 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -156,7 +156,7 @@ static __always_inline int buffer_uptodate(const struct buffer_head *bh) * make it consistent with folio_test_uptodate * pairs with smp_mb__before_atomic in set_buffer_uptodate */ - return (smp_load_acquire(&bh->b_state) & (1UL << BH_Uptodate)) != 0; + return test_bit_acquire(BH_Uptodate, &bh->b_state); } #define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK) diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h index 7dec36aecbd9..7725b7579b78 100644 --- a/include/linux/wait_bit.h +++ b/include/linux/wait_bit.h @@ -71,7 +71,7 @@ static inline int wait_on_bit(unsigned long *word, int bit, unsigned mode) { might_sleep(); - if (!test_bit(bit, word)) + if (!test_bit_acquire(bit, word)) return 0; return out_of_line_wait_on_bit(word, bit, bit_wait, @@ -96,7 +96,7 @@ static inline int wait_on_bit_io(unsigned long *word, int bit, unsigned mode) { might_sleep(); - if (!test_bit(bit, word)) + if (!test_bit_acquire(bit, word)) return 0; return out_of_line_wait_on_bit(word, bit, bit_wait_io, @@ -123,7 +123,7 @@ wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, unsigned long timeout) { might_sleep(); - if (!test_bit(bit, word)) + if (!test_bit_acquire(bit, word)) return 0; return out_of_line_wait_on_bit_timeout(word, bit, bit_wait_timeout, @@ -151,7 +151,7 @@ wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action, unsigned mode) { might_sleep(); - if (!test_bit(bit, word)) + if (!test_bit_acquire(bit, word)) return 0; return out_of_line_wait_on_bit(word, bit, action, mode); } diff --git a/kernel/sched/wait_bit.c b/kernel/sched/wait_bit.c index d4788f810b55..0b1cd985dc27 100644 --- a/kernel/sched/wait_bit.c +++ b/kernel/sched/wait_bit.c @@ -47,7 +47,7 @@ __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_ prepare_to_wait(wq_head, &wbq_entry->wq_entry, mode); if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags)) ret = (*action)(&wbq_entry->key, mode); - } while (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret); + } while (test_bit_acquire(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret); finish_wait(wq_head, &wbq_entry->wq_entry); -- cgit v1.2.3 From 11745ecfe8fea4b4a4c322967a7605d2ecbd5080 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Wed, 3 Aug 2022 09:00:31 -0700 Subject: perf/x86/intel/uncore: Fix broken read_counter() for SNB IMC PMU Existing code was generating bogus counts for the SNB IMC bandwidth counters: $ perf stat -a -I 1000 -e uncore_imc/data_reads/,uncore_imc/data_writes/ 1.000327813 1,024.03 MiB uncore_imc/data_reads/ 1.000327813 20.73 MiB uncore_imc/data_writes/ 2.000580153 261,120.00 MiB uncore_imc/data_reads/ 2.000580153 23.28 MiB uncore_imc/data_writes/ The problem was introduced by commit: 07ce734dd8ad ("perf/x86/intel/uncore: Clean up client IMC"), where the read_counter callback was replaced to point to the generic uncore_mmio_read_counter() function.
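As the next paragraph explains, the access width is the whole story; in miniature (an illustrative model of sampling a free-running counter, not the kernel code):

u32 prev;	/* stands in for event->prev_count */

void event_start_sketch(void __iomem *base)
{
	prev = readl(base);	/* 32-bit counter: readl() is correct; readq()
				 * would also latch the neighbouring counter
				 * into the upper 32 bits */
}

u64 event_update_sketch(void __iomem *base)
{
	u32 now = readl(base);
	u64 delta = now - prev;	/* u32 arithmetic handles wraparound */

	prev = now;
	return delta;
}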
The SNB IMC counters are free-running 32-bit counters laid out contiguously in MMIO. But uncore_mmio_read_counter() uses a readq() call, therefore reading 64 bits at a time from MMIO. Although this is okay for the uncore_perf_event_update() function, because it shifts the value based on the actual counter width to compute a delta, it is not okay for uncore_pmu_event_start(), which simply reads the counter and therefore primes event->prev_count with a bogus value that is responsible for causing the bogus deltas in the perf stat command above. The fix is to reintroduce the custom read_counter callback for the SNB IMC PMU and use readl() instead of readq(). With the change, the output of perf stat is back to normal: $ perf stat -a -I 1000 -e uncore_imc/data_reads/,uncore_imc/data_writes/ 1.000120987 296.94 MiB uncore_imc/data_reads/ 1.000120987 138.42 MiB uncore_imc/data_writes/ 2.000403144 175.91 MiB uncore_imc/data_reads/ 2.000403144 68.50 MiB uncore_imc/data_writes/ Fixes: 07ce734dd8ad ("perf/x86/intel/uncore: Clean up client IMC") Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Kan Liang Link: https://lore.kernel.org/r/20220803160031.1379788-1-eranian@google.com --- arch/x86/events/intel/uncore_snb.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c index ce440011cc4e..1ef4f7861e2e 100644 --- a/arch/x86/events/intel/uncore_snb.c +++ b/arch/x86/events/intel/uncore_snb.c @@ -841,6 +841,22 @@ int snb_pci2phy_map_init(int devid) return 0; } +static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + /* + * SNB IMC counters are 32-bit and are laid out back to back + * in MMIO space. Therefore we must use a 32-bit accessor function + * using readq() from uncore_mmio_read_counter() causes problems + * because it is reading 64-bit at a time. This is okay for the + * uncore_perf_event_update() function because it drops the upper + * 32-bits but not okay for plain uncore_read_counter() as invoked + * in uncore_pmu_event_start(). + */ + return (u64)readl(box->io_addr + hwc->event_base); +} + static struct pmu snb_uncore_imc_pmu = { .task_ctx_nr = perf_invalid_context, .event_init = snb_uncore_imc_event_init, @@ -860,7 +876,7 @@ static struct intel_uncore_ops snb_uncore_imc_ops = { .disable_event = snb_uncore_imc_disable_event, .enable_event = snb_uncore_imc_enable_event, .hw_config = snb_uncore_imc_hw_config, - .read_counter = uncore_mmio_read_counter, + .read_counter = snb_uncore_imc_read_counter, }; static struct intel_uncore_type snb_uncore_imc = { -- cgit v1.2.3 From d6ffe6067a54972564552ea45d320fb98db1ac5e Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Fri, 26 Aug 2022 16:43:51 -0400 Subject: provide arch_test_bit_acquire for architectures that define test_bit Some architectures define their own arch_test_bit and they also need arch_test_bit_acquire, otherwise they won't compile. We also clean up the code by using the generic test_bit if that is equivalent to the arch-specific version.
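Two shapes of fix follow: architectures whose open-coded test_bit was equivalent to the generic C version simply alias both helpers to the generic ones, while hexagon keeps its asm and gains a dedicated acquire variant. The alias form is just (sketch):

#define arch_test_bit		generic_test_bit
#define arch_test_bit_acquire	generic_test_bit_acquire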
Signed-off-by: Mikulas Patocka Cc: stable@vger.kernel.org Fixes: 8238b4579866 ("wait_on_bit: add an acquire memory barrier") Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/bitops.h | 7 ++----- arch/hexagon/include/asm/bitops.h | 15 +++++++++++++++ arch/ia64/include/asm/bitops.h | 7 ++----- arch/m68k/include/asm/bitops.h | 7 ++----- arch/s390/include/asm/bitops.h | 10 ++-------- arch/sh/include/asm/bitops-op32.h | 12 ++---------- 6 files changed, 25 insertions(+), 33 deletions(-) (limited to 'arch') diff --git a/arch/alpha/include/asm/bitops.h b/arch/alpha/include/asm/bitops.h index 492c7713ddae..bafb1c1f0fdc 100644 --- a/arch/alpha/include/asm/bitops.h +++ b/arch/alpha/include/asm/bitops.h @@ -283,11 +283,8 @@ arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr) return (old & mask) != 0; } -static __always_inline bool -arch_test_bit(unsigned long nr, const volatile unsigned long *addr) -{ - return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL; -} +#define arch_test_bit generic_test_bit +#define arch_test_bit_acquire generic_test_bit_acquire /* * ffz = Find First Zero in word. Undefined if no zero exists, diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h index da500471ac73..160d8f37fa1a 100644 --- a/arch/hexagon/include/asm/bitops.h +++ b/arch/hexagon/include/asm/bitops.h @@ -179,6 +179,21 @@ arch_test_bit(unsigned long nr, const volatile unsigned long *addr) return retval; } +static __always_inline bool +arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr) +{ + int retval; + + asm volatile( + "{P0 = tstbit(%1,%2); if (P0.new) %0 = #1; if (!P0.new) %0 = #0;}\n" + : "=&r" (retval) + : "r" (addr[BIT_WORD(nr)]), "r" (nr % BITS_PER_LONG) + : "p0", "memory" + ); + + return retval; +} + /* * ffz - find first zero in word. 
* @word: The word to search
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h index 9f62af7fd7c4..1accb7842f58 100644 --- a/arch/ia64/include/asm/bitops.h +++ b/arch/ia64/include/asm/bitops.h @@ -331,11 +331,8 @@ arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr) return (old & bit) != 0; } -static __always_inline bool -arch_test_bit(unsigned long nr, const volatile unsigned long *addr) -{ - return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31)); -} +#define arch_test_bit generic_test_bit +#define arch_test_bit_acquire generic_test_bit_acquire /** * ffz - find the first zero bit in a long word
diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h index 470aed978590..e984af71df6b 100644 --- a/arch/m68k/include/asm/bitops.h +++ b/arch/m68k/include/asm/bitops.h @@ -157,11 +157,8 @@ arch___change_bit(unsigned long nr, volatile unsigned long *addr) change_bit(nr, addr); } -static __always_inline bool -arch_test_bit(unsigned long nr, const volatile unsigned long *addr) -{ - return (addr[nr >> 5] & (1UL << (nr & 31))) != 0; -} +#define arch_test_bit generic_test_bit +#define arch_test_bit_acquire generic_test_bit_acquire static inline int bset_reg_test_and_set_bit(int nr, volatile unsigned long *vaddr)
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h index 9a7d15da966e..2de74fcd0578 100644 --- a/arch/s390/include/asm/bitops.h +++ b/arch/s390/include/asm/bitops.h @@ -176,14 +176,8 @@ arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr) return old & mask; } -static __always_inline bool -arch_test_bit(unsigned long nr, const volatile unsigned long *addr) -{ - const volatile unsigned long *p = __bitops_word(nr, addr); - unsigned long mask = __bitops_mask(nr); - - return *p & mask; -} +#define arch_test_bit generic_test_bit +#define arch_test_bit_acquire generic_test_bit_acquire static inline bool arch_test_and_set_bit_lock(unsigned long nr, volatile unsigned long *ptr)
diff --git a/arch/sh/include/asm/bitops-op32.h b/arch/sh/include/asm/bitops-op32.h index 565a85d8b7fb..5ace89b46507 100644 --- a/arch/sh/include/asm/bitops-op32.h +++ b/arch/sh/include/asm/bitops-op32.h @@ -135,16 +135,8 @@ arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr) return (old & mask) != 0; } -/** - * arch_test_bit - Determine whether a bit is set - * @nr: bit number to test - * @addr: Address to start counting from - */ -static __always_inline bool -arch_test_bit(unsigned long nr, const volatile unsigned long *addr) -{ - return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); -} +#define arch_test_bit generic_test_bit +#define arch_test_bit_acquire generic_test_bit_acquire #include
-- cgit v1.2.3

From ca922fecda6caa5162409406dc3b663062d75089 Mon Sep 17 00:00:00 2001 From: Pierre Morel Date: Fri, 19 Aug 2022 14:29:45 +0200 Subject: KVM: s390: pci: Hook to access KVM lowlevel from VFIO

We have a cross dependency between KVM and VFIO when using s390 vfio_pci_zdev extensions for PCI passthrough. To be able to keep both subsystems modular, we add a registration hook inside the S390 core code.

This fixes a build problem when VFIO is built-in and KVM is built as a module.
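The underlying issue is that built-in code cannot link against symbols exported by a module; the hook breaks the dependency by letting the optional module install function pointers at runtime. In miniature, the pattern looks like this (hypothetical demo_* names for illustration only; module_init/module_exit wiring omitted):

        /* Built-in side: owns and exports the hook storage. */
        struct demo_hook {
                int (*handler)(void *opaque);
        };
        struct demo_hook demo_hook;
        EXPORT_SYMBOL_GPL(demo_hook);

        int core_call_handler(void *opaque)
        {
                if (demo_hook.handler)
                        return demo_hook.handler(opaque);
                return -ENOENT;         /* optional module not loaded */
        }

        /* Module side: fills the hook on init, clears it on exit. */
        static int demo_handler(void *opaque)
        {
                return 0;
        }

        static int __init demo_init(void)
        {
                demo_hook.handler = demo_handler;
                return 0;
        }

        static void __exit demo_exit(void)
        {
                demo_hook.handler = NULL;
        }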
Reported-by: Randy Dunlap Reported-by: kernel test robot Reviewed-by: Matthew Rosato Reviewed-by: Niklas Schnelle Signed-off-by: Pierre Morel Fixes: 09340b2fca007 ("KVM: s390: pci: add routines to start/stop interpretive execution") Cc: stable@vger.kernel.org Acked-by: Janosch Frank Acked-by: Randy Dunlap # build-tested Link: https://lore.kernel.org/r/20220819122945.9309-1-pmorel@linux.ibm.com Message-Id: <20220819122945.9309-1-pmorel@linux.ibm.com> Signed-off-by: Janosch Frank --- arch/s390/include/asm/kvm_host.h | 17 ++++++----------- arch/s390/kvm/pci.c | 12 ++++++++---- arch/s390/pci/Makefile | 2 +- arch/s390/pci/pci_kvm_hook.c | 11 +++++++++++ drivers/vfio/pci/vfio_pci_zdev.c | 8 ++++++-- 5 files changed, 32 insertions(+), 18 deletions(-) create mode 100644 arch/s390/pci/pci_kvm_hook.c (limited to 'arch')

diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index f39092e0ceaa..b1e98a9ed152 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -1038,16 +1038,11 @@ static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} #define __KVM_HAVE_ARCH_VM_FREE void kvm_arch_free_vm(struct kvm *kvm); -#ifdef CONFIG_VFIO_PCI_ZDEV_KVM -int kvm_s390_pci_register_kvm(struct zpci_dev *zdev, struct kvm *kvm); -void kvm_s390_pci_unregister_kvm(struct zpci_dev *zdev); -#else -static inline int kvm_s390_pci_register_kvm(struct zpci_dev *dev, - struct kvm *kvm) -{ - return -EPERM; -} -static inline void kvm_s390_pci_unregister_kvm(struct zpci_dev *dev) {} -#endif +struct zpci_kvm_hook { + int (*kvm_register)(void *opaque, struct kvm *kvm); + void (*kvm_unregister)(void *opaque); +}; + +extern struct zpci_kvm_hook zpci_kvm_hook; #endif
diff --git a/arch/s390/kvm/pci.c b/arch/s390/kvm/pci.c index 4946fb7757d6..bb8c335d17b9 100644 --- a/arch/s390/kvm/pci.c +++ b/arch/s390/kvm/pci.c @@ -431,8 +431,9 @@ static void kvm_s390_pci_dev_release(struct zpci_dev *zdev) * available, enable them and let userspace indicate whether or not they will * be used (specify SHM bit to disable).
*/ -int kvm_s390_pci_register_kvm(struct zpci_dev *zdev, struct kvm *kvm) +static int kvm_s390_pci_register_kvm(void *opaque, struct kvm *kvm) { + struct zpci_dev *zdev = opaque; int rc; if (!zdev) @@ -510,10 +511,10 @@ err: kvm_put_kvm(kvm); return rc; } -EXPORT_SYMBOL_GPL(kvm_s390_pci_register_kvm); -void kvm_s390_pci_unregister_kvm(struct zpci_dev *zdev) +static void kvm_s390_pci_unregister_kvm(void *opaque) { + struct zpci_dev *zdev = opaque; struct kvm *kvm; if (!zdev) @@ -566,7 +567,6 @@ out: kvm_put_kvm(kvm); } -EXPORT_SYMBOL_GPL(kvm_s390_pci_unregister_kvm); void kvm_s390_pci_init_list(struct kvm *kvm) { @@ -678,6 +678,8 @@ int kvm_s390_pci_init(void) spin_lock_init(&aift->gait_lock); mutex_init(&aift->aift_lock); + zpci_kvm_hook.kvm_register = kvm_s390_pci_register_kvm; + zpci_kvm_hook.kvm_unregister = kvm_s390_pci_unregister_kvm; return 0; } @@ -685,6 +687,8 @@ int kvm_s390_pci_init(void) void kvm_s390_pci_exit(void) { mutex_destroy(&aift->aift_lock); + zpci_kvm_hook.kvm_register = NULL; + zpci_kvm_hook.kvm_unregister = NULL; kfree(aift); }
diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile index bf557a1b789c..5ae31ca9dd44 100644 --- a/arch/s390/pci/Makefile +++ b/arch/s390/pci/Makefile @@ -5,5 +5,5 @@ obj-$(CONFIG_PCI) += pci.o pci_irq.o pci_dma.o pci_clp.o pci_sysfs.o \ pci_event.o pci_debug.o pci_insn.o pci_mmio.o \ - pci_bus.o + pci_bus.o pci_kvm_hook.o obj-$(CONFIG_PCI_IOV) += pci_iov.o
diff --git a/arch/s390/pci/pci_kvm_hook.c b/arch/s390/pci/pci_kvm_hook.c new file mode 100644 index 000000000000..ff34baf50a3e --- /dev/null +++ b/arch/s390/pci/pci_kvm_hook.c @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * VFIO ZPCI devices support + * + * Copyright (C) IBM Corp. 2022. All rights reserved. + * Author(s): Pierre Morel + */ +#include <linux/kvm_host.h> + +struct zpci_kvm_hook zpci_kvm_hook; +EXPORT_SYMBOL_GPL(zpci_kvm_hook);
diff --git a/drivers/vfio/pci/vfio_pci_zdev.c b/drivers/vfio/pci/vfio_pci_zdev.c index e163aa9f6144..0cbdcd14f1c8 100644 --- a/drivers/vfio/pci/vfio_pci_zdev.c +++ b/drivers/vfio/pci/vfio_pci_zdev.c @@ -151,7 +151,10 @@ int vfio_pci_zdev_open_device(struct vfio_pci_core_device *vdev) if (!vdev->vdev.kvm) return 0; - return kvm_s390_pci_register_kvm(zdev, vdev->vdev.kvm); + if (zpci_kvm_hook.kvm_register) + return zpci_kvm_hook.kvm_register(zdev, vdev->vdev.kvm); + + return -ENOENT; } void vfio_pci_zdev_close_device(struct vfio_pci_core_device *vdev) @@ -161,5 +164,6 @@ void vfio_pci_zdev_close_device(struct vfio_pci_core_device *vdev) if (!zdev || !vdev->vdev.kvm) return; - kvm_s390_pci_unregister_kvm(zdev); + if (zpci_kvm_hook.kvm_unregister) + zpci_kvm_hook.kvm_unregister(zdev); }
-- cgit v1.2.3