author		Nicholas Piggin	2022-02-04 13:53:48 +1000
committer	Michael Ellerman	2022-02-24 12:46:54 +1100
commit		8b91cee5eadd2021f55e6775f2d50bd56d00c217 (patch)
tree		98e1f636bb0ef44b62a7b70c164c279a0c598a48 /arch/powerpc/perf
parent		406a8c1d8fa59ae6a6462a6fb6ff892f6a4f7499 (diff)
powerpc/64s/hash: Make hash faults work in NMI context
Hash faults are not resolved in NMI context; instead the access is made to fail. This is done because perf interrupts can take backtraces, including walking the user stack, and taking a hash fault on those could deadlock on the HPTE lock if the perf interrupt hits while the same HPTE lock is held by the hash fault code. The user access for the stack walking will notice the access failed and deal with that in the perf code.

The reason to allow perf interrupts in is to better profile hash faults.

The problem with this is that any hash fault on a kernel access that happens in NMI context will crash, because kernel accesses must not fail. Hard lockups, system reset, and machine checks that access vmalloc space (including modules, stack backtracing and symbol lookup in modules, per-cpu data, etc.) could all run into this problem.

Fix this by disallowing perf interrupts in the hash fault code (the direct hash fault is covered by MSR[EE]=0, so the PMI disable just needs to extend to the preload case). This simplifies the tricky logic in hash faults and perf, at the cost of reduced profiling of hash faults.

perf can still latch addresses when interrupts are disabled, it just won't get the stack trace at that point, so it would still find hot spots, just sometimes with confusing stack chains.

An alternative could be to allow perf interrupts here but always do the slowpath stack walk if we are in NMI context; however, that slows down all perf interrupt stack walking on hash and does not remove as much tricky code.

Reported-by: Laurent Dufour <ldufour@linux.ibm.com>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Tested-by: Laurent Dufour <ldufour@linux.ibm.com>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20220204035348.545435-1-npiggin@gmail.com
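
The hash-side change itself is outside this diffstat (which is limited to 'arch/powerpc/perf'). As a rough, hypothetical sketch of what the commit message describes, the preload path would swap a plain interrupt disable for one that also soft-masks PMIs; the helper names powerpc_local_irq_pmu_save()/powerpc_local_irq_pmu_restore() and the stub function below are illustrative assumptions, not the actual patch hunk:

	/*
	 * Illustrative sketch only -- not part of this diff. Assumes the
	 * powerpc_local_irq_pmu_save()/restore() helpers, which soft-mask
	 * performance monitor interrupts in addition to ordinary
	 * interrupts, so a perf NMI cannot arrive while the HPTE lock is
	 * held by the preload path.
	 */
	static void hash_preload_sketch(struct mm_struct *mm, unsigned long ea)
	{
		unsigned long flags;

		/*
		 * Extend the MSR[EE]=0 protection that the direct hash
		 * fault path already has to the preload case by also
		 * disabling PMIs around the HPTE insertion.
		 */
		powerpc_local_irq_pmu_save(flags);

		/* ... insert the translation into the hash page table ... */

		powerpc_local_irq_pmu_restore(flags);
	}

With PMIs masked on the hash side, __read_user_stack() in the perf code no longer needs the read_user_stack_slow() fallback, which is why the hunks below can delete it outright.
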
Diffstat (limited to 'arch/powerpc/perf')
-rw-r--r--	arch/powerpc/perf/callchain.h	9
-rw-r--r--	arch/powerpc/perf/callchain_64.c	27
2 files changed, 1 insertion(+), 35 deletions(-)
diff --git a/arch/powerpc/perf/callchain.h b/arch/powerpc/perf/callchain.h
index d6fa6e25234f..19a8d051ddf1 100644
--- a/arch/powerpc/perf/callchain.h
+++ b/arch/powerpc/perf/callchain.h
@@ -2,7 +2,6 @@
#ifndef _POWERPC_PERF_CALLCHAIN_H
#define _POWERPC_PERF_CALLCHAIN_H
-int read_user_stack_slow(const void __user *ptr, void *buf, int nb);
void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs);
void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
@@ -26,17 +25,11 @@ static inline int __read_user_stack(const void __user *ptr, void *ret,
size_t size)
{
unsigned long addr = (unsigned long)ptr;
- int rc;
if (addr > TASK_SIZE - size || (addr & (size - 1)))
return -EFAULT;
- rc = copy_from_user_nofault(ret, ptr, size);
-
- if (IS_ENABLED(CONFIG_PPC64) && !radix_enabled() && rc)
- return read_user_stack_slow(ptr, ret, size);
-
- return rc;
+ return copy_from_user_nofault(ret, ptr, size);
}
#endif /* _POWERPC_PERF_CALLCHAIN_H */
diff --git a/arch/powerpc/perf/callchain_64.c b/arch/powerpc/perf/callchain_64.c
index 8d0df4226328..488e8a21a11e 100644
--- a/arch/powerpc/perf/callchain_64.c
+++ b/arch/powerpc/perf/callchain_64.c
@@ -18,33 +18,6 @@
#include "callchain.h"
-/*
- * On 64-bit we don't want to invoke hash_page on user addresses from
- * interrupt context, so if the access faults, we read the page tables
- * to find which page (if any) is mapped and access it directly. Radix
- * has no need for this so it doesn't use read_user_stack_slow.
- */
-int read_user_stack_slow(const void __user *ptr, void *buf, int nb)
-{
-
- unsigned long addr = (unsigned long) ptr;
- unsigned long offset;
- struct page *page;
- void *kaddr;
-
- if (get_user_page_fast_only(addr, FOLL_WRITE, &page)) {
- kaddr = page_address(page);
-
- /* align address to page boundary */
- offset = addr & ~PAGE_MASK;
-
- memcpy(buf, kaddr + offset, nb);
- put_page(page);
- return 0;
- }
- return -EFAULT;
-}
-
static int read_user_stack_64(const unsigned long __user *ptr, unsigned long *ret)
{
return __read_user_stack(ptr, ret, sizeof(*ret));