author		Ingo Molnar <mingo@kernel.org>	2020-08-06 10:16:38 +0200
committer	Ingo Molnar <mingo@kernel.org>	2020-08-06 10:16:38 +0200
commit		a703f3633ff1d982bc4adfe7e0921bedb1701216 (patch)
tree		eb85b29a0bbcb29045e197ab77e18ffc8649a722 /arch
parent		a7ef9b28aa8d72a1656fa6f0a01bbd1493886317 (diff)
parent		b5e6a027bd327daa679ca55182a920659e2cbb90 (diff)
Merge branch 'WIP.locking/seqlocks' into locking/urgent
Pick up the full seqlock series PeterZ is working on.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
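At a glance, the x86 side of this merge replaces the bool-based
conditional-RCU idtentry API with an opaque state type. Callers change
along these lines (an illustrative fragment distilled from the diff
below; surrounding handler code elided):

    /* before */
    bool rcu_exit = idtentry_enter_cond_rcu(regs);
    /* ... handler body ... */
    idtentry_exit_cond_rcu(regs, rcu_exit);

    /* after */
    idtentry_state_t state = idtentry_enter(regs);
    /* ... handler body ... */
    idtentry_exit(regs, state);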
Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/include/asm/atomic.h        |  1
-rw-r--r--  arch/arc/include/asm/atomic.h          |  2
-rw-r--r--  arch/arm/include/asm/atomic.h          |  2
-rw-r--r--  arch/arm/include/asm/percpu.h          |  2
-rw-r--r--  arch/arm/include/asm/thread_info.h     |  5
-rw-r--r--  arch/arm64/include/asm/atomic.h        |  2
-rw-r--r--  arch/h8300/include/asm/atomic.h        |  2
-rw-r--r--  arch/hexagon/include/asm/atomic.h      |  2
-rw-r--r--  arch/ia64/include/asm/atomic.h         |  1
-rw-r--r--  arch/m68k/include/asm/atomic.h         |  2
-rw-r--r--  arch/mips/include/asm/atomic.h         |  1
-rw-r--r--  arch/parisc/include/asm/atomic.h       |  2
-rw-r--r--  arch/powerpc/include/asm/atomic.h      |  2
-rw-r--r--  arch/powerpc/include/asm/dtl.h         | 52
-rw-r--r--  arch/powerpc/include/asm/lppaca.h      | 44
-rw-r--r--  arch/powerpc/include/asm/paca.h        |  2
-rw-r--r--  arch/powerpc/kernel/time.c             |  2
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c           |  1
-rw-r--r--  arch/powerpc/platforms/pseries/dtl.c   |  1
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c  |  1
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c |  1
-rw-r--r--  arch/powerpc/platforms/pseries/svm.c   |  1
-rw-r--r--  arch/riscv/include/asm/atomic.h        |  2
-rw-r--r--  arch/s390/include/asm/atomic.h         |  2
-rw-r--r--  arch/s390/include/asm/smp.h            |  1
-rw-r--r--  arch/s390/include/asm/thread_info.h    |  1
-rw-r--r--  arch/sh/include/asm/atomic.h           |  2
-rw-r--r--  arch/sparc/include/asm/atomic_32.h     |  2
-rw-r--r--  arch/sparc/include/asm/atomic_64.h     |  1
-rw-r--r--  arch/sparc/include/asm/percpu_64.h     |  2
-rw-r--r--  arch/sparc/include/asm/trap_block.h    |  2
-rw-r--r--  arch/x86/entry/common.c                | 88
-rw-r--r--  arch/x86/include/asm/atomic.h          |  2
-rw-r--r--  arch/x86/include/asm/idtentry.h        | 31
-rw-r--r--  arch/x86/kernel/kvm.c                  |  6
-rw-r--r--  arch/x86/kernel/nmi.c                  |  9
-rw-r--r--  arch/x86/kernel/traps.c                | 23
-rw-r--r--  arch/x86/mm/fault.c                    |  6
-rw-r--r--  arch/xtensa/include/asm/atomic.h       |  2
39 files changed, 169 insertions, 144 deletions
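Most of the one- and two-line architecture changes above drop the
per-architecture ATOMIC_INIT() definitions; the series being merged
provides a single shared definition instead (moved into linux/types.h),
so the usual initializer keeps working everywhere. A minimal sketch:

    #include <linux/types.h>

    /* no per-arch ATOMIC_INIT needed after this series */
    static atomic_t users = ATOMIC_INIT(0);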
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 2144530d1428..e2093994fd0d 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -24,7 +24,6 @@
#define __atomic_acquire_fence()
#define __atomic_post_full_fence()
-#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
#define atomic_read(v) READ_ONCE((v)->counter)
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 7298ce84762e..c614857eb209 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -14,8 +14,6 @@
#include <asm/barrier.h>
#include <asm/smp.h>
-#define ATOMIC_INIT(i) { (i) }
-
#ifndef CONFIG_ARC_PLAT_EZNPS
#define atomic_read(v) READ_ONCE((v)->counter)
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 75bb2c543e59..455eb19a5ac1 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -15,8 +15,6 @@
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
-#define ATOMIC_INIT(i) { (i) }
-
#ifdef __KERNEL__
/*
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
index f44f448537f2..e2fcb3cfd3de 100644
--- a/arch/arm/include/asm/percpu.h
+++ b/arch/arm/include/asm/percpu.h
@@ -5,6 +5,8 @@
#ifndef _ASM_ARM_PERCPU_H_
#define _ASM_ARM_PERCPU_H_
+register unsigned long current_stack_pointer asm ("sp");
+
/*
* Same as asm-generic/percpu.h, except that we store the per cpu offset
* in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 3609a6980c34..536b6b979f63 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -76,11 +76,6 @@ struct thread_info {
}
/*
- * how to get the current stack pointer in C
- */
-register unsigned long current_stack_pointer asm ("sp");
-
-/*
* how to get the thread information struct from C
*/
static inline struct thread_info *current_thread_info(void) __attribute_const__;
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index a08890da696c..015ddffaf6ca 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -99,8 +99,6 @@ static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
return __lse_ll_sc_body(atomic64_dec_if_positive, v);
}
-#define ATOMIC_INIT(i) { (i) }
-
#define arch_atomic_read(v) __READ_ONCE((v)->counter)
#define arch_atomic_set(v, i) __WRITE_ONCE(((v)->counter), (i))
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h
index c6b6a06231b2..a990d151f163 100644
--- a/arch/h8300/include/asm/atomic.h
+++ b/arch/h8300/include/asm/atomic.h
@@ -12,8 +12,6 @@
* resource counting etc..
*/
-#define ATOMIC_INIT(i) { (i) }
-
#define atomic_read(v) READ_ONCE((v)->counter)
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index 0231d69c8bf2..4ab895d7111f 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -12,8 +12,6 @@
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
-#define ATOMIC_INIT(i) { (i) }
-
/* Normal writes in our arch don't clear lock reservations */
static inline void atomic_set(atomic_t *v, int new)
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 50440f3ddc43..f267d956458f 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -19,7 +19,6 @@
#include <asm/barrier.h>
-#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
#define atomic_read(v) READ_ONCE((v)->counter)
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index 47228b0d4163..756c5cc58f94 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -16,8 +16,6 @@
* We do not have SMP m68k systems, so we don't have to deal with that.
*/
-#define ATOMIC_INIT(i) { (i) }
-
#define atomic_read(v) READ_ONCE((v)->counter)
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index e5ac88392d1f..f904084fcb1f 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -45,7 +45,6 @@ static __always_inline type pfx##_xchg(pfx##_t *v, type n) \
return xchg(&v->counter, n); \
}
-#define ATOMIC_INIT(i) { (i) }
ATOMIC_OPS(atomic, int)
#ifdef CONFIG_64BIT
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 118953d41763..f960e2f32b1b 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -136,8 +136,6 @@ ATOMIC_OPS(xor, ^=)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define ATOMIC_INIT(i) { (i) }
-
#ifdef CONFIG_64BIT
#define ATOMIC64_INIT(i) { (i) }
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 498785ffc25f..0311c3c42960 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -11,8 +11,6 @@
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
-#define ATOMIC_INIT(i) { (i) }
-
/*
* Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
* a "bne-" instruction at the end, an isync is enough as an acquire barrier
diff --git a/arch/powerpc/include/asm/dtl.h b/arch/powerpc/include/asm/dtl.h
new file mode 100644
index 000000000000..1625888f27ef
--- /dev/null
+++ b/arch/powerpc/include/asm/dtl.h
@@ -0,0 +1,52 @@
+#ifndef _ASM_POWERPC_DTL_H
+#define _ASM_POWERPC_DTL_H
+
+#include <asm/lppaca.h>
+#include <linux/spinlock_types.h>
+
+/*
+ * Layout of entries in the hypervisor's dispatch trace log buffer.
+ */
+struct dtl_entry {
+ u8 dispatch_reason;
+ u8 preempt_reason;
+ __be16 processor_id;
+ __be32 enqueue_to_dispatch_time;
+ __be32 ready_to_enqueue_time;
+ __be32 waiting_to_ready_time;
+ __be64 timebase;
+ __be64 fault_addr;
+ __be64 srr0;
+ __be64 srr1;
+};
+
+#define DISPATCH_LOG_BYTES 4096 /* bytes per cpu */
+#define N_DISPATCH_LOG (DISPATCH_LOG_BYTES / sizeof(struct dtl_entry))
+
+/*
+ * Dispatch trace log event enable mask:
+ * 0x1: voluntary virtual processor waits
+ * 0x2: time-slice preempts
+ * 0x4: virtual partition memory page faults
+ */
+#define DTL_LOG_CEDE 0x1
+#define DTL_LOG_PREEMPT 0x2
+#define DTL_LOG_FAULT 0x4
+#define DTL_LOG_ALL (DTL_LOG_CEDE | DTL_LOG_PREEMPT | DTL_LOG_FAULT)
+
+extern struct kmem_cache *dtl_cache;
+extern rwlock_t dtl_access_lock;
+
+/*
+ * When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls
+ * reading from the dispatch trace log. If other code wants to consume
+ * DTL entries, it can set this pointer to a function that will get
+ * called once for each DTL entry that gets processed.
+ */
+extern void (*dtl_consumer)(struct dtl_entry *entry, u64 index);
+
+extern void register_dtl_buffer(int cpu);
+extern void alloc_dtl_buffers(unsigned long *time_limit);
+extern long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity);
+
+#endif /* _ASM_POWERPC_DTL_H */
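The new asm/dtl.h keeps the dispatch trace log types and the
dtl_consumer hook in one place. A minimal sketch of attaching a
consumer, assuming kernel context (the consumer function and its body
are illustrative, not part of this patch):

    #include <asm/dtl.h>
    #include <linux/printk.h>

    /* hypothetical consumer, called once per processed DTL entry */
    static void example_dtl_consumer(struct dtl_entry *entry, u64 index)
    {
            /* fields are big-endian, per the struct layout above */
            pr_debug("dtl[%llu]: cpu %u\n", (unsigned long long)index,
                     be16_to_cpu(entry->processor_id));
    }

    /* ... */
    dtl_consumer = example_dtl_consumer;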
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 3b4b305796ae..c390ec377bae 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -42,7 +42,6 @@
*/
#include <linux/cache.h>
#include <linux/threads.h>
-#include <linux/spinlock_types.h>
#include <asm/types.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
@@ -146,49 +145,6 @@ struct slb_shadow {
} save_area[SLB_NUM_BOLTED];
} ____cacheline_aligned;
-/*
- * Layout of entries in the hypervisor's dispatch trace log buffer.
- */
-struct dtl_entry {
- u8 dispatch_reason;
- u8 preempt_reason;
- __be16 processor_id;
- __be32 enqueue_to_dispatch_time;
- __be32 ready_to_enqueue_time;
- __be32 waiting_to_ready_time;
- __be64 timebase;
- __be64 fault_addr;
- __be64 srr0;
- __be64 srr1;
-};
-
-#define DISPATCH_LOG_BYTES 4096 /* bytes per cpu */
-#define N_DISPATCH_LOG (DISPATCH_LOG_BYTES / sizeof(struct dtl_entry))
-
-/*
- * Dispatch trace log event enable mask:
- * 0x1: voluntary virtual processor waits
- * 0x2: time-slice preempts
- * 0x4: virtual partition memory page faults
- */
-#define DTL_LOG_CEDE 0x1
-#define DTL_LOG_PREEMPT 0x2
-#define DTL_LOG_FAULT 0x4
-#define DTL_LOG_ALL (DTL_LOG_CEDE | DTL_LOG_PREEMPT | DTL_LOG_FAULT)
-
-extern struct kmem_cache *dtl_cache;
-extern rwlock_t dtl_access_lock;
-
-/*
- * When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls
- * reading from the dispatch trace log. If other code wants to consume
- * DTL entries, it can set this pointer to a function that will get
- * called once for each DTL entry that gets processed.
- */
-extern void (*dtl_consumer)(struct dtl_entry *entry, u64 index);
-
-extern void register_dtl_buffer(int cpu);
-extern void alloc_dtl_buffers(unsigned long *time_limit);
extern long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity);
#endif /* CONFIG_PPC_BOOK3S */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 45a839a7c6cf..84b2564cf5a4 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -29,7 +29,6 @@
#include <asm/hmi.h>
#include <asm/cpuidle.h>
#include <asm/atomic.h>
-#include <asm/rtas-types.h>
#include <asm-generic/mmiowb_types.h>
@@ -53,6 +52,7 @@ extern unsigned int debug_smp_processor_id(void); /* from linux/smp.h */
#define get_slb_shadow() (get_paca()->slb_shadow_ptr)
struct task_struct;
+struct rtas_args;
/*
* Defines the layout of the paca.
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 6fcae436ae51..f85539ebb513 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -183,6 +183,8 @@ static inline unsigned long read_spurr(unsigned long tb)
#ifdef CONFIG_PPC_SPLPAR
+#include <asm/dtl.h>
+
/*
* Scan the dispatch trace log and count up the stolen time.
* Should be called with interrupts disabled.
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 6bf66649ab92..ebb04f331ad3 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -74,6 +74,7 @@
#include <asm/hw_breakpoint.h>
#include <asm/kvm_book3s_uvmem.h>
#include <asm/ultravisor.h>
+#include <asm/dtl.h>
#include "book3s.h"
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index eab8aa293743..982f069e4c31 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -12,6 +12,7 @@
#include <asm/smp.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
+#include <asm/dtl.h>
#include <asm/lppaca.h>
#include <asm/debugfs.h>
#include <asm/plpar_wrappers.h>
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index fd26f3d21d7b..f71ff2c94efe 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -40,6 +40,7 @@
#include <asm/fadump.h>
#include <asm/asm-prototypes.h>
#include <asm/debugfs.h>
+#include <asm/dtl.h>
#include "pseries.h"
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 2db8469e475f..27094c872fd6 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -70,6 +70,7 @@
#include <asm/idle.h>
#include <asm/swiotlb.h>
#include <asm/svm.h>
+#include <asm/dtl.h>
#include "pseries.h"
#include "../../../../drivers/pci/pci.h"
diff --git a/arch/powerpc/platforms/pseries/svm.c b/arch/powerpc/platforms/pseries/svm.c
index 40c0637203d5..e6d7a344d9f2 100644
--- a/arch/powerpc/platforms/pseries/svm.c
+++ b/arch/powerpc/platforms/pseries/svm.c
@@ -11,6 +11,7 @@
#include <asm/svm.h>
#include <asm/swiotlb.h>
#include <asm/ultravisor.h>
+#include <asm/dtl.h>
static int __init init_svm(void)
{
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index 96f95c9ebd97..400a8c8b6de7 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -19,8 +19,6 @@
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
-#define ATOMIC_INIT(i) { (i) }
-
#define __atomic_acquire_fence() \
__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 491ad53a0d4e..cae473a7b6f7 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -15,8 +15,6 @@
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
-#define ATOMIC_INIT(i) { (i) }
-
static inline int atomic_read(const atomic_t *v)
{
int c;
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 7326f110d48c..f48a43b63d9e 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -10,6 +10,7 @@
#include <asm/sigp.h>
#include <asm/lowcore.h>
+#include <asm/processor.h>
#define raw_smp_processor_id() (S390_lowcore.cpu_nr)
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index e582fbe59e20..13a04fcf7762 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -24,7 +24,6 @@
#ifndef __ASSEMBLY__
#include <asm/lowcore.h>
#include <asm/page.h>
-#include <asm/processor.h>
#define STACK_INIT_OFFSET \
(THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs))
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index f37b95a80232..7c2a8a703b9a 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -19,8 +19,6 @@
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
-#define ATOMIC_INIT(i) { (i) }
-
#define atomic_read(v) READ_ONCE((v)->counter)
#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index 94c930f0bc62..efad5532f169 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -18,8 +18,6 @@
#include <asm/barrier.h>
#include <asm-generic/atomic64.h>
-#define ATOMIC_INIT(i) { (i) }
-
int atomic_add_return(int, atomic_t *);
int atomic_fetch_add(int, atomic_t *);
int atomic_fetch_and(int, atomic_t *);
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index b60448397d4f..6b235d3d1d9d 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -12,7 +12,6 @@
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
-#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
#define atomic_read(v) READ_ONCE((v)->counter)
diff --git a/arch/sparc/include/asm/percpu_64.h b/arch/sparc/include/asm/percpu_64.h
index 32ef6f05cc56..a8786a4b90b6 100644
--- a/arch/sparc/include/asm/percpu_64.h
+++ b/arch/sparc/include/asm/percpu_64.h
@@ -4,7 +4,9 @@
#include <linux/compiler.h>
+#ifndef BUILD_VDSO
register unsigned long __local_per_cpu_offset asm("g5");
+#endif
#ifdef CONFIG_SMP
diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h
index 0f6d0c4f6683..ace0d48e837e 100644
--- a/arch/sparc/include/asm/trap_block.h
+++ b/arch/sparc/include/asm/trap_block.h
@@ -2,6 +2,8 @@
#ifndef _SPARC_TRAP_BLOCK_H
#define _SPARC_TRAP_BLOCK_H
+#include <linux/threads.h>
+
#include <asm/hypervisor.h>
#include <asm/asi.h>
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index f09288431f28..54ad1890aefc 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -559,8 +559,7 @@ SYSCALL_DEFINE0(ni_syscall)
}
/**
- * idtentry_enter_cond_rcu - Handle state tracking on idtentry with conditional
- * RCU handling
+ * idtentry_enter - Handle state tracking on ordinary idtentries
* @regs: Pointer to pt_regs of interrupted context
*
* Invokes:
@@ -572,6 +571,9 @@ SYSCALL_DEFINE0(ni_syscall)
* - The hardirq tracer to keep the state consistent as low level ASM
* entry disabled interrupts.
*
+ * As a precondition, this requires that the entry came from user mode,
+ * idle, or a kernel context in which RCU is watching.
+ *
* For kernel mode entries RCU handling is done conditionally. If RCU is
* watching then the only RCU requirement is to check whether the tick has
* to be restarted. If RCU is not watching then rcu_irq_enter() has to be
@@ -585,18 +587,21 @@ SYSCALL_DEFINE0(ni_syscall)
* establish the proper context for NOHZ_FULL. Otherwise scheduling on exit
* would not be possible.
*
- * Returns: True if RCU has been adjusted on a kernel entry
- * False otherwise
+ * Returns: An opaque object that must be passed to idtentry_exit()
*
- * The return value must be fed into the rcu_exit argument of
- * idtentry_exit_cond_rcu().
+ * The return value must be fed into the state argument of
+ * idtentry_exit().
*/
-bool noinstr idtentry_enter_cond_rcu(struct pt_regs *regs)
+noinstr idtentry_state_t idtentry_enter(struct pt_regs *regs)
{
+ idtentry_state_t ret = {
+ .exit_rcu = false,
+ };
+
if (user_mode(regs)) {
check_user_regs(regs);
enter_from_user_mode();
- return false;
+ return ret;
}
/*
@@ -634,7 +639,8 @@ bool noinstr idtentry_enter_cond_rcu(struct pt_regs *regs)
trace_hardirqs_off_finish();
instrumentation_end();
- return true;
+ ret.exit_rcu = true;
+ return ret;
}
/*
@@ -649,7 +655,7 @@ bool noinstr idtentry_enter_cond_rcu(struct pt_regs *regs)
trace_hardirqs_off();
instrumentation_end();
- return false;
+ return ret;
}
static void idtentry_exit_cond_resched(struct pt_regs *regs, bool may_sched)
@@ -667,10 +673,9 @@ static void idtentry_exit_cond_resched(struct pt_regs *regs, bool may_sched)
}
/**
- * idtentry_exit_cond_rcu - Handle return from exception with conditional RCU
- * handling
+ * idtentry_exit - Handle return from exception that used idtentry_enter()
* @regs: Pointer to pt_regs (exception entry regs)
- * @rcu_exit: Invoke rcu_irq_exit() if true
+ * @state: Return value from matching call to idtentry_enter()
*
* Depending on the return target (kernel/user) this runs the necessary
preemption and work checks if possible and required and returns to
@@ -679,10 +684,10 @@ static void idtentry_exit_cond_resched(struct pt_regs *regs, bool may_sched)
* This is the last action before returning to the low level ASM code which
* just needs to return to the appropriate context.
*
- * Counterpart to idtentry_enter_cond_rcu(). The return value of the entry
- * function must be fed into the @rcu_exit argument.
+ * Counterpart to idtentry_enter(). The return value of the entry
+ * function must be fed into the @state argument.
*/
-void noinstr idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit)
+noinstr void idtentry_exit(struct pt_regs *regs, idtentry_state_t state)
{
lockdep_assert_irqs_disabled();
@@ -695,7 +700,7 @@ void noinstr idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit)
* carefully and needs the same ordering of lockdep/tracing
* and RCU as the return to user mode path.
*/
- if (rcu_exit) {
+ if (state.exit_rcu) {
instrumentation_begin();
/* Tell the tracer that IRET will enable interrupts */
trace_hardirqs_on_prepare();
@@ -714,7 +719,7 @@ void noinstr idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit)
* IRQ flags state is correct already. Just tell RCU if it
* was not watching on entry.
*/
- if (rcu_exit)
+ if (state.exit_rcu)
rcu_irq_exit();
}
}
@@ -726,7 +731,7 @@ void noinstr idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit)
* Invokes enter_from_user_mode() to establish the proper context for
* NOHZ_FULL. Otherwise scheduling on exit would not be possible.
*/
-void noinstr idtentry_enter_user(struct pt_regs *regs)
+noinstr void idtentry_enter_user(struct pt_regs *regs)
{
check_user_regs(regs);
enter_from_user_mode();
@@ -744,13 +749,47 @@ void noinstr idtentry_enter_user(struct pt_regs *regs)
*
* Counterpart to idtentry_enter_user().
*/
-void noinstr idtentry_exit_user(struct pt_regs *regs)
+noinstr void idtentry_exit_user(struct pt_regs *regs)
{
lockdep_assert_irqs_disabled();
prepare_exit_to_usermode(regs);
}
+noinstr bool idtentry_enter_nmi(struct pt_regs *regs)
+{
+ bool irq_state = lockdep_hardirqs_enabled();
+
+ __nmi_enter();
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ lockdep_hardirq_enter();
+ rcu_nmi_enter();
+
+ instrumentation_begin();
+ trace_hardirqs_off_finish();
+ ftrace_nmi_enter();
+ instrumentation_end();
+
+ return irq_state;
+}
+
+noinstr void idtentry_exit_nmi(struct pt_regs *regs, bool restore)
+{
+ instrumentation_begin();
+ ftrace_nmi_exit();
+ if (restore) {
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ }
+ instrumentation_end();
+
+ rcu_nmi_exit();
+ lockdep_hardirq_exit();
+ if (restore)
+ lockdep_hardirqs_on(CALLER_ADDR0);
+ __nmi_exit();
+}
+
#ifdef CONFIG_XEN_PV
#ifndef CONFIG_PREEMPTION
/*
@@ -800,9 +839,10 @@ static void __xen_pv_evtchn_do_upcall(void)
__visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
{
struct pt_regs *old_regs;
- bool inhcall, rcu_exit;
+ bool inhcall;
+ idtentry_state_t state;
- rcu_exit = idtentry_enter_cond_rcu(regs);
+ state = idtentry_enter(regs);
old_regs = set_irq_regs(regs);
instrumentation_begin();
@@ -812,13 +852,13 @@ __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
set_irq_regs(old_regs);
inhcall = get_and_clear_inhcall();
- if (inhcall && !WARN_ON_ONCE(rcu_exit)) {
+ if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
instrumentation_begin();
idtentry_exit_cond_resched(regs, true);
instrumentation_end();
restore_inhcall(inhcall);
} else {
- idtentry_exit_cond_rcu(regs, rcu_exit);
+ idtentry_exit(regs, state);
}
}
#endif /* CONFIG_XEN_PV */
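The idtentry_enter_nmi()/idtentry_exit_nmi() pair added above snapshots
the lockdep hardirq state on entry and restores it on exit, so the two
calls must bracket the NMI work as a matched pair. A sketch of the
calling pattern (as used by exc_nmi() further down in this merge):

    bool irq_state = idtentry_enter_nmi(regs);
    /* actual NMI work; anything instrumentable must sit between
     * instrumentation_begin() and instrumentation_end() */
    idtentry_exit_nmi(regs, irq_state);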
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index bf35e476a776..b6cac6e9bb70 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -14,8 +14,6 @@
* resource counting etc..
*/
-#define ATOMIC_INIT(i) { (i) }
-
/**
* arch_atomic_read - read atomic variable
* @v: pointer of type atomic_t
diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
index 80d3b30d3ee3..d74128c964f8 100644
--- a/arch/x86/include/asm/idtentry.h
+++ b/arch/x86/include/asm/idtentry.h
@@ -13,8 +13,15 @@
void idtentry_enter_user(struct pt_regs *regs);
void idtentry_exit_user(struct pt_regs *regs);
-bool idtentry_enter_cond_rcu(struct pt_regs *regs);
-void idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit);
+typedef struct idtentry_state {
+ bool exit_rcu;
+} idtentry_state_t;
+
+idtentry_state_t idtentry_enter(struct pt_regs *regs);
+void idtentry_exit(struct pt_regs *regs, idtentry_state_t state);
+
+bool idtentry_enter_nmi(struct pt_regs *regs);
+void idtentry_exit_nmi(struct pt_regs *regs, bool irq_state);
/**
* DECLARE_IDTENTRY - Declare functions for simple IDT entry points
@@ -54,12 +61,12 @@ static __always_inline void __##func(struct pt_regs *regs); \
\
__visible noinstr void func(struct pt_regs *regs) \
{ \
- bool rcu_exit = idtentry_enter_cond_rcu(regs); \
+ idtentry_state_t state = idtentry_enter(regs); \
\
instrumentation_begin(); \
__##func (regs); \
instrumentation_end(); \
- idtentry_exit_cond_rcu(regs, rcu_exit); \
+ idtentry_exit(regs, state); \
} \
\
static __always_inline void __##func(struct pt_regs *regs)
@@ -101,12 +108,12 @@ static __always_inline void __##func(struct pt_regs *regs, \
__visible noinstr void func(struct pt_regs *regs, \
unsigned long error_code) \
{ \
- bool rcu_exit = idtentry_enter_cond_rcu(regs); \
+ idtentry_state_t state = idtentry_enter(regs); \
\
instrumentation_begin(); \
__##func (regs, error_code); \
instrumentation_end(); \
- idtentry_exit_cond_rcu(regs, rcu_exit); \
+ idtentry_exit(regs, state); \
} \
\
static __always_inline void __##func(struct pt_regs *regs, \
@@ -199,7 +206,7 @@ static __always_inline void __##func(struct pt_regs *regs, u8 vector); \
__visible noinstr void func(struct pt_regs *regs, \
unsigned long error_code) \
{ \
- bool rcu_exit = idtentry_enter_cond_rcu(regs); \
+ idtentry_state_t state = idtentry_enter(regs); \
\
instrumentation_begin(); \
irq_enter_rcu(); \
@@ -207,7 +214,7 @@ __visible noinstr void func(struct pt_regs *regs, \
__##func (regs, (u8)error_code); \
irq_exit_rcu(); \
instrumentation_end(); \
- idtentry_exit_cond_rcu(regs, rcu_exit); \
+ idtentry_exit(regs, state); \
} \
\
static __always_inline void __##func(struct pt_regs *regs, u8 vector)
@@ -241,7 +248,7 @@ static void __##func(struct pt_regs *regs); \
\
__visible noinstr void func(struct pt_regs *regs) \
{ \
- bool rcu_exit = idtentry_enter_cond_rcu(regs); \
+ idtentry_state_t state = idtentry_enter(regs); \
\
instrumentation_begin(); \
irq_enter_rcu(); \
@@ -249,7 +256,7 @@ __visible noinstr void func(struct pt_regs *regs) \
run_on_irqstack_cond(__##func, regs, regs); \
irq_exit_rcu(); \
instrumentation_end(); \
- idtentry_exit_cond_rcu(regs, rcu_exit); \
+ idtentry_exit(regs, state); \
} \
\
static noinline void __##func(struct pt_regs *regs)
@@ -270,7 +277,7 @@ static __always_inline void __##func(struct pt_regs *regs); \
\
__visible noinstr void func(struct pt_regs *regs) \
{ \
- bool rcu_exit = idtentry_enter_cond_rcu(regs); \
+ idtentry_state_t state = idtentry_enter(regs); \
\
instrumentation_begin(); \
__irq_enter_raw(); \
@@ -278,7 +285,7 @@ __visible noinstr void func(struct pt_regs *regs) \
__##func (regs); \
__irq_exit_raw(); \
instrumentation_end(); \
- idtentry_exit_cond_rcu(regs, rcu_exit); \
+ idtentry_exit(regs, state); \
} \
\
static __always_inline void __##func(struct pt_regs *regs)
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index df63786e7bfa..3f78482d9496 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -233,7 +233,7 @@ EXPORT_SYMBOL_GPL(kvm_read_and_reset_apf_flags);
noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
u32 reason = kvm_read_and_reset_apf_flags();
- bool rcu_exit;
+ idtentry_state_t state;
switch (reason) {
case KVM_PV_REASON_PAGE_NOT_PRESENT:
@@ -243,7 +243,7 @@ noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
return false;
}
- rcu_exit = idtentry_enter_cond_rcu(regs);
+ state = idtentry_enter(regs);
instrumentation_begin();
/*
@@ -264,7 +264,7 @@ noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
}
instrumentation_end();
- idtentry_exit_cond_rcu(regs, rcu_exit);
+ idtentry_exit(regs, state);
return true;
}
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index d7c5e44b26f7..4fc9954a9560 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -330,7 +330,6 @@ static noinstr void default_do_nmi(struct pt_regs *regs)
__this_cpu_write(last_nmi_rip, regs->ip);
instrumentation_begin();
- trace_hardirqs_off_finish();
handled = nmi_handle(NMI_LOCAL, regs);
__this_cpu_add(nmi_stats.normal, handled);
@@ -417,8 +416,6 @@ static noinstr void default_do_nmi(struct pt_regs *regs)
unknown_nmi_error(reason, regs);
out:
- if (regs->flags & X86_EFLAGS_IF)
- trace_hardirqs_on_prepare();
instrumentation_end();
}
@@ -478,6 +475,8 @@ static DEFINE_PER_CPU(unsigned long, nmi_dr7);
DEFINE_IDTENTRY_RAW(exc_nmi)
{
+ bool irq_state;
+
if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id()))
return;
@@ -491,14 +490,14 @@ nmi_restart:
this_cpu_write(nmi_dr7, local_db_save());
- nmi_enter();
+ irq_state = idtentry_enter_nmi(regs);
inc_irq_stat(__nmi_count);
if (!ignore_nmis)
default_do_nmi(regs);
- nmi_exit();
+ idtentry_exit_nmi(regs, irq_state);
local_db_restore(this_cpu_read(nmi_dr7));
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index b7cb3e0716f7..8493f55e1167 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -245,7 +245,7 @@ static noinstr bool handle_bug(struct pt_regs *regs)
DEFINE_IDTENTRY_RAW(exc_invalid_op)
{
- bool rcu_exit;
+ idtentry_state_t state;
/*
* We use UD2 as a short encoding for 'CALL __WARN', as such
@@ -255,11 +255,11 @@ DEFINE_IDTENTRY_RAW(exc_invalid_op)
if (!user_mode(regs) && handle_bug(regs))
return;
- rcu_exit = idtentry_enter_cond_rcu(regs);
+ state = idtentry_enter(regs);
instrumentation_begin();
handle_invalid_op(regs);
instrumentation_end();
- idtentry_exit_cond_rcu(regs, rcu_exit);
+ idtentry_exit(regs, state);
}
DEFINE_IDTENTRY(exc_coproc_segment_overrun)
@@ -405,7 +405,7 @@ DEFINE_IDTENTRY_DF(exc_double_fault)
}
#endif
- nmi_enter();
+ idtentry_enter_nmi(regs);
instrumentation_begin();
notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
@@ -651,15 +651,12 @@ DEFINE_IDTENTRY_RAW(exc_int3)
instrumentation_end();
idtentry_exit_user(regs);
} else {
- nmi_enter();
+ bool irq_state = idtentry_enter_nmi(regs);
instrumentation_begin();
- trace_hardirqs_off_finish();
if (!do_int3(regs))
die("int3", regs, 0);
- if (regs->flags & X86_EFLAGS_IF)
- trace_hardirqs_on_prepare();
instrumentation_end();
- nmi_exit();
+ idtentry_exit_nmi(regs, irq_state);
}
}
@@ -867,9 +864,8 @@ out:
static __always_inline void exc_debug_kernel(struct pt_regs *regs,
unsigned long dr6)
{
- nmi_enter();
+ bool irq_state = idtentry_enter_nmi(regs);
instrumentation_begin();
- trace_hardirqs_off_finish();
/*
* If something gets miswired and we end up here for a user mode
@@ -886,10 +882,8 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs,
handle_debug(regs, dr6, false);
- if (regs->flags & X86_EFLAGS_IF)
- trace_hardirqs_on_prepare();
instrumentation_end();
- nmi_exit();
+ idtentry_exit_nmi(regs, irq_state);
}
static __always_inline void exc_debug_user(struct pt_regs *regs,
@@ -905,6 +899,7 @@ static __always_inline void exc_debug_user(struct pt_regs *regs,
instrumentation_begin();
handle_debug(regs, dr6, true);
+
instrumentation_end();
idtentry_exit_user(regs);
}
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 1ead568c0101..5e41949453cc 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1377,7 +1377,7 @@ handle_page_fault(struct pt_regs *regs, unsigned long error_code,
DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault)
{
unsigned long address = read_cr2();
- bool rcu_exit;
+ idtentry_state_t state;
prefetchw(&current->mm->mmap_lock);
@@ -1412,11 +1412,11 @@ DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault)
* code reenabled RCU to avoid subsequent wreckage which helps
debuggability.
*/
- rcu_exit = idtentry_enter_cond_rcu(regs);
+ state = idtentry_enter(regs);
instrumentation_begin();
handle_page_fault(regs, error_code, address);
instrumentation_end();
- idtentry_exit_cond_rcu(regs, rcu_exit);
+ idtentry_exit(regs, state);
}
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index 3e7c6134ed32..744c2f463845 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -19,8 +19,6 @@
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
-#define ATOMIC_INIT(i) { (i) }
-
/*
* This Xtensa implementation assumes that the right mechanism
* for exclusion is for locking interrupts to level EXCM_LEVEL.