author		Linus Torvalds	2017-07-03 11:34:53 -0700
committer	Linus Torvalds	2017-07-03 11:34:53 -0700
commit		330e9e46253cbfab178450c976aa90ef0f3ae940 (patch)
tree		4a9875d264a477449a9c9a0ff23937550d1c3234 /include
parent		e94693f797400b9f424c69e1da9381e4846762fe (diff)
parent		567b64aaefc4ef9ae3af124ae0b13dc13a6804a8 (diff)
Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull RCU updates from Ingo Molnar:
"The sole purpose of these changes is to shrink and simplify the RCU
code base, which has suffered from creeping bloat over the past couple
of years. The end result is a net removal of ~2700 lines of code:
79 files changed, 1496 insertions(+), 4211 deletions(-)
Plus there's a marked reduction in the Kconfig space complexity as
well, here's the number of matches on 'grep RCU' in the .config:
                       before   after
      x86-defconfig        17      15
      x86-allmodconfig     33      20"
* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (86 commits)
rcu: Remove RCU CPU stall warnings from Tiny RCU
rcu: Remove event tracing from Tiny RCU
rcu: Move RCU debug Kconfig options to kernel/rcu
rcu: Move RCU non-debug Kconfig options to kernel/rcu
rcu: Eliminate NOCBs CPU-state Kconfig options
rcu: Remove debugfs tracing
srcu: Remove Classic SRCU
srcu: Fix rcutorture-statistics typo
rcu: Remove SPARSE_RCU_POINTER Kconfig option
rcu: Remove the now-obsolete PROVE_RCU_REPEATEDLY Kconfig option
rcu: Remove typecheck() from RCU locking wrapper functions
rcu: Remove #ifdef moving rcu_end_inkernel_boot from rcupdate.h
rcu: Remove nohz_full full-system-idle state machine
rcu: Remove the RCU_KTHREAD_PRIO Kconfig option
rcu: Remove *_SLOW_* Kconfig options
srcu: Use rnp->lock wrappers to replace explicit memory barriers
rcu: Move rnp->lock wrappers for SRCU use
rcu: Convert rnp->lock wrappers to macros for SRCU use
rcu: Refactor #includes from include/linux/rcupdate.h
bcm47xx: Fix build regression
...
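Two of the commits listed above ("rcu: Convert rnp->lock wrappers to macros for SRCU use" and "srcu: Use rnp->lock wrappers to replace explicit memory barriers") revolve around a single idiom: hiding smp_mb__after_unlock_lock() inside the lock-acquisition wrapper so that every user of an rcu_node-style ->lock gets identical ordering. A minimal sketch of that wrapper pattern follows; the identifier names are assumed from kernel/rcu/tree.h of this period and are not taken from the diff below.

    /*
     * Sketch of the ->lock wrapper idiom, assuming the kernel/rcu/tree.h
     * names of this era (raw_spin_lock_rcu_node() etc.).  The acquisition
     * side pairs the lock operation with smp_mb__after_unlock_lock() so
     * that everything before a previous critical section's unlock is
     * ordered before anything in the new critical section.
     */
    #define raw_spin_lock_rcu_node(p)				\
    do {							\
    	raw_spin_lock(&ACCESS_PRIVATE(p, lock));		\
    	smp_mb__after_unlock_lock();				\
    } while (0)

    #define raw_spin_unlock_rcu_node(p) \
    	raw_spin_unlock(&ACCESS_PRIVATE(p, lock))

Because both RCU and SRCU acquire the lock only through such wrappers, the ordering guarantee lives in one place instead of being repeated as explicit barriers at each call site.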
Diffstat (limited to 'include')
-rw-r--r--	include/linux/bcm47xx_nvram.h	  1
-rw-r--r--	include/linux/compiler.h	  4
-rw-r--r--	include/linux/rcu_node_tree.h	  4
-rw-r--r--	include/linux/rcu_segcblist.h	  4
-rw-r--r--	include/linux/rcupdate.h	318
-rw-r--r--	include/linux/rcutiny.h		167
-rw-r--r--	include/linux/rcutree.h		 21
-rw-r--r--	include/linux/spinlock.h	 20
-rw-r--r--	include/linux/srcu.h		 25
-rw-r--r--	include/linux/srcuclassic.h	115
-rw-r--r--	include/linux/srcutiny.h	 47
-rw-r--r--	include/linux/srcutree.h	 13
-rw-r--r--	include/trace/events/rcu.h	  1
13 files changed, 102 insertions, 638 deletions
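One structural change is worth calling out before the diff: in the srcutiny.h hunk below, Tiny SRCU drops its rcu_segcblist in favor of an open-coded singly linked callback list whose ->srcu_cb_tail always points at the ->next slot to fill. A sketch of the resulting enqueue step; the helper name and locking context are assumptions, not part of this merge:

    /*
     * Hypothetical helper illustrating the head/tail-pointer list used
     * in the srcutiny.h hunk below.  Callers would hold the appropriate
     * protection (Tiny SRCU runs on a single CPU under irq exclusion).
     */
    static void srcu_cb_enqueue_sketch(struct srcu_struct *sp,
    				   struct rcu_head *rhp)
    {
    	rhp->next = NULL;
    	*sp->srcu_cb_tail = rhp;	/* Old tail slot now points at rhp. */
    	sp->srcu_cb_tail = &rhp->next;	/* Tail advances to rhp's ->next. */
    }

Keeping a pointer to the last ->next slot makes appending O(1) with no NULL checks, while __SRCU_STRUCT_INIT() need only seed ->srcu_cb_tail with &name.srcu_cb_head.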
diff --git a/include/linux/bcm47xx_nvram.h b/include/linux/bcm47xx_nvram.h
index 2793652fbf66..a414a2b53e41 100644
--- a/include/linux/bcm47xx_nvram.h
+++ b/include/linux/bcm47xx_nvram.h
@@ -8,6 +8,7 @@
 #ifndef __BCM47XX_NVRAM_H
 #define __BCM47XX_NVRAM_H
 
+#include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/vmalloc.h>
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index f8110051188f..707242fdbb89 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -17,11 +17,7 @@
 # define __release(x)	__context__(x,-1)
 # define __cond_lock(x,c)	((c) ? ({ __acquire(x); 1; }) : 0)
 # define __percpu	__attribute__((noderef, address_space(3)))
-#ifdef CONFIG_SPARSE_RCU_POINTER
 # define __rcu		__attribute__((noderef, address_space(4)))
-#else /* CONFIG_SPARSE_RCU_POINTER */
-# define __rcu
-#endif /* CONFIG_SPARSE_RCU_POINTER */
 # define __private	__attribute__((noderef))
 extern void __chk_user_ptr(const volatile void __user *);
 extern void __chk_io_ptr(const volatile void __iomem *);
diff --git a/include/linux/rcu_node_tree.h b/include/linux/rcu_node_tree.h
index 4b766b61e1a0..426cee67f0e2 100644
--- a/include/linux/rcu_node_tree.h
+++ b/include/linux/rcu_node_tree.h
@@ -7,6 +7,10 @@
  * unlimited scalability while maintaining a constant level of contention
  * on the root node.
  *
+ * This seemingly RCU-private file must be available to SRCU users
+ * because the size of the TREE SRCU srcu_struct structure depends
+ * on these definitions.
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h
index ba4d2621d9ca..c3ad00e63556 100644
--- a/include/linux/rcu_segcblist.h
+++ b/include/linux/rcu_segcblist.h
@@ -1,6 +1,10 @@
 /*
  * RCU segmented callback lists
  *
+ * This seemingly RCU-private file must be available to SRCU users
+ * because the size of the TREE SRCU srcu_struct structure depends
+ * on these definitions.
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index e1e5d002fdb9..f816fc72b51e 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -34,104 +34,15 @@
 #define __LINUX_RCUPDATE_H
 
 #include <linux/types.h>
-#include <linux/cache.h>
-#include <linux/spinlock.h>
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <linux/seqlock.h>
-#include <linux/lockdep.h>
-#include <linux/debugobjects.h>
-#include <linux/bug.h>
 #include <linux/compiler.h>
-#include <linux/ktime.h>
+#include <linux/atomic.h>
 #include <linux/irqflags.h>
+#include <linux/preempt.h>
+#include <linux/bottom_half.h>
+#include <linux/lockdep.h>
+#include <asm/processor.h>
+#include <linux/cpumask.h>
 
-#include <asm/barrier.h>
-
-#ifndef CONFIG_TINY_RCU
-extern int rcu_expedited; /* for sysctl */
-extern int rcu_normal;    /* also for sysctl */
-#endif /* #ifndef CONFIG_TINY_RCU */
-
-#ifdef CONFIG_TINY_RCU
-/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
-static inline bool rcu_gp_is_normal(void)  /* Internal RCU use. */
-{
-	return true;
-}
-static inline bool rcu_gp_is_expedited(void)  /* Internal RCU use. */
-{
-	return false;
-}
-
-static inline void rcu_expedite_gp(void)
-{
-}
-
-static inline void rcu_unexpedite_gp(void)
-{
-}
-#else /* #ifdef CONFIG_TINY_RCU */
-bool rcu_gp_is_normal(void);     /* Internal RCU use. */
-bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
-void rcu_expedite_gp(void);
-void rcu_unexpedite_gp(void);
-#endif /* #else #ifdef CONFIG_TINY_RCU */
-
-enum rcutorture_type {
-	RCU_FLAVOR,
-	RCU_BH_FLAVOR,
-	RCU_SCHED_FLAVOR,
-	RCU_TASKS_FLAVOR,
-	SRCU_FLAVOR,
-	INVALID_RCU_FLAVOR
-};
-
-#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
-void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
-			    unsigned long *gpnum, unsigned long *completed);
-void rcutorture_record_test_transition(void);
-void rcutorture_record_progress(unsigned long vernum);
-void do_trace_rcu_torture_read(const char *rcutorturename,
-			       struct rcu_head *rhp,
-			       unsigned long secs,
-			       unsigned long c_old,
-			       unsigned long c);
-bool rcu_irq_enter_disabled(void);
-#else
-static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
-					  int *flags,
-					  unsigned long *gpnum,
-					  unsigned long *completed)
-{
-	*flags = 0;
-	*gpnum = 0;
-	*completed = 0;
-}
-static inline void rcutorture_record_test_transition(void)
-{
-}
-static inline void rcutorture_record_progress(unsigned long vernum)
-{
-}
-static inline bool rcu_irq_enter_disabled(void)
-{
-	return false;
-}
-#ifdef CONFIG_RCU_TRACE
-void do_trace_rcu_torture_read(const char *rcutorturename,
-			       struct rcu_head *rhp,
-			       unsigned long secs,
-			       unsigned long c_old,
-			       unsigned long c);
-#else
-#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
-	do { } while (0)
-#endif
-#endif
-
-#define UINT_CMP_GE(a, b)	(UINT_MAX / 2 >= (a) - (b))
-#define UINT_CMP_LT(a, b)	(UINT_MAX / 2 < (a) - (b))
 #define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
 #define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))
 #define ulong2long(a)		(*(long *)(&(a)))
@@ -139,115 +50,14 @@ void do_trace_rcu_torture_read(const char *rcutorturename,
 
 /* Exported common interfaces */
 
 #ifdef CONFIG_PREEMPT_RCU
-
-/**
- * call_rcu() - Queue an RCU callback for invocation after a grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual callback function to be invoked after the grace period
- *
- * The callback function will be invoked some time after a full grace
- * period elapses, in other words after all pre-existing RCU read-side
- * critical sections have completed.  However, the callback function
- * might well execute concurrently with RCU read-side critical sections
- * that started after call_rcu() was invoked.  RCU read-side critical
- * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
- * and may be nested.
- *
- * Note that all CPUs must agree that the grace period extended beyond
- * all pre-existing RCU read-side critical section.  On systems with more
- * than one CPU, this means that when "func()" is invoked, each CPU is
- * guaranteed to have executed a full memory barrier since the end of its
- * last RCU read-side critical section whose beginning preceded the call
- * to call_rcu().  It also means that each CPU executing an RCU read-side
- * critical section that continues beyond the start of "func()" must have
- * executed a memory barrier after the call_rcu() but before the beginning
- * of that RCU read-side critical section.  Note that these guarantees
- * include CPUs that are offline, idle, or executing in user mode, as
- * well as CPUs that are executing in the kernel.
- *
- * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
- * resulting RCU callback function "func()", then both CPU A and CPU B are
- * guaranteed to execute a full memory barrier during the time interval
- * between the call to call_rcu() and the invocation of "func()" -- even
- * if CPU A and CPU B are the same CPU (but again only if the system has
- * more than one CPU).
- */
-void call_rcu(struct rcu_head *head,
-	      rcu_callback_t func);
-
+void call_rcu(struct rcu_head *head, rcu_callback_t func);
 #else /* #ifdef CONFIG_PREEMPT_RCU */
-
-/* In classic RCU, call_rcu() is just call_rcu_sched(). */
 #define	call_rcu	call_rcu_sched
-
 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 
-/**
- * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual callback function to be invoked after the grace period
- *
- * The callback function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
- * read-side critical sections have completed. call_rcu_bh() assumes
- * that the read-side critical sections end on completion of a softirq
- * handler. This means that read-side critical sections in process
- * context must not be interrupted by softirqs. This interface is to be
- * used when most of the read-side critical sections are in softirq context.
- * RCU read-side critical sections are delimited by :
- *  - rcu_read_lock() and  rcu_read_unlock(), if in interrupt context.
- *  OR
- *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
- *  These may be nested.
- *
- * See the description of call_rcu() for more detailed information on
- * memory ordering guarantees.
- */
-void call_rcu_bh(struct rcu_head *head,
-		 rcu_callback_t func);
-
-/**
- * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual callback function to be invoked after the grace period
- *
- * The callback function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
- * read-side critical sections have completed. call_rcu_sched() assumes
- * that the read-side critical sections end on enabling of preemption
- * or on voluntary preemption.
- * RCU read-side critical sections are delimited by :
- *  - rcu_read_lock_sched() and rcu_read_unlock_sched(),
- *  OR
- *  anything that disables preemption.
- *  These may be nested.
- *
- * See the description of call_rcu() for more detailed information on
- * memory ordering guarantees.
- */
-void call_rcu_sched(struct rcu_head *head,
-		    rcu_callback_t func);
-
+void call_rcu_bh(struct rcu_head *head, rcu_callback_t func);
+void call_rcu_sched(struct rcu_head *head, rcu_callback_t func);
 void synchronize_sched(void);
-
-/**
- * call_rcu_tasks() - Queue an RCU for invocation task-based grace period
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual callback function to be invoked after the grace period
- *
- * The callback function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
- * read-side critical sections have completed. call_rcu_tasks() assumes
- * that the read-side critical sections end at a voluntary context
- * switch (not a preemption!), entry into idle, or transition to usermode
- * execution.  As such, there are no read-side primitives analogous to
- * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
- * to determine that all tasks have passed through a safe state, not so
- * much for data-strcuture synchronization.
- *
- * See the description of call_rcu() for more detailed information on
- * memory ordering guarantees.
- */
 void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
 void synchronize_rcu_tasks(void);
 void rcu_barrier_tasks(void);
@@ -301,22 +111,12 @@ void rcu_check_callbacks(int user);
 void rcu_report_dead(unsigned int cpu);
 void rcu_cpu_starting(unsigned int cpu);
 
-#ifndef CONFIG_TINY_RCU
-void rcu_end_inkernel_boot(void);
-#else /* #ifndef CONFIG_TINY_RCU */
-static inline void rcu_end_inkernel_boot(void) { }
-#endif /* #ifndef CONFIG_TINY_RCU */
-
 #ifdef CONFIG_RCU_STALL_COMMON
 void rcu_sysrq_start(void);
 void rcu_sysrq_end(void);
 #else /* #ifdef CONFIG_RCU_STALL_COMMON */
-static inline void rcu_sysrq_start(void)
-{
-}
-static inline void rcu_sysrq_end(void)
-{
-}
+static inline void rcu_sysrq_start(void) { }
+static inline void rcu_sysrq_end(void) { }
 #endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */
 
 #ifdef CONFIG_NO_HZ_FULL
@@ -330,9 +130,7 @@ static inline void rcu_user_exit(void) { }
 #ifdef CONFIG_RCU_NOCB_CPU
 void rcu_init_nohz(void);
 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
-static inline void rcu_init_nohz(void)
-{
-}
+static inline void rcu_init_nohz(void) { }
 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
 
 /**
@@ -397,10 +195,6 @@ do { \
 	rcu_note_voluntary_context_switch(current); \
 } while (0)
 
-#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
-bool __rcu_is_watching(void);
-#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
-
 /*
  * Infrastructure to implement the synchronize_() primitives in
  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
@@ -414,10 +208,6 @@ bool __rcu_is_watching(void);
 #error "Unknown RCU implementation specified to kernel configuration"
 #endif
 
-#define RCU_SCHEDULER_INACTIVE	0
-#define RCU_SCHEDULER_INIT	1
-#define RCU_SCHEDULER_RUNNING	2
-
 /*
  * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
  * initialization and destruction of rcu_head on the stack. rcu_head structures
@@ -430,30 +220,16 @@ void destroy_rcu_head(struct rcu_head *head);
 void init_rcu_head_on_stack(struct rcu_head *head);
 void destroy_rcu_head_on_stack(struct rcu_head *head);
 #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
-static inline void init_rcu_head(struct rcu_head *head)
-{
-}
-
-static inline void destroy_rcu_head(struct rcu_head *head)
-{
-}
-
-static inline void init_rcu_head_on_stack(struct rcu_head *head)
-{
-}
-
-static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
-{
-}
+static inline void init_rcu_head(struct rcu_head *head) { }
+static inline void destroy_rcu_head(struct rcu_head *head) { }
+static inline void init_rcu_head_on_stack(struct rcu_head *head) { }
+static inline void destroy_rcu_head_on_stack(struct rcu_head *head) { }
 #endif	/* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
 
 #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
 bool rcu_lockdep_current_cpu_online(void);
 #else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
-static inline bool rcu_lockdep_current_cpu_online(void)
-{
-	return true;
-}
+static inline bool rcu_lockdep_current_cpu_online(void) { return true; }
 #endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -473,18 +249,8 @@ extern struct lockdep_map rcu_bh_lock_map;
 extern struct lockdep_map rcu_sched_lock_map;
 extern struct lockdep_map rcu_callback_map;
 int debug_lockdep_rcu_enabled(void);
-
 int rcu_read_lock_held(void);
 int rcu_read_lock_bh_held(void);
-
-/**
- * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
- *
- * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
- * RCU-sched read-side critical section.  In absence of
- * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
- * critical section unless it can prove otherwise.
- */
 int rcu_read_lock_sched_held(void);
 
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
@@ -531,9 +297,7 @@ static inline void rcu_preempt_sleep_check(void)
 			   "Illegal context switch in RCU read-side critical section");
 }
 #else /* #ifdef CONFIG_PROVE_RCU */
-static inline void rcu_preempt_sleep_check(void)
-{
-}
+static inline void rcu_preempt_sleep_check(void) { }
 #endif /* #else #ifdef CONFIG_PROVE_RCU */
 
 #define rcu_sleep_check() \
@@ -1084,52 +848,6 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
 #define kfree_rcu(ptr, rcu_head) \
 	__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
 
-#ifdef CONFIG_TINY_RCU
-static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
-{
-	*nextevt = KTIME_MAX;
-	return 0;
-}
-#endif /* #ifdef CONFIG_TINY_RCU */
-
-#if defined(CONFIG_RCU_NOCB_CPU_ALL)
-static inline bool rcu_is_nocb_cpu(int cpu) { return true; }
-#elif defined(CONFIG_RCU_NOCB_CPU)
-bool rcu_is_nocb_cpu(int cpu);
-#else
-static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
-#endif
-
-
-/* Only for use by adaptive-ticks code. */
-#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
-bool rcu_sys_is_idle(void);
-void rcu_sysidle_force_exit(void);
-#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
-
-static inline bool rcu_sys_is_idle(void)
-{
-	return false;
-}
-
-static inline void rcu_sysidle_force_exit(void)
-{
-}
-
-#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
-
-
-/*
- * Dump the ftrace buffer, but only one time per callsite per boot.
- */
-#define rcu_ftrace_dump(oops_dump_mode) \
-do { \
-	static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
-	\
-	if (!atomic_read(&___rfd_beenhere) && \
-	    !atomic_xchg(&___rfd_beenhere, 1)) \
-		ftrace_dump(oops_dump_mode); \
-} while (0)
 
 /*
  * Place this after a lock-acquisition primitive to guarantee that
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 74d9c3a1feee..5becbbccb998 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -25,7 +25,7 @@
 #ifndef __LINUX_TINY_H
 #define __LINUX_TINY_H
 
-#include <linux/cache.h>
+#include <linux/ktime.h>
 
 struct rcu_dynticks;
 static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
@@ -33,10 +33,8 @@ static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
 	return 0;
 }
 
-static inline bool rcu_eqs_special_set(int cpu)
-{
-	return false;  /* Never flag non-existent other CPUs! */
-}
+/* Never flag non-existent other CPUs! */
+static inline bool rcu_eqs_special_set(int cpu) { return false; }
 
 static inline unsigned long get_state_synchronize_rcu(void)
 {
@@ -98,159 +96,38 @@ static inline void kfree_call_rcu(struct rcu_head *head,
 	rcu_note_voluntary_context_switch_lite(current); \
 } while (0)
 
-/*
- * Take advantage of the fact that there is only one CPU, which
- * allows us to ignore virtualization-based context switches.
- */
-static inline void rcu_virt_note_context_switch(int cpu)
-{
-}
-
-/*
- * Return the number of grace periods started.
- */
-static inline unsigned long rcu_batches_started(void)
-{
-	return 0;
-}
-
-/*
- * Return the number of bottom-half grace periods started.
- */
-static inline unsigned long rcu_batches_started_bh(void)
-{
-	return 0;
-}
-
-/*
- * Return the number of sched grace periods started.
- */
-static inline unsigned long rcu_batches_started_sched(void)
-{
-	return 0;
-}
-
-/*
- * Return the number of grace periods completed.
- */
-static inline unsigned long rcu_batches_completed(void)
-{
-	return 0;
-}
-
-/*
- * Return the number of bottom-half grace periods completed.
- */
-static inline unsigned long rcu_batches_completed_bh(void)
-{
-	return 0;
-}
-
-/*
- * Return the number of sched grace periods completed.
- */
-static inline unsigned long rcu_batches_completed_sched(void)
+static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
 {
+	*nextevt = KTIME_MAX;
 	return 0;
 }
 
 /*
- * Return the number of expedited grace periods completed.
- */
-static inline unsigned long rcu_exp_batches_completed(void)
-{
-	return 0;
-}
-
-/*
- * Return the number of expedited sched grace periods completed.
+ * Take advantage of the fact that there is only one CPU, which
+ * allows us to ignore virtualization-based context switches.
 */
-static inline unsigned long rcu_exp_batches_completed_sched(void)
-{
-	return 0;
-}
-
-static inline void rcu_force_quiescent_state(void)
-{
-}
-
-static inline void rcu_bh_force_quiescent_state(void)
-{
-}
-
-static inline void rcu_sched_force_quiescent_state(void)
-{
-}
-
-static inline void show_rcu_gp_kthreads(void)
-{
-}
-
-static inline void rcu_cpu_stall_reset(void)
-{
-}
-
-static inline void rcu_idle_enter(void)
-{
-}
-
-static inline void rcu_idle_exit(void)
-{
-}
-
-static inline void rcu_irq_enter(void)
-{
-}
-
-static inline void rcu_irq_exit_irqson(void)
-{
-}
-
-static inline void rcu_irq_enter_irqson(void)
-{
-}
-
-static inline void rcu_irq_exit(void)
-{
-}
-
-static inline void exit_rcu(void)
-{
-}
+static inline void rcu_virt_note_context_switch(int cpu) { }
+static inline void rcu_cpu_stall_reset(void) { }
+static inline void rcu_idle_enter(void) { }
+static inline void rcu_idle_exit(void) { }
+static inline void rcu_irq_enter(void) { }
+static inline bool rcu_irq_enter_disabled(void) { return false; }
+static inline void rcu_irq_exit_irqson(void) { }
+static inline void rcu_irq_enter_irqson(void) { }
+static inline void rcu_irq_exit(void) { }
+static inline void exit_rcu(void) { }
 
 #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU)
 extern int rcu_scheduler_active __read_mostly;
 void rcu_scheduler_starting(void);
 #else /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */
-static inline void rcu_scheduler_starting(void)
-{
-}
+static inline void rcu_scheduler_starting(void) { }
 #endif /* #else #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */
+static inline void rcu_end_inkernel_boot(void) { }
+static inline bool rcu_is_watching(void) { return true; }
 
-#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)
-
-static inline bool rcu_is_watching(void)
-{
-	return __rcu_is_watching();
-}
-
-#else /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
-
-static inline bool rcu_is_watching(void)
-{
-	return true;
-}
-
-#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
-
-static inline void rcu_request_urgent_qs_task(struct task_struct *t)
-{
-}
-
-static inline void rcu_all_qs(void)
-{
-	barrier(); /* Avoid RCU read-side critical sections leaking across. */
-}
+/* Avoid RCU read-side critical sections leaking across. */
+static inline void rcu_all_qs(void) { barrier(); }
 
 /* RCUtree hotplug events */
 #define rcutree_prepare_cpu      NULL
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 0bacb6b2af69..37d6fd3b7ff8 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -79,37 +79,20 @@ void cond_synchronize_rcu(unsigned long oldstate);
 unsigned long get_state_synchronize_sched(void);
 void cond_synchronize_sched(unsigned long oldstate);
 
-extern unsigned long rcutorture_testseq;
-extern unsigned long rcutorture_vernum;
-unsigned long rcu_batches_started(void);
-unsigned long rcu_batches_started_bh(void);
-unsigned long rcu_batches_started_sched(void);
-unsigned long rcu_batches_completed(void);
-unsigned long rcu_batches_completed_bh(void);
-unsigned long rcu_batches_completed_sched(void);
-unsigned long rcu_exp_batches_completed(void);
-unsigned long rcu_exp_batches_completed_sched(void);
-void show_rcu_gp_kthreads(void);
-
-void rcu_force_quiescent_state(void);
-void rcu_bh_force_quiescent_state(void);
-void rcu_sched_force_quiescent_state(void);
-
 void rcu_idle_enter(void);
 void rcu_idle_exit(void);
 void rcu_irq_enter(void);
 void rcu_irq_exit(void);
 void rcu_irq_enter_irqson(void);
 void rcu_irq_exit_irqson(void);
+bool rcu_irq_enter_disabled(void);
 
 void exit_rcu(void);
 
 void rcu_scheduler_starting(void);
 extern int rcu_scheduler_active __read_mostly;
-
+void rcu_end_inkernel_boot(void);
 bool rcu_is_watching(void);
-void rcu_request_urgent_qs_task(struct task_struct *t);
-
 void rcu_all_qs(void);
 
 /* RCUtree hotplug events */
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 59248dcc6ef3..d9510e8522d4 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -369,6 +369,26 @@ static __always_inline int spin_trylock_irq(spinlock_t *lock)
 	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
 })
 
+/**
+ * spin_unlock_wait - Interpose between successive critical sections
+ * @lock: the spinlock whose critical sections are to be interposed.
+ *
+ * Semantically this is equivalent to a spin_lock() immediately
+ * followed by a spin_unlock().  However, most architectures have
+ * more efficient implementations in which the spin_unlock_wait()
+ * cannot block concurrent lock acquisition, and in some cases
+ * where spin_unlock_wait() does not write to the lock variable.
+ * Nevertheless, spin_unlock_wait() can have high overhead, so if
+ * you feel the need to use it, please check to see if there is
+ * a better way to get your job done.
+ *
+ * The ordering guarantees provided by spin_unlock_wait() are:
+ *
+ * 1.  All accesses preceding the spin_unlock_wait() happen before
+ *     any accesses in later critical sections for this same lock.
+ * 2.  All accesses following the spin_unlock_wait() happen after
+ *     any accesses in earlier critical sections for this same lock.
+ */
 static __always_inline void spin_unlock_wait(spinlock_t *lock)
 {
 	raw_spin_unlock_wait(&lock->rlock);
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 4c1d5f7e62c4..39af9bc0f653 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -60,32 +60,15 @@ int init_srcu_struct(struct srcu_struct *sp);
 #include <linux/srcutiny.h>
 #elif defined(CONFIG_TREE_SRCU)
 #include <linux/srcutree.h>
-#elif defined(CONFIG_CLASSIC_SRCU)
-#include <linux/srcuclassic.h>
-#else
+#elif defined(CONFIG_SRCU)
 #error "Unknown SRCU implementation specified to kernel configuration"
+#else
+/* Dummy definition for things like notifiers.  Actual use gets link error. */
+struct srcu_struct { };
 #endif
 
-/**
- * call_srcu() - Queue a callback for invocation after an SRCU grace period
- * @sp: srcu_struct in queue the callback
- * @head: structure to be used for queueing the SRCU callback.
- * @func: function to be invoked after the SRCU grace period
- *
- * The callback function will be invoked some time after a full SRCU
- * grace period elapses, in other words after all pre-existing SRCU
- * read-side critical sections have completed.  However, the callback
- * function might well execute concurrently with other SRCU read-side
- * critical sections that started after call_srcu() was invoked.  SRCU
- * read-side critical sections are delimited by srcu_read_lock() and
- * srcu_read_unlock(), and may be nested.
- *
- * The callback will be invoked from process context, but must nevertheless
- * be fast and must not block.
- */
 void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
 	       void (*func)(struct rcu_head *head));
-
 void cleanup_srcu_struct(struct srcu_struct *sp);
 int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
 void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
diff --git a/include/linux/srcuclassic.h b/include/linux/srcuclassic.h
deleted file mode 100644
index 5753f7322262..000000000000
--- a/include/linux/srcuclassic.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Sleepable Read-Copy Update mechanism for mutual exclusion,
- *	classic v4.11 variant.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
- * Copyright (C) IBM Corporation, 2017
- *
- * Author: Paul McKenney <paulmck@us.ibm.com>
- */
-
-#ifndef _LINUX_SRCU_CLASSIC_H
-#define _LINUX_SRCU_CLASSIC_H
-
-struct srcu_array {
-	unsigned long lock_count[2];
-	unsigned long unlock_count[2];
-};
-
-struct rcu_batch {
-	struct rcu_head *head, **tail;
-};
-
-#define RCU_BATCH_INIT(name) { NULL, &(name.head) }
-
-struct srcu_struct {
-	unsigned long completed;
-	struct srcu_array __percpu *per_cpu_ref;
-	spinlock_t queue_lock; /* protect ->batch_queue, ->running */
-	bool running;
-	/* callbacks just queued */
-	struct rcu_batch batch_queue;
-	/* callbacks try to do the first check_zero */
-	struct rcu_batch batch_check0;
-	/* callbacks done with the first check_zero and the flip */
-	struct rcu_batch batch_check1;
-	struct rcu_batch batch_done;
-	struct delayed_work work;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lockdep_map dep_map;
-#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-};
-
-void process_srcu(struct work_struct *work);
-
-#define __SRCU_STRUCT_INIT(name)					\
-	{								\
-		.completed = -300,					\
-		.per_cpu_ref = &name##_srcu_array,			\
-		.queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock),	\
-		.running = false,					\
-		.batch_queue = RCU_BATCH_INIT(name.batch_queue),	\
-		.batch_check0 = RCU_BATCH_INIT(name.batch_check0),	\
-		.batch_check1 = RCU_BATCH_INIT(name.batch_check1),	\
-		.batch_done = RCU_BATCH_INIT(name.batch_done),		\
-		.work = __DELAYED_WORK_INITIALIZER(name.work, process_srcu, 0),\
-		__SRCU_DEP_MAP_INIT(name)				\
-	}
-
-/*
- * Define and initialize a srcu struct at build time.
- * Do -not- call init_srcu_struct() nor cleanup_srcu_struct() on it.
- *
- * Note that although DEFINE_STATIC_SRCU() hides the name from other
- * files, the per-CPU variable rules nevertheless require that the
- * chosen name be globally unique.  These rules also prohibit use of
- * DEFINE_STATIC_SRCU() within a function.  If these rules are too
- * restrictive, declare the srcu_struct manually.  For example, in
- * each file:
- *
- *	static struct srcu_struct my_srcu;
- *
- * Then, before the first use of each my_srcu, manually initialize it:
- *
- *	init_srcu_struct(&my_srcu);
- *
- * See include/linux/percpu-defs.h for the rules on per-CPU variables.
- */
-#define __DEFINE_SRCU(name, is_static)					\
-	static DEFINE_PER_CPU(struct srcu_array, name##_srcu_array);\
-	is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
-#define DEFINE_SRCU(name)		__DEFINE_SRCU(name, /* not static */)
-#define DEFINE_STATIC_SRCU(name)	__DEFINE_SRCU(name, static)
-
-void synchronize_srcu_expedited(struct srcu_struct *sp);
-void srcu_barrier(struct srcu_struct *sp);
-unsigned long srcu_batches_completed(struct srcu_struct *sp);
-
-static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
-					   struct srcu_struct *sp, int *flags,
-					   unsigned long *gpnum,
-					   unsigned long *completed)
-{
-	if (test_type != SRCU_FLAVOR)
-		return;
-	*flags = 0;
-	*completed = sp->completed;
-	*gpnum = *completed;
-	if (sp->batch_queue.head || sp->batch_check0.head || sp->batch_check0.head)
-		(*gpnum)++;
-}
-
-#endif
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index 42311ee0334f..cfbfc540cafc 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -27,15 +27,14 @@
 #include <linux/swait.h>
 
 struct srcu_struct {
-	int srcu_lock_nesting[2];	/* srcu_read_lock() nesting depth. */
+	short srcu_lock_nesting[2];	/* srcu_read_lock() nesting depth. */
+	short srcu_idx;			/* Current reader array element. */
+	u8 srcu_gp_running;		/* GP workqueue running? */
+	u8 srcu_gp_waiting;		/* GP waiting for readers? */
 	struct swait_queue_head srcu_wq;
 					/* Last srcu_read_unlock() wakes GP. */
-	unsigned long srcu_gp_seq;	/* GP seq # for callback tagging. */
-	struct rcu_segcblist srcu_cblist;
-					/* Pending SRCU callbacks. */
-	int srcu_idx;			/* Current reader array element. */
-	bool srcu_gp_running;		/* GP workqueue running? */
-	bool srcu_gp_waiting;		/* GP waiting for readers? */
+	struct rcu_head *srcu_cb_head;	/* Pending callbacks: Head. */
+	struct rcu_head **srcu_cb_tail;	/* Pending callbacks: Tail. */
 	struct work_struct srcu_work;	/* For driving grace periods. */
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map dep_map;
@@ -47,7 +46,7 @@ void srcu_drive_gp(struct work_struct *wp);
 #define __SRCU_STRUCT_INIT(name)					\
 {									\
 	.srcu_wq = __SWAIT_QUEUE_HEAD_INITIALIZER(name.srcu_wq),	\
-	.srcu_cblist = RCU_SEGCBLIST_INITIALIZER(name.srcu_cblist),	\
+	.srcu_cb_tail = &name.srcu_cb_head,				\
 	.srcu_work = __WORK_INITIALIZER(name.srcu_work, srcu_drive_gp),	\
 	__SRCU_DEP_MAP_INIT(name)					\
 }
@@ -63,31 +62,29 @@ void srcu_drive_gp(struct work_struct *wp);
 
 void synchronize_srcu(struct srcu_struct *sp);
 
-static inline void synchronize_srcu_expedited(struct srcu_struct *sp)
+/*
+ * Counts the new reader in the appropriate per-CPU element of the
+ * srcu_struct.  Can be invoked from irq/bh handlers, but the matching
+ * __srcu_read_unlock() must be in the same handler instance.  Returns an
+ * index that must be passed to the matching srcu_read_unlock().
+ */
+static inline int __srcu_read_lock(struct srcu_struct *sp)
 {
-	synchronize_srcu(sp);
-}
+	int idx;
 
-static inline void srcu_barrier(struct srcu_struct *sp)
-{
-	synchronize_srcu(sp);
+	idx = READ_ONCE(sp->srcu_idx);
+	WRITE_ONCE(sp->srcu_lock_nesting[idx], sp->srcu_lock_nesting[idx] + 1);
+	return idx;
 }
 
-static inline unsigned long srcu_batches_completed(struct srcu_struct *sp)
+static inline void synchronize_srcu_expedited(struct srcu_struct *sp)
 {
-	return 0;
+	synchronize_srcu(sp);
 }
 
-static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
-					   struct srcu_struct *sp, int *flags,
-					   unsigned long *gpnum,
-					   unsigned long *completed)
+static inline void srcu_barrier(struct srcu_struct *sp)
 {
-	if (test_type != SRCU_FLAVOR)
-		return;
-	*flags = 0;
-	*completed = sp->srcu_gp_seq;
-	*gpnum = *completed;
+	synchronize_srcu(sp);
 }
 
 #endif
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 32e86d85fd11..42973f787e7e 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -40,7 +40,7 @@ struct srcu_data {
 	unsigned long srcu_unlock_count[2];	/* Unlocks per CPU. */
 
 	/* Update-side state. */
-	spinlock_t lock ____cacheline_internodealigned_in_smp;
+	raw_spinlock_t __private lock ____cacheline_internodealigned_in_smp;
 	struct rcu_segcblist srcu_cblist;	/* List of callbacks.*/
 	unsigned long srcu_gp_seq_needed;	/* Furthest future GP needed. */
 	unsigned long srcu_gp_seq_needed_exp;	/* Furthest future exp GP. */
@@ -58,7 +58,7 @@ struct srcu_data {
  * Node in SRCU combining tree, similar in function to rcu_data.
  */
 struct srcu_node {
-	spinlock_t lock;
+	raw_spinlock_t __private lock;
 	unsigned long srcu_have_cbs[4];		/* GP seq for children */
 						/*  having CBs, but only */
 						/*  is > ->srcu_gq_seq. */
@@ -78,7 +78,7 @@ struct srcu_struct {
 	struct srcu_node *level[RCU_NUM_LVLS + 1];
 						/* First node at each level. */
 	struct mutex srcu_cb_mutex;		/* Serialize CB preparation. */
-	spinlock_t gp_lock;			/* protect ->srcu_cblist */
+	raw_spinlock_t __private lock;		/* Protect counters */
 	struct mutex srcu_gp_mutex;		/* Serialize GP work. */
 	unsigned int srcu_idx;			/* Current rdr array element. */
 	unsigned long srcu_gp_seq;		/* Grace-period seq #. */
@@ -109,7 +109,7 @@ void process_srcu(struct work_struct *work);
 #define __SRCU_STRUCT_INIT(name)					\
 	{								\
 		.sda = &name##_srcu_data,				\
-		.gp_lock = __SPIN_LOCK_UNLOCKED(name.gp_lock),		\
+		.lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock),		\
 		.srcu_gp_seq_needed = 0 - 1,				\
 		__SRCU_DEP_MAP_INIT(name)				\
 	}
@@ -141,10 +141,5 @@ void process_srcu(struct work_struct *work);
 
 void synchronize_srcu_expedited(struct srcu_struct *sp);
 void srcu_barrier(struct srcu_struct *sp);
-unsigned long srcu_batches_completed(struct srcu_struct *sp);
-
-void srcutorture_get_gp_data(enum rcutorture_type test_type,
-			     struct srcu_struct *sp, int *flags,
-			     unsigned long *gpnum, unsigned long *completed);
 
 #endif
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index e3facb356838..91dc089d65b7 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -742,6 +742,7 @@ TRACE_EVENT(rcu_torture_read,
  *	"OnlineQ": _rcu_barrier() found online CPU with callbacks.
  *	"OnlineNQ": _rcu_barrier() found online CPU, no callbacks.
  *	"IRQ": An rcu_barrier_callback() callback posted on remote CPU.
+ *	"IRQNQ": An rcu_barrier_callback() callback found no callbacks.
  *	"CB": An rcu_barrier_callback() invoked a callback, not the last.
  *	"LastCB": An rcu_barrier_callback() invoked the last callback.
  *	"Inc2": _rcu_barrier() piggyback check counter incremented.
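Across all of the SRCU reshuffling above, the reader-side contract is the same for every implementation: srcu_read_lock() returns the index of the reader slot in use, and that index must be handed back to the matching srcu_read_unlock(), exactly as the Tiny SRCU __srcu_read_lock() added above implements it. A minimal usage sketch; my_srcu, gp, struct foo, and read_foo_a are hypothetical names, not taken from this merge:

    #include <linux/srcu.h>

    DEFINE_STATIC_SRCU(my_srcu);		/* hypothetical SRCU domain */

    struct foo {
    	int a;
    };
    static struct foo __rcu *gp;	/* hypothetical SRCU-protected pointer */

    static int read_foo_a(void)
    {
    	struct foo *p;
    	int idx, ret;

    	idx = srcu_read_lock(&my_srcu);		/* save the returned index */
    	p = srcu_dereference(gp, &my_srcu);	/* checked pointer fetch */
    	ret = p ? p->a : -1;
    	srcu_read_unlock(&my_srcu, idx);	/* hand the same index back */
    	return ret;
    }

Unlike plain RCU, SRCU readers may block, which is why each srcu_struct is its own grace-period domain and why the reader must carry the slot index from lock to unlock.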