author | Ingo Molnar | 2014-06-25 07:37:49 +0200
committer | Ingo Molnar | 2014-06-25 07:37:49 +0200
commit | 5cfec3422adcc1987a1b5fc5ff59ad42a1bc910e (patch)
tree | efb9591a225f1c760ab26a951f7c8623368225b2 /include
parent | a497c3ba1d97fc69c1e78e7b96435ba8c2cb42ee (diff)
parent | 4a81e8328d3791a4f99bf5b436d050f6dc5ffea3 (diff)
Merge branch 'urgent.2014.06.23a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/urgent
Pull RCU fixes from Paul E. McKenney:

"This series includes the following:

1. Export a pair of debug-object interfaces for RCU that will
   allow the slab allocators to avoid a recursion bug located
   by Sasha Levin. Strictly speaking, this is not a regression,
   but it would be good to enable the fix. (A hedged usage sketch
   of the exported pair follows the sign-off below.)

2. Address a serious performance regression on an open/close
   micro-benchmark located by Dave Hansen. The offending commit
   is ac1bea85781e (Make cond_resched() report RCU quiescent
   states). (An illustration of the removed pattern follows the
   diff below.)"
Signed-off-by: Ingo Molnar <mingo@kernel.org>
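
The pair exported here, init_rcu_head() and destroy_rcu_head(), complements the existing *_on_stack() variants: it lets code that manages rcu_head lifetimes by hand keep debug-objects tracking consistent without recursing back into the slab allocator. Below is a minimal sketch of one plausible call pattern; struct foo and its helper functions are hypothetical names for illustration, not code from this commit:

```c
/*
 * Hedged sketch only: struct foo, foo_alloc(), foo_free_cb(), and
 * foo_release() are hypothetical. It shows how the newly exported
 * init_rcu_head()/destroy_rcu_head() pair might bracket an rcu_head
 * embedded in a heap object, mirroring what the *_on_stack() helpers
 * already do for on-stack rcu_head structures.
 */
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int data;
	struct rcu_head rcu;		/* handed to call_rcu() on release */
};

static struct foo *foo_alloc(void)
{
	struct foo *fp = kmalloc(sizeof(*fp), GFP_KERNEL);

	if (fp)
		init_rcu_head(&fp->rcu);	/* register with debug-objects */
	return fp;
}

static void foo_free_cb(struct rcu_head *head)
{
	struct foo *fp = container_of(head, struct foo, rcu);

	destroy_rcu_head(&fp->rcu);	/* drop tracking before the memory goes away */
	kfree(fp);
}

static void foo_release(struct foo *fp)
{
	call_rcu(&fp->rcu, foo_free_cb);	/* free only after a grace period */
}
```

With CONFIG_DEBUG_OBJECTS_RCU_HEAD disabled, both helpers compile to empty inlines (the #else branch in the diff below), so such bracketing costs nothing in production builds.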
Diffstat (limited to 'include')
-rw-r--r-- | include/linux/rcupdate.h | 46
1 file changed, 10 insertions(+), 36 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 5a75d19aa661..6a94cc8b1ca0 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -44,7 +44,6 @@
 #include <linux/debugobjects.h>
 #include <linux/bug.h>
 #include <linux/compiler.h>
-#include <linux/percpu.h>
 #include <asm/barrier.h>
 
 extern int rcu_expedited; /* for sysctl */
@@ -300,41 +299,6 @@ bool __rcu_is_watching(void);
 #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
 
 /*
- * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings.
- */
-
-#define RCU_COND_RESCHED_LIM 256	/* ms vs. 100s of ms. */
-DECLARE_PER_CPU(int, rcu_cond_resched_count);
-void rcu_resched(void);
-
-/*
- * Is it time to report RCU quiescent states?
- *
- * Note unsynchronized access to rcu_cond_resched_count. Yes, we might
- * increment some random CPU's count, and possibly also load the result from
- * yet another CPU's count. We might even clobber some other CPU's attempt
- * to zero its counter. This is all OK because the goal is not precision,
- * but rather reasonable amortization of rcu_note_context_switch() overhead
- * and extremely high probability of avoiding RCU CPU stall warnings.
- * Note that this function has to be preempted in just the wrong place,
- * many thousands of times in a row, for anything bad to happen.
- */
-static inline bool rcu_should_resched(void)
-{
-	return raw_cpu_inc_return(rcu_cond_resched_count) >=
-	       RCU_COND_RESCHED_LIM;
-}
-
-/*
- * Report quiescent states to RCU if it is time to do so.
- */
-static inline void rcu_cond_resched(void)
-{
-	if (unlikely(rcu_should_resched()))
-		rcu_resched();
-}
-
-/*
  * Infrastructure to implement the synchronize_() primitives in
  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
  */
@@ -358,9 +322,19 @@ void wait_rcu_gp(call_rcu_func_t crf);
  * initialization.
  */
 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
+void init_rcu_head(struct rcu_head *head);
+void destroy_rcu_head(struct rcu_head *head);
 void init_rcu_head_on_stack(struct rcu_head *head);
 void destroy_rcu_head_on_stack(struct rcu_head *head);
 #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+static inline void init_rcu_head(struct rcu_head *head)
+{
+}
+
+static inline void destroy_rcu_head(struct rcu_head *head)
+{
+}
+
 static inline void init_rcu_head_on_stack(struct rcu_head *head)
 {
 }
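
For the second fix, the middle hunk above deletes rcu_cond_resched() and its per-CPU counter, the fast-path machinery that showed up in Dave Hansen's open/close micro-benchmark. The underlying idea, count cheaply on every call and pay the expensive operation only once per RCU_COND_RESCHED_LIM calls, is shown by the standalone userspace analogue below; it illustrates the amortization pattern only and is not kernel code:

```c
/*
 * Userspace analogue of the removed rcu_cond_resched() amortization:
 * a cheap counter on the fast path, with the expensive slow path
 * taken only once every LIM calls. Illustration only; the printf
 * stands in for reporting an RCU quiescent state.
 */
#include <stdio.h>

#define LIM 256			/* mirrors RCU_COND_RESCHED_LIM above */

static int count;		/* the kernel version used a racy per-CPU counter */

static void slow_path(void)
{
	printf("report quiescent state (expensive)\n");
}

static void cheap_hook(void)
{
	/* Fast path: one increment and one compare per call. */
	if (++count >= LIM) {
		count = 0;
		slow_path();
	}
}

int main(void)
{
	for (int i = 0; i < 1000; i++)
		cheap_hook();	/* slow path fires 1000/256 = 3 times */
	return 0;
}
```

Even this one-increment fast path proved too expensive for cond_resched()'s hottest callers, which is why the series removes the hooks from include/linux/rcupdate.h entirely.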