From 24ef659a857c3cba40b64ea51ea4fce8d2fb7bbc Mon Sep 17 00:00:00 2001
From: Paul E. McKenney
Date: Mon, 28 Oct 2013 09:22:24 -0700
Subject: rcu: Provide better diagnostics for blocking in RCU callback functions

Currently blocking in an RCU callback function will result in
"scheduling while atomic", which could be triggered for any number
of reasons.  To aid debugging, this patch introduces a rcu_callback_map
that is used to tie the inappropriate voluntary context switch back
to the fact that the function is being invoked from within a callback.

Signed-off-by: Paul E. McKenney
---
 kernel/rcu/rcu.h    | 3 +++
 kernel/rcu/update.c | 5 +++++
 2 files changed, 8 insertions(+)

(limited to 'kernel/rcu')

diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 7859a0a3951e..a8f981a2d110 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -102,13 +102,16 @@ static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
 {
 	unsigned long offset = (unsigned long)head->func;
 
+	rcu_lock_acquire(&rcu_callback_map);
 	if (__is_kfree_rcu_offset(offset)) {
 		RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset));
 		kfree((void *)head - offset);
+		rcu_lock_release(&rcu_callback_map);
 		return 1;
 	} else {
 		RCU_TRACE(trace_rcu_invoke_callback(rn, head));
 		head->func(head);
+		rcu_lock_release(&rcu_callback_map);
 		return 0;
 	}
 }

diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 6cb3dff89e2b..802365ccd591 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -128,6 +128,11 @@ struct lockdep_map rcu_sched_lock_map =
 	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
 EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
 
+static struct lock_class_key rcu_callback_key;
+struct lockdep_map rcu_callback_map =
+	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
+EXPORT_SYMBOL_GPL(rcu_callback_map);
+
 int notrace debug_lockdep_rcu_enabled(void)
 {
 	return rcu_scheduler_active && debug_locks &&
--
cgit v1.2.3
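
A minimal sketch of the kind of bug this diagnostic is aimed at, assuming a
hypothetical driver with a struct foo, foo_mutex, and foo_reclaim() callback
(none of these appear in the patch; they are illustrative only):

	#include <linux/slab.h>
	#include <linux/mutex.h>
	#include <linux/rcupdate.h>

	struct foo {
		struct rcu_head rcu;
		/* ... payload ... */
	};

	static DEFINE_MUTEX(foo_mutex);

	/* BUGGY: RCU callbacks run in softirq context and must not block. */
	static void foo_reclaim(struct rcu_head *rhp)
	{
		struct foo *fp = container_of(rhp, struct foo, rcu);

		mutex_lock(&foo_mutex);		/* may sleep: illegal here */
		/* ... teardown under the lock ... */
		mutex_unlock(&foo_mutex);
		kfree(fp);
	}

	/* The callback is queued in the usual way: */
	/*	call_rcu(&fp->rcu, foo_reclaim); */

Because __rcu_reclaim() now holds rcu_callback_map across the head->func()
invocation, lockdep can associate the voluntary context switch inside
foo_reclaim() with the fact that an RCU callback is running, instead of
emitting only a generic "scheduling while atomic" report.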