path: root/kernel/rcu/tree.c
author		Paul E. McKenney	2015-05-01 13:01:38 -0700
committer	Paul E. McKenney	2015-05-27 12:58:03 -0700
commit		30ff1533b8f75255bdf02bc3361f1c558138f471 (patch)
tree		7785fb47d3d21d1d61efa583a4d05eaa9e1a40bb /kernel/rcu/tree.c
parent		5af4692a75daf08dddc93dbb4cd2a1b3d3b617af (diff)
rcu: Make synchronize_sched_expedited() call wait_rcu_gp()
Currently, synchronize_sched_expedited() will call synchronize_sched() if there is danger of counter wrap. But if configuration says to always do expedited grace periods, synchronize_sched() will just call synchronize_sched_expedited() right back again. In theory, the old expedited operations will complete, the counters will get back in synch, and the recursion will end. But we could easily run out of stack long before that time.

This commit therefore makes synchronize_sched_expedited() invoke the underlying wait_rcu_gp(call_rcu_sched) instead of synchronize_sched(), the same as all the other calls out from synchronize_sched_expedited().

This bug was introduced by commit 1924bcb02597 (Avoid counter wrap in synchronize_sched_expedited()).

Reported-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
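For context, the recursion described above comes from synchronize_sched() redirecting to the expedited path whenever expedited grace periods are forced. The following is a minimal sketch of that dispatch, not a verbatim copy of the kernel source; the exact predicate (rcu_gp_is_expedited() versus a direct rcu_expedited check) varies by kernel version:

	/*
	 * Sketch of the normal-grace-period entry point around the time of
	 * this commit.  When expedited grace periods are forced (e.g. via
	 * the rcu_expedited knob), synchronize_sched() hands off to
	 * synchronize_sched_expedited() -- which is exactly where the
	 * counter-wrap fallback called from, closing the recursion loop.
	 */
	void synchronize_sched(void)
	{
		if (rcu_gp_is_expedited())
			synchronize_sched_expedited();	/* may recurse back into the caller */
		else
			wait_rcu_gp(call_rcu_sched);	/* queue a callback, wait for the GP */
	}

Calling wait_rcu_gp(call_rcu_sched) directly from the counter-wrap path does what the non-expedited branch would have done, without ever re-entering the expedited check.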
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--	kernel/rcu/tree.c	|  2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 52f064ac7b49..f02830c85ec2 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3290,7 +3290,7 @@ void synchronize_sched_expedited(void)
 	if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
 			 (ulong)atomic_long_read(&rsp->expedited_done) +
 			 ULONG_MAX / 8)) {
-		synchronize_sched();
+		wait_rcu_gp(call_rcu_sched);
 		atomic_long_inc(&rsp->expedited_wrap);
 		return;
 	}
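The replacement call waits for a normal grace period without going back through synchronize_sched(). Roughly, wait_rcu_gp() posts an RCU callback through the function it is given and blocks on a completion until that callback runs. A hedged sketch of that mechanism, approximating kernel/rcu/update.c of this era:

	/* Sketch of the wait_rcu_gp() mechanism (see kernel/rcu/update.c). */
	struct rcu_synchronize {
		struct rcu_head head;
		struct completion completion;
	};

	/* RCU callback: invoked once a full grace period has elapsed. */
	static void wakeme_after_rcu(struct rcu_head *head)
	{
		struct rcu_synchronize *rcu =
			container_of(head, struct rcu_synchronize, head);

		complete(&rcu->completion);
	}

	void wait_rcu_gp(call_rcu_func_t crf)
	{
		struct rcu_synchronize rcu;

		init_rcu_head_on_stack(&rcu.head);
		init_completion(&rcu.completion);
		crf(&rcu.head, wakeme_after_rcu);	/* here: call_rcu_sched() */
		wait_for_completion(&rcu.completion);	/* block until the grace period ends */
		destroy_rcu_head_on_stack(&rcu.head);
	}

Because the wait is driven entirely by call_rcu_sched() and a completion, the counter-wrap fallback can no longer bounce back into synchronize_sched_expedited() and exhaust the stack.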