author     Paul E. McKenney                          2015-06-11 14:50:22 -0700
committer  Paul E. McKenney                          2015-07-17 14:58:42 -0700
commit     9b683874504a57cfa97558d403c75e286e20c9ce  (patch)
tree       7feefb3d2a33e5bf0a6ff9b96c06fdf5db215eba
parent     13bd64947f53ba8d7199922be94b6626b8e222d7  (diff)
rcu: Stop disabling CPU hotplug in synchronize_rcu_expedited()
The fact that tasks could be migrated from leaf to root rcu_node
structures meant that synchronize_rcu_expedited() had to disable
CPU hotplug.  However, tasks now stay put, so this commit removes
the CPU-hotplug disabling from synchronize_rcu_expedited().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
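[Editorial note] To make the simplification concrete, here is a minimal
userspace sketch, not kernel code, of the bracketing pattern this patch
removes: try_pin(), unpin(), and slow_path() are hypothetical stand-ins for
try_get_online_cpus(), put_online_cpus(), and wait_rcu_gp(), and the
always-succeeding pin is an assumption of the sketch.  The point is that
every exit path has to drop the reference, which is exactly the clutter
deleted below.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t exp_mutex = PTHREAD_MUTEX_INITIALIZER;
static atomic_int pin_count;		/* models the hotplug refcount */

static bool try_pin(void)		/* stand-in for try_get_online_cpus() */
{
	/* The real call fails when a CPU-hotplug operation is in
	 * flight; this sketch always succeeds. */
	atomic_fetch_add(&pin_count, 1);
	return true;
}

static void unpin(void)			/* stand-in for put_online_cpus() */
{
	atomic_fetch_sub(&pin_count, 1);
}

static void slow_path(void)		/* stand-in for wait_rcu_gp() */
{
	puts("falling back to a normal grace period");
}

static void expedited(void)
{
	if (!try_pin()) {		/* hotplug in flight: punt */
		slow_path();
		return;
	}
	if (pthread_mutex_trylock(&exp_mutex) != 0) {
		unpin();		/* early exit must drop the pin... */
		slow_path();
		return;
	}

	puts("running the expedited machinery");

	pthread_mutex_unlock(&exp_mutex);
	unpin();			/* ...and so must the success path */
}

int main(void)
{
	expedited();
	return 0;
}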
-rw-r--r--	kernel/rcu/tree_plugin.h	25
1 file changed, 2 insertions, 23 deletions
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 5dac0a10a985..7234f03e0aa2 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -728,42 +728,23 @@ void synchronize_rcu_expedited(void)
 	smp_mb(); /* Above access cannot bleed into critical section. */
 
 	/*
-	 * Block CPU-hotplug operations.  This means that any CPU-hotplug
-	 * operation that finds an rcu_node structure with tasks in the
-	 * process of being boosted will know that all tasks blocking
-	 * this expedited grace period will already be in the process of
-	 * being boosted.  This simplifies the process of moving tasks
-	 * from leaf to root rcu_node structures.
-	 */
-	if (!try_get_online_cpus()) {
-		/* CPU-hotplug operation in flight, fall back to normal GP. */
-		wait_rcu_gp(call_rcu);
-		return;
-	}
-
-	/*
 	 * Acquire lock, falling back to synchronize_rcu() if too many
 	 * lock-acquisition failures.  Of course, if someone does the
 	 * expedited grace period for us, just leave.
 	 */
 	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
 		if (ULONG_CMP_LT(snap,
-				 READ_ONCE(sync_rcu_preempt_exp_count))) {
-			put_online_cpus();
+				 READ_ONCE(sync_rcu_preempt_exp_count)))
 			goto mb_ret; /* Others did our work for us. */
-		}
 		if (trycount++ < 10) {
 			udelay(trycount * num_online_cpus());
 		} else {
-			put_online_cpus();
 			wait_rcu_gp(call_rcu);
 			return;
 		}
 	}
-	if (ULONG_CMP_LT(snap, READ_ONCE(sync_rcu_preempt_exp_count))) {
-		put_online_cpus();
+	if (ULONG_CMP_LT(snap, READ_ONCE(sync_rcu_preempt_exp_count)))
 		goto unlock_mb_ret; /* Others did our work for us. */
-	}
 
 	/* force all RCU readers onto ->blkd_tasks lists. */
 	synchronize_sched_expedited();
@@ -779,8 +760,6 @@ void synchronize_rcu_expedited(void)
 	rcu_for_each_leaf_node(rsp, rnp)
 		sync_rcu_preempt_exp_init2(rsp, rnp);
 
-	put_online_cpus();
-
 	/* Wait for snapshotted ->blkd_tasks lists to drain. */
 	rnp = rcu_get_root(rsp);
 	wait_event(sync_rcu_preempt_exp_wq,
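
[Editorial note] The loop the patch leaves behind is a trylock-with-bounded-
retries fallback: retry briefly under contention, bail out early if someone
else's grace period already covers us, and fall back to the slow path after
ten failures.  Below is a minimal userspace sketch of that shape, not kernel
code: exp_count, slow_path(), and the delay constants are illustrative
stand-ins for sync_rcu_preempt_exp_count, wait_rcu_gp(), and udelay(), while
ulong_cmp_lt() mirrors the kernel's ULONG_CMP_LT() macro.

#include <limits.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t exp_mutex = PTHREAD_MUTEX_INITIALIZER;
static atomic_ulong exp_count;		/* counts completed "grace periods" */

/* Modular "a < b", mirroring the kernel's ULONG_CMP_LT() macro. */
static bool ulong_cmp_lt(unsigned long a, unsigned long b)
{
	return ULONG_MAX / 2 < a - b;
}

static void slow_path(void)		/* stand-in for wait_rcu_gp() */
{
	puts("falling back to a normal grace period");
}

static void expedited(void)
{
	/* Done once the counter advances past this snapshot. */
	unsigned long snap = atomic_load(&exp_count) + 1;
	int trycount = 0;

	while (pthread_mutex_trylock(&exp_mutex) != 0) {
		if (ulong_cmp_lt(snap, atomic_load(&exp_count)))
			return;		/* others did our work for us */
		if (trycount++ < 10) {
			usleep(trycount);	/* brief backoff, as udelay() */
		} else {
			slow_path();	/* too contended: stop expediting */
			return;
		}
	}
	if (ulong_cmp_lt(snap, atomic_load(&exp_count))) {
		pthread_mutex_unlock(&exp_mutex);	/* recheck under lock */
		return;
	}

	puts("running the expedited machinery");	/* the real work */

	atomic_fetch_add(&exp_count, 1);
	pthread_mutex_unlock(&exp_mutex);
}

int main(void)
{
	expedited();
	return 0;
}

Note how, with the hotplug pin gone, the counter recheck and the fallback
are the only early exits, so no release call has to be threaded through
each of them.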