author     Paul E. McKenney  2017-01-26 13:45:38 -0800
committer  Paul E. McKenney  2017-04-18 11:38:17 -0700
commit     abb06b99484a9f5af05c7147c289faf835f68e8e (patch)
tree       ff5a96680bf5e7049ab9a8ee9c687fc88ca59eef /kernel/rcu
parent     88a4976d0e37c0797ff3e6579a5f91cb7dced90d (diff)
rcu: Pull rcu_sched_qs_mask into rcu_dynticks structure
The rcu_sched_qs_mask variable is yet another isolated per-CPU variable,
so this commit pulls it into the pre-existing rcu_dynticks per-CPU
structure.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
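For readers less familiar with the accessors involved, the diff below relies on the fact that a member of a DEFINE_PER_CPU() structure is read and written with the same raw_cpu_read()/raw_cpu_write()/per_cpu() helpers as a standalone per-CPU variable, only with the member selector spelled out. A minimal sketch of that idiom follows; the structure and function names are illustrative, not from this commit:

#include <linux/percpu.h>

/* Hypothetical consolidated per-CPU state, standing in for rcu_dynticks. */
struct consolidated_state {
	atomic_t dynticks;	/* Pre-existing member. */
	int qs_mask;		/* Was a standalone per-CPU int. */
};
static DEFINE_PER_CPU(struct consolidated_state, consolidated_state);

static void poll_and_clear_mask(void)
{
	int mask;

	/* Standalone variable:  mask = raw_cpu_read(qs_mask);                  */
	/* Structure member:     mask = raw_cpu_read(consolidated_state.qs_mask); */
	mask = raw_cpu_read(consolidated_state.qs_mask);
	if (mask)
		raw_cpu_write(consolidated_state.qs_mask, 0);

	/* Remote access works the same way, e.g. per_cpu(consolidated_state.qs_mask, cpu). */
}

With the field folded into the structure, the accesses in tree.c simply gain the rcu_dynticks. prefix, which is exactly what the hunks below show.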
Diffstat (limited to 'kernel/rcu')
-rw-r--r--  kernel/rcu/tree.c  12
-rw-r--r--  kernel/rcu/tree.h   1
2 files changed, 6 insertions, 7 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 3747277aae67..3a0703035874 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -272,8 +272,6 @@ void rcu_bh_qs(void)
 	}
 }
 
-static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
-
 /*
  * Steal a bit from the bottom of ->dynticks for idle entry/exit
  * control. Initially this is for TLB flushing.
@@ -464,8 +462,8 @@ static void rcu_momentary_dyntick_idle(void)
 	 * Yes, we can lose flag-setting operations. This is OK, because
 	 * the flag will be set again after some delay.
 	 */
-	resched_mask = raw_cpu_read(rcu_sched_qs_mask);
-	raw_cpu_write(rcu_sched_qs_mask, 0);
+	resched_mask = raw_cpu_read(rcu_dynticks.rcu_sched_qs_mask);
+	raw_cpu_write(rcu_dynticks.rcu_sched_qs_mask, 0);
 
 	/* Find the flavor that needs a quiescent state. */
 	for_each_rcu_flavor(rsp) {
@@ -499,7 +497,7 @@ void rcu_note_context_switch(void)
trace_rcu_utilization(TPS("Start context switch"));
rcu_sched_qs();
rcu_preempt_note_context_switch();
- if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
+ if (unlikely(raw_cpu_read(rcu_dynticks.rcu_sched_qs_mask)))
rcu_momentary_dyntick_idle();
trace_rcu_utilization(TPS("End context switch"));
barrier(); /* Avoid RCU read-side critical sections leaking up. */
@@ -524,7 +522,7 @@ void rcu_all_qs(void)
 	unsigned long flags;
 
 	barrier(); /* Avoid RCU read-side critical sections leaking down. */
-	if (unlikely(raw_cpu_read(rcu_sched_qs_mask))) {
+	if (unlikely(raw_cpu_read(rcu_dynticks.rcu_sched_qs_mask))) {
 		local_irq_save(flags);
 		rcu_momentary_dyntick_idle();
 		local_irq_restore(flags);
@@ -1351,7 +1349,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
 	 * is set too high, we override with half of the RCU CPU stall
 	 * warning delay.
 	 */
-	rcrmp = &per_cpu(rcu_sched_qs_mask, rdp->cpu);
+	rcrmp = &per_cpu(rcu_dynticks.rcu_sched_qs_mask, rdp->cpu);
 	if (time_after(jiffies, rdp->rsp->gp_start + jtsq) ||
 	    time_after(jiffies, rdp->rsp->jiffies_resched)) {
 		if (!(READ_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 7468b4de7e0c..e298281984dc 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -113,6 +113,7 @@ struct rcu_dynticks {
 				    /* Process level is worth LLONG_MAX/2. */
 	int dynticks_nmi_nesting;	    /* Track NMI nesting level. */
 	atomic_t dynticks;		    /* Even value for idle, else odd. */
+	int rcu_sched_qs_mask;		    /* GP old, need quiescent state. */
 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
 	long long dynticks_idle_nesting;
 				    /* irq/process nesting level from idle. */