author     Ingo Molnar  2012-09-27 08:09:38 +0200
committer  Ingo Molnar  2012-09-27 08:09:38 +0200
commit     fa34da708cbe1e2d9a2ee7fc68ea8fccbf095d12 (patch)
tree       b20795decb064af75aae8f9f6af77806b1a4769a /kernel
parent     a9b86fab4b0a36fc4cd2712a07259c2c0e769742 (diff)
parent     cb349ca95407cbc11424d5e9fc7c8e700709041b (diff)
Merge branch 'rcu/idle' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu
Pull the RCU adaptive-idle feature from Paul E. McKenney:

 "This series adds RCU APIs that allow non-idle tasks to enter RCU idle
  mode and provides x86 code to make use of them, allowing RCU to treat
  user-mode execution as an extended quiescent state when the new
  RCU_USER_QS kernel configuration parameter is specified.  Work is in
  progress to port this to a few other architectures, but is not part
  of this series."

Signed-off-by: Ingo Molnar <mingo@kernel.org>
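For illustration, the new API pair is meant to bracket user-mode execution from an architecture's kernel entry and exit paths. A minimal hedged sketch with hypothetical hook names (only rcu_user_enter()/rcu_user_exit() come from this series; the real x86 wiring lands in separate patches):

	/* Hypothetical arch glue; hook names are illustrative. */
	#include <linux/rcupdate.h>

	void arch_kernel_entry(void)
	{
		rcu_user_exit();	/* leave the user extended QS */
		/* ... kernel code may now use RCU read-side sections ... */
	}

	void arch_return_to_user(void)
	{
		rcu_user_enter();	/* no RCU use allowed past this point */
	}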
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/rcutree.c        | 210
-rw-r--r--  kernel/rcutree.h        |   4
-rw-r--r--  kernel/rcutree_plugin.h |  20
-rw-r--r--  kernel/sched/core.c     |  17
4 files changed, 224 insertions(+), 27 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 7387e46009d9..4fb2376ddf06 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -206,6 +206,9 @@ EXPORT_SYMBOL_GPL(rcu_note_context_switch);
DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
.dynticks = ATOMIC_INIT(1),
+#if defined(CONFIG_RCU_USER_QS) && !defined(CONFIG_RCU_USER_QS_FORCE)
+ .ignore_user_qs = true,
+#endif
};
static int blimit = 10; /* Maximum callbacks per rcu_do_batch. */
@@ -322,16 +325,17 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
}
/*
- * rcu_idle_enter_common - inform RCU that current CPU is moving towards idle
+ * rcu_eqs_enter_common - current CPU is moving towards extended quiescent state
*
* If the new value of the ->dynticks_nesting counter now is zero,
* we really have entered idle, and must do the appropriate accounting.
* The caller must have disabled interrupts.
*/
-static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
+static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
+ bool user)
{
trace_rcu_dyntick("Start", oldval, 0);
- if (!is_idle_task(current)) {
+ if (!user && !is_idle_task(current)) {
struct task_struct *idle = idle_task(smp_processor_id());
trace_rcu_dyntick("Error on entry: not idle task", oldval, 0);
@@ -348,7 +352,7 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
/*
- * The idle task is not permitted to enter the idle loop while
+ * It is illegal to enter an extended quiescent state while
* in an RCU read-side critical section.
*/
rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
@@ -359,6 +363,25 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
"Illegal idle entry in RCU-sched read-side critical section.");
}
+/*
+ * Enter an RCU extended quiescent state, which can be either the
+ * idle loop or adaptive-tickless usermode execution.
+ */
+static void rcu_eqs_enter(bool user)
+{
+ long long oldval;
+ struct rcu_dynticks *rdtp;
+
+ rdtp = &__get_cpu_var(rcu_dynticks);
+ oldval = rdtp->dynticks_nesting;
+ WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
+ if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
+ rdtp->dynticks_nesting = 0;
+ else
+ rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
+ rcu_eqs_enter_common(rdtp, oldval, user);
+}
+
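To see the nest-count arithmetic above in isolation: the task-level nesting depth lives in the upper bits of ->dynticks_nesting and is entered and exited symmetrically. A runnable toy model with made-up constants (not the kernel's real DYNTICK_* values):

	/* Toy model of the nest-count arithmetic; constants are illustrative. */
	#include <stdio.h>

	#define NEST_VALUE (1LL << 32)          /* one task-level nest step */
	#define NEST_MASK  (~(NEST_VALUE - 1))  /* task-level bits */

	static long long nesting = NEST_VALUE;  /* non-idle, depth 1 */

	static void eqs_enter(void)             /* mirrors rcu_eqs_enter() */
	{
		if ((nesting & NEST_MASK) == NEST_VALUE)
			nesting = 0;            /* outermost: now in EQS */
		else
			nesting -= NEST_VALUE;  /* nested: drop one level */
	}

	static void eqs_exit(void)              /* mirrors rcu_eqs_exit() */
	{
		if (nesting & NEST_MASK)
			nesting += NEST_VALUE;  /* still non-idle: go deeper */
		else
			nesting = NEST_VALUE;   /* leaving EQS entirely */
	}

	int main(void)
	{
		eqs_enter();
		printf("in EQS: %d\n", nesting == 0);            /* prints 1 */
		eqs_exit();
		printf("restored: %d\n", nesting == NEST_VALUE); /* prints 1 */
		return 0;
	}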
/**
* rcu_idle_enter - inform RCU that current CPU is entering idle
*
@@ -374,21 +397,70 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
void rcu_idle_enter(void)
{
unsigned long flags;
- long long oldval;
+
+ local_irq_save(flags);
+ rcu_eqs_enter(false);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(rcu_idle_enter);
+
+#ifdef CONFIG_RCU_USER_QS
+/**
+ * rcu_user_enter - inform RCU that we are resuming userspace.
+ *
+ * Enter RCU idle mode right before resuming userspace. No use of RCU
+ * is permitted between this call and rcu_user_exit(). This way the
+ * CPU doesn't need to maintain the tick for RCU maintenance purposes
+ * when the CPU runs in userspace.
+ */
+void rcu_user_enter(void)
+{
+ unsigned long flags;
struct rcu_dynticks *rdtp;
+ /*
+	 * Some contexts may involve an exception occurring in an irq,
+	 * leading to the following nesting:
+	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
+	 * This would mess up the dynticks_nesting count though. And rcu_irq_*()
+ * helpers are enough to protect RCU uses inside the exception. So
+ * just return immediately if we detect we are in an IRQ.
+ */
+ if (in_interrupt())
+ return;
+
+ WARN_ON_ONCE(!current->mm);
+
local_irq_save(flags);
rdtp = &__get_cpu_var(rcu_dynticks);
- oldval = rdtp->dynticks_nesting;
- WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
- if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
- rdtp->dynticks_nesting = 0;
- else
- rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
- rcu_idle_enter_common(rdtp, oldval);
+ if (!rdtp->ignore_user_qs && !rdtp->in_user) {
+ rdtp->in_user = true;
+ rcu_eqs_enter(true);
+ }
local_irq_restore(flags);
}
-EXPORT_SYMBOL_GPL(rcu_idle_enter);
+
+/**
+ * rcu_user_enter_after_irq - inform RCU that we are going to resume userspace
+ * after the current irq returns.
+ *
+ * This is similar to rcu_user_enter() but in the context of a non-nesting
+ * irq. After this call, RCU enters idle mode when the interrupt
+ * returns.
+ */
+void rcu_user_enter_after_irq(void)
+{
+ unsigned long flags;
+ struct rcu_dynticks *rdtp;
+
+ local_irq_save(flags);
+ rdtp = &__get_cpu_var(rcu_dynticks);
+ /* Ensure this irq is interrupting a non-idle RCU state. */
+ WARN_ON_ONCE(!(rdtp->dynticks_nesting & DYNTICK_TASK_MASK));
+ rdtp->dynticks_nesting = 1;
+ local_irq_restore(flags);
+}
+#endif /* CONFIG_RCU_USER_QS */
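A hedged sketch of how an irq-return path might use the _after_irq variant; the caller name is hypothetical and not part of this series:

	/* Hypothetical irq-exit path; names are illustrative. */
	#include <linux/rcupdate.h>
	#include <linux/ptrace.h>

	void arch_irq_exit_to_user(struct pt_regs *regs)
	{
		if (user_mode(regs))
			rcu_user_enter_after_irq(); /* EQS resumes at irq return */
		rcu_irq_exit();                     /* drops nesting to zero */
	}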
/**
* rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
@@ -420,18 +492,19 @@ void rcu_irq_exit(void)
if (rdtp->dynticks_nesting)
trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting);
else
- rcu_idle_enter_common(rdtp, oldval);
+ rcu_eqs_enter_common(rdtp, oldval, true);
local_irq_restore(flags);
}
/*
- * rcu_idle_exit_common - inform RCU that current CPU is moving away from idle
+ * rcu_eqs_exit_common - current CPU moving away from extended quiescent state
*
* If the new value of the ->dynticks_nesting counter was previously zero,
* we really have exited idle, and must do the appropriate accounting.
* The caller must have disabled interrupts.
*/
-static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
+static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
+ int user)
{
smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
atomic_inc(&rdtp->dynticks);
@@ -440,7 +513,7 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
rcu_cleanup_after_idle(smp_processor_id());
trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
- if (!is_idle_task(current)) {
+ if (!user && !is_idle_task(current)) {
struct task_struct *idle = idle_task(smp_processor_id());
trace_rcu_dyntick("Error on exit: not idle task",
@@ -452,6 +525,25 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
}
}
+/*
+ * Exit an RCU extended quiescent state, which can be either the
+ * idle loop or adaptive-tickless usermode execution.
+ */
+static void rcu_eqs_exit(bool user)
+{
+ struct rcu_dynticks *rdtp;
+ long long oldval;
+
+ rdtp = &__get_cpu_var(rcu_dynticks);
+ oldval = rdtp->dynticks_nesting;
+ WARN_ON_ONCE(oldval < 0);
+ if (oldval & DYNTICK_TASK_NEST_MASK)
+ rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
+ else
+ rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
+ rcu_eqs_exit_common(rdtp, oldval, user);
+}
+
/**
* rcu_idle_exit - inform RCU that current CPU is leaving idle
*
@@ -466,21 +558,67 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
void rcu_idle_exit(void)
{
unsigned long flags;
+
+ local_irq_save(flags);
+ rcu_eqs_exit(false);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(rcu_idle_exit);
+
+#ifdef CONFIG_RCU_USER_QS
+/**
+ * rcu_user_exit - inform RCU that we are exiting userspace.
+ *
+ * Exit RCU idle mode while entering the kernel, where it is legal
+ * to run RCU read-side critical sections at any time.
+ */
+void rcu_user_exit(void)
+{
+ unsigned long flags;
struct rcu_dynticks *rdtp;
- long long oldval;
+
+ /*
+	 * Some contexts may involve an exception occurring in an irq,
+	 * leading to the following nesting:
+	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
+	 * This would mess up the dynticks_nesting count though. And rcu_irq_*()
+ * helpers are enough to protect RCU uses inside the exception. So
+ * just return immediately if we detect we are in an IRQ.
+ */
+ if (in_interrupt())
+ return;
local_irq_save(flags);
rdtp = &__get_cpu_var(rcu_dynticks);
- oldval = rdtp->dynticks_nesting;
- WARN_ON_ONCE(oldval < 0);
- if (oldval & DYNTICK_TASK_NEST_MASK)
- rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
- else
- rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
- rcu_idle_exit_common(rdtp, oldval);
+ if (rdtp->in_user) {
+ rdtp->in_user = false;
+ rcu_eqs_exit(true);
+ }
local_irq_restore(flags);
}
-EXPORT_SYMBOL_GPL(rcu_idle_exit);
+
+/**
+ * rcu_user_exit_after_irq - inform RCU that we won't resume to userspace
+ * idle mode after the current non-nesting irq returns.
+ *
+ * This is similar to rcu_user_exit() but in the context of an irq.
+ * This is called when the irq has interrupted a userspace RCU idle mode
+ * context. When the current non-nesting interrupt returns after this call,
+ * the CPU won't restore the RCU idle mode.
+ */
+void rcu_user_exit_after_irq(void)
+{
+ unsigned long flags;
+ struct rcu_dynticks *rdtp;
+
+ local_irq_save(flags);
+ rdtp = &__get_cpu_var(rcu_dynticks);
+ /* Ensure we are interrupting an RCU idle mode. */
+ WARN_ON_ONCE(rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK);
+ rdtp->dynticks_nesting += DYNTICK_TASK_EXIT_IDLE;
+ local_irq_restore(flags);
+}
+#endif /* CONFIG_RCU_USER_QS */
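Symmetrically, an irq that interrupted user RCU-idle but will hand control back to the kernel would use the exit variant. A hedged sketch with a hypothetical caller:

	/* Hypothetical: the irq decides the CPU stays in the kernel. */
	void arch_irq_stays_in_kernel(void)
	{
		/*
		 * Restore a non-idle ->dynticks_nesting value so RCU keeps
		 * watching this CPU after the non-nesting irq returns.
		 */
		rcu_user_exit_after_irq();
	}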
/**
* rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
@@ -515,7 +653,7 @@ void rcu_irq_enter(void)
if (oldval)
trace_rcu_dyntick("++=", oldval, rdtp->dynticks_nesting);
else
- rcu_idle_exit_common(rdtp, oldval);
+ rcu_eqs_exit_common(rdtp, oldval, true);
local_irq_restore(flags);
}
@@ -579,6 +717,21 @@ int rcu_is_cpu_idle(void)
}
EXPORT_SYMBOL(rcu_is_cpu_idle);
+#ifdef CONFIG_RCU_USER_QS
+void rcu_user_hooks_switch(struct task_struct *prev,
+ struct task_struct *next)
+{
+ struct rcu_dynticks *rdtp;
+
+ /* Interrupts are disabled in context switch */
+ rdtp = &__get_cpu_var(rcu_dynticks);
+ if (!rdtp->ignore_user_qs) {
+ clear_tsk_thread_flag(prev, TIF_NOHZ);
+ set_tsk_thread_flag(next, TIF_NOHZ);
+ }
+}
+#endif /* #ifdef CONFIG_RCU_USER_QS */
+
#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
/*
@@ -2473,6 +2626,9 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
+#ifdef CONFIG_RCU_USER_QS
+ WARN_ON_ONCE(rdp->dynticks->in_user);
+#endif
rdp->cpu = cpu;
rdp->rsp = rsp;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
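rcu_user_hooks_switch() above hands TIF_NOHZ from the outgoing task to the incoming one, so only the currently running task carries the flag. A hedged sketch of how an arch syscall slow path might consume it (the real x86 hooks are in separate patches; names here are illustrative):

	/* Hypothetical syscall slow path keyed off TIF_NOHZ. */
	#include <linux/thread_info.h>
	#include <linux/rcupdate.h>

	void syscall_slowpath_enter(void)
	{
		if (test_thread_flag(TIF_NOHZ))
			rcu_user_exit();   /* kernel may use RCU from here */
	}

	void syscall_slowpath_exit(void)
	{
		if (test_thread_flag(TIF_NOHZ))
			rcu_user_enter();  /* resume the user extended QS */
	}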
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 7576fd4d8ce6..5faf05d68326 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -102,6 +102,10 @@ struct rcu_dynticks {
/* idle-period nonlazy_posted snapshot. */
int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
+#ifdef CONFIG_RCU_USER_QS
+ bool ignore_user_qs; /* Treat userspace as extended QS or not */
+ bool in_user; /* Is the CPU in userland from RCU POV? */
+#endif
};
/* RCU's kthread states for tracing. */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 9c71c1b18e03..f92115488187 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1757,6 +1757,26 @@ static void rcu_prepare_for_idle(int cpu)
if (!tne)
return;
+ /* Adaptive-tick mode, where usermode execution is idle to RCU. */
+ if (!is_idle_task(current)) {
+ rdtp->dyntick_holdoff = jiffies - 1;
+ if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
+ trace_rcu_prep_idle("User dyntick with callbacks");
+ rdtp->idle_gp_timer_expires =
+ round_up(jiffies + RCU_IDLE_GP_DELAY,
+ RCU_IDLE_GP_DELAY);
+ } else if (rcu_cpu_has_callbacks(cpu)) {
+ rdtp->idle_gp_timer_expires =
+ round_jiffies(jiffies + RCU_IDLE_LAZY_GP_DELAY);
+ trace_rcu_prep_idle("User dyntick with lazy callbacks");
+ } else {
+ return;
+ }
+ tp = &rdtp->idle_gp_timer;
+ mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
+ return;
+ }
+
/*
* If this is an idle re-entry, for example, due to use of
* RCU_NONIDLE() or the new idle-loop tracing API within the idle
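The two timer expirations in the new adaptive-tick branch differ deliberately: the non-lazy case uses round_up() so expirations from different CPUs align to shared boundaries and can be batched, while the lazy case only avoids mid-tick wakeups via round_jiffies(). A toy demonstration of the alignment (constant is illustrative, not the real RCU_IDLE_GP_DELAY):

	/* Toy arithmetic: why the non-lazy case uses round_up(). */
	#include <stdio.h>

	#define GP_DELAY 4UL  /* pretend delay in jiffies, power of two */

	/* Same effect as round_up(now + GP_DELAY, GP_DELAY). */
	static unsigned long nonlazy_expires(unsigned long now)
	{
		return (now + GP_DELAY + GP_DELAY - 1) & ~(GP_DELAY - 1);
	}

	int main(void)
	{
		/* CPUs idling at jiffies 10 and 11 both align to 16, so
		 * their grace-period timers fire together and one wakeup
		 * batch can serve both CPUs. */
		printf("%lu %lu\n", nonlazy_expires(10), nonlazy_expires(11));
		return 0;
	}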
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 1a48cdbc8631..3c4dec0594d6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2081,6 +2081,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
#endif
/* Here we just switch the register state and the stack. */
+ rcu_switch(prev, next);
switch_to(prev, next, prev);
barrier();
@@ -3468,6 +3469,21 @@ asmlinkage void __sched schedule(void)
}
EXPORT_SYMBOL(schedule);
+#ifdef CONFIG_RCU_USER_QS
+asmlinkage void __sched schedule_user(void)
+{
+ /*
+ * If we come here after a random call to set_need_resched(),
+ * or we have been woken up remotely but the IPI has not yet arrived,
+ * we haven't yet exited the RCU idle mode. Do it here manually until
+ * we find a better solution.
+ */
+ rcu_user_exit();
+ schedule();
+ rcu_user_enter();
+}
+#endif
+
/**
* schedule_preempt_disabled - called with preemption disabled
*
@@ -3569,6 +3585,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
/* Catch callers which need to be fixed */
BUG_ON(ti->preempt_count || !irqs_disabled());
+ rcu_user_exit();
do {
add_preempt_count(PREEMPT_ACTIVE);
local_irq_enable();
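Finally, schedule_user() is meant to replace direct schedule() calls on the return-to-user reschedule path. A hedged sketch of the intended call site (the actual wiring lives in arch entry code in follow-up patches; the loop shape is illustrative):

	/* Hypothetical return-to-user reschedule loop. */
	void exit_to_user_reschedule(void)
	{
		while (test_thread_flag(TIF_NEED_RESCHED))
			schedule_user(); /* exits/re-enters RCU user QS */
	}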