author     Paul E. McKenney    2017-10-04 16:24:29 -0700
committer  Paul E. McKenney    2017-11-28 15:51:19 -0800
commit     dec98900eae1e22467182e58688abe5fae98bd5f (patch)
tree       abee049d6133dc260ca797e4ae2060d91f610491 /kernel/rcu
parent     84585aa8b6ad24e5bdfba9db4a320a6aeed192ab (diff)
rcu: Add ->dynticks field to rcu_dyntick trace event
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcu')
-rw-r--r--   kernel/rcu/tree.c   16
1 file changed, 7 insertions(+), 9 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 92de3bacda07..5febb76809f6 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -761,13 +761,13 @@ static void rcu_eqs_enter_common(bool user)
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
lockdep_assert_irqs_disabled();
- trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0);
+ trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0, rdtp->dynticks);
if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
!user && !is_idle_task(current)) {
struct task_struct *idle __maybe_unused =
idle_task(smp_processor_id());
- trace_rcu_dyntick(TPS("Error on entry: not idle task"), rdtp->dynticks_nesting, 0);
+ trace_rcu_dyntick(TPS("Error on entry: not idle task"), rdtp->dynticks_nesting, 0, rdtp->dynticks);
rcu_ftrace_dump(DUMP_ORIG);
WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
current->pid, current->comm,
@@ -880,15 +880,14 @@ void rcu_nmi_exit(void)
* leave it in non-RCU-idle state.
*/
if (rdtp->dynticks_nmi_nesting != 1) {
- trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nmi_nesting,
- rdtp->dynticks_nmi_nesting - 2);
+ trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nmi_nesting, rdtp->dynticks_nmi_nesting - 2, rdtp->dynticks);
WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* No store tearing. */
rdtp->dynticks_nmi_nesting - 2);
return;
}
/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
- trace_rcu_dyntick(TPS("Startirq"), rdtp->dynticks_nmi_nesting, 0);
+ trace_rcu_dyntick(TPS("Startirq"), rdtp->dynticks_nmi_nesting, 0, rdtp->dynticks);
WRITE_ONCE(rdtp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
rcu_dynticks_eqs_enter();
}
@@ -953,14 +952,13 @@ static void rcu_eqs_exit_common(long newval, int user)
rcu_dynticks_task_exit();
rcu_dynticks_eqs_exit();
rcu_cleanup_after_idle();
- trace_rcu_dyntick(TPS("End"), rdtp->dynticks_nesting, newval);
+ trace_rcu_dyntick(TPS("End"), rdtp->dynticks_nesting, newval, rdtp->dynticks);
if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
!user && !is_idle_task(current)) {
struct task_struct *idle __maybe_unused =
idle_task(smp_processor_id());
- trace_rcu_dyntick(TPS("Error on exit: not idle task"),
- rdtp->dynticks_nesting, newval);
+ trace_rcu_dyntick(TPS("Error on exit: not idle task"), rdtp->dynticks_nesting, newval, rdtp->dynticks);
rcu_ftrace_dump(DUMP_ORIG);
WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
current->pid, current->comm,
@@ -1062,7 +1060,7 @@ void rcu_nmi_enter(void)
}
trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
rdtp->dynticks_nmi_nesting,
- rdtp->dynticks_nmi_nesting + incby);
+ rdtp->dynticks_nmi_nesting + incby, rdtp->dynticks);
WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* Prevent store tearing. */
rdtp->dynticks_nmi_nesting + incby);
barrier();
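
The hunks above add a fourth argument, rdtp->dynticks, to every trace_rcu_dyntick() call so that the trace event records the per-CPU ->dynticks counter in addition to the nesting transition. The matching change to the rcu_dyntick event definition lives in include/trace/events/rcu.h and therefore falls outside this diffstat (which is limited to kernel/rcu). As a rough sketch only, the extended event plausibly looks like the following; the field names, the atomic_read() in TP_fast_assign(), and the printk format are assumptions for illustration, not text copied from the commit:

/*
 * Sketch of the updated rcu_dyntick trace event (assumed, not part of
 * this diff): a fourth parameter carries the ->dynticks counter value.
 */
TRACE_EVENT(rcu_dyntick,

	TP_PROTO(const char *polarity, long oldnesting, long newnesting,
		 atomic_t dynticks),

	TP_ARGS(polarity, oldnesting, newnesting, dynticks),

	TP_STRUCT__entry(
		__field(const char *, polarity)
		__field(long, oldnesting)
		__field(long, newnesting)
		__field(int, dynticks)
	),

	TP_fast_assign(
		__entry->polarity = polarity;
		__entry->oldnesting = oldnesting;
		__entry->newnesting = newnesting;
		/* Snapshot the counter at trace time. */
		__entry->dynticks = atomic_read(&dynticks);
	),

	TP_printk("%s %lx %lx %#3x", __entry->polarity,
		  __entry->oldnesting, __entry->newnesting,
		  __entry->dynticks & 0xfff)
);

With the extra field, enabling the event (for example via tracefs: echo 1 > /sys/kernel/debug/tracing/events/rcu/rcu_dyntick/enable) shows the low-order bits of the ->dynticks counter alongside each idle-entry/exit and NMI nesting transition, which makes it easier to correlate trace output with the CPU's actual eqs state.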