author     Thomas Gleixner          2010-06-16 16:58:34 +0200
committer  Ingo Molnar              2011-09-13 11:12:00 +0200
commit     2737c49f29a29f3d3645ba0778aa7a8798f32249 (patch)
tree       28a56a7f4b31abe2ef79ff535755677781a70c3a /kernel/time
parent     ddb6c9b58a19edcfac93ac670b066c836ff729f1 (diff)
locking, timer_stats: Annotate table_lock as raw
The table_lock lock can be taken in atomic context and therefore
cannot be preempted on -rt - annotate it.

In mainline this change documents the low level nature of the
lock - otherwise there's no functional difference. Lockdep and
Sparse checking will work as usual.

Reported-by: Andreas Sundebo <kernel@sundebo.dk>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Andreas Sundebo <kernel@sundebo.dk>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
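For context: on PREEMPT_RT a spinlock_t is substituted by a sleeping
lock and therefore must not be acquired in hard atomic context, while a
raw_spinlock_t keeps the classic preemption-disabling, busy-waiting
semantics on every configuration. A minimal sketch of the annotation
pattern this patch applies - example_lock, example_count and
example_update() are hypothetical names, not part of the patch:

#include <linux/spinlock.h>

/*
 * On PREEMPT_RT, spinlock_t becomes a sleeping lock and must not be
 * taken in hard atomic context. raw_spinlock_t always spins with
 * preemption disabled, which is what low level code needs.
 */
static DEFINE_RAW_SPINLOCK(example_lock);	/* hypothetical */
static int example_count;			/* hypothetical shared state */

/* May be called from atomic context, e.g. a timer callback: */
static void example_update(void)
{
	raw_spin_lock(&example_lock);
	example_count++;
	raw_spin_unlock(&example_lock);
}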
Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/timer_stats.c  6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index a5d0a3a85dd8..0b537f27b559 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -81,7 +81,7 @@ struct entry {
 /*
  * Spinlock protecting the tables - not taken during lookup:
  */
-static DEFINE_SPINLOCK(table_lock);
+static DEFINE_RAW_SPINLOCK(table_lock);
 
 /*
  * Per-CPU lookup locks for fast hash lookup:
@@ -188,7 +188,7 @@ static struct entry *tstat_lookup(struct entry *entry, char *comm)
 	prev = NULL;
 	curr = *head;
 
-	spin_lock(&table_lock);
+	raw_spin_lock(&table_lock);
 	/*
 	 * Make sure we have not raced with another CPU:
 	 */
@@ -215,7 +215,7 @@ static struct entry *tstat_lookup(struct entry *entry, char *comm)
 		*head = curr;
 	}
 out_unlock:
-	spin_unlock(&table_lock);
+	raw_spin_unlock(&table_lock);
 
 	return curr;
 }
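As the "not taken during lookup" comment above notes, tstat_lookup()
walks the hash chain locklessly and only takes table_lock on the
insertion slowpath, re-checking the chain under the lock in case
another CPU inserted the entry first. A compressed, hypothetical sketch
of that pattern - struct item and lookup_or_insert() are illustrative
only; the real code additionally uses per-CPU lookup locks and a
bounded entry pool:

#include <linux/spinlock.h>

struct item {
	struct item *next;
	int key;
};

static DEFINE_RAW_SPINLOCK(item_table_lock);	/* hypothetical */

static struct item *lookup_or_insert(struct item **head, struct item *new)
{
	struct item *curr;

	/*
	 * Lockless fastpath: as in timer_stats, racing with a
	 * concurrent insertion is tolerated for a pure read.
	 */
	for (curr = *head; curr; curr = curr->next)
		if (curr->key == new->key)
			return curr;

	raw_spin_lock(&item_table_lock);
	/* Make sure we have not raced with another CPU: */
	for (curr = *head; curr; curr = curr->next)
		if (curr->key == new->key)
			goto out_unlock;

	/* Link the caller-provided entry at the head of the chain: */
	new->next = *head;
	*head = new;
	curr = new;
 out_unlock:
	raw_spin_unlock(&item_table_lock);
	return curr;
}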