Diffstat (limited to 'kernel/time/timekeeping.c')
 kernel/time/timekeeping.c | 19 ++++---------------
 1 file changed, 4 insertions(+), 15 deletions(-)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index d62682b6df4a..44b7e6bb081b 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1360,7 +1360,7 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk)
  * update_wall_time - Uses the current clocksource to increment the wall time
  *
  */
-static void update_wall_time(void)
+void update_wall_time(void)
 {
         struct clocksource *clock;
         struct timekeeper *real_tk = &timekeeper;
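Note that dropping static exports update_wall_time() to the rest of the time subsystem. This view is limited to timekeeping.c, so the matching declaration is not shown; presumably the full patch adds something along these lines to a shared internal header (the header choice below is an assumption, not taken from this diff):

/* Declaration assumed to live in a shared header such as
 * kernel/time/tick-internal.h; this filtered diff does not show it. */
extern void update_wall_time(void);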
@@ -1441,19 +1441,8 @@ static void update_wall_time(void)
         write_seqcount_end(&timekeeper_seq);
 out:
         raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
-        if (clock_set) {
-                /*
-                 * XXX - I'd rather we just call clock_was_set(), but
-                 * since we're currently holding the jiffies lock, calling
-                 * clock_was_set would trigger an ipi which would then grab
-                 * the jiffies lock and we'd deadlock. :(
-                 * The right solution should probably be dropping
-                 * the jiffies lock before calling update_wall_time
-                 * but that requires some rework of the tick sched
-                 * code.
-                 */
-                clock_was_set_delayed();
-        }
+        if (clock_set)
+                clock_was_set();
 }

 /**
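The removed XXX comment captures the ordering problem this change untangles: clock_was_set() sends an IPI whose handler takes jiffies_lock, so calling it while jiffies_lock is held deadlocks, which is why the code had to fall back to clock_was_set_delayed(). A minimal userspace sketch of the same pattern, collapsed onto one thread for illustration (tick_lock and notify_listeners are hypothetical stand-ins for jiffies_lock and the IPI path, not kernel API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tick_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for clock_was_set(): the notification path needs tick_lock. */
static void notify_listeners(void)
{
        pthread_mutex_lock(&tick_lock); /* deadlocks: caller already holds it */
        puts("listeners notified");
        pthread_mutex_unlock(&tick_lock);
}

int main(void)
{
        pthread_mutex_lock(&tick_lock); /* like write_seqlock(&jiffies_lock) */
        notify_listeners();             /* never returns */
        pthread_mutex_unlock(&tick_lock);
        return 0;
}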
@@ -1598,7 +1587,6 @@ struct timespec get_monotonic_coarse(void)
 void do_timer(unsigned long ticks)
 {
         jiffies_64 += ticks;
-        update_wall_time();
         calc_global_load(ticks);
 }
@@ -1756,4 +1744,5 @@ void xtime_update(unsigned long ticks)
         write_seqlock(&jiffies_lock);
         do_timer(ticks);
         write_sequnlock(&jiffies_lock);
+        update_wall_time();
 }
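Taken together, the last two hunks move the update_wall_time() call out from under jiffies_lock, which is exactly the "right solution" the removed comment asked for: with the lock already dropped, clock_was_set() can raise its IPI directly and the clock_was_set_delayed() workaround goes away. Reconstructed from the hunk above, xtime_update() now reads:

void xtime_update(unsigned long ticks)
{
        write_seqlock(&jiffies_lock);
        do_timer(ticks);
        write_sequnlock(&jiffies_lock);
        update_wall_time();     /* jiffies_lock released: safe to IPI */
}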