author	Paul Mackerras	2006-03-15 13:47:15 +1100
committer	Paul Mackerras	2006-03-16 16:54:55 +1100
commit	0a45d4491d0f172e02126370f312405c5d473363 (patch)
tree	f3f3465d4a24704518e7625334d78add58008b1f /arch/powerpc
parent	82dfdcae0d57c842e02f037758687eef42fb7af6 (diff)
powerpc: Fix problem with time going backwards
The recent changes to keep gettimeofday in sync with xtime had the side effect that it was occasionally possible for the time reported by gettimeofday to go back by a microsecond.

There were two reasons: (1) when we recalculated the offsets used by gettimeofday every 2^31 timebase ticks, we lost an accumulated fractional microsecond, and (2) because the update is done some time after the notional start of the jiffy, if ntp is slowing the clock, it is possible to see time go backwards when the timebase factor gets reduced.

This fixes it by (a) slowing the gettimeofday clock by about 1us in 2^31 timebase ticks (a factor of less than 1 in 3.7 million), and (b) adjusting the timebase offsets in the rare case that the gettimeofday result could possibly go backwards (i.e. when ntp is slowing the clock and the timer interrupt is late). In this case the adjustment will reduce to zero eventually because of (a).

Signed-off-by: Paul Mackerras <paulus@samba.org>
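As an aside, fix (b) can be pictured with a small, self-contained C sketch. The struct, the function names and the plain 64-bit arithmetic below are illustrative only; the kernel code does the equivalent comparison in timer_recalc_offset using mulhdu() on 0.64 fixed-point tb_to_xs scale factors (see the second and third hunks of the diff):

#include <stdint.h>

/* Illustrative model: xsec(tb) = (tb - tb_orig_stamp) * scale + stamp_xsec. */
struct gtod_vars {
	uint64_t tb_orig_stamp;		/* timebase value the mapping starts from */
	uint64_t stamp_xsec;		/* xsec value at tb_orig_stamp */
	uint64_t scale;			/* xsec per timebase tick (illustrative units) */
};

static uint64_t tb_to_xsec(const struct gtod_vars *v, uint64_t tb)
{
	return (tb - v->tb_orig_stamp) * v->scale + v->stamp_xsec;
}

/*
 * Before switching gettimeofday over to a new set of offsets, make sure
 * the new mapping never reports an earlier time than the old one would
 * at the current timebase value; if it would, bump the new stamp_xsec
 * by the difference (fix (b) above).  Because of fix (a) the new scale
 * runs fractionally slow, so any such bump decays to zero over time.
 */
static void clamp_monotonic(const struct gtod_vars *old,
			    struct gtod_vars *new, uint64_t tb_now)
{
	uint64_t xsec_old = tb_to_xsec(old, tb_now);
	uint64_t xsec_new = tb_to_xsec(new, tb_now);

	if (xsec_new < xsec_old)
		new->stamp_xsec += xsec_old - xsec_new;
}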
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/kernel/time.c  |  48
1 file changed, 34 insertions(+), 14 deletions(-)
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 2a7ddc579379..86f7e3d154d8 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -283,9 +283,9 @@ static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
 	 * the two values of tb_update_count match and are even then the
 	 * tb_to_xs and stamp_xsec values are consistent. If not, then it
 	 * loops back and reads them again until this criteria is met.
+	 * We expect the caller to have done the first increment of
+	 * vdso_data->tb_update_count already.
 	 */
-	++(vdso_data->tb_update_count);
-	smp_wmb();
 	vdso_data->tb_orig_stamp = new_tb_stamp;
 	vdso_data->stamp_xsec = new_stamp_xsec;
 	vdso_data->tb_to_xs = new_tb_to_xs;
@@ -310,20 +310,15 @@ static __inline__ void timer_recalc_offset(u64 cur_tb)
 	unsigned long offset;
 	u64 new_stamp_xsec;
 	u64 tlen, t2x;
+	u64 tb, xsec_old, xsec_new;
+	struct gettimeofday_vars *varp;
 
 	if (__USE_RTC())
 		return;
 	tlen = current_tick_length();
 	offset = cur_tb - do_gtod.varp->tb_orig_stamp;
-	if (tlen == last_tick_len && offset < 0x80000000u) {
-		/* check that we're still in sync; if not, resync */
-		struct timeval tv;
-		__do_gettimeofday(&tv, cur_tb);
-		if (tv.tv_sec <= xtime.tv_sec &&
-		    (tv.tv_sec < xtime.tv_sec ||
-		     tv.tv_usec * 1000 <= xtime.tv_nsec))
-			return;
-	}
+	if (tlen == last_tick_len && offset < 0x80000000u)
+		return;
 	if (tlen != last_tick_len) {
 		t2x = mulhdu(tlen << TICKLEN_SHIFT, ticklen_to_xs);
 		last_tick_len = tlen;
@@ -332,6 +327,21 @@ static __inline__ void timer_recalc_offset(u64 cur_tb)
 	new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
 	do_div(new_stamp_xsec, 1000000000);
 	new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
+
+	++vdso_data->tb_update_count;
+	smp_mb();
+
+	/*
+	 * Make sure time doesn't go backwards for userspace gettimeofday.
+	 */
+	tb = get_tb();
+	varp = do_gtod.varp;
+	xsec_old = mulhdu(tb - varp->tb_orig_stamp, varp->tb_to_xs)
+		+ varp->stamp_xsec;
+	xsec_new = mulhdu(tb - cur_tb, t2x) + new_stamp_xsec;
+	if (xsec_new < xsec_old)
+		new_stamp_xsec += xsec_old - xsec_new;
+
 	update_gtod(cur_tb, new_stamp_xsec, t2x);
 }
@@ -564,6 +574,10 @@ int do_settimeofday(struct timespec *tv)
 	}
 #endif
 
+	/* Make userspace gettimeofday spin until we're done. */
+	++vdso_data->tb_update_count;
+	smp_mb();
+
 	/*
 	 * Subtract off the number of nanoseconds since the
 	 * beginning of the last tick.
@@ -724,10 +738,16 @@ void __init time_init(void)
 	 * It is computed as:
 	 *	ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
 	 * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
-	 * so as to give the result as a 0.64 fixed-point fraction.
+	 * which turns out to be N = 51 - SHIFT_HZ.
+	 * This gives the result as a 0.64 fixed-point fraction.
+	 * That value is reduced by an offset amounting to 1 xsec per
+	 * 2^31 timebase ticks to avoid problems with time going backwards
+	 * by 1 xsec when we do timer_recalc_offset due to losing the
+	 * fractional xsec.  That offset is equal to ppc_tb_freq/2^51
+	 * since there are 2^20 xsec in a second.
 	 */
-	div128_by_32(1ULL << (64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT), 0,
-		     tb_ticks_per_jiffy, &res);
+	div128_by_32((1ULL << 51) - ppc_tb_freq, 0,
+		     tb_ticks_per_jiffy << SHIFT_HZ, &res);
 	div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
 	ticklen_to_xs = res.result_low;
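As a rough sanity check on the figures in the comment above, the following standalone snippet evaluates the nominal and reduced values of ticklen_to_xs and the resulting fractional slow-down ppc_tb_freq/2^51. It is illustrative only: it assumes a hypothetical 512 MHz timebase with HZ = 1000 (SHIFT_HZ = 10) and uses floating point in place of the kernel's div128_by_32() fixed-point division:

#include <math.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical example values, not taken from any particular machine. */
	const double ppc_tb_freq = 512e6;	/* timebase ticks per second */
	const double hz = 1000.0;		/* jiffies per second */
	const double tb_ticks_per_jiffy = ppc_tb_freq / hz;
	const double shift_hz = 10.0;		/* SHIFT_HZ for HZ = 1000 */

	/* Nominal factor: 2^51 / (tb_ticks_per_jiffy * 2^SHIFT_HZ * 1e9). */
	double nominal = exp2(51) /
		(tb_ticks_per_jiffy * exp2(shift_hz) * 1e9);

	/* Reduced factor used by the patch: numerator lowered by ppc_tb_freq. */
	double reduced = (exp2(51) - ppc_tb_freq) /
		(tb_ticks_per_jiffy * exp2(shift_hz) * 1e9);

	/* The relative reduction is ppc_tb_freq / 2^51, i.e. 1 xsec per 2^31 ticks. */
	printf("relative reduction in ticklen_to_xs: %.3g\n",
	       1.0 - reduced / nominal);
	printf("i.e. about 1 part in %.1f million\n",
	       exp2(51) / ppc_tb_freq / 1e6);
	return 0;
}

With these example numbers the slow-down comes out to about 1 part in 4.4 million, consistent with the "less than 1 in 3.7 million" bound in the commit message as long as the timebase frequency stays below roughly 600 MHz.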