Diffstat (limited to 'arch')
-rw-r--r--  arch/x86_64/kernel/time.c  94
1 files changed, 45 insertions, 49 deletions
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 2f7c21206574..bb6cb83450b2 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -88,7 +88,8 @@ static inline unsigned int do_gettimeoffset_tsc(void)
unsigned long t;
unsigned long x;
t = get_cycles_sync();
- if (t < vxtime.last_tsc) t = vxtime.last_tsc; /* hack */
+ if (t < vxtime.last_tsc)
+ t = vxtime.last_tsc; /* hack */
x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> 32;
return x;
}
@@ -178,8 +179,9 @@ unsigned long profile_pc(struct pt_regs *regs)
{
unsigned long pc = instruction_pointer(regs);
- /* Assume the lock function has either no stack frame or only a single word.
- This checks if the address on the stack looks like a kernel text address.
+ /* Assume the lock function has either no stack frame or only a single
+ word. This checks if the address on the stack looks like a kernel
+ text address.
There is a small window for false hits, but in that case the tick
is just accounted to the spinlock function.
Better would be to write these functions in assembler again
@@ -293,8 +295,7 @@ unsigned long long monotonic_clock(void)
this_offset = hpet_readl(HPET_COUNTER);
} while (read_seqretry(&xtime_lock, seq));
offset = (this_offset - last_offset);
- offset *=(NSEC_PER_SEC/HZ)/hpet_tick;
- return base + offset;
+ offset *= (NSEC_PER_SEC/HZ) / hpet_tick;
} else {
do {
seq = read_seqbegin(&xtime_lock);
@@ -303,50 +304,46 @@ unsigned long long monotonic_clock(void)
base = monotonic_base;
} while (read_seqretry(&xtime_lock, seq));
this_offset = get_cycles_sync();
- offset = (this_offset - last_offset)*1000/cpu_khz;
- return base + offset;
+ offset = (this_offset - last_offset)*1000 / cpu_khz;
}
+ return base + offset;
}
EXPORT_SYMBOL(monotonic_clock);
static noinline void handle_lost_ticks(int lost, struct pt_regs *regs)
{
- static long lost_count;
- static int warned;
-
- if (report_lost_ticks) {
- printk(KERN_WARNING "time.c: Lost %d timer "
- "tick(s)! ", lost);
- print_symbol("rip %s)\n", regs->rip);
- }
-
- if (lost_count == 1000 && !warned) {
- printk(KERN_WARNING
- "warning: many lost ticks.\n"
- KERN_WARNING "Your time source seems to be instable or "
+ static long lost_count;
+ static int warned;
+ if (report_lost_ticks) {
+ printk(KERN_WARNING "time.c: Lost %d timer tick(s)! ", lost);
+ print_symbol("rip %s)\n", regs->rip);
+ }
+
+ if (lost_count == 1000 && !warned) {
+ printk(KERN_WARNING "warning: many lost ticks.\n"
+ KERN_WARNING "Your time source seems to be instable or "
"some driver is hogging interupts\n");
- print_symbol("rip %s\n", regs->rip);
- if (vxtime.mode == VXTIME_TSC && vxtime.hpet_address) {
- printk(KERN_WARNING "Falling back to HPET\n");
- if (hpet_use_timer)
- vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
- else
- vxtime.last = hpet_readl(HPET_COUNTER);
- vxtime.mode = VXTIME_HPET;
- do_gettimeoffset = do_gettimeoffset_hpet;
- }
- /* else should fall back to PIT, but code missing. */
- warned = 1;
- } else
- lost_count++;
+ print_symbol("rip %s\n", regs->rip);
+ if (vxtime.mode == VXTIME_TSC && vxtime.hpet_address) {
+ printk(KERN_WARNING "Falling back to HPET\n");
+ if (hpet_use_timer)
+ vxtime.last = hpet_readl(HPET_T0_CMP) -
+ hpet_tick;
+ else
+ vxtime.last = hpet_readl(HPET_COUNTER);
+ vxtime.mode = VXTIME_HPET;
+ do_gettimeoffset = do_gettimeoffset_hpet;
+ }
+ /* else should fall back to PIT, but code missing. */
+ warned = 1;
+ } else
+ lost_count++;
#ifdef CONFIG_CPU_FREQ
- /* In some cases the CPU can change frequency without us noticing
- (like going into thermal throttle)
- Give cpufreq a change to catch up. */
- if ((lost_count+1) % 25 == 0) {
- cpufreq_delayed_get();
- }
+ /* In some cases the CPU can change frequency without us noticing
+ Give cpufreq a change to catch up. */
+ if ((lost_count+1) % 25 == 0)
+ cpufreq_delayed_get();
#endif
}
@@ -556,7 +553,7 @@ static unsigned long get_cmos_time(void)
/*
* We know that x86-64 always uses BCD format, no need to check the
* config register.
- */
+ */
BCD_TO_BIN(sec);
BCD_TO_BIN(min);
@@ -618,7 +615,8 @@ static void cpufreq_delayed_get(void)
cpufreq_delayed_issched = 1;
if (!warned) {
warned = 1;
- printk(KERN_DEBUG "Losing some ticks... checking if CPU frequency changed.\n");
+ printk(KERN_DEBUG
+ "Losing some ticks... checking if CPU frequency changed.\n");
}
schedule_work(&cpufreq_delayed_get_work);
}
@@ -641,9 +639,9 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
lpj = &dummy;
if (!(freq->flags & CPUFREQ_CONST_LOOPS))
#ifdef CONFIG_SMP
- lpj = &cpu_data[freq->cpu].loops_per_jiffy;
+ lpj = &cpu_data[freq->cpu].loops_per_jiffy;
#else
- lpj = &boot_cpu_data.loops_per_jiffy;
+ lpj = &boot_cpu_data.loops_per_jiffy;
#endif
if (!ref_freq) {
@@ -780,9 +778,8 @@ static __init int late_hpet_init(void)
int i;
hpet = (struct hpet *) fix_to_virt(FIX_HPET_BASE);
-
- for (i = 2, timer = &hpet->hpet_timers[2]; i < ntimer;
- timer++, i++)
+ timer = &hpet->hpet_timers[2];
+ for (i = 2; i < ntimer; timer++, i++)
hd.hd_irq[i] = (timer->hpet_config &
Tn_INT_ROUTE_CNF_MASK) >>
Tn_INT_ROUTE_CNF_SHIFT;
@@ -939,8 +936,7 @@ void __init time_init(void)
set_normalized_timespec(&wall_to_monotonic,
                        -xtime.tv_sec, -xtime.tv_nsec);
if (!hpet_init())
- vxtime_hz = (1000000000000000L + hpet_period / 2) /
- hpet_period;
+ vxtime_hz = (1000000000000000L + hpet_period / 2) / hpet_period;
else
vxtime.hpet_address = 0;
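
For reference, a minimal standalone sketch of the control-flow change made to monotonic_clock() in the hunks above: both the HPET and TSC branches end by computing base + offset, so the patch drops the duplicated "return base + offset;" from each branch and keeps a single return after the if/else. The globals and counter-read helpers below are hypothetical stand-ins for illustration, not the kernel's vxtime/HPET interfaces.

/* Sketch only: stand-ins for the kernel's time sources, not real APIs. */
#include <stdio.h>

static int use_hpet = 1;                          /* stand-in for vxtime.mode == VXTIME_HPET */
static unsigned long long monotonic_base = 1000;  /* stand-in for the kernel's monotonic_base */

static unsigned long long hpet_offset_ns(void) { return 42; }  /* hypothetical helper */
static unsigned long long tsc_offset_ns(void)  { return 99; }  /* hypothetical helper */

static unsigned long long monotonic_clock_sketch(void)
{
	unsigned long long base = monotonic_base;
	unsigned long long offset;

	if (use_hpet)
		offset = hpet_offset_ns();
	else
		offset = tsc_offset_ns();

	/* single exit point, as in the patched function above */
	return base + offset;
}

int main(void)
{
	printf("monotonic: %llu ns\n", monotonic_clock_sketch());
	return 0;
}

Hoisting the common return out of both branches keeps the two code paths symmetrical and leaves the function with one exit point, which is what the "+ return base + offset;" line after the else block accomplishes in the patch.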