author		Ingo Molnar	2007-07-19 21:28:35 +0200
committer	Ingo Molnar	2007-07-19 21:28:35 +0200
commit		e436d80085133858bf2613a630365e8a0459fd58 (patch)
tree		c579410a12ed63d94e3a40a023634c7df4aebc76
parent		969bb4e4032dac67287951d8f6642a3b5119694e (diff)
[PATCH] sched: implement cpu_clock(cpu) high-speed time source
Implement the cpu_clock(cpu) interface for kernel-internal use: high-speed
(but slightly incorrect) per-cpu clock constructed from sched_clock(). This
API, unused at the moment, will be used in the future by blktrace, by the
softlockup-watchdog, by printk and by lockstat.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	include/linux/sched.h	|  7 +++++++
-rw-r--r--	kernel/sched.c		| 17 +++++++++++++++++
2 files changed, 24 insertions(+), 0 deletions(-)
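
As a rough illustration of the new interface (not part of the patch itself): a
future user such as the softlockup watchdog, which the commit message names,
might take per-cpu timestamps along these lines. All names below are
hypothetical; only cpu_clock() itself comes from this patch.

	/*
	 * Hypothetical caller sketch -- illustrative only, not in this commit.
	 * cpu_clock() returns a nanosecond timestamp derived from that CPU's
	 * runqueue clock; raw_smp_processor_id() picks the current CPU.
	 */
	#include <linux/sched.h>
	#include <linux/smp.h>

	static unsigned long long example_last_event;	/* hypothetical state */

	static void example_mark_event(void)
	{
		int cpu = raw_smp_processor_id();

		example_last_event = cpu_clock(cpu);
	}
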
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 94f624aef017..33b9b4841ee7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1348,6 +1348,13 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 #endif
 
 extern unsigned long long sched_clock(void);
+
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+extern unsigned long long cpu_clock(int cpu);
+
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
diff --git a/kernel/sched.c b/kernel/sched.c
index a35a92ff38fd..93cf241cfbe9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -379,6 +379,23 @@ static inline unsigned long long rq_clock(struct rq *rq)
 #define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+unsigned long long cpu_clock(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long long now;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rq->lock, flags);
+	now = rq_clock(rq);
+	spin_unlock_irqrestore(&rq->lock, flags);
+
+	return now;
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /* Change a task's ->cfs_rq if it moves across CPUs */
 static inline void set_task_cfs_rq(struct task_struct *p)
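
Because each CPU's clock is constructed independently from sched_clock(), the
result is only "slightly incorrect": readings taken on different CPUs are not
guaranteed to be mutually consistent. A minimal sketch of an interval
measurement, assuming the caller sticks to one CPU (the function name is
hypothetical):

	/*
	 * Illustrative sketch, not part of this commit: time a stretch of
	 * work against a single CPU's clock. Start and end readings should
	 * come from the same cpu argument, since the per-cpu clocks may
	 * drift relative to each other.
	 */
	static unsigned long long example_measure(int cpu)
	{
		unsigned long long t0, t1;

		t0 = cpu_clock(cpu);
		/* ... work being timed ... */
		t1 = cpu_clock(cpu);

		return t1 - t0;	/* elapsed nanoseconds on that CPU */
	}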