| author | Jason Low | 2015-04-28 13:00:24 -0700 |
|---|---|---|
| committer | Ingo Molnar | 2015-05-08 12:17:46 +0200 |
| commit | 7110744516276e906f9197e2857d026eb2343393 | |
| tree | e9add9fbd4c901ed5b004e630370df515c6f1210 /kernel/time | |
| parent | 971e8a985482c76487edb5a49811e99b96e846e1 | |
sched, timer: Use the atomic task_cputime in thread_group_cputimer
Recent optimizations were made to thread_group_cputimer to improve its
scalability by keeping track of cputime stats without a lock. However,
the values were open coded directly into the structure, placing them at
a different abstraction level from the regular task_cputime structure.
Furthermore, any subsequent similar optimizations would not be able to
share the new code, since those fields are specific to thread_group_cputimer.
This patch adds the new task_cputime_atomic data structure (introduced in
the previous patch in the series) to thread_group_cputimer for keeping
track of the cputime atomically, which also helps generalize the code.
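
[Editor's note: for readers following the diff below, a minimal sketch of the two
structures as they stand after this patch, based on the include/linux/sched.h
definitions around this series; the comments are added here for illustration and
field details should be checked against the tree:]

	struct task_cputime_atomic {
		atomic64_t utime;		/* time spent in user mode */
		atomic64_t stime;		/* time spent in kernel mode */
		atomic64_t sum_exec_runtime;	/* total scheduled runtime, in ns */
	};

	struct thread_group_cputimer {
		/* lockless cputime tracking, shared with task_cputime consumers */
		struct task_cputime_atomic cputime_atomic;
		/* set without a lock; written with WRITE_ONCE(), see the diff */
		int running;
	};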
Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Jason Low <jason.low2@hp.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Aswin Chandramouleeswaran <aswin@hp.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Preeti U Murthy <preeti@linux.vnet.ibm.com>
Cc: Scott J Norton <scott.norton@hp.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Waiman Long <Waiman.Long@hp.com>
Link: http://lkml.kernel.org/r/1430251224-5764-6-git-send-email-jason.low2@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/time')
| -rw-r--r-- | kernel/time/posix-cpu-timers.c | 26 |

1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index d85730669410..892e3dae0aac 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -211,20 +211,20 @@ retry:
 	}
 }
 
-static void update_gt_cputime(struct thread_group_cputimer *cputimer, struct task_cputime *sum)
+static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime *sum)
 {
-	__update_gt_cputime(&cputimer->utime, sum->utime);
-	__update_gt_cputime(&cputimer->stime, sum->stime);
-	__update_gt_cputime(&cputimer->sum_exec_runtime, sum->sum_exec_runtime);
+	__update_gt_cputime(&cputime_atomic->utime, sum->utime);
+	__update_gt_cputime(&cputime_atomic->stime, sum->stime);
+	__update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
 }
 
-/* Sample thread_group_cputimer values in "cputimer", store results in "times". */
-static inline void sample_group_cputimer(struct task_cputime *times,
-					 struct thread_group_cputimer *cputimer)
+/* Sample task_cputime_atomic values in "atomic_timers", store results in "times". */
+static inline void sample_cputime_atomic(struct task_cputime *times,
+					 struct task_cputime_atomic *atomic_times)
 {
-	times->utime = atomic64_read(&cputimer->utime);
-	times->stime = atomic64_read(&cputimer->stime);
-	times->sum_exec_runtime = atomic64_read(&cputimer->sum_exec_runtime);
+	times->utime = atomic64_read(&atomic_times->utime);
+	times->stime = atomic64_read(&atomic_times->stime);
+	times->sum_exec_runtime = atomic64_read(&atomic_times->sum_exec_runtime);
 }
 
 void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
@@ -240,7 +240,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
 		 * to synchronize the timer to the clock every time we start it.
 		 */
 		thread_group_cputime(tsk, &sum);
-		update_gt_cputime(cputimer, &sum);
+		update_gt_cputime(&cputimer->cputime_atomic, &sum);
 
 		/*
 		 * We're setting cputimer->running without a lock. Ensure
@@ -251,7 +251,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
 		 */
 		WRITE_ONCE(cputimer->running, 1);
 	}
-	sample_group_cputimer(times, cputimer);
+	sample_cputime_atomic(times, &cputimer->cputime_atomic);
 }
 
 /*
@@ -1137,7 +1137,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 	if (READ_ONCE(sig->cputimer.running)) {
 		struct task_cputime group_sample;
 
-		sample_group_cputimer(&group_sample, &sig->cputimer);
+		sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);
 
 		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
 			return 1;