author     Linus Torvalds  2013-07-02 16:17:25 -0700
committer  Linus Torvalds  2013-07-02 16:17:25 -0700
commit     2d722f6d5671794c0de0e29e3da75006ac086718
tree       042c6320cfd771798f985e79aa0422cbe8096258
parent     f0bb4c0ab064a8aeeffbda1cee380151a594eaab
parent     2fd1b487884310d0aa0c0640179dc7490ad86313
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
"The main changes:
- load-calculation cleanups and improvements, by Alex Shi
- various nohz-related tidying up of statistics, by Frederic
Weisbecker
- factor out /proc functions to kernel/sched/proc.c, by Paul
Gortmaker
- simplify the RT policy scheduler, by Kirill Tkhai
- various fixes and cleanups"
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (42 commits)
sched/debug: Remove CONFIG_FAIR_GROUP_SCHED mask
sched/debug: Fix formatting of /proc/<PID>/sched
sched: Fix typo in struct sched_avg member description
sched/fair: Fix typo describing flags in enqueue_entity
sched/debug: Add load-tracking statistics to task
sched: Change get_rq_runnable_load() to static and inline
sched/tg: Remove tg.load_weight
sched/cfs_rq: Change atomic64_t removed_load to atomic_long_t
sched/tg: Use 'unsigned long' for load variable in task group
sched: Change cfs_rq load avg to unsigned long
sched: Consider runnable load average in move_tasks()
sched: Compute runnable load avg in cpu_load and cpu_avg_load_per_task
sched: Update cpu load after task_tick
sched: Fix sleep time double accounting in enqueue entity
sched: Set an initial value of runnable avg for new forked task
sched: Move a few runnable tg variables into CONFIG_SMP
Revert "sched: Introduce temporary FAIR_GROUP_SCHED dependency for load-tracking"
sched: Don't mix use of typedef ctl_table and struct ctl_table
sched: Remove WARN_ON(!sd) from init_sched_groups_power()
sched: Fix memory leakage in build_sched_groups()
...
Diffstat (limited to 'include')
-rw-r--r--   include/linux/completion.h          2
-rw-r--r--   include/linux/perf_event.h          2
-rw-r--r--   include/linux/sched.h               9
-rw-r--r--   include/linux/spinlock_up.h         2
-rw-r--r--   include/uapi/asm-generic/unistd.h   2
5 files changed, 6 insertions, 11 deletions
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 33f0280fd533..3cd574d5b19e 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -5,7 +5,7 @@
  * (C) Copyright 2001 Linus Torvalds
  *
  * Atomic wait-for-completion handler data structures.
- * See kernel/sched.c for details.
+ * See kernel/sched/core.c for details.
  */
 
 #include <linux/wait.h>
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 50b3efd14d29..8873f82c7baa 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -822,7 +822,7 @@ static inline void perf_restore_debug_store(void)		{ }
 #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
 
 /*
- * This has to have a higher priority than migration_notifier in sched.c.
+ * This has to have a higher priority than migration_notifier in sched/core.c.
  */
 #define perf_cpu_notifier(fn)						\
 do {									\
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 178a8d909f14..ec80684a0127 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -924,7 +924,7 @@ struct load_weight {
 struct sched_avg {
 	/*
 	 * These sums represent an infinite geometric series and so are bound
-	 * above by 1024/(1-y). Thus we only need a u32 to store them for for all
+	 * above by 1024/(1-y). Thus we only need a u32 to store them for all
 	 * choices of y < 1-2^(-32)*1024.
 	 */
 	u32 runnable_avg_sum, runnable_avg_period;
@@ -994,12 +994,7 @@ struct sched_entity {
 	struct cfs_rq		*my_q;
 #endif
 
-/*
- * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
- * removed when useful for applications beyond shares distribution (e.g.
- * load-balance).
- */
-#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
+#ifdef CONFIG_SMP
 	/* Per-entity load-tracking */
 	struct sched_avg	avg;
 #endif
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index e2369c167dbd..8b3ac0d718eb 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -67,7 +67,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 #else /* DEBUG_SPINLOCK */
 #define arch_spin_is_locked(lock)	((void)(lock), 0)
 
-/* for sched.c and kernel_lock.c: */
+/* for sched/core.c and kernel_lock.c: */
 # define arch_spin_lock(lock)		do { barrier(); (void)(lock); } while (0)
 # define arch_spin_lock_flags(lock, flags)	do { barrier(); (void)(lock); } while (0)
 # define arch_spin_unlock(lock)	do { barrier(); (void)(lock); } while (0)
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 0cc74c4403e4..a20a9b4d3871 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -361,7 +361,7 @@ __SYSCALL(__NR_syslog, sys_syslog)
 #define __NR_ptrace 117
 __SYSCALL(__NR_ptrace, sys_ptrace)
 
-/* kernel/sched.c */
+/* kernel/sched/core.c */
 #define __NR_sched_setparam 118
 __SYSCALL(__NR_sched_setparam, sys_sched_setparam)
 #define __NR_sched_setscheduler 119
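The sched_avg hunk above relies on a bound that is easy to verify: contributions of at most 1024 per period, each decayed by a factor y, sum to the geometric series 1024 * (1 + y + y^2 + ...) = 1024/(1-y). That fits in a u32 exactly when 1024/(1-y) < 2^32, i.e. when y < 1 - 1024*2^(-32), which is precisely the condition the comment quotes. Below is a minimal standalone C sketch of the arithmetic, not kernel code; it assumes the y^32 = 1/2 decay rate used by the kernel's per-entity load tracking (whose integer-rounded constant in kernel/sched/fair.c is LOAD_AVG_MAX = 47742), a detail that does not appear in this diff.

/*
 * Standalone illustration of the sched_avg comment's bound; NOT kernel
 * code.  The decay rate y^32 == 1/2 is an assumption carried over from
 * the kernel's per-entity load tracking, not from this diff.
 */
#include <stdio.h>
#include <stdint.h>
#include <math.h>

int main(void)
{
	double y = pow(0.5, 1.0 / 32.0);	/* decay factor, y^32 == 1/2 */
	double bound = 1024.0 / (1.0 - y);	/* closed-form series limit */
	double sum = 0.0;
	int i;

	/*
	 * Iterate the recurrence the accounting conceptually performs:
	 * decay the running sum, then add this period's maximum
	 * contribution of 1024.  The sum approaches the closed-form
	 * bound from below and never exceeds it.
	 */
	for (i = 0; i < 1000; i++)
		sum = sum * y + 1024.0;

	printf("y          = %.6f\n", y);
	printf("1024/(1-y) = %.1f (< UINT32_MAX: %s)\n",
	       bound, bound < (double)UINT32_MAX ? "yes" : "no");
	printf("series sum = %.1f after %d periods\n", sum, i);
	return 0;
}

Compiled with `cc sketch.c -lm`, this prints y ~ 0.978572 and a closed-form bound of roughly 47788, far below UINT32_MAX, matching the comment's claim that a u32 suffices for all y < 1 - 2^(-32)*1024.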