From e9d867a67fd03ccc07248ca4e9c2f74fed494d5b Mon Sep 17 00:00:00 2001 From: Peter Zijlstra (Intel) Date: Thu, 10 Mar 2016 12:54:08 +0100 Subject: sched: Allow per-cpu kernel threads to run on online && !active In order to enable symmetric hotplug, we must mirror the online && !active state of cpu-down on the cpu-up side. However, to retain sanity, limit this state to per-cpu kthreads. Aside from the change to set_cpus_allowed_ptr(), which allows moving the per-cpu kthreads on, the other critical piece is the cpu selection for pinned tasks in select_task_rq(). This avoids dropping into select_fallback_rq(). select_fallback_rq() cannot be allowed to select !active cpus because it's used to migrate user tasks away. And we do not want to move user tasks onto cpus that are in transition. Requested-by: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Tested-by: Thomas Gleixner Cc: Lai Jiangshan Cc: Jan H. Schönherr Cc: Oleg Nesterov Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160301152303.GV6356@twins.programming.kicks-ass.net Signed-off-by: Thomas Gleixner --- kernel/sched/core.c | 49 ++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 42 insertions(+), 7 deletions(-) (limited to 'kernel/sched') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 8b489fcac37b..8bfd7d4f1c21 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1082,13 +1082,21 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) static int __set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask, bool check) { + const struct cpumask *cpu_valid_mask = cpu_active_mask; + unsigned int dest_cpu; unsigned long flags; struct rq *rq; - unsigned int dest_cpu; int ret = 0; rq = task_rq_lock(p, &flags); + if (p->flags & PF_KTHREAD) { + /* + * Kernel threads are allowed on online && !active CPUs + */ + cpu_valid_mask = cpu_online_mask; + } + /* * Must re-check here, to close a race against __kthread_bind(), * sched_setaffinity() is not guaranteed to observe the flag. @@ -1101,18 +1109,28 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, if (cpumask_equal(&p->cpus_allowed, new_mask)) goto out; - if (!cpumask_intersects(new_mask, cpu_active_mask)) { + if (!cpumask_intersects(new_mask, cpu_valid_mask)) { ret = -EINVAL; goto out; } do_set_cpus_allowed(p, new_mask); + if (p->flags & PF_KTHREAD) { + /* + * For kernel threads that do indeed end up on online && + * !active we want to ensure they are strict per-cpu threads. + */ + WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) && + !cpumask_intersects(new_mask, cpu_active_mask) && + p->nr_cpus_allowed != 1); + } + /* Can the task run on the task's current CPU? If so, we're done */ if (cpumask_test_cpu(task_cpu(p), new_mask)) goto out; - dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); + dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask); if (task_running(rq, p) || p->state == TASK_WAKING) { struct migration_arg arg = { p, dest_cpu }; /* Need help from migration thread: drop lock and wait. */ @@ -1431,6 +1449,25 @@ EXPORT_SYMBOL_GPL(kick_process); /* * ->cpus_allowed is protected by both rq->lock and p->pi_lock + * + * A few notes on cpu_active vs cpu_online: + * + * - cpu_active must be a subset of cpu_online + * + * - on cpu-up we allow per-cpu kthreads on the online && !active cpu, + * see __set_cpus_allowed_ptr(). At this point the newly online + * cpu isn't yet part of the sched domains, and balancing will not + * see it.
+ * + * - on cpu-down we clear cpu_active() to mask the sched domains and + * avoid the load balancer to place new tasks on the to be removed + * cpu. Existing tasks will remain running there and will be taken + * off. + * + * This means that fallback selection must not select !active CPUs. + * And can assume that any active CPU must be online. Conversely + * select_task_rq() below may allow selection of !active CPUs in order + * to satisfy the above rules. */ static int select_fallback_rq(int cpu, struct task_struct *p) { @@ -1449,8 +1486,6 @@ static int select_fallback_rq(int cpu, struct task_struct *p) /* Look for allowed, online CPU in same node. */ for_each_cpu(dest_cpu, nodemask) { - if (!cpu_online(dest_cpu)) - continue; if (!cpu_active(dest_cpu)) continue; if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p))) @@ -1461,8 +1496,6 @@ static int select_fallback_rq(int cpu, struct task_struct *p) for (;;) { /* Any allowed, online CPU? */ for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) { - if (!cpu_online(dest_cpu)) - continue; if (!cpu_active(dest_cpu)) continue; goto out; @@ -1514,6 +1547,8 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) if (p->nr_cpus_allowed > 1) cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); + else + cpu = cpumask_any(tsk_cpus_allowed(p)); /* * In order not to call set_task_cpu() on a blocking task we need -- cgit v1.2.3 From 9cf7243d5d83d27aca47f842107bfa02b5f11d16 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 10 Mar 2016 12:54:09 +0100 Subject: sched: Make set_cpu_rq_start_time() a built in hotplug state Start disentangling the maze of hotplug notifiers in the scheduler. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Cc: rt@linutronix.de Signed-off-by: Thomas Gleixner --- include/linux/cpuhotplug.h | 1 + include/linux/sched.h | 1 + kernel/cpu.c | 6 ++++++ kernel/sched/core.c | 16 +++++++++------- 4 files changed, 17 insertions(+), 7 deletions(-) (limited to 'kernel/sched') diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 5d68e15e46b7..99fd1d2f76fe 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -8,6 +8,7 @@ enum cpuhp_state { CPUHP_BRINGUP_CPU, CPUHP_AP_IDLE_DEAD, CPUHP_AP_OFFLINE, + CPUHP_AP_SCHED_STARTING, CPUHP_AP_NOTIFY_STARTING, CPUHP_AP_ONLINE, CPUHP_TEARDOWN_CPU, diff --git a/include/linux/sched.h b/include/linux/sched.h index 52c4847b05e2..39597d0a005e 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -372,6 +372,7 @@ extern void cpu_init (void); extern void trap_init(void); extern void update_process_times(int user); extern void scheduler_tick(void); +extern int sched_cpu_starting(unsigned int cpu); extern void sched_show_task(struct task_struct *p); diff --git a/kernel/cpu.c b/kernel/cpu.c index 3e3f6e49eabb..f46d02b966bf 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -1236,6 +1236,12 @@ static struct cpuhp_step cpuhp_ap_states[] = { .name = "ap:offline", .cant_stop = true, }, + /* First state is scheduler control. Interrupts are disabled */ + [CPUHP_AP_SCHED_STARTING] = { .name = "sched:starting", .startup = sched_cpu_starting, .teardown = NULL, }, /* * Low level startup/teardown notifiers. Run with interrupts * disabled.
Will be removed once the notifiers are converted to diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 8bfd7d4f1c21..4df9aaae27a2 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5473,10 +5473,10 @@ static struct notifier_block migration_notifier = { .priority = CPU_PRI_MIGRATION, }; -static void set_cpu_rq_start_time(void) +static void set_cpu_rq_start_time(unsigned int cpu) { - int cpu = smp_processor_id(); struct rq *rq = cpu_rq(cpu); + rq->age_stamp = sched_clock_cpu(cpu); } @@ -5486,10 +5486,6 @@ static int sched_cpu_active(struct notifier_block *nfb, int cpu = (long)hcpu; switch (action & ~CPU_TASKS_FROZEN) { - case CPU_STARTING: - set_cpu_rq_start_time(); - return NOTIFY_OK; - case CPU_DOWN_FAILED: set_cpu_active(cpu, true); return NOTIFY_OK; @@ -5511,6 +5507,12 @@ static int sched_cpu_inactive(struct notifier_block *nfb, } } +int sched_cpu_starting(unsigned int cpu) +{ + set_cpu_rq_start_time(cpu); + return 0; +} + static int __init migration_init(void) { void *cpu = (void *)(long)smp_processor_id(); @@ -7426,7 +7428,7 @@ void __init sched_init(void) if (cpu_isolated_map == NULL) zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); idle_thread_set_boot_cpu(); - set_cpu_rq_start_time(); + set_cpu_rq_start_time(smp_processor_id()); #endif init_sched_fair_class(); -- cgit v1.2.3 From e26fbffd32c28107d9d268b432706ccf84fb6411 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 10 Mar 2016 12:54:10 +0100 Subject: sched: Allow hotplug notifiers to be setup early Prevent the SMP scheduler related notifiers from being executed before the SMP scheduler is initialized and install them early. This is a preparatory change for further consolidation of the hotplug notifier maze. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Cc: rt@linutronix.de Signed-off-by: Thomas Gleixner --- kernel/sched/core.c | 59 ++++++++++++++++++++++++++++++++--------------------- 1 file changed, 36 insertions(+), 23 deletions(-) (limited to 'kernel/sched') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 4df9aaae27a2..328502c9af00 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5195,6 +5195,8 @@ out: #ifdef CONFIG_SMP +static bool sched_smp_initialized __read_mostly; + #ifdef CONFIG_NUMA_BALANCING /* Migrate current task p to target_cpu */ int migrate_task_to(struct task_struct *p, int target_cpu) @@ -5513,25 +5515,6 @@ int sched_cpu_starting(unsigned int cpu) return 0; } -static int __init migration_init(void) -{ - void *cpu = (void *)(long)smp_processor_id(); - int err; - - /* Initialize migration for the boot CPU */ - err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu); - BUG_ON(err == NOTIFY_BAD); - migration_call(&migration_notifier, CPU_ONLINE, cpu); - register_cpu_notifier(&migration_notifier); - - /* Register cpu active notifiers */ - cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE); - cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE); - - return 0; -} -early_initcall(migration_init); - static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */ #ifdef CONFIG_SCHED_DEBUG @@ -6711,6 +6694,9 @@ static int sched_domains_numa_masks_update(struct notifier_block *nfb, { int cpu = (long)hcpu; + if (!sched_smp_initialized) + return NOTIFY_DONE; + switch (action & ~CPU_TASKS_FROZEN) { case CPU_ONLINE: sched_domains_numa_masks_set(cpu); @@ -7129,6 +7115,9 @@ static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */ static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action, void *hcpu) { + if
(!sched_smp_initialized) + return NOTIFY_DONE; + switch (action) { case CPU_ONLINE_FROZEN: case CPU_DOWN_FAILED_FROZEN: @@ -7169,6 +7158,9 @@ static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action, bool overflow; int cpus; + if (!sched_smp_initialized) + return NOTIFY_DONE; + switch (action) { case CPU_DOWN_PREPARE: rcu_read_lock_sched(); @@ -7216,10 +7208,6 @@ void __init sched_init_smp(void) cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); mutex_unlock(&sched_domains_mutex); - hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE); - hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE); - hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE); - init_hrtick(); /* Move init over to a non-isolated CPU */ @@ -7230,7 +7218,32 @@ void __init sched_init_smp(void) init_sched_rt_class(); init_sched_dl_class(); + sched_smp_initialized = true; } + +static int __init migration_init(void) +{ + void *cpu = (void *)(long)smp_processor_id(); + int err; + + /* Initialize migration for the boot CPU */ + err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu); + BUG_ON(err == NOTIFY_BAD); + migration_call(&migration_notifier, CPU_ONLINE, cpu); + register_cpu_notifier(&migration_notifier); + + /* Register cpu active notifiers */ + cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE); + cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE); + + hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE); + hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE); + hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE); + + return 0; +} +early_initcall(migration_init); + #else void __init sched_init_smp(void) { -- cgit v1.2.3 From 135fb3e19773e66f56b60e3b9fdda6166e77c55d Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 10 Mar 2016 12:54:11 +0100 Subject: sched: Consolidate the notifier maze We can maintain the ordering of the scheduler cpu hotplug functionality nicely in one notifier. Get rid of the maze. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Cc: rt@linutronix.de Signed-off-by: Thomas Gleixner --- include/linux/cpu.h | 12 ++-- kernel/sched/core.c | 174 +++++++++++++++++++++------------------------------- 2 files changed, 73 insertions(+), 113 deletions(-) (limited to 'kernel/sched') diff --git a/include/linux/cpu.h b/include/linux/cpu.h index f9b1fab4388a..17017051bfb1 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -61,19 +61,15 @@ struct notifier_block; enum { /* * SCHED_ACTIVE marks a cpu which is coming up active during - * CPU_ONLINE and CPU_DOWN_FAILED and must be the first - * notifier. CPUSET_ACTIVE adjusts cpuset according to - * cpu_active mask right after SCHED_ACTIVE. During - * CPU_DOWN_PREPARE, SCHED_INACTIVE and CPUSET_INACTIVE are - * ordered in the similar way. + * CPU_ONLINE and CPU_DOWN_FAILED and must be the first notifier. Is + * also cpuset according to cpu_active mask right after activating the + * cpu. During CPU_DOWN_PREPARE, SCHED_INACTIVE reversed the operation. * * This ordering guarantees consistent cpu_active mask and * migration behavior to all cpu notifiers.
*/ CPU_PRI_SCHED_ACTIVE = INT_MAX, - CPU_PRI_CPUSET_ACTIVE = INT_MAX - 1, - CPU_PRI_SCHED_INACTIVE = INT_MIN + 1, - CPU_PRI_CPUSET_INACTIVE = INT_MIN, + CPU_PRI_SCHED_INACTIVE = INT_MIN, /* migration should happen before other stuff but after perf */ CPU_PRI_PERF = 20, diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 328502c9af00..8a03a04f5c3a 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5482,39 +5482,6 @@ static void set_cpu_rq_start_time(unsigned int cpu) rq->age_stamp = sched_clock_cpu(cpu); } -static int sched_cpu_active(struct notifier_block *nfb, - unsigned long action, void *hcpu) -{ - int cpu = (long)hcpu; - - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_DOWN_FAILED: - set_cpu_active(cpu, true); - return NOTIFY_OK; - - default: - return NOTIFY_DONE; - } -} - -static int sched_cpu_inactive(struct notifier_block *nfb, - unsigned long action, void *hcpu) -{ - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_DOWN_PREPARE: - set_cpu_active((long)hcpu, false); - return NOTIFY_OK; - default: - return NOTIFY_DONE; - } -} - -int sched_cpu_starting(unsigned int cpu) -{ - set_cpu_rq_start_time(cpu); - return 0; -} - static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */ #ifdef CONFIG_SCHED_DEBUG @@ -6662,10 +6629,13 @@ static void sched_init_numa(void) init_numa_topology_type(); } -static void sched_domains_numa_masks_set(int cpu) +static void sched_domains_numa_masks_set(unsigned int cpu) { - int i, j; int node = cpu_to_node(cpu); + int i, j; + + if (!sched_smp_initialized) + return; for (i = 0; i < sched_domains_numa_levels; i++) { for (j = 0; j < nr_node_ids; j++) { @@ -6675,54 +6645,23 @@ static void sched_domains_numa_masks_set(int cpu) } } -static void sched_domains_numa_masks_clear(int cpu) +static void sched_domains_numa_masks_clear(unsigned int cpu) { int i, j; + + if (!sched_smp_initialized) + return; + for (i = 0; i < sched_domains_numa_levels; i++) { for (j = 0; j < nr_node_ids; j++) cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]); } } -/* - * Update sched_domains_numa_masks[level][node] array when new cpus - * are onlined. - */ -static int sched_domains_numa_masks_update(struct notifier_block *nfb, - unsigned long action, - void *hcpu) -{ - int cpu = (long)hcpu; - - if (!sched_smp_initialized) - return NOTIFY_DONE; - - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_ONLINE: - sched_domains_numa_masks_set(cpu); - break; - - case CPU_DEAD: - sched_domains_numa_masks_clear(cpu); - break; - - default: - return NOTIFY_DONE; - } - - return NOTIFY_OK; -} #else -static inline void sched_init_numa(void) -{ -} - -static int sched_domains_numa_masks_update(struct notifier_block *nfb, - unsigned long action, - void *hcpu) -{ - return 0; -} +static inline void sched_init_numa(void) { } +static void sched_domains_numa_masks_set(unsigned int cpu) { } +static void sched_domains_numa_masks_clear(unsigned int cpu) { } #endif /* CONFIG_NUMA */ static int __sdt_alloc(const struct cpumask *cpu_map) @@ -7112,16 +7051,12 @@ static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */ * If we come here as part of a suspend/resume, don't touch cpusets because we * want to restore it back to its original state upon resume anyway. 
*/ -static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action, - void *hcpu) +static void cpuset_cpu_active(bool frozen) { if (!sched_smp_initialized) - return NOTIFY_DONE; - - switch (action) { - case CPU_ONLINE_FROZEN: - case CPU_DOWN_FAILED_FROZEN: + return; + if (frozen) { /* * num_cpus_frozen tracks how many CPUs are involved in suspend * resume sequence. As long as this is not the last online @@ -7131,38 +7066,28 @@ static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action, num_cpus_frozen--; if (likely(num_cpus_frozen)) { partition_sched_domains(1, NULL, NULL); - break; + return; } - /* * This is the last CPU online operation. So fall through and * restore the original sched domains by considering the * cpuset configurations. */ - - case CPU_ONLINE: - cpuset_update_active_cpus(true); - break; - default: - return NOTIFY_DONE; } - return NOTIFY_OK; + cpuset_update_active_cpus(true); } -static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action, - void *hcpu) +static int cpuset_cpu_inactive(unsigned int cpu, bool frozen) { unsigned long flags; - long cpu = (long)hcpu; struct dl_bw *dl_b; bool overflow; int cpus; if (!sched_smp_initialized) - return NOTIFY_DONE; + return 0; - switch (action) { - case CPU_DOWN_PREPARE: + if (!frozen) { rcu_read_lock_sched(); dl_b = dl_bw_of(cpu); @@ -7174,17 +7099,60 @@ static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action, rcu_read_unlock_sched(); if (overflow) - return notifier_from_errno(-EBUSY); + return -EBUSY; cpuset_update_active_cpus(false); - break; - case CPU_DOWN_PREPARE_FROZEN: + } else { num_cpus_frozen++; partition_sched_domains(1, NULL, NULL); - break; + } + return 0; +} + +static int sched_cpu_active(struct notifier_block *nfb, unsigned long action, + void *hcpu) +{ + unsigned int cpu = (unsigned long)hcpu; + + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_DOWN_FAILED: + case CPU_ONLINE: + set_cpu_active(cpu, true); + sched_domains_numa_masks_set(cpu); + cpuset_cpu_active(action & CPU_TASKS_FROZEN); + return NOTIFY_OK; default: return NOTIFY_DONE; } - return NOTIFY_OK; +} + +static int sched_cpu_inactive(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + unsigned int cpu = (unsigned long)hcpu; + int ret; + + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_DOWN_PREPARE: + set_cpu_active(cpu, false); + ret = cpuset_cpu_inactive(cpu, action & CPU_TASKS_FROZEN); + if (ret) { + set_cpu_active(cpu, true); + return notifier_from_errno(ret); + } + return NOTIFY_OK; + + case CPU_DEAD: + sched_domains_numa_masks_clear(cpu); + return NOTIFY_OK; + default: + return NOTIFY_DONE; + } +} + +int sched_cpu_starting(unsigned int cpu) +{ + set_cpu_rq_start_time(cpu); + return 0; } void __init sched_init_smp(void) @@ -7236,10 +7204,6 @@ static int __init migration_init(void) cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE); cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE); - hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE); - hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE); - hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE); - return 0; } early_initcall(migration_init); -- cgit v1.2.3 From c6d2c7475ced868cfc636cd5a55a428da94537c3 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 10 Mar 2016 12:54:12 +0100 Subject: sched: Move sched_domains_numa_masks_clear() to DOWN_PREPARE This is the last operation on the cpu before vanishing. No point in calling that on CPU_DEAD. 
Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Cc: rt@linutronix.de Signed-off-by: Thomas Gleixner --- kernel/sched/core.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'kernel/sched') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 8a03a04f5c3a..541f9ab8ce4f 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7139,9 +7139,6 @@ static int sched_cpu_inactive(struct notifier_block *nfb, set_cpu_active(cpu, true); return notifier_from_errno(ret); } - return NOTIFY_OK; - - case CPU_DEAD: sched_domains_numa_masks_clear(cpu); return NOTIFY_OK; default: -- cgit v1.2.3 From 40190a78f85fec29f0fdd21f6b4415712085711e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 10 Mar 2016 12:54:13 +0100 Subject: sched/hotplug: Convert cpu_[in]active notifiers to state machine Now that we reduced everything into single notifiers, it's simple to move them into the hotplug state machine space. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Cc: rt@linutronix.de Signed-off-by: Thomas Gleixner --- include/linux/cpu.h | 12 --------- include/linux/cpuhotplug.h | 1 + include/linux/sched.h | 2 ++ kernel/cpu.c | 8 ++++-- kernel/sched/core.c | 67 +++++++++++++++------------------------------- 5 files changed, 30 insertions(+), 60 deletions(-) (limited to 'kernel/sched') diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 17017051bfb1..b22b000cf6ee 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -59,18 +59,6 @@ struct notifier_block; * CPU notifier priorities. */ enum { - /* - * SCHED_ACTIVE marks a cpu which is coming up active during - * CPU_ONLINE and CPU_DOWN_FAILED and must be the first notifier. Is - * also cpuset according to cpu_active mask right after activating the - * cpu. During CPU_DOWN_PREPARE, SCHED_INACTIVE reversed the operation. - * - * This ordering guarantees consistent cpu_active mask and - * migration behavior to all cpu notifiers. - */ - CPU_PRI_SCHED_ACTIVE = INT_MAX, - CPU_PRI_SCHED_INACTIVE = INT_MIN, - /* migration should happen before other stuff but after perf */ CPU_PRI_PERF = 20, CPU_PRI_MIGRATION = 10, diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 99fd1d2f76fe..9e07468bf1c5 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -13,6 +13,7 @@ enum cpuhp_state { CPUHP_AP_ONLINE, CPUHP_TEARDOWN_CPU, CPUHP_AP_ONLINE_IDLE, + CPUHP_AP_ACTIVE, CPUHP_AP_SMPBOOT_THREADS, CPUHP_AP_NOTIFY_ONLINE, CPUHP_AP_ONLINE_DYN, diff --git a/include/linux/sched.h b/include/linux/sched.h index 39597d0a005e..1e5f961b1a74 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -373,6 +373,8 @@ extern void trap_init(void); extern void update_process_times(int user); extern void scheduler_tick(void); extern int sched_cpu_starting(unsigned int cpu); +extern int sched_cpu_activate(unsigned int cpu); +extern int sched_cpu_deactivate(unsigned int cpu); extern void sched_show_task(struct task_struct *p); diff --git a/kernel/cpu.c b/kernel/cpu.c index f46d02b966bf..15402b72fbc7 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -923,8 +923,6 @@ void cpuhp_online_idle(enum cpuhp_state state) st->state = CPUHP_AP_ONLINE_IDLE; - /* The cpu is marked online, set it active now */ - set_cpu_active(cpu, true); /* Unpark the stopper thread and the hotplug thread of this cpu */ stop_machine_unpark(cpu); kthread_unpark(st->thread); @@ -1259,6 +1257,12 @@ static struct cpuhp_step cpuhp_ap_states[] = { [CPUHP_AP_ONLINE] = { .name = "ap:online", }, + /* First state is scheduler control. 
Interrupts are enabled */ + [CPUHP_AP_ACTIVE] = { + .name = "sched:active", + .startup = sched_cpu_activate, + .teardown = sched_cpu_deactivate, + }, /* Handle smpboot threads park/unpark */ [CPUHP_AP_SMPBOOT_THREADS] = { .name = "smpboot:threads", diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 541f9ab8ce4f..73bcd937d436 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6634,9 +6634,6 @@ static void sched_domains_numa_masks_set(unsigned int cpu) int node = cpu_to_node(cpu); int i, j; - if (!sched_smp_initialized) - return; - for (i = 0; i < sched_domains_numa_levels; i++) { for (j = 0; j < nr_node_ids; j++) { if (node_distance(j, node) <= sched_domains_numa_distance[i]) @@ -6649,9 +6646,6 @@ static void sched_domains_numa_masks_clear(unsigned int cpu) { int i, j; - if (!sched_smp_initialized) - return; - for (i = 0; i < sched_domains_numa_levels; i++) { for (j = 0; j < nr_node_ids; j++) cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]); @@ -7051,12 +7045,9 @@ static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */ * If we come here as part of a suspend/resume, don't touch cpusets because we * want to restore it back to its original state upon resume anyway. */ -static void cpuset_cpu_active(bool frozen) +static void cpuset_cpu_active(void) { - if (!sched_smp_initialized) - return; - - if (frozen) { + if (cpuhp_tasks_frozen) { /* * num_cpus_frozen tracks how many CPUs are involved in suspend * resume sequence. As long as this is not the last online @@ -7077,17 +7068,14 @@ static void cpuset_cpu_active(bool frozen) cpuset_update_active_cpus(true); } -static int cpuset_cpu_inactive(unsigned int cpu, bool frozen) +static int cpuset_cpu_inactive(unsigned int cpu) { unsigned long flags; struct dl_bw *dl_b; bool overflow; int cpus; - if (!sched_smp_initialized) - return 0; - - if (!frozen) { + if (!cpuhp_tasks_frozen) { rcu_read_lock_sched(); dl_b = dl_bw_of(cpu); @@ -7108,42 +7096,33 @@ static int cpuset_cpu_inactive(unsigned int cpu, bool frozen) return 0; } -static int sched_cpu_active(struct notifier_block *nfb, unsigned long action, - void *hcpu) +int sched_cpu_activate(unsigned int cpu) { - unsigned int cpu = (unsigned long)hcpu; + set_cpu_active(cpu, true); - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_DOWN_FAILED: - case CPU_ONLINE: - set_cpu_active(cpu, true); + if (sched_smp_initialized) { sched_domains_numa_masks_set(cpu); - cpuset_cpu_active(action & CPU_TASKS_FROZEN); - return NOTIFY_OK; - default: - return NOTIFY_DONE; + cpuset_cpu_active(); } + return 0; } -static int sched_cpu_inactive(struct notifier_block *nfb, - unsigned long action, void *hcpu) +int sched_cpu_deactivate(unsigned int cpu) { - unsigned int cpu = (unsigned long)hcpu; int ret; - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_DOWN_PREPARE: - set_cpu_active(cpu, false); - ret = cpuset_cpu_inactive(cpu, action & CPU_TASKS_FROZEN); - if (ret) { - set_cpu_active(cpu, true); - return notifier_from_errno(ret); - } - sched_domains_numa_masks_clear(cpu); - return NOTIFY_OK; - default: - return NOTIFY_DONE; + set_cpu_active(cpu, false); + + if (!sched_smp_initialized) + return 0; + + ret = cpuset_cpu_inactive(cpu); + if (ret) { + set_cpu_active(cpu, true); + return ret; } + sched_domains_numa_masks_clear(cpu); + return 0; } int sched_cpu_starting(unsigned int cpu) @@ -7197,10 +7176,6 @@ static int __init migration_init(void) migration_call(&migration_notifier, CPU_ONLINE, cpu); register_cpu_notifier(&migration_notifier); - /* Register cpu active notifiers */ - 
cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE); - cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE); - return 0; } early_initcall(migration_init); -- cgit v1.2.3 From b2454caa8977ade27292a71f2def5e403e24b4d5 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 10 Mar 2016 12:54:14 +0100 Subject: sched/hotplug: Move sync_rcu to be with set_cpu_active(false) The sync_rcu stuff is specifically for clearing bits in the active mask, such that everybody will observe the bit cleared and will not consider the cleared CPU for load-balancing etc. Signed-off-by: Peter Zijlstra (Intel) Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160310120025.169219710@linutronix.de Signed-off-by: Thomas Gleixner --- kernel/cpu.c | 15 --------------- kernel/sched/core.c | 14 ++++++++++++++ 2 files changed, 14 insertions(+), 15 deletions(-) (limited to 'kernel/sched') diff --git a/kernel/cpu.c b/kernel/cpu.c index 15402b72fbc7..c134a35374a1 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -703,21 +703,6 @@ static int takedown_cpu(unsigned int cpu) struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); int err; - /* - * By now we've cleared cpu_active_mask, wait for all preempt-disabled - * and RCU users of this state to go away such that all new such users - * will observe it. - * - * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might - * not imply sync_sched(), so wait for both. - * - * Do sync before park smpboot threads to take care the rcu boost case. - */ - if (IS_ENABLED(CONFIG_PREEMPT)) - synchronize_rcu_mult(call_rcu, call_rcu_sched); - else - synchronize_rcu(); - /* Park the smpboot threads */ kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread); smpboot_park_threads(cpu); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 73bcd937d436..0a3107809972 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7112,6 +7112,20 @@ int sched_cpu_deactivate(unsigned int cpu) int ret; set_cpu_active(cpu, false); + /* + * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU + * users of this state to go away such that all new such users will + * observe it. + * + * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might + * not imply sync_sched(), so wait for both. + * + * Do sync before park smpboot threads to take care the rcu boost case. + */ + if (IS_ENABLED(CONFIG_PREEMPT)) + synchronize_rcu_mult(call_rcu, call_rcu_sched); + else + synchronize_rcu(); if (!sched_smp_initialized) return 0; -- cgit v1.2.3 From 94baf7a5d882cde0b4d591f4ab89cc32ee39ac6a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 10 Mar 2016 12:54:15 +0100 Subject: sched/migration: Move prepare transition to SCHED_STARTING state We can piggyback that on the SCHED_STARTING state. It's not required before the cpu actually comes online. Name the function properly as it has nothing to do with migration.
Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160310120025.248226511@linutronix.de Signed-off-by: Thomas Gleixner --- kernel/sched/core.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) (limited to 'kernel/sched') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 0a3107809972..bafc308e6d45 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5424,11 +5424,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) switch (action & ~CPU_TASKS_FROZEN) { - case CPU_UP_PREPARE: - rq->calc_load_update = calc_load_update; - account_reset_rq(rq); - break; - case CPU_ONLINE: /* Update our root-domain */ raw_spin_lock_irqsave(&rq->lock, flags); @@ -7139,9 +7134,19 @@ int sched_cpu_deactivate(unsigned int cpu) return 0; } +static void sched_rq_cpu_starting(unsigned int cpu) +{ + struct rq *rq = cpu_rq(cpu); + + rq->calc_load_update = calc_load_update; + account_reset_rq(rq); + update_max_interval(); +} + int sched_cpu_starting(unsigned int cpu) { set_cpu_rq_start_time(cpu); + sched_rq_cpu_starting(cpu); return 0; } @@ -7182,11 +7187,8 @@ void __init sched_init_smp(void) static int __init migration_init(void) { void *cpu = (void *)(long)smp_processor_id(); - int err; - /* Initialize migration for the boot CPU */ - err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu); - BUG_ON(err == NOTIFY_BAD); + sched_rq_cpu_starting(smp_processor_id()); migration_call(&migration_notifier, CPU_ONLINE, cpu); register_cpu_notifier(&migration_notifier); -- cgit v1.2.3 From e9cd8fa4fcfd67c95db9b87c0fff88fa23cb00e5 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 10 Mar 2016 12:54:16 +0100 Subject: sched/migration: Move calc_load_migrate() into CPU_DYING It really does not matter when we fold the load for the outgoing cpu. It's almost dead anyway, so there is no harm if we fail to fold the few microseconds which are required for going fully away. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160310120025.328739226@linutronix.de Signed-off-by: Thomas Gleixner --- kernel/sched/core.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'kernel/sched') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index bafc308e6d45..688e8a83208c 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5447,9 +5447,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) migrate_tasks(rq); BUG_ON(rq->nr_running != 1); /* the migration thread */ raw_spin_unlock_irqrestore(&rq->lock, flags); - break; - - case CPU_DEAD: calc_load_migrate(rq); break; #endif -- cgit v1.2.3 From 7d97669933eb94245ec9b715753753ec5ca8f646 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 10 Mar 2016 12:54:17 +0100 Subject: sched/migration: Move CPU_ONLINE into scheduler state The alleged requirement that the migration notifier has a lower priority than perf is completely undocumented and there is no indication at all that this is true. perf does not even handle the CPU_ONLINE notification and perf really has nothing to do with migration. Move the CPU_ONLINE code into the sched_cpu_activate() state callback.
Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160310120025.421743581@linutronix.de Signed-off-by: Thomas Gleixner --- kernel/sched/core.c | 33 ++++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 11 deletions(-) (limited to 'kernel/sched') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 688e8a83208c..8d8d9034edff 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5424,17 +5424,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) switch (action & ~CPU_TASKS_FROZEN) { - case CPU_ONLINE: - /* Update our root-domain */ - raw_spin_lock_irqsave(&rq->lock, flags); - if (rq->rd) { - BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); - - set_rq_online(rq); - } - raw_spin_unlock_irqrestore(&rq->lock, flags); - break; - #ifdef CONFIG_HOTPLUG_CPU case CPU_DYING: sched_ttwu_pending(); @@ -7090,12 +7079,34 @@ static int cpuset_cpu_inactive(unsigned int cpu) int sched_cpu_activate(unsigned int cpu) { + struct rq *rq = cpu_rq(cpu); + unsigned long flags; + set_cpu_active(cpu, true); if (sched_smp_initialized) { sched_domains_numa_masks_set(cpu); cpuset_cpu_active(); } + + /* + * Put the rq online, if not already. This happens: + * + * 1) In the early boot process, because we build the real domains + * after all cpus have been brought up. + * + * 2) At runtime, if cpuset_cpu_active() fails to rebuild the + * domains. + */ + raw_spin_lock_irqsave(&rq->lock, flags); + if (rq->rd) { + BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); + set_rq_online(rq); + } + raw_spin_unlock_irqrestore(&rq->lock, flags); + + update_max_interval(); + return 0; } -- cgit v1.2.3 From f2785ddb5367e217365099294b89d6a84668069e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 10 Mar 2016 12:54:18 +0100 Subject: sched/hotplug: Move migration CPU_DYING to sched_cpu_dying() Remove the hotplug notifier and make it an explicit state. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160310120025.502222097@linutronix.de Signed-off-by: Thomas Gleixner --- include/linux/cpu.h | 2 -- include/linux/sched.h | 6 +++++ kernel/cpu.c | 2 +- kernel/sched/core.c | 72 ++++++++++++++++----------------------------------- 4 files changed, 29 insertions(+), 53 deletions(-) (limited to 'kernel/sched') diff --git a/include/linux/cpu.h b/include/linux/cpu.h index b22b000cf6ee..21597dcac0e2 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -59,9 +59,7 @@ struct notifier_block; * CPU notifier priorities. 
*/ enum { - /* migration should happen before other stuff but after perf */ CPU_PRI_PERF = 20, - CPU_PRI_MIGRATION = 10, /* bring up workqueues before normal notifiers and down after */ CPU_PRI_WORKQUEUE_UP = 5, diff --git a/include/linux/sched.h b/include/linux/sched.h index 1e5f961b1a74..47835cf8aefa 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -376,6 +376,12 @@ extern int sched_cpu_starting(unsigned int cpu); extern int sched_cpu_activate(unsigned int cpu); extern int sched_cpu_deactivate(unsigned int cpu); +#ifdef CONFIG_HOTPLUG_CPU +extern int sched_cpu_dying(unsigned int cpu); +#else +# define sched_cpu_dying NULL +#endif + extern void sched_show_task(struct task_struct *p); #ifdef CONFIG_LOCKUP_DETECTOR diff --git a/kernel/cpu.c b/kernel/cpu.c index c134a35374a1..d6eeb8c5ef88 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -1223,7 +1223,7 @@ static struct cpuhp_step cpuhp_ap_states[] = { [CPUHP_AP_SCHED_STARTING] = { .name = "sched:starting", .startup = sched_cpu_starting, - .teardown = NULL, + .teardown = sched_cpu_dying, }, /* * Low level startup/teardown notifiers. Run with interrupts diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 8d8d9034edff..a9a65ed772e3 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5411,51 +5411,6 @@ static void set_rq_offline(struct rq *rq) } } -/* - * migration_call - callback that gets triggered when a CPU is added. - * Here we can start up the necessary migration thread for the new CPU. - */ -static int -migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) -{ - int cpu = (long)hcpu; - unsigned long flags; - struct rq *rq = cpu_rq(cpu); - - switch (action & ~CPU_TASKS_FROZEN) { - -#ifdef CONFIG_HOTPLUG_CPU - case CPU_DYING: - sched_ttwu_pending(); - /* Update our root-domain */ - raw_spin_lock_irqsave(&rq->lock, flags); - if (rq->rd) { - BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); - set_rq_offline(rq); - } - migrate_tasks(rq); - BUG_ON(rq->nr_running != 1); /* the migration thread */ - raw_spin_unlock_irqrestore(&rq->lock, flags); - calc_load_migrate(rq); - break; -#endif - } - - update_max_interval(); - - return NOTIFY_OK; -} - -/* - * Register at high priority so that task migration (migrate_all_tasks) - * happens before everything else. This has to be lower priority than - * the notifier in the perf_event subsystem, though. 
- */ -static struct notifier_block migration_notifier = { - .notifier_call = migration_call, - .priority = CPU_PRI_MIGRATION, -}; - static void set_cpu_rq_start_time(unsigned int cpu) { struct rq *rq = cpu_rq(cpu); @@ -7158,6 +7113,28 @@ int sched_cpu_starting(unsigned int cpu) return 0; } +#ifdef CONFIG_HOTPLUG_CPU +int sched_cpu_dying(unsigned int cpu) +{ + struct rq *rq = cpu_rq(cpu); + unsigned long flags; + + /* Handle pending wakeups and then migrate everything off */ + sched_ttwu_pending(); + raw_spin_lock_irqsave(&rq->lock, flags); + if (rq->rd) { + BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); + set_rq_offline(rq); + } + migrate_tasks(rq); + BUG_ON(rq->nr_running != 1); + raw_spin_unlock_irqrestore(&rq->lock, flags); + calc_load_migrate(rq); + update_max_interval(); + return 0; +} +#endif + void __init sched_init_smp(void) { cpumask_var_t non_isolated_cpus; @@ -7194,12 +7171,7 @@ void __init sched_init_smp(void) static int __init migration_init(void) { - void *cpu = (void *)(long)smp_processor_id(); - sched_rq_cpu_starting(smp_processor_id()); - migration_call(&migration_notifier, CPU_ONLINE, cpu); - register_cpu_notifier(&migration_notifier); - return 0; } early_initcall(migration_init); -- cgit v1.2.3 From 20a5c8cc74ade5027c2b0e2bc724278afd6054f3 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 10 Mar 2016 12:54:20 +0100 Subject: sched/fair: Make ilb_notifier an explicit call No need for an extra notifier. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160310120025.693720241@linutronix.de Signed-off-by: Thomas Gleixner --- kernel/sched/core.c | 1 + kernel/sched/fair.c | 15 +-------------- kernel/sched/sched.h | 4 ++++ 3 files changed, 6 insertions(+), 14 deletions(-) (limited to 'kernel/sched') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index a9a65ed772e3..28ffd6882ea6 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7131,6 +7131,7 @@ int sched_cpu_dying(unsigned int cpu) raw_spin_unlock_irqrestore(&rq->lock, flags); calc_load_migrate(rq); update_max_interval(); + nohz_balance_exit_idle(cpu); return 0; } #endif diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 0fe30e66aff1..8b6db3626566 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7711,7 +7711,7 @@ static void nohz_balancer_kick(void) return; } -static inline void nohz_balance_exit_idle(int cpu) +void nohz_balance_exit_idle(unsigned int cpu) { if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) { /* @@ -7784,18 +7784,6 @@ void nohz_balance_enter_idle(int cpu) atomic_inc(&nohz.nr_cpus); set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); } - -static int sched_ilb_notifier(struct notifier_block *nfb, - unsigned long action, void *hcpu) -{ - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_DYING: - nohz_balance_exit_idle(smp_processor_id()); - return NOTIFY_OK; - default: - return NOTIFY_DONE; - } -} #endif static DEFINE_SPINLOCK(balancing); @@ -8600,7 +8588,6 @@ __init void init_sched_fair_class(void) #ifdef CONFIG_NO_HZ_COMMON nohz.next_balance = jiffies; zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); - cpu_notifier(sched_ilb_notifier, 0); #endif #endif /* SMP */ diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index ec2e8d23527e..16a27b624ee5 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1743,6 +1743,10 @@ enum rq_nohz_flag_bits { }; #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags) + +extern void nohz_balance_exit_idle(unsigned int cpu); +#else +static inline void 
nohz_balance_exit_idle(unsigned int cpu) { } #endif #ifdef CONFIG_IRQ_TIME_ACCOUNTING -- cgit v1.2.3 From e5ef27d0f5acf9f1db2882d7546a41c021f66820 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 10 Mar 2016 12:54:21 +0100 Subject: sched: Make hrtick_notifier an explicit call No need for an extra notifier. We don't need to handle all these states. It's sufficient to kill the timer when the cpu dies. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160310120025.770528462@linutronix.de Signed-off-by: Thomas Gleixner --- kernel/sched/core.c | 34 +--------------------------------- 1 file changed, 1 insertion(+), 33 deletions(-) (limited to 'kernel/sched') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 28ffd6882ea6..9c710ad0ac22 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -249,29 +249,6 @@ void hrtick_start(struct rq *rq, u64 delay) } } -static int -hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu) -{ - int cpu = (int)(long)hcpu; - - switch (action) { - case CPU_UP_CANCELED: - case CPU_UP_CANCELED_FROZEN: - case CPU_DOWN_PREPARE: - case CPU_DOWN_PREPARE_FROZEN: - case CPU_DEAD: - case CPU_DEAD_FROZEN: - hrtick_clear(cpu_rq(cpu)); - return NOTIFY_OK; - } - - return NOTIFY_DONE; -} - -static __init void init_hrtick(void) -{ - hotcpu_notifier(hotplug_hrtick, 0); -} #else /* * Called to set the hrtick timer state. @@ -288,10 +265,6 @@ void hrtick_start(struct rq *rq, u64 delay) hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), HRTIMER_MODE_REL_PINNED); } - -static inline void init_hrtick(void) -{ -} #endif /* CONFIG_SMP */ static void init_rq_hrtick(struct rq *rq) @@ -315,10 +288,6 @@ static inline void hrtick_clear(struct rq *rq) static inline void init_rq_hrtick(struct rq *rq) { } - -static inline void init_hrtick(void) -{ -} #endif /* CONFIG_SCHED_HRTICK */ /* @@ -7132,6 +7101,7 @@ int sched_cpu_dying(unsigned int cpu) calc_load_migrate(rq); update_max_interval(); nohz_balance_exit_idle(cpu); + hrtick_clear(rq); return 0; } #endif @@ -7157,8 +7127,6 @@ void __init sched_init_smp(void) cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); mutex_unlock(&sched_domains_mutex); - init_hrtick(); - /* Move init over to a non-isolated CPU */ if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0) BUG(); -- cgit v1.2.3
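
Taken together, the series leaves the scheduler with two explicit hotplug states and four callbacks in place of the old notifier maze. The abridged C sketch below recaps that end state purely as a reading aid; the state names, callback names and per-callback duties are taken from the diffs above (kernel/cpu.c and kernel/sched/core.c), while the rest of the cpuhp_ap_states[] table and the function bodies are omitted, so it is not a buildable excerpt.

/*
 * Abridged recap of where the scheduler hooks end up in the cpuhp
 * state table after this series (other states omitted).
 */

/* Runs on the hotplugged CPU with interrupts disabled. */
[CPUHP_AP_SCHED_STARTING] = {
	.name		= "sched:starting",
	.startup	= sched_cpu_starting,	/* rq start time, calc_load_update, account_reset_rq() */
	.teardown	= sched_cpu_dying,	/* rq offline, migrate_tasks(), calc_load_migrate(), nohz/hrtick cleanup */
},

/* Runs after the CPU is online, with interrupts enabled. */
[CPUHP_AP_ACTIVE] = {
	.name		= "sched:active",
	.startup	= sched_cpu_activate,	/* set_cpu_active(true), rq online, cpuset/NUMA mask updates */
	.teardown	= sched_cpu_deactivate,	/* set_cpu_active(false), RCU sync, cpuset/NUMA mask updates */
},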