author     Linus Torvalds    2016-10-03 13:39:00 -0700
committer  Linus Torvalds    2016-10-03 13:39:00 -0700
commit     af79ad2b1f337a00aa150b993635b10bc68dc842 (patch)
tree       06abe1d9735b27a449443d7d29a9801f690080be /arch
parent     e606d81d2d9596ab2b4fd0dc052eea0485b7e8c2 (diff)
parent     447976ef4fd09b1be88b316d1a81553f1aa7cd07 (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler changes from Ingo Molnar:
"The main changes are:
- irqtime accounting cleanups and enhancements. (Frederic Weisbecker)
- schedstat debugging enhancements, make it more broadly runtime
available. (Josh Poimboeuf)
- More work on asymmetric topology/capacity scheduling. (Morten
Rasmussen)
- sched/wait fixes and cleanups. (Oleg Nesterov)
- PELT (per entity load tracking) improvements. (Peter Zijlstra)
- Rewrite and enhance select_idle_siblings(). (Peter Zijlstra)
- sched/numa enhancements/fixes. (Rik van Riel)
- sched/cputime scalability improvements. (Stanislaw Gruszka)
- Load calculation arithmetics fixes. (Dietmar Eggemann)
- sched/deadline enhancements. (Tommaso Cucinotta)
- Fix utilization accounting when switching to the SCHED_NORMAL
policy. (Vincent Guittot)
- ... plus misc cleanups and enhancements"
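The irqtime work summarized above (the sched/irqtime commits in the list below) consolidates its accounting on the u64_stats API. As background, a minimal sketch of that writer/reader pattern, using a simplified stand-in struct — the real bookkeeping lives in kernel/sched/cputime.c and differs in detail:

#include <linux/u64_stats_sync.h>

/* Illustrative stand-in for the scheduler's per-CPU irqtime counters. */
struct irqtime_stats {
	u64			hardirq_time;
	u64			softirq_time;
	struct u64_stats_sync	sync;
};

/* Writer side: bump a counter inside the seqcount section so 32-bit
 * readers can never observe a torn 64-bit value. */
static void irqtime_add_hardirq(struct irqtime_stats *stats, u64 delta)
{
	u64_stats_update_begin(&stats->sync);
	stats->hardirq_time += delta;
	u64_stats_update_end(&stats->sync);
}

/* Reader side: retry until a consistent snapshot of both fields is seen. */
static u64 irqtime_total(struct irqtime_stats *stats)
{
	unsigned int seq;
	u64 total;

	do {
		seq = u64_stats_fetch_begin(&stats->sync);
		total = stats->hardirq_time + stats->softirq_time;
	} while (u64_stats_fetch_retry(&stats->sync, seq));

	return total;
}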
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (64 commits)
sched/irqtime: Consolidate irqtime flushing code
sched/irqtime: Consolidate accounting synchronization with u64_stats API
u64_stats: Introduce IRQs disabled helpers
sched/irqtime: Remove needless IRQs disablement on kcpustat update
sched/irqtime: No need for preempt-safe accessors
sched/fair: Fix min_vruntime tracking
sched/debug: Add SCHED_WARN_ON()
sched/core: Fix set_user_nice()
sched/fair: Introduce set_curr_task() helper
sched/core, ia64: Rename set_curr_task()
sched/core: Fix incorrect utilization accounting when switching to fair class
sched/core: Optimize SCHED_SMT
sched/core: Rewrite and improve select_idle_siblings()
sched/core: Replace sd_busy/nr_busy_cpus with sched_domain_shared
sched/core: Introduce 'struct sched_domain_shared'
sched/core: Restructure destroy_sched_domain()
sched/core: Remove unused @cpu argument from destroy_sched_domain*()
sched/wait: Introduce init_wait_entry()
sched/wait: Avoid abort_exclusive_wait() in __wait_on_bit_lock()
sched/wait: Avoid abort_exclusive_wait() in ___wait_event()
...
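One of the titles above is small enough to show whole: "sched/debug: Add SCHED_WARN_ON()" introduces a warning helper that still evaluates its argument but compiles the warning away on !CONFIG_SCHED_DEBUG builds. A sketch of the pattern; the in-tree definition sits in kernel/sched/sched.h and may differ in detail:

#ifdef CONFIG_SCHED_DEBUG
/* Warn once, printing the failing expression itself. */
# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
#else
/* Keep evaluating x (it may have side effects), but emit nothing. */
# define SCHED_WARN_ON(x)	((void)(x))
#endif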
Diffstat (limited to 'arch')
-rw-r--r--  arch/ia64/kernel/mca.c    | 10
-rw-r--r--  arch/x86/kernel/smpboot.c | 46
2 files changed, 35 insertions, 21 deletions
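For readers of the diff below: an architecture hands the scheduler its CPU topology as a NULL-terminated array of struct sched_domain_topology_level entries, innermost level first, installed via set_sched_topology(). Roughly, with details hedged (the authoritative definitions are in include/linux/sched.h of this era):

/* Rough shape of the types behind the topology tables in the diff. */
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);	/* CPUs at this level */
typedef int (*sched_domain_flags_f)(void);			/* SD_* flags for it */

struct sched_domain_topology_level {
	sched_domain_mask_f	mask;		/* e.g. cpu_smt_mask */
	sched_domain_flags_f	sd_flags;	/* e.g. cpu_smt_flags */
	int			flags;
	int			numa_level;
	struct sd_data		data;
#ifdef CONFIG_SCHED_DEBUG
	char			*name;		/* set by SD_INIT_NAME(SMT/MC/DIE) */
#endif
};

extern void set_sched_topology(struct sched_domain_topology_level *tl);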
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index eb9220cde76c..d47616c8b885 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -986,7 +986,7 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
 	int cpu = smp_processor_id();
 
 	previous_current = curr_task(cpu);
-	set_curr_task(cpu, current);
+	ia64_set_curr_task(cpu, current);
 	if ((p = strchr(current->comm, ' ')))
 		*p = '\0';
 
@@ -1360,14 +1360,14 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 				cpumask_clear_cpu(i, &mca_cpu);	/* wake next cpu */
 				while (monarch_cpu != -1)
 					cpu_relax();	/* spin until last cpu leaves */
-				set_curr_task(cpu, previous_current);
+				ia64_set_curr_task(cpu, previous_current);
 				ia64_mc_info.imi_rendez_checkin[cpu]
 						= IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
 				return;
 			}
 		}
 	}
-	set_curr_task(cpu, previous_current);
+	ia64_set_curr_task(cpu, previous_current);
 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
 	monarch_cpu = -1;	/* This frees the slaves and previous monarchs */
 }
@@ -1729,7 +1729,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 		NOTIFY_INIT(DIE_INIT_SLAVE_LEAVE, regs, (long)&nd, 1);
 
 		mprintk("Slave on cpu %d returning to normal service.\n", cpu);
-		set_curr_task(cpu, previous_current);
+		ia64_set_curr_task(cpu, previous_current);
 		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
 		atomic_dec(&slaves);
 		return;
@@ -1756,7 +1756,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 
 	mprintk("\nINIT dump complete.  Monarch on cpu %d returning to normal service.\n", cpu);
 	atomic_dec(&monarchs);
-	set_curr_task(cpu, previous_current);
+	ia64_set_curr_task(cpu, previous_current);
 	monarch_cpu = -1;
 	return;
 }
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 4296beb8fdd3..7137ec4eea9a 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -471,7 +471,7 @@ static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 	return false;
 }
 
-static struct sched_domain_topology_level numa_inside_package_topology[] = {
+static struct sched_domain_topology_level x86_numa_in_package_topology[] = {
 #ifdef CONFIG_SCHED_SMT
 	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
 #endif
@@ -480,22 +480,23 @@ static struct sched_domain_topology_level numa_inside_package_topology[] = {
 #endif
 	{ NULL, },
 };
+
+static struct sched_domain_topology_level x86_topology[] = {
+#ifdef CONFIG_SCHED_SMT
+	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
+#endif
+#ifdef CONFIG_SCHED_MC
+	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
+#endif
+	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
+	{ NULL, },
+};
+
 /*
- * set_sched_topology() sets the topology internal to a CPU.  The
- * NUMA topologies are layered on top of it to build the full
- * system topology.
- *
- * If NUMA nodes are observed to occur within a CPU package, this
- * function should be called.  It forces the sched domain code to
- * only use the SMT level for the CPU portion of the topology.
- * This essentially falls back to relying on NUMA information
- * from the SRAT table to describe the entire system topology
- * (except for hyperthreads).
+ * Set if a package/die has multiple NUMA nodes inside.
+ * AMD Magny-Cours and Intel Cluster-on-Die have this.
  */
-static void primarily_use_numa_for_topology(void)
-{
-	set_sched_topology(numa_inside_package_topology);
-}
+static bool x86_has_numa_in_package;
 
 void set_cpu_sibling_map(int cpu)
 {
@@ -558,7 +559,7 @@ void set_cpu_sibling_map(int cpu)
 				c->booted_cores = cpu_data(i).booted_cores;
 		}
 		if (match_die(c, o) && !topology_same_node(c, o))
-			primarily_use_numa_for_topology();
+			x86_has_numa_in_package = true;
 	}
 
 	threads = cpumask_weight(topology_sibling_cpumask(cpu));
@@ -1304,6 +1305,16 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
 		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
 	}
+
+	/*
+	 * Set 'default' x86 topology, this matches default_topology() in that
+	 * it has NUMA nodes as a topology level. See also
+	 * native_smp_cpus_done().
+	 *
+	 * Must be done before set_cpus_sibling_map() is ran.
+	 */
+	set_sched_topology(x86_topology);
+
 	set_cpu_sibling_map(0);
 
 	switch (smp_sanity_check(max_cpus)) {
@@ -1370,6 +1381,9 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
 {
 	pr_debug("Boot done\n");
 
+	if (x86_has_numa_in_package)
+		set_sched_topology(x86_numa_in_package_topology);
+
 	nmi_selftest();
 	impress_friends();
 	setup_ioapic_dest();
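The ia64 hunks are mechanical: the core scheduler now provides a set_curr_task() helper of its own (see "sched/core, ia64: Rename set_curr_task()" above), so ia64's arch-private helper of the same name becomes ia64_set_curr_task() to avoid the collision.

The x86 hunks split the topology choice into two phases: native_smp_prepare_cpus() installs the default x86_topology table before set_cpu_sibling_map() first runs; sibling-map construction merely records the NUMA-in-package condition in x86_has_numa_in_package; and native_smp_cpus_done() swaps in x86_numa_in_package_topology once every CPU has been brought up. The old code instead called set_sched_topology() from the middle of sibling-map setup. Reduced to a sketch of the control flow implied by the diff (illustrative, not the literal kernel code):

void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
	/* ... per-CPU mask allocation ... */
	set_sched_topology(x86_topology);	/* default: SMT / MC / DIE */
	set_cpu_sibling_map(0);			/* may set x86_has_numa_in_package */
	/* ... sanity checks, bring up secondary CPUs ... */
}

void __init native_smp_cpus_done(unsigned int max_cpus)
{
	if (x86_has_numa_in_package)		/* NUMA nodes inside a package? */
		set_sched_topology(x86_numa_in_package_topology);
	/* ... */
}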