author    Valentin Schneider <valentin.schneider@arm.com>  2020-02-27 19:14:32 +0000
committer Ingo Molnar <mingo@kernel.org>                   2020-03-06 12:57:23 +0100
commit 38502ab4bf3c463081bfd53356980a9ec2f32d1d
tree   6c352256955a8885a816b59253cc5434ba7c289e /kernel/sched/topology.c
parent 0621df315402dd7bc56f7272fae9778701289825
sched/topology: Don't enable EAS on SMT systems
EAS already requires asymmetric CPU capacities to be enabled, and mixing
this with SMT is an aberration, but better safe than sorry.

Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Acked-by: Quentin Perret <qperret@google.com>
Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lkml.kernel.org/r/20200227191433.31994-2-valentin.schneider@arm.com
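For context, the new test slots in between the existing asymmetric-capacity
check and the perf-domain build loop in build_perf_domains(). Below is a
minimal sketch of the resulting bail-out order, not the kernel's actual
function: eas_usable() is a hypothetical name, sd_asym_cpucapacity is
scheduler-internal (declared in kernel/sched/sched.h, so this would only
compile inside kernel/sched), and sched_smt_active() is the real helper
from <linux/sched/smt.h>.

#include <linux/cpumask.h>
#include <linux/percpu-defs.h>
#include <linux/sched/smt.h>

/* Hypothetical condensation of build_perf_domains()'s early bail-outs. */
static bool eas_usable(const struct cpumask *cpu_map)
{
	/* Condition 2: asymmetric CPU capacities (e.g. big.LITTLE). */
	if (!per_cpu(sd_asym_cpucapacity, cpumask_first(cpu_map)))
		return false;

	/* Condition 3, added by this patch: EAS does not handle SMT. */
	if (sched_smt_active())
		return false;

	/*
	 * Conditions 1, 4 and 5 (EM availability, EM complexity,
	 * schedutil) are checked further down in the real function.
	 */
	return true;
}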
Diffstat (limited to 'kernel/sched/topology.c')
-rw-r--r--  kernel/sched/topology.c | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 00911884b7e7..8344757bba6e 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -317,8 +317,9 @@ static void sched_energy_set(bool has_eas)
  * EAS can be used on a root domain if it meets all the following conditions:
  *    1. an Energy Model (EM) is available;
  *    2. the SD_ASYM_CPUCAPACITY flag is set in the sched_domain hierarchy.
- *    3. the EM complexity is low enough to keep scheduling overheads low;
- *    4. schedutil is driving the frequency of all CPUs of the rd;
+ *    3. no SMT is detected.
+ *    4. the EM complexity is low enough to keep scheduling overheads low;
+ *    5. schedutil is driving the frequency of all CPUs of the rd;
  *
  * The complexity of the Energy Model is defined as:
  *
@@ -360,6 +361,13 @@ static bool build_perf_domains(const struct cpumask *cpu_map)
 		goto free;
 	}
 
+	/* EAS definitely does *not* handle SMT */
+	if (sched_smt_active()) {
+		pr_warn("rd %*pbl: Disabling EAS, SMT is not supported\n",
+			cpumask_pr_args(cpu_map));
+		goto free;
+	}
+
 	for_each_cpu(i, cpu_map) {
 		/* Skip already covered CPUs. */
 		if (find_pd(pd, i))
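An aside on the warning's format string: %*pbl is the printk extension for
rendering a bitmap as a ranged list, and cpumask_pr_args() expands to the
(nr_cpu_ids, cpumask_bits(mask)) pair that %*pbl consumes. A minimal sketch
of the same pattern, using a hypothetical helper name, report_rd_span():

#include <linux/cpumask.h>
#include <linux/printk.h>

/* Hypothetical helper: report which CPUs a root domain spans. */
static void report_rd_span(const struct cpumask *cpu_map)
{
	/* For CPUs {0,1,2,3,6} this prints "root domain spans CPUs 0-3,6". */
	pr_info("root domain spans CPUs %*pbl\n", cpumask_pr_args(cpu_map));
}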