author	Shrikanth Hegde	2024-03-26 20:56:16 +0530
committer	Ingo Molnar	2024-03-28 10:39:18 +0100
commit	902e786c4a54a2c4f7462b9026bb56610888db3d (patch)
tree	1562733605c2dc6a3a6b233b63bea99a8de3eef0 /kernel/sched
parent	c829d6818b60c591f70c060b2bb75d76cf0cec6d (diff)
sched/fair: Combine EAS check with root_domain::overutilized access
Access to root_domain::overutilized is always paired with sched_energy_enabled() in the pattern:

	if (sched_energy_enabled() && !overutilized)
		do something

So modify the helper function to encode this pattern. This makes the code more readable, as a call site now reads: do something when the root domain is not overutilized. The helper always returns true when EAS is disabled.

No change in functionality intended.

Suggested-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20240326152616.380999-1-sshegde@linux.ibm.com
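For illustration, here is a minimal standalone C model of the combined check (not kernel code: the kernel's sched_energy_enabled() static key is stubbed with a plain boolean named eas_enabled, and READ_ONCE() is omitted), showing that the helper returns true whenever EAS is disabled:

	#include <stdbool.h>
	#include <stdio.h>

	struct root_domain { int overutilized; };

	/* Stand-in for the kernel's sched_energy_enabled() static key. */
	static bool eas_enabled;

	static inline int is_rd_overutilized(struct root_domain *rd)
	{
		/* True when EAS is disabled OR the domain is overutilized,
		 * so EAS-only paths bail out in both cases. */
		return !eas_enabled || rd->overutilized;
	}

	int main(void)
	{
		struct root_domain rd = { .overutilized = 0 };

		eas_enabled = false;
		printf("EAS off, not overutilized: %d\n", is_rd_overutilized(&rd)); /* 1 */

		eas_enabled = true;
		printf("EAS on,  not overutilized: %d\n", is_rd_overutilized(&rd)); /* 0 */

		rd.overutilized = 1;
		printf("EAS on,  overutilized:     %d\n", is_rd_overutilized(&rd)); /* 1 */

		return 0;
	}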
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/fair.c | 20
1 file changed, 7 insertions(+), 13 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1856e58595c3..38462305bc39 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6686,12 +6686,11 @@ static inline bool cpu_overutilized(int cpu)
}
/*
- * Ensure that caller can do EAS. overutilized value
- * make sense only if EAS is enabled
+ * overutilized value make sense only if EAS is enabled
*/
static inline int is_rd_overutilized(struct root_domain *rd)
{
- return READ_ONCE(rd->overutilized);
+ return !sched_energy_enabled() || READ_ONCE(rd->overutilized);
}
static inline void set_rd_overutilized_status(struct root_domain *rd,
@@ -6710,8 +6709,6 @@ static inline void check_update_overutilized_status(struct rq *rq)
* overutilized field is used for load balancing decisions only
* if energy aware scheduler is being used
*/
- if (!sched_energy_enabled())
- return;
if (!is_rd_overutilized(rq->rd) && cpu_overutilized(rq->cpu))
set_rd_overutilized_status(rq->rd, SG_OVERUTILIZED);
@@ -7999,7 +7996,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
rcu_read_lock();
pd = rcu_dereference(rd->pd);
- if (!pd || is_rd_overutilized(rd))
+ if (!pd)
goto unlock;
/*
@@ -8202,7 +8199,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
cpumask_test_cpu(cpu, p->cpus_ptr))
return cpu;
- if (sched_energy_enabled()) {
+ if (!is_rd_overutilized(this_rq()->rd)) {
new_cpu = find_energy_efficient_cpu(p, prev_cpu);
if (new_cpu >= 0)
return new_cpu;
@@ -10903,12 +10900,9 @@ static struct sched_group *sched_balance_find_src_group(struct lb_env *env)
if (busiest->group_type == group_misfit_task)
goto force_balance;
- if (sched_energy_enabled()) {
- struct root_domain *rd = env->dst_rq->rd;
-
- if (rcu_dereference(rd->pd) && !is_rd_overutilized(rd))
- goto out_balanced;
- }
+ if (!is_rd_overutilized(env->dst_rq->rd) &&
+ rcu_dereference(env->dst_rq->rd->pd))
+ goto out_balanced;
/* ASYM feature bypasses nice load balance check */
if (busiest->group_type == group_asym_packing)
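Taken together, the hunks above collapse each two-step EAS gate into a single call. A condensed before/after sketch of the select_task_rq_fair() call site (abridged from the hunk above):

	/* Before: explicit EAS check at the call site. */
	if (sched_energy_enabled()) {
		new_cpu = find_energy_efficient_cpu(p, prev_cpu);
		if (new_cpu >= 0)
			return new_cpu;
	}

	/* After: the EAS check is folded into is_rd_overutilized(),
	 * which returns true (skipping this path) when EAS is off. */
	if (!is_rd_overutilized(this_rq()->rd)) {
		new_cpu = find_energy_efficient_cpu(p, prev_cpu);
		if (new_cpu >= 0)
			return new_cpu;
	}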