author	Ingo Molnar	2024-03-08 12:18:18 +0100
committer	Ingo Molnar	2024-03-12 12:00:00 +0100
commit	a88b17080294f735c4124acccfa2d803a6a7d46f (patch)
tree	d79072773755cce5e784a1f160b11581f5fc6742 /kernel/sched
parent	646ebaf51c64c6416ca89765c20041363fc1b518 (diff)
sched/balancing: Rename find_idlest_group() => sched_balance_find_dst_group()
Standardize scheduler load-balancing function names on the sched_balance_() prefix.

Also use 'dst' instead of 'idlest', because it's not really true that we return the 'idlest' group or CPU: we sort by idle-exit latency and only return the idlest CPUs from the lowest-latency set of CPUs. The true 'idlest' CPUs often remain idle for a long time and are never returned as long as the system is under-loaded.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Link: https://lore.kernel.org/r/20240308111819.1101550-13-mingo@kernel.org
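[Illustration, not part of the commit message.] The policy described above - sort candidates by idle-exit latency first, and only prefer idleness within the lowest-latency set - can be sketched as a small standalone C example. The types, field names and numbers below are hypothetical and heavily simplified; this is not the actual fair.c selection code.

/*
 * Illustrative sketch only, not kernel code: models "lowest idle-exit
 * latency first, longest-idle second" as described in the commit message.
 */
#include <stdbool.h>
#include <stdio.h>

struct candidate_cpu {
	int		id;
	bool		idle;
	unsigned int	exit_latency_ns;	/* cost of leaving the current idle state */
	unsigned long	idle_duration_ns;	/* how long the CPU has been idle */
};

/* Pick a destination CPU: lowest exit latency wins; ties go to the idlest. */
static int pick_dst_cpu(const struct candidate_cpu *cpus, int nr)
{
	int best = -1;

	for (int i = 0; i < nr; i++) {
		if (!cpus[i].idle)
			continue;
		if (best < 0 ||
		    cpus[i].exit_latency_ns < cpus[best].exit_latency_ns ||
		    (cpus[i].exit_latency_ns == cpus[best].exit_latency_ns &&
		     cpus[i].idle_duration_ns > cpus[best].idle_duration_ns))
			best = i;
	}

	return best < 0 ? -1 : cpus[best].id;
}

int main(void)
{
	struct candidate_cpu cpus[] = {
		{ .id = 0, .idle = true, .exit_latency_ns = 5000, .idle_duration_ns = 900000 },
		{ .id = 1, .idle = true, .exit_latency_ns =  200, .idle_duration_ns =  10000 },
		{ .id = 2, .idle = true, .exit_latency_ns =  200, .idle_duration_ns =  50000 },
	};

	/* CPU 0 has been idle longest but pays the highest wakeup cost; CPU 2 wins. */
	printf("dst CPU: %d\n", pick_dst_cpu(cpus, 3));
	return 0;
}

With these made-up values, the globally "idlest" CPU 0 sits behind the costliest wakeup and is never returned; a CPU from the low-latency set is chosen instead, which is why 'dst' is a better name than 'idlest'.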
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/fair.c	8
1 file changed, 4 insertions, 4 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 02ff0272b2e4..d0c3a091d7d1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7098,7 +7098,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
}
static struct sched_group *
-find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu);
+sched_balance_find_dst_group(struct sched_domain *sd, struct task_struct *p, int this_cpu);
/*
* sched_balance_find_dst_group_cpu - find the idlest CPU among the CPUs in the group.
@@ -7185,7 +7185,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
continue;
}
- group = find_idlest_group(sd, p, cpu);
+ group = sched_balance_find_dst_group(sd, p, cpu);
if (!group) {
sd = sd->child;
continue;
@@ -10296,13 +10296,13 @@ static bool update_pick_idlest(struct sched_group *idlest,
}
/*
- * find_idlest_group() finds and returns the least busy CPU group within the
+ * sched_balance_find_dst_group() finds and returns the least busy CPU group within the
* domain.
*
* Assumes p is allowed on at least one CPU in sd.
*/
static struct sched_group *
-find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
+sched_balance_find_dst_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
{
struct sched_group *idlest = NULL, *local = NULL, *group = sd->groups;
struct sg_lb_stats local_sgs, tmp_sgs;