author     Ingo Molnar <mingo@kernel.org>  2024-03-08 12:18:10 +0100
committer  Ingo Molnar <mingo@kernel.org>  2024-03-12 11:59:59 +0100
commit     14ff4dbd34f46cc6b6105f549983321241ccbba9
tree       744c74967f445e7332ab7022f9793e9ec20b60a9
parent     983be0628c061989b6cc175d2f5e429b40699fbb
sched/balancing: Rename rebalance_domains() => sched_balance_domains()
Standardize scheduler load-balancing function names on the sched_balance_() prefix.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Link: https://lore.kernel.org/r/20240308111819.1101550-5-mingo@kernel.org
-rw-r--r--  Documentation/scheduler/sched-domains.rst                      2
-rw-r--r--  Documentation/translations/zh_CN/scheduler/sched-domains.rst   2
-rw-r--r--  arch/arm/kernel/topology.c                                     2
-rw-r--r--  kernel/sched/fair.c                                            8
-rw-r--r--  kernel/sched/sched.h                                           2
5 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/Documentation/scheduler/sched-domains.rst b/Documentation/scheduler/sched-domains.rst
index c7ea05f4107b..5d8e8b8b269e 100644
--- a/Documentation/scheduler/sched-domains.rst
+++ b/Documentation/scheduler/sched-domains.rst
@@ -34,7 +34,7 @@ out of balance are tasks moved between groups.
In kernel/sched/core.c, sched_balance_trigger() is run periodically on each CPU
through sched_tick(). It raises a softirq after the next regularly scheduled
rebalancing event for the current runqueue has arrived. The actual load
-balancing workhorse, sched_balance_softirq()->rebalance_domains(), is then run
+balancing workhorse, sched_balance_softirq()->sched_balance_domains(), is then run
in softirq context (SCHED_SOFTIRQ).
The latter function takes two arguments: the runqueue of current CPU and whether
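The trigger-and-softirq flow this paragraph describes can be illustrated with a small self-contained C sketch. Apart from the sched_balance_trigger()/sched_balance_softirq()/sched_balance_domains() names taken from the text, everything below (struct rq_stub, the softirq_pending flag, the tick loop in main()) is an illustrative assumption, not the kernel's implementation:

#include <stdio.h>
#include <stdbool.h>

struct rq_stub { unsigned long next_balance; int cpu; bool idle; };

static bool softirq_pending;            /* stand-in for a raised SCHED_SOFTIRQ */

static void sched_balance_domains(struct rq_stub *rq, bool idle)
{
        printf("balancing domains of CPU%d (idle=%d)\n", rq->cpu, (int)idle);
        rq->next_balance += 10;         /* pretend the next balancing event was rescheduled */
}

static void sched_balance_softirq(struct rq_stub *rq)
{
        sched_balance_domains(rq, rq->idle);    /* the actual workhorse */
}

static void sched_balance_trigger(struct rq_stub *rq, unsigned long now)
{
        if (now >= rq->next_balance)    /* the rebalancing event has arrived */
                softirq_pending = true; /* models raise_softirq(SCHED_SOFTIRQ) */
}

int main(void)
{
        struct rq_stub rq = { .next_balance = 3, .cpu = 0, .idle = false };

        for (unsigned long tick = 0; tick < 20; tick++) {
                sched_balance_trigger(&rq, tick);       /* called from the periodic tick */
                if (softirq_pending) {                  /* later, in "softirq context" */
                        softirq_pending = false;
                        sched_balance_softirq(&rq);
                }
        }
        return 0;
}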
diff --git a/Documentation/translations/zh_CN/scheduler/sched-domains.rst b/Documentation/translations/zh_CN/scheduler/sched-domains.rst
index 1a8587a971f9..e6590fd80640 100644
--- a/Documentation/translations/zh_CN/scheduler/sched-domains.rst
+++ b/Documentation/translations/zh_CN/scheduler/sched-domains.rst
@@ -36,7 +36,7 @@ shared by CPUs. The intersection of the CPU masks of any two groups is not necessarily empty; if this is
In kernel/sched/core.c, sched_balance_trigger() is run periodically on each CPU through sched_tick().
It raises a softirq after the next regularly scheduled rebalancing event for the current runqueue has arrived. The real load-balancing
-work is done by sched_balance_softirq()->rebalance_domains(), executed in softirq context
+work is done by sched_balance_softirq()->sched_balance_domains(), executed in softirq context
(SCHED_SOFTIRQ).
The latter function takes two arguments: the runqueue of the current CPU and whether it was idle when sched_tick() was called. The function will
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index ef0058de432b..2336ee2aa44a 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -42,7 +42,7 @@
* can take this difference into account during load balance. A per cpu
* structure is preferred because each CPU updates its own cpu_capacity field
* during the load balance except for idle cores. One idle core is selected
- * to run the rebalance_domains for all idle cores and the cpu_capacity can be
+ * to run the sched_balance_domains for all idle cores and the cpu_capacity can be
* updated during this sequence.
*/
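A minimal sketch of the per-CPU capacity bookkeeping this comment describes follows. The NR_CPUS array and the two helper functions are assumptions invented for the example (the real code keeps a per-CPU variable updated from the scheduler's capacity hooks); the point being illustrated is that each busy CPU writes only its own slot, while the one selected idle core updates the slots of the other idle CPUs on their behalf:

#include <stdio.h>

#define NR_CPUS 4

/* Illustrative stand-in for the per-CPU cpu_capacity field. */
static unsigned long cpu_capacity[NR_CPUS];

/* Each busy CPU updates only its own entry during load balance... */
static void update_own_capacity(int cpu, unsigned long cap)
{
        cpu_capacity[cpu] = cap;
}

/*
 * ...while the one idle core selected to run sched_balance_domains()
 * for all idle cores updates their entries as part of that sequence.
 */
static void update_idle_capacities(const int *idle_cpus, int nr_idle,
                                   unsigned long cap)
{
        for (int i = 0; i < nr_idle; i++)
                cpu_capacity[idle_cpus[i]] = cap;
}

int main(void)
{
        int idle[] = { 2, 3 };

        update_own_capacity(0, 1024);
        update_own_capacity(1, 768);
        update_idle_capacities(idle, 2, 512);

        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                printf("cpu%d capacity=%lu\n", cpu, cpu_capacity[cpu]);
        return 0;
}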
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e377b675920a..330788b0c617 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -11685,7 +11685,7 @@ static inline bool update_newidle_cost(struct sched_domain *sd, u64 cost)
*
* Balancing parameters are set up in init_sched_domains.
*/
-static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
+static void sched_balance_domains(struct rq *rq, enum cpu_idle_type idle)
{
int continue_balancing = 1;
int cpu = rq->cpu;
@@ -12161,7 +12161,7 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags)
rq_unlock_irqrestore(rq, &rf);
if (flags & NOHZ_BALANCE_KICK)
- rebalance_domains(rq, CPU_IDLE);
+ sched_balance_domains(rq, CPU_IDLE);
}
if (time_after(next_balance, rq->next_balance)) {
@@ -12422,7 +12422,7 @@ static __latent_entropy void sched_balance_softirq(struct softirq_action *h)
/*
* If this CPU has a pending NOHZ_BALANCE_KICK, then do the
* balancing on behalf of the other idle CPUs whose ticks are
- * stopped. Do nohz_idle_balance *before* rebalance_domains to
+ * stopped. Do nohz_idle_balance *before* sched_balance_domains to
* give the idle CPUs a chance to load balance. Else we may
* load balance only within the local sched_domain hierarchy
* and abort nohz_idle_balance altogether if we pull some load.
@@ -12432,7 +12432,7 @@ static __latent_entropy void sched_balance_softirq(struct softirq_action *h)
/* normal load balance */
update_blocked_averages(this_rq->cpu);
- rebalance_domains(this_rq, idle);
+ sched_balance_domains(this_rq, idle);
}
/*
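The ordering requirement spelled out in the comment above (balance on behalf of the tick-stopped idle CPUs first, and only fall through to the local sched_balance_domains() pass when there was no NOHZ kick to handle) can be summarized in a short sketch. The *_stub() helpers and the main() driver are simplified assumptions for illustration, not the fair.c implementation:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for balancing on behalf of idle, tick-stopped CPUs. */
static bool nohz_idle_balance_stub(bool nohz_kick_pending)
{
        if (!nohz_kick_pending)
                return false;
        printf("balancing on behalf of idle (tick-stopped) CPUs\n");
        return true;    /* the kick was handled */
}

/* Stand-in for balancing the local sched_domain hierarchy. */
static void sched_balance_domains_stub(void)
{
        printf("balancing the local sched_domain hierarchy\n");
}

static void sched_balance_softirq_stub(bool nohz_kick_pending)
{
        /*
         * Do the idle-CPU pass first: if the local pass ran first and
         * pulled load, the idle CPUs might never get balanced, which
         * is exactly what the comment in the patch warns against.
         */
        if (nohz_idle_balance_stub(nohz_kick_pending))
                return;

        /* normal load balance */
        sched_balance_domains_stub();
}

int main(void)
{
        sched_balance_softirq_stub(true);       /* NOHZ_BALANCE_KICK pending */
        sched_balance_softirq_stub(false);      /* ordinary periodic path */
        return 0;
}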
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 5b0ddb0e6017..41024c1c49b4 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2904,7 +2904,7 @@ extern void cfs_bandwidth_usage_dec(void);
#define NOHZ_NEWILB_KICK_BIT 2
#define NOHZ_NEXT_KICK_BIT 3
-/* Run rebalance_domains() */
+/* Run sched_balance_domains() */
#define NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT)
/* Update blocked load */
#define NOHZ_STATS_KICK BIT(NOHZ_STATS_KICK_BIT)
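As a usage illustration of these flag bits, here is a tiny self-contained example of building and testing a NOHZ kick mask in the way the NOHZ_BALANCE_KICK check earlier in this patch does. The bit values 0 and 1 for NOHZ_BALANCE_KICK_BIT and NOHZ_STATS_KICK_BIT are assumptions for the sketch (only bits 2 and 3 appear in this hunk), as is the main() driver:

#include <stdio.h>

#define BIT(nr)                 (1UL << (nr))

#define NOHZ_BALANCE_KICK_BIT   0       /* assumed value for the example */
#define NOHZ_STATS_KICK_BIT     1       /* assumed value for the example */

/* Run sched_balance_domains() */
#define NOHZ_BALANCE_KICK       BIT(NOHZ_BALANCE_KICK_BIT)
/* Update blocked load */
#define NOHZ_STATS_KICK         BIT(NOHZ_STATS_KICK_BIT)

int main(void)
{
        unsigned long flags = NOHZ_BALANCE_KICK | NOHZ_STATS_KICK;

        if (flags & NOHZ_BALANCE_KICK)
                printf("would run sched_balance_domains(rq, CPU_IDLE)\n");
        if (flags & NOHZ_STATS_KICK)
                printf("would update the blocked load\n");
        return 0;
}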