author		Vincent Guittot		2023-04-11 11:06:11 +0200
committer	Greg Kroah-Hartman	2023-04-20 12:35:14 +0200
commit		31c5ad43bdd14fd90221f4a93d764a923b0fae5e (patch)
tree		a4cc6a9d6e95ea073acc9a28c5db8a680f2a34d5 /kernel
parent		98c77adf519dcf0cd42fdd3d43b46e49bf8e1d40 (diff)
sched/fair: Fix imbalance overflow
[ Upstream commit 91dcf1e8068e9a8823e419a7a34ff4341275fb70 ]

When the local group is fully busy but its average load is above the
system average load, computing the imbalance will overflow, and the
local group is not the best target for pulling this load anyway.

Fixes: 0b0695f2b34a ("sched/fair: Rework load_balance()")
Reported-by: Tingjia Cao <tjcao980311@gmail.com>
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Tingjia Cao <tjcao980311@gmail.com>
Link: https://lore.kernel.org/lkml/CABcWv9_DAhVBOq2=W=2ypKE9dKM5s2DvoV8-U0+GDwwuKZ89jQ@mail.gmail.com/T/
Signed-off-by: Sasha Levin <sashal@kernel.org>
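For context, a small self-contained sketch of the failure mode the patch guards against. This is not kernel code: the variable names and example values are made up, and the arithmetic only loosely mirrors the unsigned-load computation in calculate_imbalance(). With unsigned types, local->avg_load being above sds->avg_load makes the subtraction wrap around to a huge value instead of going negative, which is the overflow that the early return added by this patch avoids.

/* Standalone illustration (not kernel source) of the unsigned wraparound. */
#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

int main(void)
{
	/* Hypothetical load figures: the local group is above the system average. */
	unsigned long sds_avg_load   = 2000;
	unsigned long local_avg_load = 2100;
	unsigned long local_capacity = 1024;

	/*
	 * Without the guard: the unsigned subtraction wraps around to an
	 * enormous value, so the computed imbalance is bogus.
	 */
	unsigned long imbalance = (sds_avg_load - local_avg_load) *
				  local_capacity / SCHED_CAPACITY_SCALE;
	printf("without guard: imbalance = %lu\n", imbalance);

	/* With the guard added by this patch: bail out with a zero imbalance. */
	if (local_avg_load >= sds_avg_load)
		imbalance = 0;
	printf("with guard:    imbalance = %lu\n", imbalance);

	return 0;
}

In short, the patch below stops before computing that difference whenever the local group's average load already meets or exceeds the system average.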
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/fair.c	10
1 file changed, 10 insertions, 0 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 88821ab009b3..ec2d913280e6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10041,6 +10041,16 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 		sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) /
 				sds->total_capacity;
+
+		/*
+		 * If the local group is more loaded than the average system
+		 * load, don't try to pull any tasks.
+		 */
+		if (local->avg_load >= sds->avg_load) {
+			env->imbalance = 0;
+			return;
+		}
+
 	}
 
 	/*