author | Ingo Molnar | 2021-06-03 19:00:49 +0200
committer | Ingo Molnar | 2021-06-03 19:00:49 +0200
commit | a9e906b71f963f19aabf7af59f73f37c929a5221 (patch)
tree | 0228f82f4719160cfa4635c0c042c9c73fc2d2a5 /kernel/sched
parent | 475ea6c60279e9f2ddf7e4cf2648cd8ae0608361 (diff)
parent | fcf6631f3736985ec89bdd76392d3c7bfb60119f (diff)
Merge branch 'sched/urgent' into sched/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r-- | kernel/sched/fair.c | 23
1 file changed, 14 insertions, 9 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a2c30e52de76..ce625bf8024d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3453,10 +3453,9 @@ update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cf
 static inline void
 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 {
-	long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
+	long delta, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
 	unsigned long load_avg;
 	u64 load_sum = 0;
-	s64 delta_sum;
 	u32 divider;
 
 	if (!runnable_sum)
@@ -3503,13 +3502,13 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
 	load_sum = (s64)se_weight(se) * runnable_sum;
 	load_avg = div_s64(load_sum, divider);
 
-	delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
-	delta_avg = load_avg - se->avg.load_avg;
+	delta = load_avg - se->avg.load_avg;
 
 	se->avg.load_sum = runnable_sum;
 	se->avg.load_avg = load_avg;
-	add_positive(&cfs_rq->avg.load_avg, delta_avg);
-	add_positive(&cfs_rq->avg.load_sum, delta_sum);
+
+	add_positive(&cfs_rq->avg.load_avg, delta);
+	cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
 }
 
 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
@@ -3720,11 +3719,17 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
  */
 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
+	/*
+	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
+	 * See ___update_load_avg() for details.
+	 */
+	u32 divider = get_pelt_divider(&cfs_rq->avg);
+
 	dequeue_load_avg(cfs_rq, se);
 	sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
-	sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
+	cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider;
 	sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
-	sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
+	cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
 
 	add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
 
@@ -8047,7 +8052,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
 		/* Propagate pending load changes to the parent, if any: */
 		se = cfs_rq->tg->se[cpu];
 		if (se && !skip_blocked_update(se))
-			update_load_avg(cfs_rq_of(se), se, 0);
+			update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
 
 		/*
 		 * There can be a lot of idle CPU cgroups. Don't let fully
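
The recurring pattern in the hunks above is that the cfs_rq's *_sum fields are no longer decremented directly by the detached entity's *_sum; instead they are recomputed from the corresponding *_avg and the PELT divider, so the sum can never drift out of sync with the average through accumulated rounding. The following standalone sketch (not kernel code) illustrates that pattern under simplified assumptions: PELT_DIVIDER, struct avg, and detach() are illustrative stand-ins, and only util_avg/util_sum are modeled.

/*
 * Minimal sketch of the sum/avg sync pattern applied in the diff.
 * Build with: cc -o detach detach.c
 */
#include <stdio.h>

#define PELT_DIVIDER 47742UL	/* illustrative divider value */

struct avg {
	unsigned long util_avg;
	unsigned long long util_sum;
};

/* Clamp-at-zero subtraction, mirroring the kernel's sub_positive(). */
static void sub_positive(unsigned long *val, unsigned long sub)
{
	*val = (*val > sub) ? *val - sub : 0;
}

static void detach(struct avg *rq, const struct avg *se)
{
	sub_positive(&rq->util_avg, se->util_avg);
	/* Re-derive the sum from the avg instead of subtracting se's sum. */
	rq->util_sum = (unsigned long long)rq->util_avg * PELT_DIVIDER;
}

int main(void)
{
	struct avg rq = { .util_avg = 300, .util_sum = 300ULL * PELT_DIVIDER };
	struct avg se = { .util_avg = 120, .util_sum = 120ULL * PELT_DIVIDER + 7 };

	detach(&rq, &se);
	printf("util_avg=%lu util_sum=%llu\n", rq.util_avg, rq.util_sum);
	return 0;
}

With the old two-subtraction scheme, the small discrepancy in se.util_sum (the "+ 7" above) would leave rq.util_sum slightly inconsistent with rq.util_avg after detach; recomputing the sum from the avg keeps the two coherent by construction.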