author		Hao Jia				2023-03-16 16:18:06 +0800
committer	Peter Zijlstra			2023-03-22 10:10:58 +0100
commit		530bfad1d53d103f98cec66a3e491a36d397884d (patch)
tree		d770ca67d960ed90c160e70f733a430ed0f5b1a6 /kernel/sched/rt.c
parent		d91e15a21d4b3823ce93a42b05f0d171689f4e6a (diff)
sched/core: Avoid selecting a throttled task to run when core-sched is enabled
When an {rt,cfs}_rq or a DL task is throttled, its cookied tasks are
not dequeued from the core tree, so sched_core_find() and
sched_core_next() may return a throttled task, which may cause the
throttled task to run on the CPU.

Add checks in sched_core_find() and sched_core_next() to make sure
that the task returned is runnable and not throttled.
Co-developed-by: Cruz Zhao <CruzZhao@linux.alibaba.com>
Signed-off-by: Cruz Zhao <CruzZhao@linux.alibaba.com>
Signed-off-by: Hao Jia <jiahao.os@bytedance.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20230316081806.69544-1-jiahao.os@bytedance.com
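
For context, the selection side of this change lives in kernel/sched/core.c and is not shown in the rt.c-limited diff below. What follows is a minimal sketch of the idea, assuming a sched_task_is_throttled() wrapper that dispatches to the new per-class hook; the names are reconstructed from the commit description, not from the diff shown here:

	/*
	 * Sketch of the core.c side: dispatch to the per-class hook if
	 * the scheduling class provides one; otherwise the task is never
	 * considered throttled.
	 */
	static int sched_task_is_throttled(struct task_struct *p, int cpu)
	{
		if (p->sched_class->task_is_throttled)
			return p->sched_class->task_is_throttled(p, cpu);

		return 0;
	}

	/*
	 * sched_core_next() walks the core tree to the next task with a
	 * matching cookie, now skipping any task whose class reports it
	 * as throttled; sched_core_find() gains an equivalent check.
	 */
	static struct task_struct *sched_core_next(struct task_struct *p,
						   unsigned long cookie)
	{
		struct rb_node *node = &p->core_node;
		int cpu = task_cpu(p);

		do {
			node = rb_next(node);
			if (!node)
				return NULL;

			p = __node_2_sc(node);
			if (p->core_cookie != cookie)
				return NULL;
		} while (sched_task_is_throttled(p, cpu));

		return p;
	}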
Diffstat (limited to 'kernel/sched/rt.c')
-rw-r--r--	kernel/sched/rt.c	19
1 file changed, 19 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 0a11f44adee5..9d67dfbf1000 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2677,6 +2677,21 @@ static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
 	return 0;
 }
 
+#ifdef CONFIG_SCHED_CORE
+static int task_is_throttled_rt(struct task_struct *p, int cpu)
+{
+	struct rt_rq *rt_rq;
+
+#ifdef CONFIG_RT_GROUP_SCHED
+	rt_rq = task_group(p)->rt_rq[cpu];
+#else
+	rt_rq = &cpu_rq(cpu)->rt;
+#endif
+
+	return rt_rq_throttled(rt_rq);
+}
+#endif
+
 DEFINE_SCHED_CLASS(rt) = {
 
 	.enqueue_task		= enqueue_task_rt,
@@ -2710,6 +2725,10 @@ DEFINE_SCHED_CLASS(rt) = {
 
 	.update_curr		= update_curr_rt,
 
+#ifdef CONFIG_SCHED_CORE
+	.task_is_throttled	= task_is_throttled_rt,
+#endif
+
 #ifdef CONFIG_UCLAMP_TASK
 	.uclamp_enabled		= 1,
 #endif
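
The hook added above covers the RT class only, while the commit message also mentions cfs_rq and DL throttling. By analogy with task_is_throttled_rt(), a sketch of what a fair-class counterpart in kernel/sched/fair.c could look like follows; that hunk is outside this rt.c-limited diffstat, and throttled_hierarchy() is assumed here as the existing fair.c helper that reports whether a cfs_rq or any of its ancestors is throttled:

	#ifdef CONFIG_SCHED_CORE
	/*
	 * Sketch of a fair-class counterpart to task_is_throttled_rt().
	 * With group scheduling, look up the task group's cfs_rq on this
	 * CPU; otherwise use the root cfs_rq of the CPU's runqueue.
	 */
	static int task_is_throttled_fair(struct task_struct *p, int cpu)
	{
		struct cfs_rq *cfs_rq;

	#ifdef CONFIG_FAIR_GROUP_SCHED
		cfs_rq = task_group(p)->cfs_rq[cpu];
	#else
		cfs_rq = &cpu_rq(cpu)->cfs;
	#endif
		return throttled_hierarchy(cfs_rq);
	}
	#endif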