From a1dfb6311c7739e21e160bc4c5575a1b21b48c87 Mon Sep 17 00:00:00 2001
From: Marcelo Tosatti
Date: Thu, 13 May 2021 01:29:22 +0200
Subject: tick/nohz: Kick only _queued_ task whose tick dependency is updated

When the tick dependency of a task is updated, we want it to acknowledge
the new state and restart the tick if needed. If the task is not running,
we don't need to kick it because it will observe the new dependency upon
scheduling in. But if the task is running, we may need to send an IPI to
it so that it gets notified.

Unfortunately we don't have the means to check if a task is running in a
race-free way. Checking p->on_cpu in a synchronized way against
p->tick_dep_mask would imply adding a full barrier between
prepare_task_switch() and tick_nohz_task_switch(), which we want to avoid
in this fast path.

Therefore we blindly fire an IPI to the task's CPU.

Meanwhile we can check whether the task is queued on the CPU's runqueue,
because p->on_rq is always set to TASK_ON_RQ_QUEUED _before_ schedule()
and its full barrier that precedes tick_nohz_task_switch(). And if the
task is queued on a nohz_full CPU, it is also likely to be running, since
the isolation constraints prescribe running a single task per full
dynticks CPU.

So use this as a trick to check whether we can spare an IPI toward a
non-running task.

NOTE: For the ordering to be correct, it is assumed that we never
deactivate a task while it is running, the only exception being the task
deactivating itself while scheduling out.

Suggested-by: Peter Zijlstra
Signed-off-by: Marcelo Tosatti
Signed-off-by: Frederic Weisbecker
Signed-off-by: Ingo Molnar
Acked-by: Peter Zijlstra
Link: https://lore.kernel.org/r/20210512232924.150322-9-frederic@kernel.org
---
 kernel/sched/core.c | 5 +++++
 1 file changed, 5 insertions(+)

(limited to 'kernel/sched')

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5226cc26a095..78e480f7881a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1590,6 +1590,11 @@ static inline void uclamp_post_fork(struct task_struct *p) { }
 static inline void init_uclamp(void) { }
 #endif /* CONFIG_UCLAMP_TASK */
 
+bool sched_task_on_rq(struct task_struct *p)
+{
+	return task_on_rq_queued(p);
+}
+
 static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (!(flags & ENQUEUE_NOCLOCK))
--
cgit v1.2.3
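The kernel/time side of the patch above is not shown because the listing is
limited to 'kernel/sched'. As a rough sketch only, a tick-sched caller of the
new sched_task_on_rq() helper could look like the function below. The name
tick_nohz_kick_task_sketch() and the exact body are assumptions rather than
the actual tick-sched.c change, although task_cpu(), cpu_online() and
tick_nohz_full_kick_cpu() are existing kernel interfaces.

/*
 * Illustrative sketch, not the real tick-sched.c hunk: if the task is not
 * queued on a runqueue it cannot be running, so the IPI can be spared and
 * the task will observe its new tick dependency when it schedules in.
 */
static void tick_nohz_kick_task_sketch(struct task_struct *tsk)
{
	int cpu;

	if (!sched_task_on_rq(tsk))
		return;

	cpu = task_cpu(tsk);
	if (cpu_online(cpu))
		tick_nohz_full_kick_cpu(cpu);
}

The sketch checks p->on_rq (via sched_task_on_rq()) rather than p->on_cpu,
matching the ordering argument in the changelog: a task queued on a nohz_full
CPU is likely to be the one running there.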
From 0fdcccfafcffac70b452b3127cc3d981f0117655 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 13 May 2021 01:29:23 +0200
Subject: tick/nohz: Call tick_nohz_task_switch() with interrupts disabled

Call tick_nohz_task_switch() slightly earlier after the context switch to
benefit from disabled IRQs. This way the function doesn't need to disable
them once more.

Signed-off-by: Peter Zijlstra
Signed-off-by: Frederic Weisbecker
Signed-off-by: Ingo Molnar
Link: https://lore.kernel.org/r/20210512232924.150322-10-frederic@kernel.org
---
 kernel/sched/core.c      | 2 +-
 kernel/time/tick-sched.c | 7 +------
 2 files changed, 2 insertions(+), 7 deletions(-)

(limited to 'kernel/sched')

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 78e480f7881a..8f86ac28877e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4212,6 +4212,7 @@ static struct rq *finish_task_switch(struct task_struct *prev)
 	vtime_task_switch(prev);
 	perf_event_task_sched_in(prev, current);
 	finish_task(prev);
+	tick_nohz_task_switch();
 	finish_lock_switch(rq);
 	finish_arch_post_lock_switch();
 	kcov_finish_switch(current);
@@ -4257,7 +4258,6 @@ static struct rq *finish_task_switch(struct task_struct *prev)
 		put_task_struct_rcu_user(prev);
 	}
 
-	tick_nohz_task_switch();
 	return rq;
 }
 
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 197a3bd882ad..6ea619d644fa 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -487,13 +487,10 @@ void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bi
  */
 void __tick_nohz_task_switch(void)
 {
-	unsigned long flags;
 	struct tick_sched *ts;
 
-	local_irq_save(flags);
-
 	if (!tick_nohz_full_cpu(smp_processor_id()))
-		goto out;
+		return;
 
 	ts = this_cpu_ptr(&tick_cpu_sched);
 
@@ -502,8 +499,6 @@ void __tick_nohz_task_switch(void)
 	    atomic_read(&current->signal->tick_dep_mask))
 		tick_nohz_full_kick();
 	}
-out:
-	local_irq_restore(flags);
 }
 
 /* Get the boot-time nohz CPU list from the kernel parameters. */
--
cgit v1.2.3
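For reference, applying the tick-sched.c hunks above yields roughly the
function below. The two lines opening the ts->tick_stopped block fall between
the hunks and are filled in here from the surrounding kernel code, and the
comment about the calling context is illustrative rather than part of the
patch.

/*
 * __tick_nohz_task_switch() as it reads after this change: it relies on the
 * caller, finish_task_switch(), still running with IRQs disabled, so the
 * local_irq_save()/local_irq_restore() pair is no longer needed.
 */
void __tick_nohz_task_switch(void)
{
	struct tick_sched *ts;

	if (!tick_nohz_full_cpu(smp_processor_id()))
		return;

	ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->tick_stopped) {
		if (atomic_read(&current->tick_dep_mask) ||
		    atomic_read(&current->signal->tick_dep_mask))
			tick_nohz_full_kick();
	}
}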