author     Anna-Maria Behnsen    2024-02-21 10:05:48 +0100
committer  Thomas Gleixner       2024-02-22 17:52:32 +0100
commit     b2cf7507e18649a30512515ec0ca89f26b2c2d0f
tree       0ee6441f5893dfffc3107cbe22bd72ceca24139a /kernel/time
parent     36e40df35d2c1891fe58241640c7c95de4aa739b
timers: Always queue timers on the local CPU
The timer pull model is in place, so the heuristics which try to guess the
best target CPU at enqueue/modification time can be removed.

All non-pinned timers are now queued on the local CPU in the separate
storage and eventually pulled to a remote CPU at expiry time.
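To make the policy change concrete, here is a minimal userspace sketch (not
kernel code: TIMER_PINNED mirrors the kernel flag, while the CPU numbers and
the migration_enabled switch are simplified stand-ins for the kernel's static
branch and get_nohz_timer_target()) contrasting the removed enqueue-time
heuristic with the new always-local policy:

/*
 * Illustrative userspace model, not kernel code. Contrasts the removed
 * enqueue-time heuristic with the always-local enqueue introduced by
 * this commit.
 */
#include <stdbool.h>
#include <stdio.h>

#define TIMER_PINNED	0x1

static const int this_cpu = 2;			/* CPU performing the enqueue */
static const bool migration_enabled = true;	/* stand-in for the static branch */

/* Old heuristic: a non-pinned timer could be placed on a "better"
 * (non-idle) CPU chosen at enqueue time. */
static int old_target_cpu(unsigned int tflags)
{
	if (migration_enabled && !(tflags & TIMER_PINNED))
		return 0;	/* stand-in for get_nohz_timer_target()'s pick */
	return this_cpu;
}

/* New policy: every timer is queued on the local CPU; non-pinned timers
 * are migrated, if at all, only at expiry time by the pull model. */
static int new_target_cpu(unsigned int tflags)
{
	(void)tflags;
	return this_cpu;
}

int main(void)
{
	printf("non-pinned timer: old target CPU %d, new target CPU %d\n",
	       old_target_cpu(0), new_target_cpu(0));
	printf("pinned timer:     old target CPU %d, new target CPU %d\n",
	       old_target_cpu(TIMER_PINNED), new_target_cpu(TIMER_PINNED));
	return 0;
}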
Originally-by: Richard Cochran (linutronix GmbH) <richardcochran@gmail.com>
Signed-off-by: Anna-Maria Behnsen <anna-maria@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lore.kernel.org/r/20240221090548.36600-21-anna-maria@linutronix.de
Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/timer.c  36
1 file changed, 15 insertions(+), 21 deletions(-)
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 3ed135c8de43..4f4930da6448 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -635,11 +635,16 @@ trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
 
 	/*
 	 * We might have to IPI the remote CPU if the base is idle and the
-	 * timer is not deferrable. If the other CPU is on the way to idle
-	 * then it can't set base->is_idle as we hold the base lock:
+	 * timer is pinned. If it is a non pinned timer, it is only queued
+	 * on the remote CPU, when timer was running during queueing. Then
+	 * everything is handled by remote CPU anyway. If the other CPU is
+	 * on the way to idle then it can't set base->is_idle as we hold
+	 * the base lock:
 	 */
-	if (base->is_idle)
+	if (base->is_idle) {
+		WARN_ON_ONCE(!(timer->flags & TIMER_PINNED));
 		wake_up_nohz_cpu(base->cpu);
+	}
 }
 
 /*
@@ -986,17 +991,6 @@ static inline struct timer_base *get_timer_base(u32 tflags)
 	return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
 }
 
-static inline struct timer_base *
-get_target_base(struct timer_base *base, unsigned tflags)
-{
-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
-	if (static_branch_likely(&timers_migration_enabled) &&
-	    !(tflags & TIMER_PINNED))
-		return get_timer_cpu_base(tflags, get_nohz_timer_target());
-#endif
-	return get_timer_this_cpu_base(tflags);
-}
-
 static inline void __forward_timer_base(struct timer_base *base,
 					unsigned long basej)
 {
@@ -1151,7 +1145,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int option
 	if (!ret && (options & MOD_TIMER_PENDING_ONLY))
 		goto out_unlock;
 
-	new_base = get_target_base(base, timer->flags);
+	new_base = get_timer_this_cpu_base(timer->flags);
 
 	if (base != new_base) {
 		/*
@@ -2297,7 +2291,7 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
 	 * granularity skew (by design).
 	 */
 	if (!base_local->is_idle && time_after(nextevt, basej + 1)) {
-		base_local->is_idle = base_global->is_idle = true;
+		base_local->is_idle = true;
 		trace_timer_base_idle(true, base_local->cpu);
 	}
 	*idle = base_local->is_idle;
@@ -2363,13 +2357,13 @@ u64 timer_base_try_to_set_idle(unsigned long basej, u64 basem, bool *idle)
 void timer_clear_idle(void)
 {
 	/*
-	 * We do this unlocked. The worst outcome is a remote enqueue sending
-	 * a pointless IPI, but taking the lock would just make the window for
-	 * sending the IPI a few instructions smaller for the cost of taking
-	 * the lock in the exit from idle path.
+	 * We do this unlocked. The worst outcome is a remote pinned timer
+	 * enqueue sending a pointless IPI, but taking the lock would just
+	 * make the window for sending the IPI a few instructions smaller
+	 * for the cost of taking the lock in the exit from idle
+	 * path. Required for BASE_LOCAL only.
 	 */
 	__this_cpu_write(timer_bases[BASE_LOCAL].is_idle, false);
-	__this_cpu_write(timer_bases[BASE_GLOBAL].is_idle, false);
 	trace_timer_base_idle(false, smp_processor_id());
 
 	/* Activate without holding the timer_base->lock */
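For context on the WARN_ON_ONCE() added in trigger_dyntick_cpu(): after this
change only pinned timers may still require the wake_up_nohz_cpu() IPI,
because they must expire on their chosen CPU even when it is idle. A minimal
sketch of arming such a timer with the stock timer API (my_timer, my_timer_fn
and arm_pinned_timer are illustrative names, not part of this commit):

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list my_timer;	/* illustrative name */

/* With TIMER_PINNED the callback runs on the CPU that armed the timer,
 * so an idle target may need the IPI shown in trigger_dyntick_cpu(). */
static void my_timer_fn(struct timer_list *t)
{
	/* timer work */
}

static void arm_pinned_timer(void)
{
	timer_setup(&my_timer, my_timer_fn, TIMER_PINNED);
	my_timer.expires = jiffies + HZ;	/* fire in ~1 second */
	add_timer(&my_timer);
}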