author     Peter Zijlstra  2021-12-25 01:04:57 +0100
committer  Peter Zijlstra  2022-01-18 12:09:59 +0100
commit     7e406d1ff39b8ee574036418a5043c86723170cf
tree       be7b54af0c125126592596a0a37b95bb3d875355 /kernel
parent     a315da5e686b02b20c1713dda818e8fb691526bb
sched: Avoid double preemption in __cond_resched_*lock*()
For PREEMPT/PREEMPT_DYNAMIC the *_unlock() will already trigger a
preemption, so there is no point in then calling
preempt_schedule_common() *again*.

Use _cond_resched() instead, since this is a NOP for the preemptible
configs while it provides a preemption point for the others.
Reported-by: xuhaifeng <xuhaifeng@oppo.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/YcGnvDEYBwOiV0cR@hirez.programming.kicks-ass.net
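
As a concrete illustration of that argument, here is a rough sketch (not
verbatim kernel source) of how _cond_resched() resolves per preemption
model; under CONFIG_PREEMPT_DYNAMIC the same choice is made at boot time
via a static call rather than at build time, which is elided here:

/*
 * Rough sketch, simplified from include/linux/sched.h; the
 * CONFIG_PREEMPT_DYNAMIC boot-time static-call patching is elided.
 */
#ifdef CONFIG_PREEMPTION
/*
 * Fully preemptible: the *_unlock() above already triggered any needed
 * preemption, so this degenerates to a NOP reporting "did not reschedule".
 */
static inline int _cond_resched(void)
{
	return 0;
}
#else
/* PREEMPT_VOLUNTARY / PREEMPT_NONE: this is the explicit preemption point. */
static inline int _cond_resched(void)
{
	return __cond_resched();	/* reschedules iff should_resched() */
}
#endif

That is what makes the "if (!_cond_resched()) cpu_relax();" pattern in the
hunks below correct for every model: either a reschedule already happened
(via the unlock or via __cond_resched()), or the CPU is simply relaxed
while the lock is contended.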
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c  |  12
1 file changed, 3 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0d2ab2a2f9fe..56b428c8ea96 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8218,9 +8218,7 @@ int __cond_resched_lock(spinlock_t *lock)
 
 	if (spin_needbreak(lock) || resched) {
 		spin_unlock(lock);
-		if (resched)
-			preempt_schedule_common();
-		else
+		if (!_cond_resched())
 			cpu_relax();
 		ret = 1;
 		spin_lock(lock);
@@ -8238,9 +8236,7 @@ int __cond_resched_rwlock_read(rwlock_t *lock)
 
 	if (rwlock_needbreak(lock) || resched) {
 		read_unlock(lock);
-		if (resched)
-			preempt_schedule_common();
-		else
+		if (!_cond_resched())
 			cpu_relax();
 		ret = 1;
 		read_lock(lock);
@@ -8258,9 +8254,7 @@ int __cond_resched_rwlock_write(rwlock_t *lock)
 
 	if (rwlock_needbreak(lock) || resched) {
 		write_unlock(lock);
-		if (resched)
-			preempt_schedule_common();
-		else
+		if (!_cond_resched())
 			cpu_relax();
 		ret = 1;
 		write_lock(lock);
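
For reference, the post-patch __cond_resched_lock() reads roughly as
follows (reconstructed from the hunk above; the declarations and the
lockdep assert come from the pre-existing function body, not from this
diff). The rwlock read/write variants are identical apart from the lock,
unlock and needbreak helpers:

int __cond_resched_lock(spinlock_t *lock)
{
	int resched = should_resched(PREEMPT_LOCK_OFFSET);
	int ret = 0;

	lockdep_assert_held(lock);

	if (spin_needbreak(lock) || resched) {
		spin_unlock(lock);
		/*
		 * Single preemption point: either _cond_resched() (or the
		 * unlock itself, on preemptible kernels) rescheduled, or we
		 * just relax the CPU while the lock is contended.
		 */
		if (!_cond_resched())
			cpu_relax();
		ret = 1;
		spin_lock(lock);
	}
	return ret;
}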