From c3921ab71507b108d51a0f1ee960f80cd668a93d Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Sun, 11 May 2008 16:04:48 -0700
Subject: Add new 'cond_resched_bkl()' helper function

It acts exactly like a regular 'cond_resched()', but will not get
optimized away when CONFIG_PREEMPT is set.

Normal kernel code is already preemptable in the presence of
CONFIG_PREEMPT, so cond_resched() is optimized away (see commit
02b67cc3ba36bdba351d6c3a00593f4ec550d9d3 "sched: do not do
cond_resched() when CONFIG_PREEMPT"). But when wanting to
conditionally reschedule while holding a lock, you need to use
"cond_resched_lock(lock)", and the new function is the BKL equivalent
of that.

Also make fs/locks.c use it.

Signed-off-by: Linus Torvalds
---
 include/linux/sched.h | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

(limited to 'include/linux/sched.h')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0c35b0343a76..4ab9f32f9238 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2037,13 +2037,13 @@ static inline int need_resched(void)
  * cond_resched_lock() will drop the spinlock before scheduling,
  * cond_resched_softirq() will enable bhs before scheduling.
  */
+extern int _cond_resched(void);
 #ifdef CONFIG_PREEMPT
 static inline int cond_resched(void)
 {
 	return 0;
 }
 #else
-extern int _cond_resched(void);
 static inline int cond_resched(void)
 {
 	return _cond_resched();
@@ -2051,6 +2051,10 @@ static inline int cond_resched(void)
 #endif
 extern int cond_resched_lock(spinlock_t * lock);
 extern int cond_resched_softirq(void);
+static inline int cond_resched_bkl(void)
+{
+	return _cond_resched();
+}
 
 /*
  * Does a critical section need to be broken due to another
--
cgit v1.2.3
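
As an illustration of the intended use, below is a minimal sketch of a
caller holding the Big Kernel Lock. The function name, loop, and item
count are hypothetical; the actual fs/locks.c hunk is not shown in this
view, which is limited to include/linux/sched.h. The call is safe while
holding the BKL because schedule() drops and reacquires the BKL across a
context switch.

/*
 * Hypothetical caller: per-item work under the Big Kernel Lock that
 * voluntarily yields between iterations.
 */
#include <linux/sched.h>	/* cond_resched_bkl() */
#include <linux/smp_lock.h>	/* lock_kernel(), unlock_kernel() */

static void process_items_under_bkl(int nr_items)
{
	int i;

	lock_kernel();
	for (i = 0; i < nr_items; i++) {
		/* ... per-item work that needs the BKL ... */

		/*
		 * Unlike plain cond_resched(), this is not compiled
		 * away when CONFIG_PREEMPT=y; BKL-held code is not
		 * otherwise preemptable, so the explicit yield is
		 * still needed here.
		 */
		cond_resched_bkl();
	}
	unlock_kernel();
}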