author    Matthew Wilcox    2008-03-14 13:19:33 -0400
committer Matthew Wilcox    2008-04-17 10:42:40 -0400
commit    f06d96865861c3dd01520f47e2e61c899db1631f (patch)
tree      d2fbd08af06a96b10192f7b27dd3ea2df1bb93d8 /kernel
parent    64ac24e738823161693bf791f87adc802cf529ff (diff)
Introduce down_killable()
down_killable() is the functional counterpart of mutex_lock_killable.

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
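For illustration only, here is a minimal caller sketch (not part of this patch: my_device, its sem member, and my_driver_do_work() are hypothetical, and the extern declaration for down_killable() sits outside this kernel/-limited diffstat). A task blocked in down_killable() can be killed while waiting, in which case the call returns non-zero instead of acquiring the semaphore:

        #include <linux/semaphore.h>
        #include <linux/errno.h>

        /* Hypothetical device structure, used only for this sketch. */
        struct my_device {
                struct semaphore sem;
        };

        static int my_driver_do_work(struct my_device *dev)
        {
                /*
                 * Sleep until the semaphore is acquired; a fatal signal
                 * (e.g. SIGKILL) aborts the wait with a non-zero return.
                 */
                if (down_killable(&dev->sem))
                        return -EINTR;

                /* ... critical section protected by dev->sem ... */

                up(&dev->sem);
                return 0;
        }

The return convention mirrors down_interruptible(): 0 once the semaphore is taken, -EINTR if the wait is aborted, except that here only a fatal signal can abort it.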
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/semaphore.c   22
1 file changed, 22 insertions(+), 0 deletions(-)
diff --git a/kernel/semaphore.c b/kernel/semaphore.c
index d5a72702f261..2da2aed950f3 100644
--- a/kernel/semaphore.c
+++ b/kernel/semaphore.c
@@ -34,6 +34,7 @@
 static noinline void __down(struct semaphore *sem);
 static noinline int __down_interruptible(struct semaphore *sem);
+static noinline int __down_killable(struct semaphore *sem);
 static noinline void __up(struct semaphore *sem);
 
 void down(struct semaphore *sem)
 {
@@ -61,6 +62,20 @@ int down_interruptible(struct semaphore *sem)
 }
 EXPORT_SYMBOL(down_interruptible);
 
+int down_killable(struct semaphore *sem)
+{
+        unsigned long flags;
+        int result = 0;
+
+        spin_lock_irqsave(&sem->lock, flags);
+        if (unlikely(sem->count-- <= 0))
+                result = __down_killable(sem);
+        spin_unlock_irqrestore(&sem->lock, flags);
+
+        return result;
+}
+EXPORT_SYMBOL(down_killable);
+
 /**
  * down_trylock - try to acquire the semaphore, without waiting
  * @sem: the semaphore to be acquired
@@ -143,6 +158,8 @@ static inline int __sched __down_common(struct semaphore *sem, long state)
         for (;;) {
                 if (state == TASK_INTERRUPTIBLE && signal_pending(task))
                         goto interrupted;
+                if (state == TASK_KILLABLE && fatal_signal_pending(task))
+                        goto interrupted;
                 __set_task_state(task, state);
                 spin_unlock_irq(&sem->lock);
                 schedule();
@@ -178,6 +195,11 @@ static noinline int __sched __down_interruptible(struct semaphore *sem)
         return __down_common(sem, TASK_INTERRUPTIBLE);
 }
 
+static noinline int __sched __down_killable(struct semaphore *sem)
+{
+        return __down_common(sem, TASK_KILLABLE);
+}
+
 static noinline void __sched __up(struct semaphore *sem)
 {
         if (unlikely(list_empty(&sem->wait_list)))