| author | Oleg Nesterov | 2007-10-16 23:27:23 -0700 |
|---|---|---|
| committer | Linus Torvalds | 2007-10-17 08:42:54 -0700 |
| commit | 6db840fa7887980ef68a649640d506fe069eef0c (patch) | |
| tree | 6248c1e65b572f1c2b14c46848e5a18df003f60e /fs | |
| parent | 356d6d5058c8082b9e811838ab2fa27825c947e4 (diff) | |
exec: RT sub-thread can livelock and monopolize CPU on exec
de_thread() yields while waiting for ->group_leader to become a zombie. This
livelocks if a realtime-priority execer shares the same CPU with ->group_leader:
the execer spins in yield() and the ordinary-priority leader never gets to run
and exit. Change the code to use the ->group_exit_task/notify_count mechanics
instead.
This patch certainly uglifies the code; perhaps someone can suggest something
better.
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
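
As an illustration of the problem described above (not the kernel code itself), here is a minimal userspace sketch: a SCHED_FIFO thread waits for a plain SCHED_OTHER thread by calling sched_yield() in a loop while both are pinned to one CPU, so the ordinary thread never runs. The thread names, the bounded retry count, and the pthread setup are assumptions of this demo; SCHED_FIFO requires root or CAP_SYS_NICE.

/* Hypothetical demo, not kernel code: an RT thread yield-spins waiting for a
 * flag that only a SCHED_OTHER thread can set, with both pinned to CPU 0. */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int flag;                 /* stands in for leader->exit_state */

static void *leader_fn(void *arg)       /* SCHED_OTHER: wants to "exit" */
{
	atomic_store(&flag, 1);         /* never runs while the RT thread spins on CPU 0 */
	return NULL;
}

static void *execer_fn(void *arg)       /* SCHED_FIFO: waits by yielding, like the old de_thread() */
{
	for (long i = 0; i < 2000000; i++) {    /* bounded so the demo terminates */
		if (atomic_load(&flag))
			return "flag seen";
		sched_yield();          /* FIFO yield: only equal/higher-prio RT tasks get the CPU */
	}
	return "gave up: the SCHED_OTHER thread never ran (livelock)";
}

int main(void)
{
	pthread_t execer, leader;
	pthread_attr_t rt_attr, ts_attr;
	struct sched_param sp = { .sched_priority = 1 };
	cpu_set_t cpu0;
	void *res;

	CPU_ZERO(&cpu0);
	CPU_SET(0, &cpu0);              /* confine both threads to one CPU, as in the bug report */

	pthread_attr_init(&rt_attr);
	pthread_attr_setaffinity_np(&rt_attr, sizeof(cpu0), &cpu0);
	pthread_attr_setinheritsched(&rt_attr, PTHREAD_EXPLICIT_SCHED);
	pthread_attr_setschedpolicy(&rt_attr, SCHED_FIFO);
	pthread_attr_setschedparam(&rt_attr, &sp);

	pthread_attr_init(&ts_attr);
	pthread_attr_setaffinity_np(&ts_attr, sizeof(cpu0), &cpu0);

	if (pthread_create(&execer, &rt_attr, execer_fn, NULL)) {
		fprintf(stderr, "SCHED_FIFO needs root or CAP_SYS_NICE\n");
		return 1;
	}
	usleep(100000);                 /* let the RT thread start spinning first */
	pthread_create(&leader, &ts_attr, leader_fn, NULL);

	pthread_join(execer, &res);
	puts((const char *)res);
	pthread_join(leader, NULL);
	return 0;
}

Built with something like `gcc -O2 -pthread demo.c` and run as root on a multi-core machine, this should print the "gave up" message: sched_yield() from a SCHED_FIFO task only yields to runnable realtime tasks of equal or higher priority, so the SCHED_OTHER thread on the same CPU is starved, which is the scheduling behaviour behind the livelock.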
Diffstat (limited to 'fs')
-rw-r--r-- | fs/exec.c | 28 |
1 file changed, 15 insertions, 13 deletions
diff --git a/fs/exec.c b/fs/exec.c
index ab5a4a3ece46..aa470a93540a 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -801,16 +801,15 @@ static int de_thread(struct task_struct *tsk)
 			hrtimer_restart(&sig->real_timer);
 		spin_lock_irq(lock);
 	}
+
+	sig->notify_count = count;
+	sig->group_exit_task = tsk;
 	while (atomic_read(&sig->count) > count) {
-		sig->group_exit_task = tsk;
-		sig->notify_count = count;
 		__set_current_state(TASK_UNINTERRUPTIBLE);
 		spin_unlock_irq(lock);
 		schedule();
 		spin_lock_irq(lock);
 	}
-	sig->group_exit_task = NULL;
-	sig->notify_count = 0;
 	spin_unlock_irq(lock);
 
 	/*
@@ -819,14 +818,17 @@ static int de_thread(struct task_struct *tsk)
 	 * and to assume its PID:
 	 */
 	if (!thread_group_leader(tsk)) {
-		/*
-		 * Wait for the thread group leader to be a zombie.
-		 * It should already be zombie at this point, most
-		 * of the time.
-		 */
 		leader = tsk->group_leader;
-		while (leader->exit_state != EXIT_ZOMBIE)
-			yield();
+
+		sig->notify_count = -1;
+		for (;;) {
+			write_lock_irq(&tasklist_lock);
+			if (likely(leader->exit_state))
+				break;
+			__set_current_state(TASK_UNINTERRUPTIBLE);
+			write_unlock_irq(&tasklist_lock);
+			schedule();
+		}
 
 		/*
 		 * The only record we have of the real-time age of a
@@ -840,8 +842,6 @@ static int de_thread(struct task_struct *tsk)
 		 */
 		tsk->start_time = leader->start_time;
 
-		write_lock_irq(&tasklist_lock);
-
 		BUG_ON(leader->tgid != tsk->tgid);
 		BUG_ON(tsk->pid == tsk->tgid);
 		/*
@@ -874,6 +874,8 @@ static int de_thread(struct task_struct *tsk)
 		write_unlock_irq(&tasklist_lock);
 	}
 
+	sig->group_exit_task = NULL;
+	sig->notify_count = 0;
 	/*
 	 * There may be one thread left which is just exiting,
 	 * but it's safe to stop telling the group to kill themselves.
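
The change above replaces the yield() loop with a sleep in TASK_UNINTERRUPTIBLE plus an explicit wakeup keyed on ->group_exit_task/notify_count, so the execer gives up the CPU until the leader has actually exited. A rough userspace analog of that wait/wake pattern is sketched below; the condition variable and the leader_exited flag stand in for the kernel's wakeup and ->exit_state and are assumptions of this sketch, not what the kernel uses.

/* Hypothetical demo, not kernel code: the waiter blocks instead of yield-spinning. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int leader_exited;              /* stands in for leader->exit_state */

static void *leader_fn(void *arg)      /* SCHED_OTHER thread that must run to make progress */
{
	pthread_mutex_lock(&lock);
	leader_exited = 1;             /* "becomes a zombie" */
	pthread_cond_signal(&cond);    /* explicit wakeup, like the exit path waking group_exit_task */
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void *execer_fn(void *arg)      /* even at RT priority this blocks instead of spinning */
{
	pthread_mutex_lock(&lock);
	while (!leader_exited)
		pthread_cond_wait(&cond, &lock);   /* sleep, giving up the CPU */
	pthread_mutex_unlock(&lock);
	puts("leader gone, the exec'ing thread can take over leadership");
	return NULL;
}

int main(void)
{
	pthread_t execer, leader;

	pthread_create(&execer, NULL, execer_fn, NULL);
	pthread_create(&leader, NULL, leader_fn, NULL);
	pthread_join(execer, NULL);
	pthread_join(leader, NULL);
	return 0;
}

Because the waiter sleeps in pthread_cond_wait(), even a realtime-priority waiter lets the ordinary-priority thread run and signal completion; that is the same scheduling property the patch relies on when it sleeps in TASK_UNINTERRUPTIBLE and lets the exiting leader wake it.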