author	Dan Williams	2018-12-21 11:35:53 -0800
committer	Dan Williams	2018-12-21 11:35:53 -0800
commit	d8a706414af4827fc0b4b1c0c631c607351938b9 (patch)
tree	391f47712243481ab637dfb0ba4a9c73aea99d54 /fs/dax.c
parent	7566ec393f4161572ba6f11ad5171fd5d59b0fbd (diff)
dax: Use non-exclusive wait in wait_entry_unlocked()
get_unlocked_entry() uses an exclusive wait because it is guaranteed to
eventually obtain the lock and follow on with an unlock+wakeup cycle.
The wait_entry_unlocked() path does not have the same guarantee. Rather
than open-code an extra wakeup, just switch to a non-exclusive wait.

Cc: Jan Kara <jack@suse.cz>
Cc: Matthew Wilcox <willy@infradead.org>
Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
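[Editor's note] The hazard the message describes is that an exclusive
wakeup is consumed by a single waiter; if that waiter never performs a
follow-on wakeup of its own, the remaining waiters can sleep forever.
The following userspace analogy, not kernel code, sketches the
distinction: pthread_cond_signal() plays the role of the exclusive
wake-one, pthread_cond_broadcast() the non-exclusive wake-all. All
names and the scenario are illustrative only.

	/* Analogy only: signal() wakes one waiter (exclusive-style),
	 * broadcast() wakes all (non-exclusive-style).
	 * Build with: cc -pthread analogy.c */
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
	static int entry_locked = 1;	/* stands in for the locked dax entry */

	static void *waiter(void *arg)
	{
		pthread_mutex_lock(&lock);
		while (entry_locked)
			pthread_cond_wait(&cond, &lock);
		pthread_mutex_unlock(&lock);
		/*
		 * Under wake-one semantics, a waiter that bailed out here
		 * without re-signalling would strand the others; wake-all
		 * imposes no such obligation on any individual waiter.
		 */
		printf("waiter %ld saw the entry unlocked\n", (long)arg);
		return NULL;
	}

	int main(void)
	{
		pthread_t t[3];
		long i;

		for (i = 0; i < 3; i++)
			pthread_create(&t[i], NULL, waiter, (void *)i);

		pthread_mutex_lock(&lock);
		entry_locked = 0;
		pthread_cond_broadcast(&cond);	/* wake-all: nobody must re-signal */
		pthread_mutex_unlock(&lock);

		for (i = 0; i < 3; i++)
			pthread_join(t[i], NULL);
		return 0;
	}

This mirrors the patch's reasoning: get_unlocked_entry() can afford an
exclusive wait because its unlock path reliably re-signals, while
wait_entry_unlocked() cannot make that promise, so waking everyone is
the safe choice.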
Diffstat (limited to 'fs/dax.c')
-rw-r--r--	fs/dax.c	16
1 file changed, 7 insertions(+), 9 deletions(-)
diff --git a/fs/dax.c b/fs/dax.c
index 48132eca3761..042d3b31b413 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -246,18 +246,16 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
ewait.wait.func = wake_exceptional_entry_func;
wq = dax_entry_waitqueue(xas, entry, &ewait.key);
- prepare_to_wait_exclusive(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
+ /*
+ * Unlike get_unlocked_entry() there is no guarantee that this
+ * path ever successfully retrieves an unlocked entry before an
+ * inode dies. Perform a non-exclusive wait in case this path
+ * never successfully performs its own wake up.
+ */
+ prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
xas_unlock_irq(xas);
schedule();
finish_wait(wq, &ewait.wait);
-
- /*
- * Entry lock waits are exclusive. Wake up the next waiter since
- * we aren't sure we will acquire the entry lock and thus wake
- * the next waiter up on unlock.
- */
- if (waitqueue_active(wq))
- __wake_up(wq, TASK_NORMAL, 1, &ewait.key);
}
static void put_unlocked_entry(struct xa_state *xas, void *entry)
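
[Editor's note] For readability, here is the post-patch body of
wait_entry_unlocked() assembled from the hunk above. The declarations
and the init_wait() call precede the hunk's first context line and are
assumed from the surrounding kernel source, not shown in this diff.

	static void wait_entry_unlocked(struct xa_state *xas, void *entry)
	{
		/* Assumed preamble: not visible in the hunk above. */
		struct wait_exceptional_entry_queue ewait;
		wait_queue_head_t *wq;

		init_wait(&ewait.wait);
		ewait.wait.func = wake_exceptional_entry_func;

		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
		/*
		 * Unlike get_unlocked_entry() there is no guarantee that this
		 * path ever successfully retrieves an unlocked entry before an
		 * inode dies. Perform a non-exclusive wait in case this path
		 * never successfully performs its own wake up.
		 */
		prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
		xas_unlock_irq(xas);
		schedule();
		finish_wait(wq, &ewait.wait);
	}

Note that the open-coded waitqueue_active()/__wake_up() tail is gone
entirely: with a non-exclusive wait, the eventual unlock's wakeup
reaches every sleeper, so no waiter needs to pass the wakeup along.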