author     Jens Axboe    2020-11-16 13:36:24 -0700
committer  Jens Axboe    2020-11-16 13:39:34 -0700
commit     0abed7c69b956d135cb6d320c350b2adb213e7d8 (patch)
tree       aacd8b10ea45a282d1a9811cd6d8b32fe36a8506 /mm
parent     944d1444d53f5a213457e5096db370cfd06923d4 (diff)
mm: never attempt async page lock if we've transferred data already
We catch the case where we enter generic_file_buffered_read() with data
already transferred, but we also need to be careful not to allow an async
page lock if we're looping while transferring data. If we do, we could end
up returning -EIOCBQUEUED instead of the amount already transferred, and it
could result in double waitqueue additions as well.

Cc: stable@vger.kernel.org # v5.9
Fixes: 1a0a7853b901 ("mm: support async buffered reads in generic_file_buffered_read()")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
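To illustrate the return-value rule this enforces, here is a minimal
userspace sketch (buffered_read_step() and the *_stub() helpers are
invented for illustration; this is not the kernel code): once any bytes
have been copied, the read must report that count, and only a read that
has transferred nothing may hand the page lock off to the async waitqueue
and surface -EIOCBQUEUED.

/*
 * Hypothetical model of the rule in this patch, not kernel code:
 * with bytes already 'written', bail out with that count instead of
 * queueing an async page lock, which would surface -EIOCBQUEUED and
 * drop the partial transfer (and could add the waiter twice).
 */
#include <stdio.h>

#define EIOCBQUEUED	529	/* kernel-internal "queued" errno value */

/* Stand-ins for lock_page_async() / lock_page_killable(). */
static int lock_page_async_stub(void)    { return -EIOCBQUEUED; }
static int lock_page_killable_stub(void) { return 0; }

static long buffered_read_step(int want_async, long written)
{
	int error;

	if (want_async) {
		/* The fix: report the partial transfer first. */
		if (written)
			return written;
		error = lock_page_async_stub();
	} else {
		error = lock_page_killable_stub();
	}
	return error ? error : written;
}

int main(void)
{
	/* Before the fix this would report -529 and lose the 4096 bytes. */
	printf("%ld\n", buffered_read_step(1, 4096));	/* prints 4096 */
	printf("%ld\n", buffered_read_step(1, 0));	/* prints -529 */
	return 0;
}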
Diffstat (limited to 'mm')
-rw-r--r--	mm/filemap.c	18
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index d5e7c2029d16..3ebbe64a0106 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2347,10 +2347,15 @@ page_ok:
 page_not_up_to_date:
 		/* Get exclusive access to the page ... */
-		if (iocb->ki_flags & IOCB_WAITQ)
+		if (iocb->ki_flags & IOCB_WAITQ) {
+			if (written) {
+				put_page(page);
+				goto out;
+			}
 			error = lock_page_async(page, iocb->ki_waitq);
-		else
+		} else {
 			error = lock_page_killable(page);
+		}
 		if (unlikely(error))
 			goto readpage_error;
@@ -2393,10 +2398,15 @@ readpage:
 		}
 		if (!PageUptodate(page)) {
-			if (iocb->ki_flags & IOCB_WAITQ)
+			if (iocb->ki_flags & IOCB_WAITQ) {
+				if (written) {
+					put_page(page);
+					goto out;
+				}
 				error = lock_page_async(page, iocb->ki_waitq);
-			else
+			} else {
 				error = lock_page_killable(page);
+			}
 			if (unlikely(error))
 				goto readpage_error;