author		Pavel Begunkov	2022-07-07 15:13:15 +0100
committer	Jens Axboe	2022-07-24 18:39:17 -0600
commit		ceff501790a9789c748a9b851f30f4f7e2fe4d72 (patch)
tree		a5fdf42fb282f41f465afd44abb0884567c992b1 /io_uring
parent		7a121ced6e6430d49fb802067b4f020f6df62362 (diff)
io_uring: don't race double poll setting REQ_F_ASYNC_DATA
Just as with io_poll_double_prepare() setting REQ_F_DOUBLE_POLL, we can
race with the first poll entry when setting REQ_F_ASYNC_DATA. Move it
under io_poll_double_prepare().

Fixes: a18427bb2d9b ("io_uring: optimise submission side poll_refs")
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/df6920f509c11115aa2bce8b34dc5fdb0eb98920.1657203020.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
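
The race being closed is a lost update on req->flags: the flags word is a plain,
non-atomic integer, so an unlocked "flags |= REQ_F_ASYNC_DATA" on the arming side
can collide with the already-queued first poll entry modifying the same word from
io_poll_wake(), and one of the two read-modify-writes can drop the other's bit.
The following is a minimal standalone sketch of the same pattern using pthreads;
it is not kernel code, and fake_req, double_poll_prepare(), and the F_* bits are
hypothetical stand-ins. It only illustrates why both flag updates belong inside
the section guarded by the lock the competing entry takes (head->lock in the
real patch).

/*
 * Standalone illustration of the locking pattern (not kernel code).
 * A plain flags word is updated from two contexts; every update is done
 * under the same lock so no read-modify-write can lose a bit.
 */
#include <pthread.h>
#include <stdio.h>

#define F_DOUBLE_POLL  (1u << 0)
#define F_ASYNC_DATA   (1u << 1)
#define F_POLL_WOKEN   (1u << 2)   /* bit the waking side sets */

struct fake_req {
	unsigned int flags;            /* plain int, like req->flags */
};

/* stands in for head->lock */
static pthread_mutex_t head_lock = PTHREAD_MUTEX_INITIALIZER;
static struct fake_req req;

/* Mimics io_poll_wake() running from the already-queued first poll entry. */
static void *waker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&head_lock);
	req.flags |= F_POLL_WOKEN;
	pthread_mutex_unlock(&head_lock);
	return NULL;
}

/*
 * Mimics the arming side after the patch: both flag updates happen inside
 * the same locked section, which is what moving the REQ_F_ASYNC_DATA
 * assignment into io_poll_double_prepare() achieves.
 */
static void double_poll_prepare(struct fake_req *r, int is_poll_add)
{
	pthread_mutex_lock(&head_lock);
	r->flags |= F_DOUBLE_POLL;
	if (is_poll_add)
		r->flags |= F_ASYNC_DATA;  /* no longer outside the lock */
	pthread_mutex_unlock(&head_lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waker, NULL);
	double_poll_prepare(&req, 1);
	pthread_join(t, NULL);

	/* With every update serialised, no bit can be lost. */
	printf("flags = 0x%x\n", req.flags);
	return 0;
}

Before this patch, __io_queue_proc() set REQ_F_ASYNC_DATA outside that locked
section; moving it into io_poll_double_prepare() puts it under head->lock
together with REQ_F_DOUBLE_POLL, closing the window.
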
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/poll.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 3710a0a46a87..c1359d45a396 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -410,6 +410,8 @@ static void io_poll_double_prepare(struct io_kiocb *req)
 		spin_lock_irq(&head->lock);
 
 	req->flags |= REQ_F_DOUBLE_POLL;
+	if (req->opcode == IORING_OP_POLL_ADD)
+		req->flags |= REQ_F_ASYNC_DATA;
 
 	if (head)
 		spin_unlock_irq(&head->lock);
@@ -448,13 +450,11 @@ static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
 			return;
 		}
 
-		io_poll_double_prepare(req);
 		/* mark as double wq entry */
 		wqe_private |= IO_WQE_F_DOUBLE;
 		io_init_poll_iocb(poll, first->events, first->wait.func);
+		io_poll_double_prepare(req);
 		*poll_ptr = poll;
-		if (req->opcode == IORING_OP_POLL_ADD)
-			req->flags |= REQ_F_ASYNC_DATA;
 	} else {
 		/* fine to modify, there is no poll queued to race with us */
 		req->flags |= REQ_F_SINGLE_POLL;