author     Pavel Begunkov   2024-03-08 13:55:57 +0000
committer  Jens Axboe       2024-03-08 07:58:23 -0700
commit     e0e4ab52d17096d96c21a6805ccd424b283c3c6d
tree       9d97edbb6179d35279047cce542c89daa58381c1
parent     3a96378e22cc46c7c49b5911f6c8631527a133a9
io_uring: refactor DEFER_TASKRUN multishot checks
We disallow DEFER_TASKRUN multishots from running in io-wq, and that is
currently checked by each opcode individually in the issue path. We can
consolidate all of these checks in io_wq_submit_work(), which also moves
them out of the hot path.
Suggested-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/e492f0f11588bb5aa11d7d24e6f53b7c7628afdb.1709905727.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
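
For context on what "DEFER_TASKRUN multishots" means at the user-visible level: a ring created with IORING_SETUP_DEFER_TASKRUN (which requires IORING_SETUP_SINGLE_ISSUER) only processes completion work from the single submitter task, so the auxiliary CQEs that multishot requests rely on must never be posted from an io-wq worker. The following is a minimal, illustrative liburing sketch, not part of this patch; serve_multishot_accept() and listen_fd are assumed names, and error handling is kept to a minimum.

/* Illustrative only: serve connections with one multishot accept SQE on a
 * DEFER_TASKRUN ring. Assumes liburing and an already-bound, listening
 * socket "listen_fd". */
#include <liburing.h>
#include <stdio.h>
#include <stdlib.h>

static void serve_multishot_accept(int listen_fd)
{
        struct io_uring_params p = { 0 };
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;

        /* DEFER_TASKRUN requires SINGLE_ISSUER: one task submits and reaps. */
        p.flags = IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;
        if (io_uring_queue_init_params(8, &ring, &p) < 0)
                exit(EXIT_FAILURE);

        /* A single SQE keeps posting one CQE per accepted connection. */
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_multishot_accept(sqe, listen_fd, NULL, NULL, 0);
        io_uring_submit(&ring);

        for (;;) {
                /* Waiting from the submitter task is also what drives the
                 * deferred task work that posts the auxiliary CQEs. */
                if (io_uring_wait_cqe(&ring, &cqe) < 0)
                        break;
                if (cqe->res >= 0)
                        printf("accepted fd %d\n", cqe->res);
                else
                        fprintf(stderr, "accept failed: %d\n", cqe->res);
                /* No IORING_CQE_F_MORE means the multishot terminated. */
                if (!(cqe->flags & IORING_CQE_F_MORE)) {
                        io_uring_cqe_seen(&ring, cqe);
                        break;
                }
                io_uring_cqe_seen(&ring, cqe);
        }

        io_uring_queue_exit(&ring);
}

Because only the submitter task reaps and runs the deferred work, the kernel has to confine multishot CQE posting to that context; this patch enforces it once in io_wq_submit_work() instead of in every opcode.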
-rw-r--r--  io_uring/io_uring.c  | 20
-rw-r--r--  io_uring/net.c       | 21
-rw-r--r--  io_uring/rw.c        |  2
3 files changed, 20 insertions, 23 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index cf2f514b7cc0..cf348c33f485 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -944,6 +944,8 @@ bool io_fill_cqe_req_aux(struct io_kiocb *req, bool defer, s32 res, u32 cflags)
         u64 user_data = req->cqe.user_data;
         struct io_uring_cqe *cqe;
 
+        lockdep_assert(!io_wq_current_is_worker());
+
         if (!defer)
                 return __io_post_aux_cqe(ctx, user_data, res, cflags, false);
 
@@ -1968,6 +1970,24 @@ fail:
                 goto fail;
         }
 
+        /*
+         * If DEFER_TASKRUN is set, it's only allowed to post CQEs from the
+         * submitter task context. Final request completions are handed to the
+         * right context, however this is not the case of auxiliary CQEs,
+         * which is the main mean of operation for multishot requests.
+         * Don't allow any multishot execution from io-wq. It's more restrictive
+         * than necessary and also cleaner.
+         */
+        if (req->flags & REQ_F_APOLL_MULTISHOT) {
+                err = -EBADFD;
+                if (!io_file_can_poll(req))
+                        goto fail;
+                err = -ECANCELED;
+                if (io_arm_poll_handler(req, issue_flags) != IO_APOLL_OK)
+                        goto fail;
+                return;
+        }
+
         if (req->flags & REQ_F_FORCE_ASYNC) {
                 bool opcode_poll = def->pollin || def->pollout;
 
diff --git a/io_uring/net.c b/io_uring/net.c
index 62a5819779b5..86ec26a58bb0 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -78,19 +78,6 @@ struct io_sr_msg {
  */
 #define MULTISHOT_MAX_RETRY        32
 
-static inline bool io_check_multishot(struct io_kiocb *req,
-                                      unsigned int issue_flags)
-{
-        /*
-         * When ->locked_cq is set we only allow to post CQEs from the original
-         * task context. Usual request completions will be handled in other
-         * generic paths but multipoll may decide to post extra cqes.
-         */
-        return !(issue_flags & IO_URING_F_IOWQ) ||
-                !(req->flags & REQ_F_APOLL_MULTISHOT) ||
-                !req->ctx->task_complete;
-}
-
 int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
         struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
@@ -862,9 +849,6 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
             (sr->flags & IORING_RECVSEND_POLL_FIRST))
                 return io_setup_async_msg(req, kmsg, issue_flags);
 
-        if (!io_check_multishot(req, issue_flags))
-                return io_setup_async_msg(req, kmsg, issue_flags);
-
         flags = sr->msg_flags;
         if (force_nonblock)
                 flags |= MSG_DONTWAIT;
@@ -956,9 +940,6 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
             (sr->flags & IORING_RECVSEND_POLL_FIRST))
                 return -EAGAIN;
 
-        if (!io_check_multishot(req, issue_flags))
-                return -EAGAIN;
-
         sock = sock_from_file(req->file);
         if (unlikely(!sock))
                 return -ENOTSOCK;
@@ -1408,8 +1389,6 @@ int io_accept(struct io_kiocb *req, unsigned int issue_flags)
         struct file *file;
         int ret, fd;
 
-        if (!io_check_multishot(req, issue_flags))
-                return -EAGAIN;
 retry:
         if (!fixed) {
                 fd = __get_unused_fd_flags(accept->flags, accept->nofile);
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 5651a5ad4e11..47e097ab5d7e 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -933,8 +933,6 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
          */
         if (!io_file_can_poll(req))
                 return -EBADFD;
-        if (issue_flags & IO_URING_F_IOWQ)
-                return -EAGAIN;
 
         ret = __io_read(req, issue_flags);
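
As the new io_wq_submit_work() branch shows, a multishot request that reaches io-wq is either re-armed through the poll handler or completed with an error (-EBADFD for a non-pollable file, -ECANCELED if arming fails). In either termination case, userspace learns that the multishot stopped producing results from the absence of IORING_CQE_F_MORE on its final CQE. Below is a minimal helper sketch, assuming liburing headers are available; multishot_still_armed() is an illustrative name, not an API added by this patch.

/* Illustrative only: deciding whether a multishot request is still armed. */
#include <liburing.h>
#include <stdbool.h>

static bool multishot_still_armed(const struct io_uring_cqe *cqe)
{
        /* A terminal CQE (an error such as -ECANCELED or -EBADFD, or a final
         * successful completion) does not carry IORING_CQE_F_MORE; the SQE
         * must be re-issued if more completions are wanted. */
        if (cqe->res < 0)
                return false;
        return cqe->flags & IORING_CQE_F_MORE;
}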