author		Pavel Begunkov <asml.silence@gmail.com>	2021-06-14 02:36:14 +0100
committer	Jens Axboe <axboe@kernel.dk>		2021-06-14 08:23:12 -0600
commit		cb3d8972c78ab0cdb55a30d6db927a3e0442b3f9 (patch)
tree		92379da7dde9a34dc82ebaa402f2ad0bc34f1423 /fs/io_uring.c
parent		382cb030469db3d428ada09e7925f684ba9d61cf (diff)
io_uring: refactor io_iopoll_req_issued
A simple refactoring of io_iopoll_req_issued(): move the in_async
computation inside it so callers don't have to pass it around, and
save on double-checking it.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/1513bfde4f0c835be25ac69a82737ab0668d7665.1623634181.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
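[Editor's illustration, not part of the commit] To make the shape of the
refactoring concrete, here is a minimal userland analogue of the pattern:
the callee derives the "running in a worker?" flag itself and takes the
lock only in that case, so callers neither compute nor forward the flag.
The names req_issued, list_lock and current_is_worker are invented for
this sketch and do not appear in io_uring.

/*
 * Sketch of the pattern: the callee decides whether it needs the lock,
 * instead of every caller computing a flag and managing the lock itself.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static __thread bool current_is_worker;	/* stand-in for io_wq_current_is_worker() */

static void req_issued(int req)
{
	const bool in_async = current_is_worker;

	/* worker context doesn't already hold the lock, grab it now */
	if (in_async)
		pthread_mutex_lock(&list_lock);

	printf("queued req %d (in_async=%d)\n", req, in_async);

	if (in_async)
		pthread_mutex_unlock(&list_lock);
}

static void *worker(void *arg)
{
	(void)arg;
	current_is_worker = true;
	req_issued(2);			/* locking handled inside the callee */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_mutex_lock(&list_lock);	/* issuer path already holds the lock */
	req_issued(1);
	pthread_mutex_unlock(&list_lock);

	pthread_create(&t, NULL, worker, NULL);
	pthread_join(&t, NULL);
	return 0;
}

The caller-side win is visible in the third hunk below: the IOPOLL branch
in io_issue_sqe() shrinks from a lock-juggling block to a single call.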
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--	fs/io_uring.c	44
1 file changed, 21 insertions(+), 23 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index cdd9b53abbb2..6c0b3f91e1ad 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2525,9 +2525,14 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
  * find it from a io_do_iopoll() thread before the issuer is done
  * accessing the kiocb cookie.
  */
-static void io_iopoll_req_issued(struct io_kiocb *req, bool in_async)
+static void io_iopoll_req_issued(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
+	const bool in_async = io_wq_current_is_worker();
+
+	/* workqueue context doesn't hold uring_lock, grab it now */
+	if (unlikely(in_async))
+		mutex_lock(&ctx->uring_lock);
 
 	/*
 	 * Track whether we have multiple files in our lists. This will impact
@@ -2554,14 +2559,19 @@ static void io_iopoll_req_issued(struct io_kiocb *req, bool in_async)
 	else
 		list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
 
-	/*
-	 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled in sq thread
-	 * task context or in io worker task context. If current task context is
-	 * sq thread, we don't need to check whether should wake up sq thread.
-	 */
-	if (in_async && (ctx->flags & IORING_SETUP_SQPOLL) &&
-	    wq_has_sleeper(&ctx->sq_data->wait))
-		wake_up(&ctx->sq_data->wait);
+	if (unlikely(in_async)) {
+		/*
+		 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled
+		 * in sq thread task context or in io worker task context. If
+		 * the current task context is the sq thread, we don't need to
+		 * check whether we should wake up the sq thread.
+		 */
+		if ((ctx->flags & IORING_SETUP_SQPOLL) &&
+		    wq_has_sleeper(&ctx->sq_data->wait))
+			wake_up(&ctx->sq_data->wait);
+
+		mutex_unlock(&ctx->uring_lock);
+	}
 }
 
 static inline void io_state_file_put(struct io_submit_state *state)
@@ -6215,23 +6225,11 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
 
 	if (creds)
 		revert_creds(creds);
-
 	if (ret)
 		return ret;
-
 	/* If the op doesn't have a file, we're not polling for it */
-	if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
-		const bool in_async = io_wq_current_is_worker();
-
-		/* workqueue context doesn't hold uring_lock, grab it now */
-		if (in_async)
-			mutex_lock(&ctx->uring_lock);
-
-		io_iopoll_req_issued(req, in_async);
-
-		if (in_async)
-			mutex_unlock(&ctx->uring_lock);
-	}
+	if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file)
+		io_iopoll_req_issued(req);
 
 	return 0;
 }
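
[Editor's footnote, not part of the commit] On the wake-up path kept
inside io_iopoll_req_issued() above: wq_has_sleeper() lets the submitter
skip wake_up() entirely when the SQPOLL thread isn't sleeping. A rough
userland analogue of that check-before-signal idea follows; the kernel
version is lockless with memory barriers, and the names sleepers,
work_pending, wait_lock and wait_cond are invented for the sketch.

/* Only signal when a sleeper was registered, mirroring the
 * wq_has_sleeper() check before wake_up(). Compile with -c; these two
 * functions are the whole sketch. */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wait_cond = PTHREAD_COND_INITIALIZER;
static int sleepers;		/* analogue of the wait queue's sleeper state */
static bool work_pending;	/* predicate the sq-thread analogue waits on */

static void sq_thread_wait(void)
{
	pthread_mutex_lock(&wait_lock);
	sleepers++;
	while (!work_pending)
		pthread_cond_wait(&wait_cond, &wait_lock);
	work_pending = false;
	sleepers--;
	pthread_mutex_unlock(&wait_lock);
}

static void maybe_wake_sq_thread(void)
{
	pthread_mutex_lock(&wait_lock);
	work_pending = true;
	if (sleepers)		/* skip the signal when nobody is sleeping */
		pthread_cond_signal(&wait_cond);
	pthread_mutex_unlock(&wait_lock);
}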