author     Pavel Begunkov   2021-06-17 18:14:07 +0100
committer  Jens Axboe       2021-06-18 09:22:02 -0600
commit     ebd0df2e63426bbd9ed50966e888c87eac88fc30 (patch)
tree       816227ee88250df69422c7f1b0006f404d6b6fc0 /fs
parent     3f18407dc6f2db0968daaa36c39a772c2c9f8ea7 (diff)
io_uring: optimise task_work submit flushing
tctx_task_work() tries to fetch the next batch of requests, but before doing so it flushes the completions from the previous batch, which may be sub-optimal. E.g. io_req_task_queue() executes the head of a link, and all the linked requests may then be enqueued through that same io_req_task_queue(); there are more cases like that. Instead, flush only at the very end, so completions from several waves of a single tctx_task_work() call can be cached and flushed together.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/3cac83934e4fbce520ff8025c3524398b3ae0270.1623949695.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
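The effect of moving the flush is easiest to see with a toy model. Below is a small standalone userspace C program, not kernel code: every name except ctx_flush_and_put() is made up for illustration, and the "waves" of requests stand in for successive passes over tctx->task_list. It counts how many flushes happen when consecutive waves keep targeting the same ring, which is exactly the case the patch batches better.

/*
 * Toy userspace model of the change, not kernel code. Requests arrive in
 * "waves" (one outer-loop iteration per wave), each request targets a ring
 * context, and completions are cached per context until flushed.
 */
#include <stdio.h>

struct ctx { int id; int cached; };

static int flushes;

/* Stand-in for ctx_flush_and_put(): submit whatever is cached, if anything. */
static void ctx_flush_and_put(struct ctx *ctx)
{
	if (!ctx || !ctx->cached)
		return;
	printf("flush ctx %d: %d cached completions\n", ctx->id, ctx->cached);
	ctx->cached = 0;
	flushes++;
}

int main(void)
{
	struct ctx a = { .id = 1 }, b = { .id = 2 };
	/* Three waves of two requests each; entries are the ring a request targets. */
	struct ctx *waves[3][2] = { { &a, &a }, { &a, &b }, { &b, &b } };
	struct ctx *cur = NULL;	/* like the 'ctx' hoisted out of the loop by the patch */

	for (int w = 0; w < 3; w++) {
		for (int i = 0; i < 2; i++) {
			struct ctx *req_ctx = waves[w][i];

			if (req_ctx != cur) {	/* switching rings: flush the old one */
				ctx_flush_and_put(cur);
				cur = req_ctx;
			}
			cur->cached++;		/* completion cached, not yet submitted */
		}
		/*
		 * Before the patch, 'cur' lived inside this loop and was
		 * flushed right here, once per wave; the patch keeps it (and
		 * its cached completions) alive across waves ...
		 */
	}
	/* ... and flushes just once, at the very end. */
	ctx_flush_and_put(cur);
	printf("total flushes: %d\n", flushes);
	return 0;
}

Run as-is, the model performs two flushes (one per ring); with the pre-patch arrangement, where the context pointer is re-declared inside the outer loop and flushed at the end of every wave, the same input takes four.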
Diffstat (limited to 'fs')
-rw-r--r--  fs/io_uring.c  5
1 file changed, 3 insertions, 2 deletions
diff --git a/fs/io_uring.c b/fs/io_uring.c
index dc71850d7a49..49f06484ba0e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1890,13 +1890,13 @@ static void ctx_flush_and_put(struct io_ring_ctx *ctx)
 
 static void tctx_task_work(struct callback_head *cb)
 {
+	struct io_ring_ctx *ctx = NULL;
 	struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
 						  task_work);
 
 	clear_bit(0, &tctx->task_state);
 
 	while (!wq_list_empty(&tctx->task_list)) {
-		struct io_ring_ctx *ctx = NULL;
 		struct io_wq_work_list list;
 		struct io_wq_work_node *node;
 
@@ -1920,11 +1920,12 @@ static void tctx_task_work(struct callback_head *cb)
 			node = next;
 		}
 
-		ctx_flush_and_put(ctx);
 		if (!list.first)
 			break;
 		cond_resched();
 	}
+
+	ctx_flush_and_put(ctx);
 }
 
 static int io_req_task_work_add(struct io_kiocb *req)