author	Pavel Begunkov	2023-01-09 14:46:11 +0000
committer	Jens Axboe	2023-01-29 15:17:40 -0700
commit	130bd686d9be918e4cc8c03abf5794ba2d860502 (patch)
tree	7db8b4728da7507c82b246711843384ba3b3b0b9 /io_uring
parent	3181e22fb79910c7071e84a43af93ac89e8a7106 (diff)
io_uring: waitqueue-less cq waiting
With DEFER_TASKRUN, only ctx->submitter_task might be waiting for CQEs;
we can use this to optimise io_cqring_wait(). Replace the ->cq_wait
waitqueue with waking the task directly.

It works, but it misses an important optimisation covered by the
following patch, so this patch without follow-ups might hurt
performance.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/103d174d35d919d4cb0922d8a9c93a8f0c35f74a.1673274244.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
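For context, the waker side of this change boils down to a bare
wake_up_state() on the one task allowed to wait. Below is a minimal
sketch of that half of the pattern, not the actual io_uring code; the
names my_ctx, waiter and work_pending are hypothetical stand-ins for
the real io_uring state:

/*
 * Sketch only: with a single known waiter, the producer can publish
 * work and wake that task directly, skipping the waitqueue entirely.
 */
#include <linux/atomic.h>
#include <linux/sched.h>

struct my_ctx {
	struct task_struct *waiter;	/* the only task allowed to wait */
	atomic_t work_pending;		/* hypothetical completion count */
};

static void my_post_work(struct my_ctx *ctx)
{
	atomic_inc(&ctx->work_pending);
	/* order the store above against reading the waiter's state;
	 * this is the role smp_mb__after_atomic() plays in the patch */
	smp_mb__after_atomic();
	/* only wakes the task if it is currently TASK_INTERRUPTIBLE */
	wake_up_state(ctx->waiter, TASK_INTERRUPTIBLE);
}

Note that wake_up_state() is a no-op if the target task is not in the
given state, so a task that has already found work and is running is
not disturbed.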
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/io_uring.c	19
1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index f2e1dd076d98..be26829f7d20 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1263,7 +1263,7 @@ static void io_req_local_work_add(struct io_kiocb *req)
 		percpu_ref_put(&ctx->refs);
 		return;
 	}
-	/* need it for the following io_cqring_wake() */
+	/* needed for the following wake up */
 	smp_mb__after_atomic();
 
 	if (unlikely(atomic_read(&req->task->io_uring->in_idle))) {
@@ -1274,10 +1274,9 @@ static void io_req_local_work_add(struct io_kiocb *req)
 
 	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
 		atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
-
 	if (ctx->has_evfd)
 		io_eventfd_signal(ctx);
-	__io_cqring_wake(ctx);
+	wake_up_state(ctx->submitter_task, TASK_INTERRUPTIBLE);
 	percpu_ref_put(&ctx->refs);
 }
 
@@ -2576,12 +2575,17 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 	do {
 		unsigned long check_cq;
 
-		prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
-						TASK_INTERRUPTIBLE);
+		if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
+			set_current_state(TASK_INTERRUPTIBLE);
+		} else {
+			prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
+							TASK_INTERRUPTIBLE);
+		}
+
 		ret = io_cqring_wait_schedule(ctx, &iowq);
+		__set_current_state(TASK_RUNNING);
 		if (ret < 0)
 			break;
-		__set_current_state(TASK_RUNNING);
 		/*
 		 * Run task_work after scheduling and before io_should_wake().
 		 * If we got woken because of task_work being processed, run it
@@ -2609,7 +2613,8 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 		cond_resched();
 	} while (1);
 
-	finish_wait(&ctx->cq_wait, &iowq.wq);
+	if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
+		finish_wait(&ctx->cq_wait, &iowq.wq);
 	restore_saved_sigmask_unless(ret == -EINTR);
 
 	return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
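
The waiter side of the same sketch pairs with my_post_work() above:
instead of prepare_to_wait_exclusive()/finish_wait(), the task flips
its own state, re-checks the condition to close the lost-wakeup
window, and schedules. As in the patch, the state is reset before
acting on the result, since there is no finish_wait() to do it on the
error path. Again a hedged sketch under the same hypothetical names,
not the real io_cqring_wait():

/* Sketch of the waitqueue-less waiter, pairing with my_post_work(). */
static void my_wait(struct my_ctx *ctx)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		/* re-check after setting state: a concurrent
		 * my_post_work() either sees TASK_INTERRUPTIBLE and
		 * wakes us, or we see work_pending here and never sleep */
		if (atomic_read(&ctx->work_pending))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}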