author	Pavel Begunkov	2022-06-15 17:33:55 +0100
committer	Jens Axboe	2022-07-24 18:39:13 -0600
commit	3a08576b96e365d424225dd034c651e963b3ae64 (patch)
tree	c6c36b9fb532662217e7cb852f5d4de3cd80ff29 /io_uring
parent	aeaa72c69473d7e68addbd31f43c7c12af252bfc (diff)
io_uring: remove check_cq checking from hot paths
All ctx->check_cq events are slow path; don't test every single flag one
by one in the hot path, but add a common guarding if.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/dff026585cea7ff3a172a7c83894a3b0111bbf6a.1655310733.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
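The change is a pure restructuring: each per-flag test is kept as-is, but all of them move behind a single test of the whole check_cq word, so the common case (no slow-path flags set) pays for one compare instead of several. A minimal userspace sketch of that pattern, with made-up names for illustration (BIT, unlikely, FLAG_*_BIT and handle_request are not the io_uring code):

	#include <stdio.h>

	#define BIT(n)		(1UL << (n))
	#define unlikely(x)	__builtin_expect(!!(x), 0)

	enum { FLAG_OVERFLOW_BIT, FLAG_DROPPED_BIT };

	static int handle_request(unsigned long flags)
	{
		/* one cheap test guards all of the rarely-taken checks */
		if (unlikely(flags)) {
			if (flags & BIT(FLAG_OVERFLOW_BIT))
				printf("flush overflowed completions\n");
			if (flags & BIT(FLAG_DROPPED_BIT))
				return -1;	/* report the dropped case to the caller */
		}
		/* hot path continues with no further flag tests */
		return 0;
	}

	int main(void)
	{
		printf("%d\n", handle_request(0));			/* fast path: no flags set */
		printf("%d\n", handle_request(BIT(FLAG_DROPPED_BIT)));	/* slow path: one flag set */
		return 0;
	}

The unlikely() hint (a wrapper around __builtin_expect) also lets the compiler lay the guarded block out of line, keeping the fast path's instruction stream compact.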
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/io_uring.c	34
1 file changed, 19 insertions, 15 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 8a8d8b323519..a4c1746d0691 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1259,24 +1259,25 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
 	int ret = 0;
 	unsigned long check_cq;
 
+	check_cq = READ_ONCE(ctx->check_cq);
+	if (unlikely(check_cq)) {
+		if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
+			__io_cqring_overflow_flush(ctx, false);
+		/*
+		 * Similarly do not spin if we have not informed the user of any
+		 * dropped CQE.
+		 */
+		if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
+			return -EBADR;
+	}
 	/*
 	 * Don't enter poll loop if we already have events pending.
 	 * If we do, we can potentially be spinning for commands that
 	 * already triggered a CQE (eg in error).
 	 */
-	check_cq = READ_ONCE(ctx->check_cq);
-	if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
-		__io_cqring_overflow_flush(ctx, false);
 	if (io_cqring_events(ctx))
 		return 0;
 
-	/*
-	 * Similarly do not spin if we have not informed the user of any
-	 * dropped CQE.
-	 */
-	if (unlikely(check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)))
-		return -EBADR;
-
 	do {
 		/*
 		 * If a submit got punted to a workqueue, we can have the
@@ -2203,12 +2204,15 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
 	ret = io_run_task_work_sig();
 	if (ret || io_should_wake(iowq))
 		return ret;
+
 	check_cq = READ_ONCE(ctx->check_cq);
-	/* let the caller flush overflows, retry */
-	if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
-		return 1;
-	if (unlikely(check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)))
-		return -EBADR;
+	if (unlikely(check_cq)) {
+		/* let the caller flush overflows, retry */
+		if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
+			return 1;
+		if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
+			return -EBADR;
+	}
 	if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS))
 		return -ETIME;
 	return 1;