| author | Pavel Begunkov | 2022-06-17 09:48:01 +0100 |
|---|---|---|
| committer | Jens Axboe | 2022-07-24 18:39:14 -0600 |
| commit | faf88dde060f74117b3a86a62cb32a20f27fd636 (patch) | |
| tree | efcc06f04689767ebe25ab240638bb1a929a910e /io_uring/io_uring.h | |
| parent | d245bca6375bccfd589a6a7d5007df28575bb626 (diff) | |
io_uring: don't inline __io_get_cqe()
__io_get_cqe() is not as hot as io_get_cqe(), so there is no need to
inline it; moving it out of line sheds ~500B from the binary.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/c1ac829198a881b7af8710926f99a3559b9f24c0.1655455613.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
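The change applies a pattern common in the kernel: keep the hot path as a tiny inline helper in the header and move the rarely taken refill logic out of line, so its body is emitted once in the binary rather than duplicated at every call site. Below is a minimal, self-contained sketch of that shape; all names (`ring_get`, `__ring_get`, `struct entry`) are illustrative stand-ins, not the kernel's actual types:

```c
#include <stddef.h>
#include <stdio.h>

struct entry { int data; };

struct ring {
	struct entry *cached;    /* next free entry in the cached run */
	struct entry *sentinel;  /* one past the end of that run */
};

/* Cold refill path: deliberately not inline, so its body exists once
 * instead of being copied into every caller of ring_get(). A real
 * implementation would carve out a fresh contiguous run here. */
struct entry *__ring_get(struct ring *r)
{
	(void)r;
	return NULL; /* sketch: treat the ring as full */
}

/* Hot fast path: one compare and one increment, cheap to inline. */
static inline struct entry *ring_get(struct ring *r)
{
	if (r->cached < r->sentinel)
		return r->cached++;
	return __ring_get(r);
}

int main(void)
{
	struct entry run[4];
	struct ring r = { .cached = run, .sentinel = run + 4 };

	/* Four fast-path grabs, then the out-of-line fallback (NULL). */
	for (int i = 0; i < 5; i++)
		printf("%p\n", (void *)ring_get(&r));
	return 0;
}
```

The trade-off is one extra call on the slow path in exchange for less generated code at every caller, which is exactly the ~500B the commit message measures.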
Diffstat (limited to 'io_uring/io_uring.h')
-rw-r--r-- | io_uring/io_uring.h | 36 |
1 file changed, 1 insertion(+), 35 deletions(-)
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 18754fb79025..94bd6732f558 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -14,44 +14,10 @@ enum {
 	IOU_ISSUE_SKIP_COMPLETE = -EIOCBQUEUED,
 };
 
+struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx);
 bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
 			      s32 res, u32 cflags, u64 extra1, u64 extra2);
 
-static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
-{
-	return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
-}
-
-/*
- * writes to the cq entry need to come after reading head; the
- * control dependency is enough as we're using WRITE_ONCE to
- * fill the cq entry
- */
-static inline struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx)
-{
-	struct io_rings *rings = ctx->rings;
-	unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
-	unsigned int shift = 0;
-	unsigned int free, queued, len;
-
-	if (ctx->flags & IORING_SETUP_CQE32)
-		shift = 1;
-
-	/* userspace may cheat modifying the tail, be safe and do min */
-	queued = min(__io_cqring_events(ctx), ctx->cq_entries);
-	free = ctx->cq_entries - queued;
-	/* we need a contiguous range, limit based on the current array offset */
-	len = min(free, ctx->cq_entries - off);
-	if (!len)
-		return NULL;
-
-	ctx->cached_cq_tail++;
-	ctx->cqe_cached = &rings->cqes[off];
-	ctx->cqe_sentinel = ctx->cqe_cached + len;
-	ctx->cqe_cached++;
-	return &rings->cqes[off << shift];
-}
-
 static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
 {
 	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
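Two details of the relocated `__io_get_cqe()` are worth noting. The CQ ring size is a power of two, so the free-running tail is reduced to an array offset with a mask rather than a modulo, and under `IORING_SETUP_CQE32` each 32-byte CQE occupies two 16-byte slots in the `cqes` array, which is what the final `off << shift` accounts for. A standalone sketch of just that indexing (`cqe_index` is an illustrative helper with a made-up ring size, not a kernel function):

```c
#include <stdio.h>

#define CQ_ENTRIES 8u	/* must be a power of two for the mask trick */

/* Map a free-running tail counter to a slot in the CQE array.
 * 'big_cqe' mirrors IORING_SETUP_CQE32: a 32-byte CQE spans two
 * 16-byte slots, so the masked offset is doubled. */
static unsigned int cqe_index(unsigned int tail, int big_cqe)
{
	unsigned int off = tail & (CQ_ENTRIES - 1);
	return off << (big_cqe ? 1 : 0);
}

int main(void)
{
	/* tail 9 wraps to offset 1; with CQE32 that becomes slot 2. */
	printf("%u %u\n", cqe_index(9, 0), cqe_index(9, 1));
	return 0;
}
```

The inline fast path, `io_get_cqe()`, stays cheap: as long as `ctx->cqe_cached` is below `ctx->cqe_sentinel` it can hand out entries from the cached contiguous run, and it only falls back to the out-of-line `__io_get_cqe()` once that run is exhausted.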