From 8e29da69feade64ec7fe9e1a2824b967c5183a21 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 18 Apr 2022 10:44:00 -0600 Subject: io_uring: add support for IORING_ASYNC_CANCEL_ALL The current cancelation will lookup and cancel the first request it finds based on the key passed in. Add a flag that allows to cancel any request that matches they key. It completes with the number of requests found and canceled, or res < 0 if an error occured. Signed-off-by: Jens Axboe Link: https://lore.kernel.org/r/20220418164402.75259-4-axboe@kernel.dk --- include/uapi/linux/io_uring.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include') diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 1845cf7c80ba..476e58a2837f 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -187,6 +187,13 @@ enum { #define IORING_POLL_UPDATE_EVENTS (1U << 1) #define IORING_POLL_UPDATE_USER_DATA (1U << 2) +/* + * ASYNC_CANCEL flags. + * + * IORING_ASYNC_CANCEL_ALL Cancel all requests that match the given key + */ +#define IORING_ASYNC_CANCEL_ALL (1U << 0) + /* * IO completion data structure (Completion Queue Entry) */ -- cgit v1.2.3 From 4bf94615b8886305199ed5755cb72fea88258d15 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 18 Apr 2022 10:44:01 -0600 Subject: io_uring: allow IORING_OP_ASYNC_CANCEL with 'fd' key Currently sqe->addr must contain the user_data of the request being canceled. Introduce the IORING_ASYNC_CANCEL_FD flag, which tells the kernel that we're keying off the file fd instead for cancelation. This allows canceling any request that a) uses a file, and b) was assigned the file based on the value being passed in. Signed-off-by: Jens Axboe Link: https://lore.kernel.org/r/20220418164402.75259-5-axboe@kernel.dk --- fs/io_uring.c | 67 ++++++++++++++++++++++++++++++++++++++----- include/uapi/linux/io_uring.h | 3 ++ 2 files changed, 63 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/fs/io_uring.c b/fs/io_uring.c index b43cdf1a4555..cf0d5437b77d 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -587,6 +587,7 @@ struct io_cancel { struct file *file; u64 addr; u32 flags; + s32 fd; }; struct io_timeout { @@ -992,7 +993,10 @@ struct io_defer_entry { struct io_cancel_data { struct io_ring_ctx *ctx; - u64 data; + union { + u64 data; + struct file *file; + }; u32 flags; int seq; }; @@ -6332,6 +6336,29 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only, return NULL; } +static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx, + struct io_cancel_data *cd) + __must_hold(&ctx->completion_lock) +{ + struct io_kiocb *req; + int i; + + for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) { + struct hlist_head *list; + + list = &ctx->cancel_hash[i]; + hlist_for_each_entry(req, list, hash_node) { + if (req->file != cd->file) + continue; + if (cd->seq == req->work.cancel_seq) + continue; + req->work.cancel_seq = cd->seq; + return req; + } + } + return NULL; +} + static bool io_poll_disarm(struct io_kiocb *req) __must_hold(&ctx->completion_lock) { @@ -6345,8 +6372,12 @@ static bool io_poll_disarm(struct io_kiocb *req) static int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd) __must_hold(&ctx->completion_lock) { - struct io_kiocb *req = io_poll_find(ctx, false, cd); + struct io_kiocb *req; + if (cd->flags & IORING_ASYNC_CANCEL_FD) + req = io_poll_file_find(ctx, cd); + else + req = io_poll_find(ctx, false, cd); if (!req) return -ENOENT; io_poll_cancel_req(req); @@ -6796,8 +6827,13 @@ 
static bool io_cancel_cb(struct io_wq_work *work, void *data) if (req->ctx != cd->ctx) return false; - if (req->cqe.user_data != cd->data) - return false; + if (cd->flags & IORING_ASYNC_CANCEL_FD) { + if (req->file != cd->file) + return false; + } else { + if (req->cqe.user_data != cd->data) + return false; + } if (cd->flags & IORING_ASYNC_CANCEL_ALL) { if (cd->seq == req->work.cancel_seq) return false; @@ -6851,7 +6887,8 @@ static int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd) ret = io_poll_cancel(ctx, cd); if (ret != -ENOENT) goto out; - ret = io_timeout_cancel(ctx, cd); + if (!(cd->flags & IORING_ASYNC_CANCEL_FD)) + ret = io_timeout_cancel(ctx, cd); out: spin_unlock(&ctx->completion_lock); return ret; @@ -6862,15 +6899,17 @@ static int io_async_cancel_prep(struct io_kiocb *req, { if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; - if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) + if (unlikely(req->flags & REQ_F_BUFFER_SELECT)) return -EINVAL; if (sqe->ioprio || sqe->off || sqe->len || sqe->splice_fd_in) return -EINVAL; req->cancel.addr = READ_ONCE(sqe->addr); req->cancel.flags = READ_ONCE(sqe->cancel_flags); - if (req->cancel.flags & ~IORING_ASYNC_CANCEL_ALL) + if (req->cancel.flags & ~(IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_FD)) return -EINVAL; + if (req->cancel.flags & IORING_ASYNC_CANCEL_FD) + req->cancel.fd = READ_ONCE(sqe->fd); return 0; } @@ -6919,7 +6958,21 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags) }; int ret; + if (cd.flags & IORING_ASYNC_CANCEL_FD) { + if (req->flags & REQ_F_FIXED_FILE) + req->file = io_file_get_fixed(req, req->cancel.fd, + issue_flags); + else + req->file = io_file_get_normal(req, req->cancel.fd); + if (!req->file) { + ret = -EBADF; + goto done; + } + cd.file = req->file; + } + ret = __io_async_cancel(&cd, req, issue_flags); +done: if (ret < 0) req_set_fail(req); io_req_complete_post(req, ret, 0); diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 476e58a2837f..cc7fe82a1798 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -191,8 +191,11 @@ enum { * ASYNC_CANCEL flags. * * IORING_ASYNC_CANCEL_ALL Cancel all requests that match the given key + * IORING_ASYNC_CANCEL_FD Key off 'fd' for cancelation rather than the + * request 'user_data' */ #define IORING_ASYNC_CANCEL_ALL (1U << 0) +#define IORING_ASYNC_CANCEL_FD (1U << 1) /* * IO completion data structure (Completion Queue Entry) -- cgit v1.2.3 From 970f256edb8c1259c8ed48d52b38215135396126 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 18 Apr 2022 10:44:02 -0600 Subject: io_uring: add support for IORING_ASYNC_CANCEL_ANY Rather than match on a specific key, be it user_data or file, allow canceling any request that we can lookup. Works like IORING_ASYNC_CANCEL_ALL in that it cancels multiple requests, but it doesn't key off user_data or the file. Can't be set with IORING_ASYNC_CANCEL_FD, as that's a key selector. Only one may be used at the time. 
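As a usage illustration (not part of these patches), the sketch below cancels
every pending request tied to a given file descriptor by combining
IORING_ASYNC_CANCEL_FD with IORING_ASYNC_CANCEL_ALL. It fills the SQE fields
the way io_async_cancel_prep() reads them, uses liburing only for ring setup
and submission, and the helper name plus the CANCEL_TAG value are invented
for the example.

#include <errno.h>
#include <liburing.h>
#include <string.h>

#define CANCEL_TAG 0xc0ffee    /* arbitrary tag to spot our own CQE */

static int cancel_all_on_fd(struct io_uring *ring, int fd)
{
    struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
    struct io_uring_cqe *cqe;
    __u64 ud;
    int ret, res;

    if (!sqe)
        return -EBUSY;
    memset(sqe, 0, sizeof(*sqe));
    sqe->opcode = IORING_OP_ASYNC_CANCEL;
    sqe->fd = fd;        /* key off the file, not user_data */
    sqe->cancel_flags = IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_ALL;
    sqe->user_data = CANCEL_TAG;

    ret = io_uring_submit(ring);
    if (ret < 0)
        return ret;
    /* the canceled requests post their own CQEs (typically -ECANCELED) */
    do {
        ret = io_uring_wait_cqe(ring, &cqe);
        if (ret < 0)
            return ret;
        res = cqe->res;
        ud = cqe->user_data;
        io_uring_cqe_seen(ring, cqe);
    } while (ud != CANCEL_TAG);
    return res;    /* number of requests canceled, or -errno */
}

The same pattern with IORING_ASYNC_CANCEL_ANY simply leaves the key unset,
since any request that can be looked up is canceled.
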
Signed-off-by: Jens Axboe Link: https://lore.kernel.org/r/20220418164402.75259-6-axboe@kernel.dk Signed-off-by: Jens Axboe --- fs/io_uring.c | 39 +++++++++++++++++++++++++-------------- include/uapi/linux/io_uring.h | 2 ++ 2 files changed, 27 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/fs/io_uring.c b/fs/io_uring.c index cf0d5437b77d..03134ec070a3 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6348,7 +6348,8 @@ static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx, list = &ctx->cancel_hash[i]; hlist_for_each_entry(req, list, hash_node) { - if (req->file != cd->file) + if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) && + req->file != cd->file) continue; if (cd->seq == req->work.cancel_seq) continue; @@ -6374,7 +6375,7 @@ static int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd) { struct io_kiocb *req; - if (cd->flags & IORING_ASYNC_CANCEL_FD) + if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_ANY)) req = io_poll_file_find(ctx, cd); else req = io_poll_find(ctx, false, cd); @@ -6543,9 +6544,10 @@ static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx, bool found = false; list_for_each_entry(req, &ctx->timeout_list, timeout.list) { - if (cd->data != req->cqe.user_data) + if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) && + cd->data != req->cqe.user_data) continue; - if (cd->flags & IORING_ASYNC_CANCEL_ALL) { + if (cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY)) { if (cd->seq == req->work.cancel_seq) continue; req->work.cancel_seq = cd->seq; @@ -6827,14 +6829,16 @@ static bool io_cancel_cb(struct io_wq_work *work, void *data) if (req->ctx != cd->ctx) return false; - if (cd->flags & IORING_ASYNC_CANCEL_FD) { + if (cd->flags & IORING_ASYNC_CANCEL_ANY) { + ; + } else if (cd->flags & IORING_ASYNC_CANCEL_FD) { if (req->file != cd->file) return false; } else { if (req->cqe.user_data != cd->data) return false; } - if (cd->flags & IORING_ASYNC_CANCEL_ALL) { + if (cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY)) { if (cd->seq == req->work.cancel_seq) return false; req->work.cancel_seq = cd->seq; @@ -6847,12 +6851,13 @@ static int io_async_cancel_one(struct io_uring_task *tctx, { enum io_wq_cancel cancel_ret; int ret = 0; + bool all; if (!tctx || !tctx->io_wq) return -ENOENT; - cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, - cd->flags & IORING_ASYNC_CANCEL_ALL); + all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY); + cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, all); switch (cancel_ret) { case IO_WQ_CANCEL_OK: ret = 0; @@ -6894,6 +6899,9 @@ out: return ret; } +#define CANCEL_FLAGS (IORING_ASYNC_CANCEL_ALL | IORING_ASYNC_CANCEL_FD | \ + IORING_ASYNC_CANCEL_ANY) + static int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { @@ -6906,10 +6914,13 @@ static int io_async_cancel_prep(struct io_kiocb *req, req->cancel.addr = READ_ONCE(sqe->addr); req->cancel.flags = READ_ONCE(sqe->cancel_flags); - if (req->cancel.flags & ~(IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_FD)) + if (req->cancel.flags & ~CANCEL_FLAGS) return -EINVAL; - if (req->cancel.flags & IORING_ASYNC_CANCEL_FD) + if (req->cancel.flags & IORING_ASYNC_CANCEL_FD) { + if (req->cancel.flags & IORING_ASYNC_CANCEL_ANY) + return -EINVAL; req->cancel.fd = READ_ONCE(sqe->fd); + } return 0; } @@ -6917,7 +6928,7 @@ static int io_async_cancel_prep(struct io_kiocb *req, static int __io_async_cancel(struct io_cancel_data *cd, struct io_kiocb *req, unsigned int issue_flags) { - bool 
cancel_all = cd->flags & IORING_ASYNC_CANCEL_ALL; + bool all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY); struct io_ring_ctx *ctx = cd->ctx; struct io_tctx_node *node; int ret, nr = 0; @@ -6926,7 +6937,7 @@ static int __io_async_cancel(struct io_cancel_data *cd, struct io_kiocb *req, ret = io_try_cancel(req, cd); if (ret == -ENOENT) break; - if (!cancel_all) + if (!all) return ret; nr++; } while (1); @@ -6939,13 +6950,13 @@ static int __io_async_cancel(struct io_cancel_data *cd, struct io_kiocb *req, ret = io_async_cancel_one(tctx, cd); if (ret != -ENOENT) { - if (!cancel_all) + if (!all) break; nr++; } } io_ring_submit_unlock(ctx, issue_flags); - return cancel_all ? nr : ret; + return all ? nr : ret; } static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags) diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index cc7fe82a1798..980d82eb196e 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -193,9 +193,11 @@ enum { * IORING_ASYNC_CANCEL_ALL Cancel all requests that match the given key * IORING_ASYNC_CANCEL_FD Key off 'fd' for cancelation rather than the * request 'user_data' + * IORING_ASYNC_CANCEL_ANY Match any request */ #define IORING_ASYNC_CANCEL_ALL (1U << 0) #define IORING_ASYNC_CANCEL_FD (1U << 1) +#define IORING_ASYNC_CANCEL_ANY (1U << 2) /* * IO completion data structure (Completion Queue Entry) -- cgit v1.2.3 From 47894438e916ca3fe44db33c2d4a1670fd296d6f Mon Sep 17 00:00:00 2001 From: Dylan Yudaken Date: Thu, 21 Apr 2022 02:13:40 -0700 Subject: io_uring: add trace support for CQE overflow Add trace function for overflowing CQ ring. Signed-off-by: Dylan Yudaken Link: https://lore.kernel.org/r/20220421091345.2115755-2-dylany@fb.com Signed-off-by: Jens Axboe --- include/trace/events/io_uring.h | 42 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h index cddf5b6fbeb4..42534ec2ab9d 100644 --- a/include/trace/events/io_uring.h +++ b/include/trace/events/io_uring.h @@ -530,7 +530,7 @@ TRACE_EVENT(io_uring_req_failed, ), TP_printk("ring %p, req %p, user_data 0x%llx, " - "op %d, flags 0x%x, prio=%d, off=%llu, addr=%llu, " + "op %d, flags 0x%x, prio=%d, off=%llu, addr=%llu, " "len=%u, rw_flags=0x%x, buf_index=%d, " "personality=%d, file_index=%d, pad=0x%llx/%llx, error=%d", __entry->ctx, __entry->req, __entry->user_data, @@ -543,6 +543,46 @@ TRACE_EVENT(io_uring_req_failed, (unsigned long long) __entry->pad2, __entry->error) ); + +/* + * io_uring_cqe_overflow - a CQE overflowed + * + * @ctx: pointer to a ring context structure + * @user_data: user data associated with the request + * @res: CQE result + * @cflags: CQE flags + * @ocqe: pointer to the overflow cqe (if available) + * + */ +TRACE_EVENT(io_uring_cqe_overflow, + + TP_PROTO(void *ctx, unsigned long long user_data, s32 res, u32 cflags, + void *ocqe), + + TP_ARGS(ctx, user_data, res, cflags, ocqe), + + TP_STRUCT__entry ( + __field( void *, ctx ) + __field( unsigned long long, user_data ) + __field( s32, res ) + __field( u32, cflags ) + __field( void *, ocqe ) + ), + + TP_fast_assign( + __entry->ctx = ctx; + __entry->user_data = user_data; + __entry->res = res; + __entry->cflags = cflags; + __entry->ocqe = ocqe; + ), + + TP_printk("ring %p, user_data 0x%llx, res %d, flags %x, " + "overflow_cqe %p", + __entry->ctx, __entry->user_data, __entry->res, + __entry->cflags, __entry->ocqe) +); + #endif /* 
_TRACE_IO_URING_H */ /* This part must be outside protection */ -- cgit v1.2.3 From e788be95a57a9bebe446878ce9bf2750f6fe4974 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 28 Apr 2022 17:25:16 -0600 Subject: task_work: allow TWA_SIGNAL without a rescheduling IPI Some use cases don't always need an IPI when sending a TWA_SIGNAL notification. Add TWA_SIGNAL_NO_IPI, which is just like TWA_SIGNAL, except it doesn't send an IPI to the target task. It merely sets TIF_NOTIFY_SIGNAL and wakes up the task. This can be useful in avoiding a forceful transition to the kernel if the task is running in userspace. Depending on the task_work in question, it may be quite fine waiting for the next reschedule or kernel enter anyway, or the use case may even have other mechanisms for hinting to the task that a transition may be useful. This can drive more cooperative scheduling of task_work. Reviewed-by: Pavel Begunkov Link: https://lore.kernel.org/r/821f42b6-7d91-8074-8212-d34998097de4@kernel.dk Signed-off-by: Jens Axboe --- include/linux/sched/signal.h | 13 +++++++++++-- include/linux/task_work.h | 1 + kernel/task_work.c | 25 +++++++++++++++++++------ 3 files changed, 31 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 3c8b34876744..66b689f6cfcb 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -355,14 +355,23 @@ static inline void clear_notify_signal(void) smp_mb__after_atomic(); } +/* + * Returns 'true' if kick_process() is needed to force a transition from + * user -> kernel to guarantee expedient run of TWA_SIGNAL based task_work. + */ +static inline bool __set_notify_signal(struct task_struct *task) +{ + return !test_and_set_tsk_thread_flag(task, TIF_NOTIFY_SIGNAL) && + !wake_up_state(task, TASK_INTERRUPTIBLE); +} + /* * Called to break out of interruptible wait loops, and enter the * exit_to_user_mode_loop(). */ static inline void set_notify_signal(struct task_struct *task) { - if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_SIGNAL) && - !wake_up_state(task, TASK_INTERRUPTIBLE)) + if (__set_notify_signal(task)) kick_process(task); } diff --git a/include/linux/task_work.h b/include/linux/task_work.h index 897494b597ba..795ef5a68429 100644 --- a/include/linux/task_work.h +++ b/include/linux/task_work.h @@ -17,6 +17,7 @@ enum task_work_notify_mode { TWA_NONE, TWA_RESUME, TWA_SIGNAL, + TWA_SIGNAL_NO_IPI, }; static inline bool task_work_pending(struct task_struct *task) diff --git a/kernel/task_work.c b/kernel/task_work.c index c59e1a49bc40..dff75bcde151 100644 --- a/kernel/task_work.c +++ b/kernel/task_work.c @@ -12,12 +12,22 @@ static struct callback_head work_exited; /* all we need is ->next == NULL */ * @notify: how to notify the targeted task * * Queue @work for task_work_run() below and notify the @task if @notify - * is @TWA_RESUME or @TWA_SIGNAL. @TWA_SIGNAL works like signals, in that the - * it will interrupt the targeted task and run the task_work. @TWA_RESUME - * work is run only when the task exits the kernel and returns to user mode, - * or before entering guest mode. Fails if the @task is exiting/exited and thus - * it can't process this @work. Otherwise @work->func() will be called when the - * @task goes through one of the aforementioned transitions, or exits. + * is @TWA_RESUME, @TWA_SIGNAL, or @TWA_SIGNAL_NO_IPI. 
+ * + * @TWA_SIGNAL works like signals, in that the it will interrupt the targeted + * task and run the task_work, regardless of whether the task is currently + * running in the kernel or userspace. + * @TWA_SIGNAL_NO_IPI works like @TWA_SIGNAL, except it doesn't send a + * reschedule IPI to force the targeted task to reschedule and run task_work. + * This can be advantageous if there's no strict requirement that the + * task_work be run as soon as possible, just whenever the task enters the + * kernel anyway. + * @TWA_RESUME work is run only when the task exits the kernel and returns to + * user mode, or before entering guest mode. + * + * Fails if the @task is exiting/exited and thus it can't process this @work. + * Otherwise @work->func() will be called when the @task goes through one of + * the aforementioned transitions, or exits. * * If the targeted task is exiting, then an error is returned and the work item * is not queued. It's up to the caller to arrange for an alternative mechanism @@ -53,6 +63,9 @@ int task_work_add(struct task_struct *task, struct callback_head *work, case TWA_SIGNAL: set_notify_signal(task); break; + case TWA_SIGNAL_NO_IPI: + __set_notify_signal(task); + break; default: WARN_ON_ONCE(1); break; -- cgit v1.2.3 From e1169f06d5bbdbc2b22ae4e3083a4bf75ae5ecee Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 25 Apr 2022 19:49:03 -0600 Subject: io_uring: use TWA_SIGNAL_NO_IPI if IORING_SETUP_COOP_TASKRUN is used If this is set, io_uring will never use an IPI to deliver a task_work notification. This can be used in the common case where a single task or thread communicates with the ring, and doesn't rely on io_uring_cqe_peek(). This provides a noticeable win in performance, both from eliminating the IPI itself, but also from avoiding interrupting the submitting task unnecessarily. Reviewed-by: Pavel Begunkov Link: https://lore.kernel.org/r/20220426014904.60384-6-axboe@kernel.dk Signed-off-by: Jens Axboe --- fs/io_uring.c | 17 +++++++++++++---- include/uapi/linux/io_uring.h | 8 ++++++++ 2 files changed, 21 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/fs/io_uring.c b/fs/io_uring.c index 3c669d8f5e57..0b9ae3615911 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -11327,12 +11327,20 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p, ctx->user = get_uid(current_user()); /* - * For SQPOLL, we just need a wakeup, always. + * For SQPOLL, we just need a wakeup, always. For !SQPOLL, if + * COOP_TASKRUN is set, then IPIs are never needed by the app. */ - if (ctx->flags & IORING_SETUP_SQPOLL) + ret = -EINVAL; + if (ctx->flags & IORING_SETUP_SQPOLL) { + /* IPI related flags don't make sense with SQPOLL */ + if (ctx->flags & IORING_SETUP_COOP_TASKRUN) + goto err; ctx->notify_method = TWA_SIGNAL_NO_IPI; - else + } else if (ctx->flags & IORING_SETUP_COOP_TASKRUN) { + ctx->notify_method = TWA_SIGNAL_NO_IPI; + } else { ctx->notify_method = TWA_SIGNAL; + } /* * This is just grabbed for accounting purposes. 
When a process exits, @@ -11431,7 +11439,8 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params) if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL | IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE | IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ | - IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL)) + IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL | + IORING_SETUP_COOP_TASKRUN)) return -EINVAL; return io_uring_create(entries, &p, params); diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 980d82eb196e..a84f29d657c3 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -102,6 +102,14 @@ enum { #define IORING_SETUP_ATTACH_WQ (1U << 5) /* attach to existing wq */ #define IORING_SETUP_R_DISABLED (1U << 6) /* start with ring disabled */ #define IORING_SETUP_SUBMIT_ALL (1U << 7) /* continue submit on error */ +/* + * Cooperative task running. When requests complete, they often require + * forcing the submitter to transition to the kernel to complete. If this + * flag is set, work will be done when the task transitions anyway, rather + * than force an inter-processor interrupt reschedule. This avoids interrupting + * a task running in userspace, and saves an IPI. + */ +#define IORING_SETUP_COOP_TASKRUN (1U << 8) enum { IORING_OP_NOP, -- cgit v1.2.3 From ef060ea9e4fd3b763e7060a3af0a258d2d5d7c0d Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 25 Apr 2022 19:49:04 -0600 Subject: io_uring: add IORING_SETUP_TASKRUN_FLAG If IORING_SETUP_COOP_TASKRUN is set to use cooperative scheduling for running task_work, then IORING_SETUP_TASKRUN_FLAG can be set so the application can tell if task_work is pending in the kernel for this ring. This allows use cases like io_uring_peek_cqe() to still function appropriately, or for the task to know when it would be useful to call io_uring_wait_cqe() to run pending events. 
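For illustration (not part of this patch), one way an application might
consume the flag is sketched below. It assumes the caller kept a pointer to
the mapped SQ ring flags word (the io_uring_params.sq_off.flags offset) for a
ring created with IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG, and
it uses liburing only for the peek/wait helpers; the function name is
invented.

#include <errno.h>
#include <liburing.h>

/* Returns 0 with *cqe set, or -EAGAIN if there is genuinely nothing to do. */
static int reap_one(struct io_uring *ring, const unsigned *sq_flags,
                    struct io_uring_cqe **cqe)
{
    if (io_uring_peek_cqe(ring, cqe) == 0)
        return 0;

    /*
     * No CQE posted yet, but the kernel flagged pending task_work: enter
     * the kernel so the deferred completions get run, then wait for one.
     */
    if (__atomic_load_n(sq_flags, __ATOMIC_RELAXED) & IORING_SQ_TASKRUN)
        return io_uring_wait_cqe(ring, cqe);

    return -EAGAIN;    /* nothing pending, go service other work */
}
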
Reviewed-by: Pavel Begunkov Link: https://lore.kernel.org/r/20220426014904.60384-7-axboe@kernel.dk Signed-off-by: Jens Axboe --- fs/io_uring.c | 14 +++++++++++--- include/uapi/linux/io_uring.h | 7 +++++++ 2 files changed, 18 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/fs/io_uring.c b/fs/io_uring.c index 0b9ae3615911..72cb2d50125c 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2506,6 +2506,8 @@ static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked) { if (!ctx) return; + if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) + atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); if (*locked) { io_submit_flush_completions(ctx); mutex_unlock(&ctx->uring_lock); @@ -2646,6 +2648,9 @@ static void io_req_task_work_add(struct io_kiocb *req, bool priority) if (running) return; + if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) + atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); + if (likely(!task_work_add(tsk, &tctx->task_work, ctx->notify_method))) return; @@ -11333,12 +11338,15 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p, ret = -EINVAL; if (ctx->flags & IORING_SETUP_SQPOLL) { /* IPI related flags don't make sense with SQPOLL */ - if (ctx->flags & IORING_SETUP_COOP_TASKRUN) + if (ctx->flags & (IORING_SETUP_COOP_TASKRUN | + IORING_SETUP_TASKRUN_FLAG)) goto err; ctx->notify_method = TWA_SIGNAL_NO_IPI; } else if (ctx->flags & IORING_SETUP_COOP_TASKRUN) { ctx->notify_method = TWA_SIGNAL_NO_IPI; } else { + if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) + goto err; ctx->notify_method = TWA_SIGNAL; } @@ -11440,10 +11448,10 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params) IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE | IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ | IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL | - IORING_SETUP_COOP_TASKRUN)) + IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG)) return -EINVAL; - return io_uring_create(entries, &p, params); + return io_uring_create(entries, &p, params); } SYSCALL_DEFINE2(io_uring_setup, u32, entries, diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index a84f29d657c3..fad63564678a 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -110,6 +110,12 @@ enum { * a task running in userspace, and saves an IPI. */ #define IORING_SETUP_COOP_TASKRUN (1U << 8) +/* + * If COOP_TASKRUN is set, get notified if task work is available for + * running and a kernel transition would be needed to run it. This sets + * IORING_SQ_TASKRUN in the sq ring flags. Not valid with COOP_TASKRUN. + */ +#define IORING_SETUP_TASKRUN_FLAG (1U << 9) enum { IORING_OP_NOP, @@ -256,6 +262,7 @@ struct io_sqring_offsets { */ #define IORING_SQ_NEED_WAKEUP (1U << 0) /* needs io_uring_enter wakeup */ #define IORING_SQ_CQ_OVERFLOW (1U << 1) /* CQ ring is overflown */ +#define IORING_SQ_TASKRUN (1U << 2) /* task should enter the kernel */ struct io_cqring_offsets { __u32 head; -- cgit v1.2.3 From 0455d4ccec548b0fb51db39a4d3350a7a80a0222 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 26 Apr 2022 12:11:33 -0600 Subject: io_uring: add POLL_FIRST support for send/sendmsg and recv/recvmsg If IORING_RECVSEND_POLL_FIRST is set for recv/recvmsg or send/sendmsg, then we arm poll first rather than attempt a receive or send upfront. This can be useful if we expect there to be no data (or space) available for the request, as we can then avoid wasting time on the initial issue attempt. 
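As a sketch of the userspace side (not from this patch), the flag travels in
sqe->addr2, which is where io_sendmsg_prep()/io_recvmsg_prep() above read it
from; liburing is assumed only for the basic prep helper and the function
name is invented:

#include <liburing.h>

static void prep_recv_poll_first(struct io_uring_sqe *sqe, int sockfd,
                                 void *buf, size_t len)
{
    io_uring_prep_recv(sqe, sockfd, buf, len, 0);
    /* skip the eager attempt, arm POLLIN first */
    sqe->addr2 = IORING_RECVSEND_POLL_FIRST;
}

This is mostly interesting for sockets that are expected to be empty (or, for
sends, full) at submit time, where the eager attempt would only burn cycles
before falling back to poll anyway.
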
Reviewed-by: Hao Xu Signed-off-by: Jens Axboe --- fs/io_uring.c | 27 +++++++++++++++++++++++++-- include/uapi/linux/io_uring.h | 10 ++++++++++ 2 files changed, 35 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/fs/io_uring.c b/fs/io_uring.c index efe4e92ad8ad..6db9ab8d4d15 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -637,6 +637,7 @@ struct io_sr_msg { int bgid; size_t len; size_t done_io; + unsigned int flags; }; struct io_open { @@ -5272,11 +5273,14 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_sr_msg *sr = &req->sr_msg; - if (unlikely(sqe->addr2 || sqe->file_index)) + if (unlikely(sqe->file_index)) return -EINVAL; sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr)); sr->len = READ_ONCE(sqe->len); + sr->flags = READ_ONCE(sqe->addr2); + if (sr->flags & ~IORING_RECVSEND_POLL_FIRST) + return -EINVAL; sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL; if (sr->msg_flags & MSG_DONTWAIT) req->flags |= REQ_F_NOWAIT; @@ -5311,6 +5315,10 @@ static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags) kmsg = &iomsg; } + if (!(req->flags & REQ_F_POLLED) && + (sr->flags & IORING_RECVSEND_POLL_FIRST)) + return io_setup_async_msg(req, kmsg); + flags = req->sr_msg.msg_flags; if (issue_flags & IO_URING_F_NONBLOCK) flags |= MSG_DONTWAIT; @@ -5353,6 +5361,10 @@ static int io_send(struct io_kiocb *req, unsigned int issue_flags) int min_ret = 0; int ret; + if (!(req->flags & REQ_F_POLLED) && + (sr->flags & IORING_RECVSEND_POLL_FIRST)) + return -EAGAIN; + sock = sock_from_file(req->file); if (unlikely(!sock)) return -ENOTSOCK; @@ -5505,11 +5517,14 @@ static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_sr_msg *sr = &req->sr_msg; - if (unlikely(sqe->addr2 || sqe->file_index)) + if (unlikely(sqe->file_index)) return -EINVAL; sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr)); sr->len = READ_ONCE(sqe->len); + sr->flags = READ_ONCE(sqe->addr2); + if (sr->flags & ~IORING_RECVSEND_POLL_FIRST) + return -EINVAL; sr->bgid = READ_ONCE(sqe->buf_group); sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL; if (sr->msg_flags & MSG_DONTWAIT) @@ -5546,6 +5561,10 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags) kmsg = &iomsg; } + if (!(req->flags & REQ_F_POLLED) && + (sr->flags & IORING_RECVSEND_POLL_FIRST)) + return io_setup_async_msg(req, kmsg); + if (req->flags & REQ_F_BUFFER_SELECT) { kbuf = io_recv_buffer_select(req, issue_flags); if (IS_ERR(kbuf)) @@ -5603,6 +5622,10 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags) int ret, min_ret = 0; bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; + if (!(req->flags & REQ_F_POLLED) && + (sr->flags & IORING_RECVSEND_POLL_FIRST)) + return -EAGAIN; + sock = sock_from_file(req->file); if (unlikely(!sock)) return -ENOTSOCK; diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index fad63564678a..06621a278cb6 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -213,6 +213,16 @@ enum { #define IORING_ASYNC_CANCEL_FD (1U << 1) #define IORING_ASYNC_CANCEL_ANY (1U << 2) +/* + * send/sendmsg and recv/recvmsg flags (sqe->addr2) + * + * IORING_RECVSEND_POLL_FIRST If set, instead of first attempting to send + * or receive and arm poll if that yields an + * -EAGAIN result, arm poll upfront and skip + * the initial transfer attempt. 
+ */ +#define IORING_RECVSEND_POLL_FIRST (1U << 0) + /* * IO completion data structure (Completion Queue Entry) */ -- cgit v1.2.3 From 1339f24b336db5ded9811f3fe7b948e0de207785 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sat, 7 May 2022 14:18:44 -0600 Subject: io_uring: allow allocated fixed files for openat/openat2 If the application passes in IORING_FILE_INDEX_ALLOC as the file_slot, then that's a hint to allocate a fixed file descriptor rather than have one be passed in directly. This can be useful for having io_uring manage the direct descriptor space. Normal open direct requests will complete with 0 for success, and < 0 in case of error. If io_uring is asked to allocated the direct descriptor, then the direct descriptor is returned in case of success. Reviewed-by: Hao Xu Signed-off-by: Jens Axboe --- fs/io_uring.c | 36 +++++++++++++++++++++++++++++++++--- include/uapi/linux/io_uring.h | 9 +++++++++ 2 files changed, 42 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/fs/io_uring.c b/fs/io_uring.c index 8c40411a7e78..f448264a1067 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -4697,7 +4697,7 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return __io_openat_prep(req, sqe); } -static int __maybe_unused io_file_bitmap_get(struct io_ring_ctx *ctx) +static int io_file_bitmap_get(struct io_ring_ctx *ctx) { struct io_file_table *table = &ctx->file_table; unsigned long nr = ctx->nr_user_files; @@ -4722,6 +4722,36 @@ static int __maybe_unused io_file_bitmap_get(struct io_ring_ctx *ctx) return -ENFILE; } +static int io_fixed_fd_install(struct io_kiocb *req, unsigned int issue_flags, + struct file *file, unsigned int file_slot) +{ + bool alloc_slot = file_slot == IORING_FILE_INDEX_ALLOC; + struct io_ring_ctx *ctx = req->ctx; + int ret; + + if (alloc_slot) { + io_ring_submit_lock(ctx, issue_flags); + ret = io_file_bitmap_get(ctx); + if (unlikely(ret < 0)) { + io_ring_submit_unlock(ctx, issue_flags); + return ret; + } + + file_slot = ret; + } else { + file_slot--; + } + + ret = io_install_fixed_file(req, file, issue_flags, file_slot); + if (alloc_slot) { + io_ring_submit_unlock(ctx, issue_flags); + if (!ret) + return file_slot; + } + + return ret; +} + static int io_openat2(struct io_kiocb *req, unsigned int issue_flags) { struct open_flags op; @@ -4777,8 +4807,8 @@ static int io_openat2(struct io_kiocb *req, unsigned int issue_flags) if (!fixed) fd_install(ret, file); else - ret = io_install_fixed_file(req, file, issue_flags, - req->open.file_slot - 1); + ret = io_fixed_fd_install(req, issue_flags, file, + req->open.file_slot); err: putname(req->open.filename); req->flags &= ~REQ_F_NEED_CLEANUP; diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 06621a278cb6..b7f02a55032a 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -63,6 +63,15 @@ struct io_uring_sqe { __u64 __pad2[2]; }; +/* + * If sqe->file_index is set to this for opcodes that instantiate a new + * direct descriptor (like openat/openat2/accept), then io_uring will allocate + * an available direct descriptor instead of having the application pass one + * in. The picked direct descriptor will be returned in cqe->res, or -ENFILE + * if the space is full. 
+ */ +#define IORING_FILE_INDEX_ALLOC (~0U) + enum { IOSQE_FIXED_FILE_BIT, IOSQE_IO_DRAIN_BIT, -- cgit v1.2.3 From a8da73a32b6e9271a613e5a0e90a8c35f40abeb8 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 9 May 2022 09:29:14 -0600 Subject: io_uring: add flag for allocating a fully sparse direct descriptor space Currently to setup a fully sparse descriptor space upfront, the app needs to alloate an array of the full size and memset it to -1 and then pass that in. Make this a bit easier by allowing a flag that simply does this internally rather than needing to copy each slot separately. This works with IORING_REGISTER_FILES2 as the flag is set in struct io_uring_rsrc_register, and is only allow when the type is IORING_RSRC_FILE as this doesn't make sense for registered buffers. Reviewed-by: Hao Xu Signed-off-by: Jens Axboe --- fs/io_uring.c | 15 ++++++++++++--- include/uapi/linux/io_uring.h | 8 +++++++- 2 files changed, 19 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/fs/io_uring.c b/fs/io_uring.c index b75a49d3831b..362189819898 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -9111,12 +9111,12 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg, for (i = 0; i < nr_args; i++, ctx->nr_user_files++) { struct io_fixed_file *file_slot; - if (copy_from_user(&fd, &fds[i], sizeof(fd))) { + if (fds && copy_from_user(&fd, &fds[i], sizeof(fd))) { ret = -EFAULT; goto fail; } /* allow sparse sets */ - if (fd == -1) { + if (!fds || fd == -1) { ret = -EINVAL; if (unlikely(*io_get_tag_slot(ctx->file_data, i))) goto fail; @@ -11759,14 +11759,20 @@ static __cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg, memset(&rr, 0, sizeof(rr)); if (copy_from_user(&rr, arg, size)) return -EFAULT; - if (!rr.nr || rr.resv || rr.resv2) + if (!rr.nr || rr.resv2) + return -EINVAL; + if (rr.flags & ~IORING_RSRC_REGISTER_SPARSE) return -EINVAL; switch (type) { case IORING_RSRC_FILE: + if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data) + break; return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data), rr.nr, u64_to_user_ptr(rr.tags)); case IORING_RSRC_BUFFER: + if (rr.flags & IORING_RSRC_REGISTER_SPARSE) + break; return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data), rr.nr, u64_to_user_ptr(rr.tags)); } @@ -11935,6 +11941,9 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, ret = io_sqe_buffers_unregister(ctx); break; case IORING_REGISTER_FILES: + ret = -EFAULT; + if (!arg) + break; ret = io_sqe_files_register(ctx, arg, nr_args, NULL); break; case IORING_UNREGISTER_FILES: diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index b7f02a55032a..36ec43dc7bf9 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -396,9 +396,15 @@ struct io_uring_files_update { __aligned_u64 /* __s32 * */ fds; }; +/* + * Register a fully sparse file space, rather than pass in an array of all + * -1 file descriptors. + */ +#define IORING_RSRC_REGISTER_SPARSE (1U << 0) + struct io_uring_rsrc_register { __u32 nr; - __u32 resv; + __u32 flags; __u64 resv2; __aligned_u64 data; __aligned_u64 tags; -- cgit v1.2.3 From 390ed29b5e425ba00da2b6113b74a14949f71b02 Mon Sep 17 00:00:00 2001 From: Hao Xu Date: Sat, 14 May 2022 22:20:43 +0800 Subject: io_uring: add IORING_ACCEPT_MULTISHOT for accept add an accept_flag IORING_ACCEPT_MULTISHOT for accept, which is to support multishot. 
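As an illustration of how the flag is communicated (the multishot handling
itself arrives in the follow-up patches of this series, so a kernel with only
this change is expected to reject a non-zero ioprio), accept flags travel in
sqe->ioprio; liburing is assumed for the prep helper and the function name is
invented:

#include <liburing.h>

static void prep_accept_multishot(struct io_uring_sqe *sqe, int listen_fd)
{
    io_uring_prep_accept(sqe, listen_fd, NULL, NULL, 0);
    /* one SQE, one CQE per accepted connection */
    sqe->ioprio |= IORING_ACCEPT_MULTISHOT;
}
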
Signed-off-by: Hao Xu Link: https://lore.kernel.org/r/20220514142046.58072-2-haoxu.linux@gmail.com Signed-off-by: Jens Axboe --- include/uapi/linux/io_uring.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include') diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 36ec43dc7bf9..15f821af9242 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -232,6 +232,11 @@ enum { */ #define IORING_RECVSEND_POLL_FIRST (1U << 0) +/* + * accept flags stored in sqe->ioprio + */ +#define IORING_ACCEPT_MULTISHOT (1U << 0) + /* * IO completion data structure (Completion Queue Entry) */ -- cgit v1.2.3 From c7fb19428d67dd0a2a78a4f237af01d39c78dc5a Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sat, 30 Apr 2022 14:38:53 -0600 Subject: io_uring: add support for ring mapped supplied buffers Provided buffers allow an application to supply io_uring with buffers that can then be grabbed for a read/receive request, when the data source is ready to deliver data. The existing scheme relies on using IORING_OP_PROVIDE_BUFFERS to do that, but it can be difficult to use in real world applications. It's pretty efficient if the application is able to supply back batches of provided buffers when they have been consumed and the application is ready to recycle them, but if fragmentation occurs in the buffer space, it can become difficult to supply enough buffers at the time. This hurts efficiency. Add a register op, IORING_REGISTER_PBUF_RING, which allows an application to setup a shared queue for each buffer group of provided buffers. The application can then supply buffers simply by adding them to this ring, and the kernel can consume then just as easily. The ring shares the head with the application, the tail remains private in the kernel. Provided buffers setup with IORING_REGISTER_PBUF_RING cannot use IORING_OP_{PROVIDE,REMOVE}_BUFFERS for adding or removing entries to the ring, they must use the mapped ring. Mapped provided buffer rings can co-exist with normal provided buffers, just not within the same group ID. To gauge overhead of the existing scheme and evaluate the mapped ring approach, a simple NOP benchmark was written. It uses a ring of 128 entries, and submits/completes 32 at the time. 'Replenish' is how many buffers are provided back at the time after they have been consumed: Test Replenish NOPs/sec ================================================================ No provided buffers NA ~30M Provided buffers 32 ~16M Provided buffers 1 ~10M Ring buffers 32 ~27M Ring buffers 1 ~27M The ring mapped buffers perform almost as well as not using provided buffers at all, and they don't care if you provided 1 or more back at the same time. This means application can just replenish as they go, rather than need to batch and compact, further reducing overhead in the application. The NOP benchmark above doesn't need to do any compaction, so that overhead isn't even reflected in the above test. 
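To make the userspace half concrete, below is a minimal sketch (not from the
patch) that registers a small buffer ring for one group and publishes a
single buffer. It assumes a uapi header carrying the definitions added here,
uses the raw register syscall, and invents the helper names; mirroring
io_ring_buffer_select() above, the application fills a slot and then
publishes the new tail with a release store, which the kernel observes with
smp_load_acquire().

#include <linux/io_uring.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#define RING_ENTRIES 128    /* must be a power of two */

static struct io_uring_buf_ring *setup_buf_ring(int ring_fd, int bgid)
{
    size_t size = RING_ENTRIES * sizeof(struct io_uring_buf);
    struct io_uring_buf_reg reg;
    void *mem;

    /* ring_addr must be page aligned; mmap guarantees that */
    mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
               MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    if (mem == MAP_FAILED)
        return NULL;

    memset(&reg, 0, sizeof(reg));
    reg.ring_addr = (unsigned long) mem;
    reg.ring_entries = RING_ENTRIES;
    reg.bgid = bgid;
    if (syscall(__NR_io_uring_register, ring_fd,
                IORING_REGISTER_PBUF_RING, &reg, 1) < 0) {
        munmap(mem, size);
        return NULL;
    }
    return mem;
}

/* hand one buffer to the kernel: fill the slot, then publish the tail */
static void buf_ring_add(struct io_uring_buf_ring *br, unsigned short *tail,
                         void *addr, unsigned int len, unsigned short bid)
{
    struct io_uring_buf *buf = &br->bufs[*tail & (RING_ENTRIES - 1)];

    buf->addr = (unsigned long) addr;
    buf->len = len;
    buf->bid = bid;
    /* pairs with the kernel's smp_load_acquire() of br->tail */
    __atomic_store_n(&br->tail, ++(*tail), __ATOMIC_RELEASE);
}

Requests that want a buffer from this group then set IOSQE_BUFFER_SELECT and
sqe->buf_group, exactly as with classic provided buffers, and the completion
reports the picked bid in cqe->flags via IORING_CQE_F_BUFFER.
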
Co-developed-by: Dylan Yudaken Signed-off-by: Jens Axboe --- fs/io_uring.c | 234 +++++++++++++++++++++++++++++++++++++++--- include/uapi/linux/io_uring.h | 36 +++++++ 2 files changed, 258 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/fs/io_uring.c b/fs/io_uring.c index 384cdbd40941..78192a9e7684 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -285,9 +285,26 @@ struct io_rsrc_data { bool quiesce; }; +#define IO_BUFFER_LIST_BUF_PER_PAGE (PAGE_SIZE / sizeof(struct io_uring_buf)) struct io_buffer_list { - struct list_head buf_list; + /* + * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not, + * then these are classic provided buffers and ->buf_list is used. + */ + union { + struct list_head buf_list; + struct { + struct page **buf_pages; + struct io_uring_buf_ring *buf_ring; + }; + }; __u16 bgid; + + /* below is for ring provided buffers */ + __u16 buf_nr_pages; + __u16 nr_entries; + __u32 head; + __u32 mask; }; struct io_buffer { @@ -804,6 +821,7 @@ enum { REQ_F_NEED_CLEANUP_BIT, REQ_F_POLLED_BIT, REQ_F_BUFFER_SELECTED_BIT, + REQ_F_BUFFER_RING_BIT, REQ_F_COMPLETE_INLINE_BIT, REQ_F_REISSUE_BIT, REQ_F_CREDS_BIT, @@ -855,6 +873,8 @@ enum { REQ_F_POLLED = BIT(REQ_F_POLLED_BIT), /* buffer already selected */ REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT), + /* buffer selected from ring, needs commit */ + REQ_F_BUFFER_RING = BIT(REQ_F_BUFFER_RING_BIT), /* completion is deferred through io_comp_state */ REQ_F_COMPLETE_INLINE = BIT(REQ_F_COMPLETE_INLINE_BIT), /* caller should reissue async */ @@ -979,6 +999,12 @@ struct io_kiocb { /* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */ struct io_buffer *kbuf; + + /* + * stores buffer ID for ring provided buffers, valid IFF + * REQ_F_BUFFER_RING is set. + */ + struct io_buffer_list *buf_list; }; union { @@ -1470,8 +1496,14 @@ static inline void io_req_set_rsrc_node(struct io_kiocb *req, static unsigned int __io_put_kbuf(struct io_kiocb *req, struct list_head *list) { - req->flags &= ~REQ_F_BUFFER_SELECTED; - list_add(&req->kbuf->list, list); + if (req->flags & REQ_F_BUFFER_RING) { + if (req->buf_list) + req->buf_list->head++; + req->flags &= ~REQ_F_BUFFER_RING; + } else { + list_add(&req->kbuf->list, list); + req->flags &= ~REQ_F_BUFFER_SELECTED; + } return IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT); } @@ -1480,7 +1512,7 @@ static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req) { lockdep_assert_held(&req->ctx->completion_lock); - if (likely(!(req->flags & REQ_F_BUFFER_SELECTED))) + if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))) return 0; return __io_put_kbuf(req, &req->ctx->io_buffers_comp); } @@ -1490,7 +1522,7 @@ static inline unsigned int io_put_kbuf(struct io_kiocb *req, { unsigned int cflags; - if (likely(!(req->flags & REQ_F_BUFFER_SELECTED))) + if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))) return 0; /* @@ -1505,7 +1537,10 @@ static inline unsigned int io_put_kbuf(struct io_kiocb *req, * We migrate buffers from the comp_list to the issue cache list * when we need one. 
*/ - if (issue_flags & IO_URING_F_UNLOCKED) { + if (req->flags & REQ_F_BUFFER_RING) { + /* no buffers to recycle for this case */ + cflags = __io_put_kbuf(req, NULL); + } else if (issue_flags & IO_URING_F_UNLOCKED) { struct io_ring_ctx *ctx = req->ctx; spin_lock(&ctx->completion_lock); @@ -1535,11 +1570,23 @@ static void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags) struct io_buffer_list *bl; struct io_buffer *buf; - if (likely(!(req->flags & REQ_F_BUFFER_SELECTED))) + if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))) return; /* don't recycle if we already did IO to this buffer */ if (req->flags & REQ_F_PARTIAL_IO) return; + /* + * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear + * the flag and hence ensure that bl->head doesn't get incremented. + * If the tail has already been incremented, hang on to it. + */ + if (req->flags & REQ_F_BUFFER_RING) { + if (req->buf_list) { + req->buf_index = req->buf_list->bgid; + req->flags &= ~REQ_F_BUFFER_RING; + } + return; + } io_ring_submit_lock(ctx, issue_flags); @@ -3487,6 +3534,53 @@ static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len, return ret; } +static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len, + struct io_buffer_list *bl, + unsigned int issue_flags) +{ + struct io_uring_buf_ring *br = bl->buf_ring; + struct io_uring_buf *buf; + __u32 head = bl->head; + + if (unlikely(smp_load_acquire(&br->tail) == head)) { + io_ring_submit_unlock(req->ctx, issue_flags); + return ERR_PTR(-ENOBUFS); + } + + head &= bl->mask; + if (head < IO_BUFFER_LIST_BUF_PER_PAGE) { + buf = &br->bufs[head]; + } else { + int off = head & (IO_BUFFER_LIST_BUF_PER_PAGE - 1); + int index = head / IO_BUFFER_LIST_BUF_PER_PAGE - 1; + buf = page_address(bl->buf_pages[index]); + buf += off; + } + if (*len > buf->len) + *len = buf->len; + req->flags |= REQ_F_BUFFER_RING; + req->buf_list = bl; + req->buf_index = buf->bid; + + if (!(issue_flags & IO_URING_F_UNLOCKED)) + return u64_to_user_ptr(buf->addr); + + /* + * If we came in unlocked, we have no choice but to + * consume the buffer here. This does mean it'll be + * pinned until the IO completes. But coming in + * unlocked means we're in io-wq context, hence there + * should be no further retry. For the locked case, the + * caller must ensure to call the commit when the + * transfer completes (or if we get -EAGAIN and must + * poll or retry). 
+ */ + req->buf_list = NULL; + bl->head++; + io_ring_submit_unlock(req->ctx, issue_flags); + return u64_to_user_ptr(buf->addr); +} + static void __user *io_buffer_select(struct io_kiocb *req, size_t *len, unsigned int issue_flags) { @@ -3502,6 +3596,9 @@ static void __user *io_buffer_select(struct io_kiocb *req, size_t *len, } /* selection helpers drop the submit lock again, if needed */ + if (bl->buf_nr_pages) + return io_ring_buffer_select(req, len, bl, issue_flags); + return io_provided_buffer_select(req, len, bl, issue_flags); } @@ -3558,7 +3655,7 @@ static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, unsigned int issue_flags) { - if (req->flags & REQ_F_BUFFER_SELECTED) { + if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) { iov[0].iov_base = u64_to_user_ptr(req->rw.addr); iov[0].iov_len = req->rw.len; return 0; @@ -3578,7 +3675,7 @@ static inline bool io_do_buffer_select(struct io_kiocb *req) { if (!(req->flags & REQ_F_BUFFER_SELECT)) return false; - return !(req->flags & REQ_F_BUFFER_SELECTED); + return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)); } static struct iovec *__io_import_iovec(int rw, struct io_kiocb *req, @@ -4872,6 +4969,18 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx, if (!nbufs) return 0; + if (bl->buf_nr_pages) { + int j; + + i = bl->buf_ring->tail - bl->head; + for (j = 0; j < bl->buf_nr_pages; j++) + unpin_user_page(bl->buf_pages[j]); + kvfree(bl->buf_pages); + bl->buf_pages = NULL; + bl->buf_nr_pages = 0; + return i; + } + /* the head kbuf is the list itself */ while (!list_empty(&bl->buf_list)) { struct io_buffer *nxt; @@ -4898,8 +5007,12 @@ static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags) ret = -ENOENT; bl = io_buffer_get_list(ctx, p->bgid); - if (bl) - ret = __io_remove_buffers(ctx, bl, p->nbufs); + if (bl) { + ret = -EINVAL; + /* can't use provide/remove buffers command on mapped buffers */ + if (!bl->buf_nr_pages) + ret = __io_remove_buffers(ctx, bl, p->nbufs); + } if (ret < 0) req_set_fail(req); @@ -5047,7 +5160,7 @@ static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags) bl = io_buffer_get_list(ctx, p->bgid); if (unlikely(!bl)) { - bl = kmalloc(sizeof(*bl), GFP_KERNEL); + bl = kzalloc(sizeof(*bl), GFP_KERNEL); if (!bl) { ret = -ENOMEM; goto err; @@ -5058,6 +5171,11 @@ static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags) goto err; } } + /* can't add buffers via this command for a mapped buffer ring */ + if (bl->buf_nr_pages) { + ret = -EINVAL; + goto err; + } ret = io_add_buffers(ctx, p, bl); err: @@ -12011,6 +12129,83 @@ err: return ret; } +static int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg) +{ + struct io_uring_buf_ring *br; + struct io_uring_buf_reg reg; + struct io_buffer_list *bl; + struct page **pages; + int nr_pages; + + if (copy_from_user(®, arg, sizeof(reg))) + return -EFAULT; + + if (reg.pad || reg.resv[0] || reg.resv[1] || reg.resv[2]) + return -EINVAL; + if (!reg.ring_addr) + return -EFAULT; + if (reg.ring_addr & ~PAGE_MASK) + return -EINVAL; + if (!is_power_of_2(reg.ring_entries)) + return -EINVAL; + + if (unlikely(reg.bgid < BGID_ARRAY && !ctx->io_bl)) { + int ret = io_init_bl_list(ctx); + if (ret) + return ret; + } + + bl = io_buffer_get_list(ctx, reg.bgid); + if (bl && bl->buf_nr_pages) + return -EEXIST; + if (!bl) { + bl = kzalloc(sizeof(*bl), GFP_KERNEL); + if (!bl) + return -ENOMEM; + } + + pages = 
io_pin_pages(reg.ring_addr, + struct_size(br, bufs, reg.ring_entries), + &nr_pages); + if (IS_ERR(pages)) { + kfree(bl); + return PTR_ERR(pages); + } + + br = page_address(pages[0]); + bl->buf_pages = pages; + bl->buf_nr_pages = nr_pages; + bl->nr_entries = reg.ring_entries; + bl->buf_ring = br; + bl->mask = reg.ring_entries - 1; + io_buffer_add_list(ctx, bl, reg.bgid); + return 0; +} + +static int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg) +{ + struct io_uring_buf_reg reg; + struct io_buffer_list *bl; + + if (copy_from_user(®, arg, sizeof(reg))) + return -EFAULT; + if (reg.pad || reg.resv[0] || reg.resv[1] || reg.resv[2]) + return -EINVAL; + + bl = io_buffer_get_list(ctx, reg.bgid); + if (!bl) + return -ENOENT; + if (!bl->buf_nr_pages) + return -EINVAL; + + __io_remove_buffers(ctx, bl, -1U); + if (bl->bgid >= BGID_ARRAY) { + xa_erase(&ctx->io_bl_xa, bl->bgid); + kfree(bl); + } + return 0; +} + static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, void __user *arg, unsigned nr_args) __releases(ctx->uring_lock) @@ -12142,6 +12337,18 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, case IORING_UNREGISTER_RING_FDS: ret = io_ringfd_unregister(ctx, arg, nr_args); break; + case IORING_REGISTER_PBUF_RING: + ret = -EINVAL; + if (!arg || nr_args != 1) + break; + ret = io_register_pbuf_ring(ctx, arg); + break; + case IORING_UNREGISTER_PBUF_RING: + ret = -EINVAL; + if (!arg || nr_args != 1) + break; + ret = io_unregister_pbuf_ring(ctx, arg); + break; default: ret = -EINVAL; break; @@ -12227,6 +12434,9 @@ static int __init io_uring_init(void) /* ->buf_index is u16 */ BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16)); BUILD_BUG_ON(BGID_ARRAY * sizeof(struct io_buffer_list) > PAGE_SIZE); + BUILD_BUG_ON(offsetof(struct io_uring_buf_ring, bufs) != 0); + BUILD_BUG_ON(offsetof(struct io_uring_buf, resv) != + offsetof(struct io_uring_buf_ring, tail)); /* should fit into one byte */ BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8)); diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 15f821af9242..ddf969ae5a79 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -384,6 +384,10 @@ enum { IORING_REGISTER_RING_FDS = 20, IORING_UNREGISTER_RING_FDS = 21, + /* register ring based provide buffer group */ + IORING_REGISTER_PBUF_RING = 22, + IORING_UNREGISTER_PBUF_RING = 23, + /* this goes last */ IORING_REGISTER_LAST }; @@ -461,6 +465,38 @@ struct io_uring_restriction { __u32 resv2[3]; }; +struct io_uring_buf { + __u64 addr; + __u32 len; + __u16 bid; + __u16 resv; +}; + +struct io_uring_buf_ring { + union { + /* + * To avoid spilling into more pages than we need to, the + * ring tail is overlaid with the io_uring_buf->resv field. 
+ */ + struct { + __u64 resv1; + __u32 resv2; + __u16 resv3; + __u16 tail; + }; + struct io_uring_buf bufs[0]; + }; +}; + +/* argument for IORING_(UN)REGISTER_PBUF_RING */ +struct io_uring_buf_reg { + __u64 ring_addr; + __u32 ring_entries; + __u16 bgid; + __u16 pad; + __u64 resv[3]; +}; + /* * io_uring_restriction->opcode values */ -- cgit v1.2.3 From 0e7579ca732a39cc377e17509dda9bfc4f6ba78e Mon Sep 17 00:00:00 2001 From: Vasily Averin Date: Thu, 19 May 2022 17:30:49 +0300 Subject: io_uring: fix incorrect __kernel_rwf_t cast Currently 'make C=1 fs/io_uring.o' generates sparse warning: CHECK fs/io_uring.c fs/io_uring.c: note: in included file (through include/trace/trace_events.h, include/trace/define_trace.h, i nclude/trace/events/io_uring.h): ./include/trace/events/io_uring.h:488:1: warning: incorrect type in assignment (different base types) expected unsigned int [usertype] op_flags got restricted __kernel_rwf_t const [usertype] rw_flags This happen on cast of sqe->rw_flags which is defined as __kernel_rwf_t, this type is bitwise and requires __force attribute for any casts. However rw_flags is a member of the union, and its access can be safely replaced by using of its neighbours, so let's use poll32_events to fix the sparse warning. Signed-off-by: Vasily Averin Link: https://lore.kernel.org/r/6f009241-a63f-ae43-a04b-62841aaef293@openvz.org Signed-off-by: Jens Axboe --- include/trace/events/io_uring.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h index 42534ec2ab9d..7cfd9c5cff37 100644 --- a/include/trace/events/io_uring.h +++ b/include/trace/events/io_uring.h @@ -520,7 +520,7 @@ TRACE_EVENT(io_uring_req_failed, __entry->off = sqe->off; __entry->addr = sqe->addr; __entry->len = sqe->len; - __entry->op_flags = sqe->rw_flags; + __entry->op_flags = sqe->poll32_events; __entry->buf_index = sqe->buf_index; __entry->personality = sqe->personality; __entry->file_index = sqe->file_index; -- cgit v1.2.3
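
Taken together, the sparse-registration and allocated-slot changes above let
io_uring manage the direct descriptor space end to end. A minimal sketch,
assuming the uapi definitions from this series and invented helper names: a
fully sparse table is registered once, after which an open can pick its own
slot.

#include <fcntl.h>
#include <liburing.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int register_sparse_files(int ring_fd, unsigned nr)
{
    struct io_uring_rsrc_register rr;

    memset(&rr, 0, sizeof(rr));
    rr.nr = nr;                 /* table size, every slot starts empty */
    rr.flags = IORING_RSRC_REGISTER_SPARSE;
    return syscall(__NR_io_uring_register, ring_fd,
                   IORING_REGISTER_FILES2, &rr, sizeof(rr));
}

/* cqe->res for this SQE is the allocated slot, or -ENFILE/-errno on failure */
static void prep_openat_alloc_slot(struct io_uring_sqe *sqe, const char *path)
{
    io_uring_prep_openat(sqe, AT_FDCWD, path, O_RDONLY, 0);
    sqe->file_index = IORING_FILE_INDEX_ALLOC;
}

Subsequent requests then pass the returned slot as sqe->fd with
IOSQE_FIXED_FILE set.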