author	Linus Torvalds	2022-08-02 13:20:44 -0700
committer	Linus Torvalds	2022-08-02 13:20:44 -0700
commit	b349b1181d24af1c151134a3c39725e94a5619dd (patch)
tree	7347cc4035de947c22e575ac7c649c0fa8658dd1 /io_uring/cancel.c
parent	efb2883060afc79638bb1eb19e2c30e7f6c5a178 (diff)
parent	f6b543fd03d347e8bf245cee4f2d54eb6ffd8fcb (diff)
Merge tag 'for-5.20/io_uring-2022-07-29' of git://git.kernel.dk/linux-block
Pull io_uring updates from Jens Axboe:

 - As per a (valid) complaint in the last merge window, fs/io_uring.c has
   grown quite large these days. io_uring isn't really tied to fs either,
   as it supports a wide variety of functionality outside of that. Move
   the code to io_uring/ and split it into files that each implement a
   specific request type, splitting some code out into helpers as well.
   The code is organized a lot better like this, and io_uring.c is now
   < 4K LOC (me).

 - Deprecate the epoll_ctl opcode. It'll still work, just trigger a
   warning once if used. If we don't get any complaints on this, and I
   don't expect any, then we can fully remove it in a future release (me).

 - Improve the cancel hash locking (Hao)

 - kbuf cleanups (Hao)

 - Efficiency improvements to the task_work handling (Dylan, Pavel)

 - Provided buffer improvements (Dylan)

 - Add multishot support for recv/recvmsg. This is similar to the
   multishot support we have for accept (and poll), where a single SQE
   can trigger every time data is received. For applications that expect
   to do more than a few receives on an instantiated socket, this greatly
   improves efficiency (Dylan).

 - Efficiency improvements for poll handling (Pavel)

 - Poll cancelation improvements (Pavel)

 - Allow specifying a range for direct descriptor allocations (Pavel)

 - Clean up the cqe32 handling (Pavel)

 - Move io_uring types to greatly clean up the tracing (Pavel)

 - Tons of great code cleanups and improvements (Pavel)

 - Add a way to do sync cancelations rather than through the sqe -> cqe
   interface, as that's a lot easier to use for some use cases (me).

 - Add support to IORING_OP_MSG_RING for sending direct descriptors to a
   different ring. This avoids the usually problematic SCM case, as we
   disallow those (me).

 - Make the per-command alloc cache we use for apoll generic, place
   limits on it, and use it for netmsg as well (me).

 - Various cleanups (me, Michal, Gustavo, Uros)

* tag 'for-5.20/io_uring-2022-07-29' of git://git.kernel.dk/linux-block: (172 commits)
  io_uring: ensure REQ_F_ISREG is set async offload
  net: fix compat pointer in get_compat_msghdr()
  io_uring: Don't require reinitable percpu_ref
  io_uring: fix types in io_recvmsg_multishot_overflow
  io_uring: Use atomic_long_try_cmpxchg in __io_account_mem
  io_uring: support multishot in recvmsg
  net: copy from user before calling __get_compat_msghdr
  net: copy from user before calling __copy_msghdr
  io_uring: support 0 length iov in buffer select in compat
  io_uring: fix multishot ending when not polled
  io_uring: add netmsg cache
  io_uring: impose max limit on apoll cache
  io_uring: add abstraction around apoll cache
  io_uring: move apoll cache to poll.c
  io_uring: consolidate hash_locked io-wq handling
  io_uring: clear REQ_F_HASH_LOCKED on hash removal
  io_uring: don't race double poll setting REQ_F_ASYNC_DATA
  io_uring: don't miss setting REQ_F_DOUBLE_POLL
  io_uring: disable multishot recvmsg
  io_uring: only trace one of complete or overflow
  ...
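For the sync cancelation interface added in this pull, a minimal userspace sketch could look like the following. This is illustrative only: it assumes a liburing new enough to provide io_uring_register_sync_cancel() (2.3 or later), and cancel_by_user_data_sync() is a hypothetical helper name, not part of the commit.

    #include <string.h>
    #include <liburing.h>

    /* Sketch: synchronously cancel any request matching user_data. */
    static int cancel_by_user_data_sync(struct io_uring *ring, __u64 user_data)
    {
        struct io_uring_sync_cancel_reg reg;

        memset(&reg, 0, sizeof(reg));
        reg.addr = user_data;      /* match on cqe user_data */
        reg.timeout.tv_sec = -1;   /* -1/-1 means no timeout: wait until done */
        reg.timeout.tv_nsec = -1;

        /* 0 on success; -ENOENT if nothing matched, -ETIME on timeout */
        return io_uring_register_sync_cancel(ring, &reg);
    }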
Diffstat (limited to 'io_uring/cancel.c')
-rw-r--r--	io_uring/cancel.c	315
1 file changed, 315 insertions(+), 0 deletions(-)
diff --git a/io_uring/cancel.c b/io_uring/cancel.c
new file mode 100644
index 000000000000..8435a1eba59a
--- /dev/null
+++ b/io_uring/cancel.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/namei.h>
+#include <linux/nospec.h>
+#include <linux/io_uring.h>
+
+#include <uapi/linux/io_uring.h>
+
+#include "io_uring.h"
+#include "tctx.h"
+#include "poll.h"
+#include "timeout.h"
+#include "cancel.h"
+
+struct io_cancel {
+ struct file *file;
+ u64 addr;
+ u32 flags;
+ s32 fd;
+};
+
+#define CANCEL_FLAGS (IORING_ASYNC_CANCEL_ALL | IORING_ASYNC_CANCEL_FD | \
+ IORING_ASYNC_CANCEL_ANY | IORING_ASYNC_CANCEL_FD_FIXED)
+
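+/*
+ * io-wq matching callback: depending on flags, match any request on the
+ * ring, any request on a given file, or the request with a given
+ * user_data. For ALL/ANY, skip work already seen in this cancel pass.
+ */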
+static bool io_cancel_cb(struct io_wq_work *work, void *data)
+{
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+ struct io_cancel_data *cd = data;
+
+ if (req->ctx != cd->ctx)
+ return false;
+ if (cd->flags & IORING_ASYNC_CANCEL_ANY) {
+ ;
+ } else if (cd->flags & IORING_ASYNC_CANCEL_FD) {
+ if (req->file != cd->file)
+ return false;
+ } else {
+ if (req->cqe.user_data != cd->data)
+ return false;
+ }
+ if (cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY)) {
+ if (cd->seq == req->work.cancel_seq)
+ return false;
+ req->work.cancel_seq = cd->seq;
+ }
+ return true;
+}
+
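+/*
+ * Try to cancel one (or, with ALL/ANY, every) matching request in this
+ * task's io-wq, mapping the io-wq result to 0, -EALREADY or -ENOENT.
+ */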
+static int io_async_cancel_one(struct io_uring_task *tctx,
+ struct io_cancel_data *cd)
+{
+ enum io_wq_cancel cancel_ret;
+ int ret = 0;
+ bool all;
+
+ if (!tctx || !tctx->io_wq)
+ return -ENOENT;
+
+ all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
+ cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, all);
+ switch (cancel_ret) {
+ case IO_WQ_CANCEL_OK:
+ ret = 0;
+ break;
+ case IO_WQ_CANCEL_RUNNING:
+ ret = -EALREADY;
+ break;
+ case IO_WQ_CANCEL_NOTFOUND:
+ ret = -ENOENT;
+ break;
+ }
+
+ return ret;
+}
+
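+/*
+ * Attempt cancelation everywhere the request may be pending: io-wq
+ * first, then armed poll requests, then (unless matching by fd)
+ * timeouts.
+ */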
+int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd,
+ unsigned issue_flags)
+{
+ struct io_ring_ctx *ctx = cd->ctx;
+ int ret;
+
+ WARN_ON_ONCE(!io_wq_current_is_worker() && tctx != current->io_uring);
+
+ ret = io_async_cancel_one(tctx, cd);
+ /*
+ * Fall through even for -EALREADY, as we may have a poll request
+ * armed that needs unarming.
+ */
+ if (!ret)
+ return 0;
+
+ ret = io_poll_cancel(ctx, cd, issue_flags);
+ if (ret != -ENOENT)
+ return ret;
+
+ spin_lock(&ctx->completion_lock);
+ if (!(cd->flags & IORING_ASYNC_CANCEL_FD))
+ ret = io_timeout_cancel(ctx, cd);
+ spin_unlock(&ctx->completion_lock);
+ return ret;
+}
+
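+/* Validate and decode an IORING_OP_ASYNC_CANCEL SQE */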
+int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ struct io_cancel *cancel = io_kiocb_to_cmd(req);
+
+ if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
+ return -EINVAL;
+ if (sqe->off || sqe->len || sqe->splice_fd_in)
+ return -EINVAL;
+
+ cancel->addr = READ_ONCE(sqe->addr);
+ cancel->flags = READ_ONCE(sqe->cancel_flags);
+ if (cancel->flags & ~CANCEL_FLAGS)
+ return -EINVAL;
+ if (cancel->flags & IORING_ASYNC_CANCEL_FD) {
+ if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
+ return -EINVAL;
+ cancel->fd = READ_ONCE(sqe->fd);
+ }
+
+ return 0;
+}
+
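+/*
+ * Cancel in the issuing task's context first; if nothing (more) is
+ * found there, fall back to walking every task attached to the ring
+ * and trying their io-wq instances too.
+ */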
+static int __io_async_cancel(struct io_cancel_data *cd,
+ struct io_uring_task *tctx,
+ unsigned int issue_flags)
+{
+ bool all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
+ struct io_ring_ctx *ctx = cd->ctx;
+ struct io_tctx_node *node;
+ int ret, nr = 0;
+
+ do {
+ ret = io_try_cancel(tctx, cd, issue_flags);
+ if (ret == -ENOENT)
+ break;
+ if (!all)
+ return ret;
+ nr++;
+ } while (1);
+
+ /* slow path, try all io-wq's */
+ io_ring_submit_lock(ctx, issue_flags);
+ ret = -ENOENT;
+ list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
+ struct io_uring_task *tctx = node->task->io_uring;
+
+ ret = io_async_cancel_one(tctx, cd);
+ if (ret != -ENOENT) {
+ if (!all)
+ break;
+ nr++;
+ }
+ }
+ io_ring_submit_unlock(ctx, issue_flags);
+ return all ? nr : ret;
+}
+
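+/*
+ * Issue an IORING_OP_ASYNC_CANCEL request: resolve the target file for
+ * fd-based matches, then run the cancelation.
+ */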
+int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
+{
+ struct io_cancel *cancel = io_kiocb_to_cmd(req);
+ struct io_cancel_data cd = {
+ .ctx = req->ctx,
+ .data = cancel->addr,
+ .flags = cancel->flags,
+ .seq = atomic_inc_return(&req->ctx->cancel_seq),
+ };
+ struct io_uring_task *tctx = req->task->io_uring;
+ int ret;
+
+ if (cd.flags & IORING_ASYNC_CANCEL_FD) {
+ if (req->flags & REQ_F_FIXED_FILE ||
+ cd.flags & IORING_ASYNC_CANCEL_FD_FIXED) {
+ req->flags |= REQ_F_FIXED_FILE;
+ req->file = io_file_get_fixed(req, cancel->fd,
+ issue_flags);
+ } else {
+ req->file = io_file_get_normal(req, cancel->fd);
+ }
+ if (!req->file) {
+ ret = -EBADF;
+ goto done;
+ }
+ cd.file = req->file;
+ }
+
+ ret = __io_async_cancel(&cd, tctx, issue_flags);
+done:
+ if (ret < 0)
+ req_set_fail(req);
+ io_req_set_res(req, ret, 0);
+ return IOU_OK;
+}
+
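+/* Initialize all buckets (lock + list head) of an io_hash_table */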
+void init_hash_table(struct io_hash_table *table, unsigned size)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++) {
+ spin_lock_init(&table->hbs[i].lock);
+ INIT_HLIST_HEAD(&table->hbs[i].list);
+ }
+}
+
+static int __io_sync_cancel(struct io_uring_task *tctx,
+ struct io_cancel_data *cd, int fd)
+{
+ struct io_ring_ctx *ctx = cd->ctx;
+
+ /* fixed must be grabbed every time since we drop the uring_lock */
+ if ((cd->flags & IORING_ASYNC_CANCEL_FD) &&
+ (cd->flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
+ unsigned long file_ptr;
+
+ if (unlikely(fd >= ctx->nr_user_files))
+ return -EBADF;
+ fd = array_index_nospec(fd, ctx->nr_user_files);
+ file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
+ cd->file = (struct file *) (file_ptr & FFS_MASK);
+ if (!cd->file)
+ return -EBADF;
+ }
+
+ return __io_async_cancel(cd, tctx, 0);
+}
+
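+/*
+ * Synchronous cancel for IORING_REGISTER_SYNC_CANCEL: retries until the
+ * match is gone, the timeout expires or a signal arrives, sleeping on
+ * cq_wait (with the uring_lock dropped) between attempts.
+ */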
+int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
+ __must_hold(&ctx->uring_lock)
+{
+ struct io_cancel_data cd = {
+ .ctx = ctx,
+ .seq = atomic_inc_return(&ctx->cancel_seq),
+ };
+ ktime_t timeout = KTIME_MAX;
+ struct io_uring_sync_cancel_reg sc;
+ struct fd f = { };
+ DEFINE_WAIT(wait);
+ int ret;
+
+ if (copy_from_user(&sc, arg, sizeof(sc)))
+ return -EFAULT;
+ if (sc.flags & ~CANCEL_FLAGS)
+ return -EINVAL;
+ if (sc.pad[0] || sc.pad[1] || sc.pad[2] || sc.pad[3])
+ return -EINVAL;
+
+ cd.data = sc.addr;
+ cd.flags = sc.flags;
+
+ /* we can grab a normal file descriptor upfront */
+ if ((cd.flags & IORING_ASYNC_CANCEL_FD) &&
+ !(cd.flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
+ f = fdget(sc.fd);
+ if (!f.file)
+ return -EBADF;
+ cd.file = f.file;
+ }
+
+ ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);
+
+ /* found something, done! */
+ if (ret != -EALREADY)
+ goto out;
+
+ if (sc.timeout.tv_sec != -1UL || sc.timeout.tv_nsec != -1UL) {
+ struct timespec64 ts = {
+ .tv_sec = sc.timeout.tv_sec,
+ .tv_nsec = sc.timeout.tv_nsec
+ };
+
+ timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
+ }
+
+ /*
+ * Keep looking until we get -ENOENT. We'll get woken every time a
+ * request completes and will retry the cancelation.
+ */
+ do {
+ cd.seq = atomic_inc_return(&ctx->cancel_seq);
+
+ prepare_to_wait(&ctx->cq_wait, &wait, TASK_INTERRUPTIBLE);
+
+ ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);
+
+ if (ret != -EALREADY)
+ break;
+
+ mutex_unlock(&ctx->uring_lock);
+ ret = io_run_task_work_sig();
+ if (ret < 0) {
+ mutex_lock(&ctx->uring_lock);
+ break;
+ }
+ ret = schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS);
+ mutex_lock(&ctx->uring_lock);
+ if (!ret) {
+ ret = -ETIME;
+ break;
+ }
+ } while (1);
+
+ finish_wait(&ctx->cq_wait, &wait);
+
+ if (ret == -ENOENT || ret > 0)
+ ret = 0;
+out:
+ fdput(f);
+ return ret;
+}
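
As a companion to the synchronous interface, the SQE-based path implemented above can be driven from userspace roughly as follows. Again a sketch, not from the commit: it assumes liburing's io_uring_prep_cancel_fd() helper (present since liburing 2.2), and cancel_all_on_fd() is a hypothetical name.

    #include <errno.h>
    #include <liburing.h>

    /*
     * Sketch: cancel every pending request on fd via IORING_OP_ASYNC_CANCEL
     * with IORING_ASYNC_CANCEL_ALL. Assumes no other completions are in
     * flight, so the next CQE belongs to the cancel request itself.
     */
    static int cancel_all_on_fd(struct io_uring *ring, int fd)
    {
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        struct io_uring_cqe *cqe;
        int ret;

        if (!sqe)
            return -EAGAIN;
        /* the prep helper sets IORING_ASYNC_CANCEL_FD internally */
        io_uring_prep_cancel_fd(sqe, fd, IORING_ASYNC_CANCEL_ALL);
        io_uring_submit(ring);

        ret = io_uring_wait_cqe(ring, &cqe);
        if (ret < 0)
            return ret;
        /* with _ALL set, res is the number of requests canceled */
        ret = cqe->res;
        io_uring_cqe_seen(ring, cqe);
        return ret;
    }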