author | Linus Torvalds | 2021-05-15 08:43:44 -0700
---|---|---
committer | Linus Torvalds | 2021-05-15 08:43:44 -0700
commit | 56015910355992f040f6163dcec96642021d2737 (patch) |
tree | d41cc975754413ef50635e9373a3d0604967eae1 |
parent | 41f035c0626521fb2fdd694803c3397dbaddc9f3 (diff) |
parent | 489809e2e22b3dedc0737163d97eb2b574137b42 (diff) |
Merge tag 'io_uring-5.13-2021-05-14' of git://git.kernel.dk/linux-block
Pull io_uring fixes from Jens Axboe:
"Just a few minor fixes/changes:
- Fix issue with double free race for linked timeout completions
- Fix reference issue with timeouts
- Remove last few places that make SQPOLL special, since it's just an
io thread now.
  - Bump maximum allowed registered buffers, as we don't allocate as
    much anymore" (see the sketch just below)
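To put the last bullet in concrete terms: the registration cap moves from UIO_MAXIOV (1024) to IORING_MAX_REG_BUFFERS (1U << 14, i.e. 16384), as the diff further down shows. Below is a minimal userspace sketch, assuming liburing's io_uring_register_buffers(); it is not taken from the patch, and NR_BUFS and BUF_SIZE are arbitrary illustrative values.

```c
/*
 * Minimal sketch (not from the patch itself): registering a set of
 * fixed buffers larger than the old UIO_MAXIOV (1024) cap. With the
 * new IORING_MAX_REG_BUFFERS limit, up to 16384 are accepted.
 */
#include <liburing.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

#define NR_BUFS  4096   /* > 1024, so pre-5.13 kernels reject it */
#define BUF_SIZE 4096   /* arbitrary buffer size for illustration */

int main(void)
{
	struct io_uring ring;
	struct iovec *iovs;
	int i, ret;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %s\n", strerror(-ret));
		return 1;
	}

	iovs = calloc(NR_BUFS, sizeof(*iovs));
	if (!iovs)
		return 1;
	for (i = 0; i < NR_BUFS; i++) {
		iovs[i].iov_base = malloc(BUF_SIZE);
		iovs[i].iov_len = BUF_SIZE;
	}

	/* One call registers (and pins) the whole set of buffers. */
	ret = io_uring_register_buffers(&ring, iovs, NR_BUFS);
	printf("register_buffers(%d): %s\n", NR_BUFS,
	       ret < 0 ? strerror(-ret) : "ok");

	io_uring_queue_exit(&ring);
	return ret < 0;
}
```

On a pre-5.13 kernel this registration fails with EINVAL at the nr_args check shown in the diff below; even where it is accepted, pinning this much memory can still fail with ENOMEM if it exceeds the caller's limits.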
* tag 'io_uring-5.13-2021-05-14' of git://git.kernel.dk/linux-block:
io_uring: increase max number of reg buffers
io_uring: further remove sqpoll limits on opcodes
io_uring: fix ltout double free on completion race
io_uring: fix link timeout refs
-rw-r--r-- | fs/io_uring.c | 19 |
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index f46acbbeed57..e481ac8a757a 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -100,6 +100,8 @@
 #define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
 					 IORING_REGISTER_LAST + IORING_OP_LAST)
 
+#define IORING_MAX_REG_BUFFERS	(1U << 14)
+
 #define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
 			IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
 			IOSQE_BUFFER_SELECT)
@@ -4035,7 +4037,7 @@ static int io_epoll_ctl_prep(struct io_kiocb *req,
 #if defined(CONFIG_EPOLL)
 	if (sqe->ioprio || sqe->buf_index)
 		return -EINVAL;
-	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
+	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
 
 	req->epoll.epfd = READ_ONCE(sqe->fd);
@@ -4150,7 +4152,7 @@ static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
 
 static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
+	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
 	if (sqe->ioprio || sqe->buf_index)
 		return -EINVAL;
@@ -5827,8 +5829,6 @@ done:
 static int io_rsrc_update_prep(struct io_kiocb *req,
 				const struct io_uring_sqe *sqe)
 {
-	if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL))
-		return -EINVAL;
 	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
 		return -EINVAL;
 	if (sqe->ioprio || sqe->rw_flags)
@@ -6354,19 +6354,20 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
 	 * We don't expect the list to be empty, that will only happen if we
 	 * race with the completion of the linked work.
 	 */
-	if (prev && req_ref_inc_not_zero(prev))
+	if (prev) {
 		io_remove_next_linked(prev);
-	else
-		prev = NULL;
+		if (!req_ref_inc_not_zero(prev))
+			prev = NULL;
+	}
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
 	if (prev) {
 		io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
 		io_put_req_deferred(prev, 1);
+		io_put_req_deferred(req, 1);
 	} else {
 		io_req_complete_post(req, -ETIME, 0);
 	}
-	io_put_req_deferred(req, 1);
 	return HRTIMER_NORESTART;
 }
 
@@ -8390,7 +8391,7 @@ static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
 
 	if (ctx->user_bufs)
 		return -EBUSY;
-	if (!nr_args || nr_args > UIO_MAXIOV)
+	if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
 		return -EINVAL;
 	ret = io_rsrc_node_switch_start(ctx);
 	if (ret)
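Both timeout fixes concern the linked-timeout completion path, io_link_timeout_fn() in the largest hunk above: the old code could drop the timeout request's reference twice when the timer fired while the linked request was completing. As a hedged sketch of the userspace pattern that exercises this path, assuming liburing and not taken from the patch: a read is linked to an IORING_OP_LINK_TIMEOUT SQE, and the race is entirely kernel-internal, so userspace simply reaps two CQEs. The pipe and the one-second expiry are arbitrary choices.

```c
/*
 * Hedged sketch, assuming liburing: a read linked to a timeout via
 * IOSQE_IO_LINK. If the read hasn't completed when the timeout fires,
 * the kernel cancels it; the race fixed above is between that firing
 * and a concurrent completion of the linked request.
 */
#include <liburing.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	char buf[64];
	int fds[2], i;

	if (pipe(fds) < 0 || io_uring_queue_init(4, &ring, 0) < 0)
		return 1;

	/* A read that never completes on its own: nobody writes the pipe. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fds[0], buf, sizeof(buf), 0);
	sqe->flags |= IOSQE_IO_LINK;	/* next SQE is this request's timeout */

	/* Linked timeout: cancels the read after one second. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_link_timeout(sqe, &ts, 0);

	io_uring_submit(&ring);

	/* Reap both CQEs: the canceled read and the timeout itself. */
	for (i = 0; i < 2; i++) {
		if (io_uring_wait_cqe(&ring, &cqe) < 0)
			break;
		printf("res=%d (%s)\n", cqe->res,
		       cqe->res < 0 ? strerror(-cqe->res) : "ok");
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}
```

The SQPOLL-related hunks need no userspace change at all: epoll_ctl, statx, and file/rsrc update requests that previously returned EINVAL on SQPOLL rings are now simply allowed, since SQPOLL submission runs as an ordinary io thread.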