From 7c25c0d16ef3c37e49c593ac92f69fa3884d4bb9 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 16 Feb 2021 07:17:00 -0700 Subject: io_uring: remove the need for relying on an io-wq fallback worker We hit this case when the task is exiting, and we need somewhere to do background cleanup of requests. Instead of relying on the io-wq task manager to do this work for us, just stuff it somewhere where we can safely run it ourselves directly. Signed-off-by: Jens Axboe --- fs/io-wq.c | 12 ------------ fs/io-wq.h | 2 -- fs/io_uring.c | 37 ++++++++++++++++++++++++++++++++++--- 3 files changed, 34 insertions(+), 17 deletions(-) (limited to 'fs') diff --git a/fs/io-wq.c b/fs/io-wq.c index c36bbcd823ce..800b299f9772 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include @@ -775,9 +774,6 @@ static int io_wq_manager(void *data) complete(&wq->done); while (!kthread_should_stop()) { - if (current->task_works) - task_work_run(); - for_each_node(node) { struct io_wqe *wqe = wq->wqes[node]; bool fork_worker[2] = { false, false }; @@ -800,9 +796,6 @@ static int io_wq_manager(void *data) schedule_timeout(HZ); } - if (current->task_works) - task_work_run(); - out: if (refcount_dec_and_test(&wq->refs)) { complete(&wq->done); @@ -1160,11 +1153,6 @@ void io_wq_destroy(struct io_wq *wq) __io_wq_destroy(wq); } -struct task_struct *io_wq_get_task(struct io_wq *wq) -{ - return wq->manager; -} - static bool io_wq_worker_affinity(struct io_worker *worker, void *data) { struct task_struct *task = worker->task; diff --git a/fs/io-wq.h b/fs/io-wq.h index 096f1021018e..a1610702f222 100644 --- a/fs/io-wq.h +++ b/fs/io-wq.h @@ -124,8 +124,6 @@ typedef bool (work_cancel_fn)(struct io_wq_work *, void *); enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel, void *data, bool cancel_all); -struct task_struct *io_wq_get_task(struct io_wq *wq); - #if defined(CONFIG_IO_WQ) extern void io_wq_worker_sleeping(struct task_struct *); extern void io_wq_worker_running(struct task_struct *); diff --git a/fs/io_uring.c b/fs/io_uring.c index ace7494e7404..e3eb37304e24 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -456,6 +456,9 @@ struct io_ring_ctx { struct io_restriction restrictions; + /* exit task_work */ + struct callback_head *exit_task_work; + /* Keep this last, we don't need it for the fast path */ struct work_struct exit_work; }; @@ -2328,11 +2331,14 @@ static int io_req_task_work_add(struct io_kiocb *req) static void io_req_task_work_add_fallback(struct io_kiocb *req, task_work_func_t cb) { - struct task_struct *tsk = io_wq_get_task(req->ctx->io_wq); + struct io_ring_ctx *ctx = req->ctx; + struct callback_head *head; init_task_work(&req->task_work, cb); - task_work_add(tsk, &req->task_work, TWA_NONE); - wake_up_process(tsk); + do { + head = READ_ONCE(ctx->exit_task_work); + req->task_work.next = head; + } while (cmpxchg(&ctx->exit_task_work, head, &req->task_work) != head); } static void __io_req_task_cancel(struct io_kiocb *req, int error) @@ -8835,6 +8841,28 @@ static int io_remove_personalities(int id, void *p, void *data) return 0; } +static void io_run_ctx_fallback(struct io_ring_ctx *ctx) +{ + struct callback_head *work, *head, *next; + + do { + do { + head = NULL; + work = READ_ONCE(ctx->exit_task_work); + } while (cmpxchg(&ctx->exit_task_work, work, head) != work); + + if (!work) + break; + + do { + next = work->next; + work->func(work); + work = next; + cond_resched(); + } while (work); + } while (1); +} + static void 
io_ring_exit_work(struct work_struct *work) { struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, @@ -8848,6 +8876,7 @@ static void io_ring_exit_work(struct work_struct *work) */ do { io_uring_try_cancel_requests(ctx, NULL, NULL); + io_run_ctx_fallback(ctx); } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20)); io_ring_ctx_free(ctx); } @@ -9243,6 +9272,8 @@ static int io_uring_flush(struct file *file, void *data) io_req_caches_free(ctx, current); } + io_run_ctx_fallback(ctx); + if (!tctx) return 0; -- cgit v1.2.3 From 1cbd9c2bcf02a3be91e14c7206d4b6c0346540ed Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 16 Feb 2021 10:06:21 -0700 Subject: io-wq: don't create any IO workers upfront When the manager thread starts up, it creates a worker per node for the given context. Just let these get created dynamically, like we do for adding further workers. Signed-off-by: Jens Axboe --- fs/io-wq.c | 12 ------------ 1 file changed, 12 deletions(-) (limited to 'fs') diff --git a/fs/io-wq.c b/fs/io-wq.c index 800b299f9772..e9e218274c76 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -759,18 +759,7 @@ static int io_wq_manager(void *data) struct io_wq *wq = data; int node; - /* create fixed workers */ refcount_set(&wq->refs, 1); - for_each_node(node) { - if (!node_online(node)) - continue; - if (create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND)) - continue; - set_bit(IO_WQ_BIT_ERROR, &wq->state); - set_bit(IO_WQ_BIT_EXIT, &wq->state); - goto out; - } - complete(&wq->done); while (!kthread_should_stop()) { @@ -796,7 +785,6 @@ static int io_wq_manager(void *data) schedule_timeout(HZ); } -out: if (refcount_dec_and_test(&wq->refs)) { complete(&wq->done); return 0; -- cgit v1.2.3 From d25e3a3de0d6fb2f660dbc7d643b2c632beb1743 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 16 Feb 2021 11:41:41 -0700 Subject: io_uring: disable io-wq attaching Moving towards making the io_wq per ring per task, so we can't really share it between rings. Which is fine, since we've now dropped some of that fat from it. Retain compatibility with how attaching works, so that any attempt to attach to an fd that doesn't exist, or isn't an io_uring fd, will fail like it did before. Signed-off-by: Jens Axboe --- fs/io_uring.c | 55 ++++++++++++++++++++++--------------------------------- 1 file changed, 22 insertions(+), 33 deletions(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index e3eb37304e24..d6c2ff6124fd 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -8105,12 +8105,9 @@ static struct io_wq_work *io_free_work(struct io_wq_work *work) return req ? 
&req->work : NULL; } -static int io_init_wq_offload(struct io_ring_ctx *ctx, - struct io_uring_params *p) +static int io_init_wq_offload(struct io_ring_ctx *ctx) { struct io_wq_data data; - struct fd f; - struct io_ring_ctx *ctx_attach; unsigned int concurrency; int ret = 0; @@ -8118,37 +8115,15 @@ static int io_init_wq_offload(struct io_ring_ctx *ctx, data.free_work = io_free_work; data.do_work = io_wq_submit_work; - if (!(p->flags & IORING_SETUP_ATTACH_WQ)) { - /* Do QD, or 4 * CPUS, whatever is smallest */ - concurrency = min(ctx->sq_entries, 4 * num_online_cpus()); - - ctx->io_wq = io_wq_create(concurrency, &data); - if (IS_ERR(ctx->io_wq)) { - ret = PTR_ERR(ctx->io_wq); - ctx->io_wq = NULL; - } - return ret; - } - - f = fdget(p->wq_fd); - if (!f.file) - return -EBADF; - - if (f.file->f_op != &io_uring_fops) { - ret = -EINVAL; - goto out_fput; - } + /* Do QD, or 4 * CPUS, whatever is smallest */ + concurrency = min(ctx->sq_entries, 4 * num_online_cpus()); - ctx_attach = f.file->private_data; - /* @io_wq is protected by holding the fd */ - if (!io_wq_get(ctx_attach->io_wq, &data)) { - ret = -EINVAL; - goto out_fput; + ctx->io_wq = io_wq_create(concurrency, &data); + if (IS_ERR(ctx->io_wq)) { + ret = PTR_ERR(ctx->io_wq); + ctx->io_wq = NULL; } - ctx->io_wq = ctx_attach->io_wq; -out_fput: - fdput(f); return ret; } @@ -8200,6 +8175,20 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx, { int ret; + /* Retain compatibility with failing for an invalid attach attempt */ + if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) == + IORING_SETUP_ATTACH_WQ) { + struct fd f; + + f = fdget(p->wq_fd); + if (!f.file) + return -ENXIO; + if (f.file->f_op != &io_uring_fops) { + fdput(f); + return -EINVAL; + } + fdput(f); + } if (ctx->flags & IORING_SETUP_SQPOLL) { struct io_sq_data *sqd; @@ -8257,7 +8246,7 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx, } done: - ret = io_init_wq_offload(ctx, p); + ret = io_init_wq_offload(ctx); if (ret) goto err; -- cgit v1.2.3 From 3b094e727dd5b24b4b259a8617b375dd20c16347 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 16 Feb 2021 15:42:24 -0700 Subject: io-wq: get rid of wq->use_refs We don't support attach anymore, so doesn't make sense to carry the use_refs reference count. Get rid of it. 
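For reference, what is being deleted is the standard opt-in sharing idiom built on refcount_inc_not_zero(): an attach succeeds only while the object is still live, and the last put performs the real destroy. A minimal sketch of that idiom (illustrative only; shared_wq_get()/shared_wq_put() are made-up names, not the io-wq functions):

#include <linux/refcount.h>
#include <linux/slab.h>

struct shared_wq {
	refcount_t use_refs;	/* starts at 1 for the owner, +1 per attacher */
};

static bool shared_wq_get(struct shared_wq *wq)
{
	/* fails once the owner has dropped its reference */
	return refcount_inc_not_zero(&wq->use_refs);
}

static void shared_wq_put(struct shared_wq *wq)
{
	/* last user out, owner or attacher, frees the object */
	if (refcount_dec_and_test(&wq->use_refs))
		kfree(wq);
}

With attaching gone there is exactly one owner per io_wq, so the conditional put wrapper is dead weight and io_wq_destroy() can tear the queue down unconditionally, as the hunks below show.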
Signed-off-by: Jens Axboe --- fs/io-wq.c | 19 +------------------ fs/io-wq.h | 1 - 2 files changed, 1 insertion(+), 19 deletions(-) (limited to 'fs') diff --git a/fs/io-wq.c b/fs/io-wq.c index e9e218274c76..0c47febfed9b 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -122,8 +122,6 @@ struct io_wq { struct completion done; struct hlist_node cpuhp_node; - - refcount_t use_refs; }; static enum cpuhp_state io_wq_online; @@ -1086,7 +1084,6 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data) ret = -ENOMEM; goto err; } - refcount_set(&wq->use_refs, 1); reinit_completion(&wq->done); return wq; } @@ -1104,15 +1101,7 @@ err_wq: return ERR_PTR(ret); } -bool io_wq_get(struct io_wq *wq, struct io_wq_data *data) -{ - if (data->free_work != wq->free_work || data->do_work != wq->do_work) - return false; - - return refcount_inc_not_zero(&wq->use_refs); -} - -static void __io_wq_destroy(struct io_wq *wq) +void io_wq_destroy(struct io_wq *wq) { int node; @@ -1135,12 +1124,6 @@ static void __io_wq_destroy(struct io_wq *wq) kfree(wq); } -void io_wq_destroy(struct io_wq *wq) -{ - if (refcount_dec_and_test(&wq->use_refs)) - __io_wq_destroy(wq); -} - static bool io_wq_worker_affinity(struct io_worker *worker, void *data) { struct task_struct *task = worker->task; diff --git a/fs/io-wq.h b/fs/io-wq.h index a1610702f222..d2cf284b4641 100644 --- a/fs/io-wq.h +++ b/fs/io-wq.h @@ -108,7 +108,6 @@ struct io_wq_data { }; struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data); -bool io_wq_get(struct io_wq *wq, struct io_wq_data *data); void io_wq_destroy(struct io_wq *wq); void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work); -- cgit v1.2.3 From 5aa75ed5b93f086c455a3c67239b0471ff5a1526 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 16 Feb 2021 12:56:50 -0700 Subject: io_uring: tie async worker side to the task context Move it outside of the io_ring_ctx, and tie it to the io_uring task context. 
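The structural change shows up in every hunk below: lookups that used to go through ctx->io_wq now go through the submitting task. In the shape of a hypothetical helper (req_to_wq() is not in the patch; it just names the new indirection):

static struct io_wq *req_to_wq(struct io_kiocb *req)
{
	struct io_uring_task *tctx = req->task->io_uring;

	/* may be NULL if the task never set one up, or is exiting */
	return tctx ? tctx->io_wq : NULL;
}

This is also why io_async_cancel_one() grows an explicit -ENOENT for a missing tctx->io_wq, why the wq is now created in io_uring_alloc_task_context(), and why teardown moves to __io_uring_files_cancel() once the task is exiting.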
Signed-off-by: Jens Axboe --- fs/io_uring.c | 84 ++++++++++++++++++++---------------------------- include/linux/io_uring.h | 1 + 2 files changed, 35 insertions(+), 50 deletions(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index d6c2ff6124fd..31402a19fca6 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -366,9 +366,6 @@ struct io_ring_ctx { struct io_rings *rings; - /* IO offload */ - struct io_wq *io_wq; - /* * For SQPOLL usage - we hold a reference to the parent task, so we * have access to the ->files @@ -1634,10 +1631,11 @@ static struct io_kiocb *__io_queue_async_work(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; struct io_kiocb *link = io_prep_linked_timeout(req); + struct io_uring_task *tctx = req->task->io_uring; trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req, &req->work, req->flags); - io_wq_enqueue(ctx->io_wq, &req->work); + io_wq_enqueue(tctx->io_wq, &req->work); return link; } @@ -5960,12 +5958,15 @@ static bool io_cancel_cb(struct io_wq_work *work, void *data) return req->user_data == (unsigned long) data; } -static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr) +static int io_async_cancel_one(struct io_uring_task *tctx, void *sqe_addr) { enum io_wq_cancel cancel_ret; int ret = 0; - cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr, false); + if (!tctx->io_wq) + return -ENOENT; + + cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, sqe_addr, false); switch (cancel_ret) { case IO_WQ_CANCEL_OK: ret = 0; @@ -5988,7 +5989,8 @@ static void io_async_find_and_cancel(struct io_ring_ctx *ctx, unsigned long flags; int ret; - ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr); + ret = io_async_cancel_one(req->task->io_uring, + (void *) (unsigned long) sqe_addr); if (ret != -ENOENT) { spin_lock_irqsave(&ctx->completion_lock, flags); goto done; @@ -7537,16 +7539,6 @@ static void io_sq_thread_stop(struct io_ring_ctx *ctx) } } -static void io_finish_async(struct io_ring_ctx *ctx) -{ - io_sq_thread_stop(ctx); - - if (ctx->io_wq) { - io_wq_destroy(ctx->io_wq); - ctx->io_wq = NULL; - } -} - #if defined(CONFIG_UNIX) /* * Ensure the UNIX gc is aware of our file set, so we are certain that @@ -8105,11 +8097,10 @@ static struct io_wq_work *io_free_work(struct io_wq_work *work) return req ? 
&req->work : NULL; } -static int io_init_wq_offload(struct io_ring_ctx *ctx) +static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx) { struct io_wq_data data; unsigned int concurrency; - int ret = 0; data.user = ctx->user; data.free_work = io_free_work; @@ -8118,16 +8109,11 @@ static int io_init_wq_offload(struct io_ring_ctx *ctx) /* Do QD, or 4 * CPUS, whatever is smallest */ concurrency = min(ctx->sq_entries, 4 * num_online_cpus()); - ctx->io_wq = io_wq_create(concurrency, &data); - if (IS_ERR(ctx->io_wq)) { - ret = PTR_ERR(ctx->io_wq); - ctx->io_wq = NULL; - } - - return ret; + return io_wq_create(concurrency, &data); } -static int io_uring_alloc_task_context(struct task_struct *task) +static int io_uring_alloc_task_context(struct task_struct *task, + struct io_ring_ctx *ctx) { struct io_uring_task *tctx; int ret; @@ -8142,6 +8128,14 @@ static int io_uring_alloc_task_context(struct task_struct *task) return ret; } + tctx->io_wq = io_init_wq_offload(ctx); + if (IS_ERR(tctx->io_wq)) { + ret = PTR_ERR(tctx->io_wq); + percpu_counter_destroy(&tctx->inflight); + kfree(tctx); + return ret; + } + xa_init(&tctx->xa); init_waitqueue_head(&tctx->wait); tctx->last = NULL; @@ -8214,7 +8208,7 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx, ctx->sq_thread_idle = HZ; if (sqd->thread) - goto done; + return 0; if (p->flags & IORING_SETUP_SQ_AFF) { int cpu = p->sq_thread_cpu; @@ -8236,7 +8230,7 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx, sqd->thread = NULL; goto err; } - ret = io_uring_alloc_task_context(sqd->thread); + ret = io_uring_alloc_task_context(sqd->thread, ctx); if (ret) goto err; } else if (p->flags & IORING_SETUP_SQ_AFF) { @@ -8245,14 +8239,9 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx, goto err; } -done: - ret = io_init_wq_offload(ctx); - if (ret) - goto err; - return 0; err: - io_finish_async(ctx); + io_sq_thread_stop(ctx); return ret; } @@ -8727,7 +8716,7 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx) mutex_lock(&ctx->uring_lock); mutex_unlock(&ctx->uring_lock); - io_finish_async(ctx); + io_sq_thread_stop(ctx); io_sqe_buffers_unregister(ctx); if (ctx->sqo_task) { @@ -8870,13 +8859,6 @@ static void io_ring_exit_work(struct work_struct *work) io_ring_ctx_free(ctx); } -static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data) -{ - struct io_kiocb *req = container_of(work, struct io_kiocb, work); - - return req->ctx == data; -} - static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) { mutex_lock(&ctx->uring_lock); @@ -8895,9 +8877,6 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) io_kill_timeouts(ctx, NULL, NULL); io_poll_remove_all(ctx, NULL, NULL); - if (ctx->io_wq) - io_wq_cancel_cb(ctx->io_wq, io_cancel_ctx_cb, ctx, true); - /* if we failed setting up the ctx, we might not have any rings */ io_iopoll_try_reap_events(ctx); @@ -8976,13 +8955,14 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx, struct files_struct *files) { struct io_task_cancel cancel = { .task = task, .files = files, }; + struct io_uring_task *tctx = current->io_uring; while (1) { enum io_wq_cancel cret; bool ret = false; - if (ctx->io_wq) { - cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, + if (tctx && tctx->io_wq) { + cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb, &cancel, true); ret |= (cret != IO_WQ_CANCEL_NOTFOUND); } @@ -9094,7 +9074,7 @@ static int io_uring_add_task_file(struct io_ring_ctx *ctx, struct file *file) int ret; if (unlikely(!tctx)) { - ret = 
io_uring_alloc_task_context(current); + ret = io_uring_alloc_task_context(current, ctx); if (unlikely(ret)) return ret; tctx = current->io_uring; @@ -9164,8 +9144,12 @@ void __io_uring_files_cancel(struct files_struct *files) io_uring_cancel_task_requests(file->private_data, files); atomic_dec(&tctx->in_idle); - if (files) + if (files) { io_uring_remove_task_files(tctx); + } else if (tctx->io_wq && current->flags & PF_EXITING) { + io_wq_destroy(tctx->io_wq); + tctx->io_wq = NULL; + } } static s64 tctx_inflight(struct io_uring_task *tctx) diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h index 2eb6d19de336..0e95398998b6 100644 --- a/include/linux/io_uring.h +++ b/include/linux/io_uring.h @@ -36,6 +36,7 @@ struct io_uring_task { struct xarray xa; struct wait_queue_head wait; struct file *last; + void *io_wq; struct percpu_counter inflight; struct io_identity __identity; struct io_identity *identity; -- cgit v1.2.3 From 958234d5ec9321445500dc5e69dfefb405b3d82c Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 17 Feb 2021 09:00:57 -0700 Subject: io-wq: don't pass 'wqe' needlessly around Just grab it from the worker itself, which we're already passing in. Signed-off-by: Jens Axboe --- fs/io-wq.c | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/io-wq.c b/fs/io-wq.c index 0c47febfed9b..ec7f1106b659 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -201,9 +201,10 @@ static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe, return &wqe->acct[IO_WQ_ACCT_BOUND]; } -static inline struct io_wqe_acct *io_wqe_get_acct(struct io_wqe *wqe, - struct io_worker *worker) +static inline struct io_wqe_acct *io_wqe_get_acct(struct io_worker *worker) { + struct io_wqe *wqe = worker->wqe; + if (worker->flags & IO_WORKER_F_BOUND) return &wqe->acct[IO_WQ_ACCT_BOUND]; @@ -213,7 +214,7 @@ static inline struct io_wqe_acct *io_wqe_get_acct(struct io_wqe *wqe, static void io_worker_exit(struct io_worker *worker) { struct io_wqe *wqe = worker->wqe; - struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker); + struct io_wqe_acct *acct = io_wqe_get_acct(worker); /* * If we're not at zero, someone else is holding a brief reference @@ -303,23 +304,24 @@ static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct) wake_up_process(wqe->wq->manager); } -static void io_wqe_inc_running(struct io_wqe *wqe, struct io_worker *worker) +static void io_wqe_inc_running(struct io_worker *worker) { - struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker); + struct io_wqe_acct *acct = io_wqe_get_acct(worker); atomic_inc(&acct->nr_running); } -static void io_wqe_dec_running(struct io_wqe *wqe, struct io_worker *worker) +static void io_wqe_dec_running(struct io_worker *worker) __must_hold(wqe->lock) { - struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker); + struct io_wqe_acct *acct = io_wqe_get_acct(worker); + struct io_wqe *wqe = worker->wqe; if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe)) io_wqe_wake_worker(wqe, acct); } -static void io_worker_start(struct io_wqe *wqe, struct io_worker *worker) +static void io_worker_start(struct io_worker *worker) { allow_kernel_signal(SIGINT); @@ -329,7 +331,7 @@ static void io_worker_start(struct io_wqe *wqe, struct io_worker *worker) worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING); worker->restore_nsproxy = current->nsproxy; - io_wqe_inc_running(wqe, worker); + io_wqe_inc_running(worker); } /* @@ -354,7 +356,7 @@ static void __io_worker_busy(struct io_wqe *wqe, 
struct io_worker *worker, worker_bound = (worker->flags & IO_WORKER_F_BOUND) != 0; work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0; if (worker_bound != work_bound) { - io_wqe_dec_running(wqe, worker); + io_wqe_dec_running(worker); if (work_bound) { worker->flags |= IO_WORKER_F_BOUND; wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers--; @@ -366,7 +368,7 @@ static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker, wqe->acct[IO_WQ_ACCT_BOUND].nr_workers--; atomic_inc(&wqe->wq->user->processes); } - io_wqe_inc_running(wqe, worker); + io_wqe_inc_running(worker); } } @@ -589,7 +591,7 @@ static int io_wqe_worker(void *data) struct io_wqe *wqe = worker->wqe; struct io_wq *wq = wqe->wq; - io_worker_start(wqe, worker); + io_worker_start(worker); while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) { set_current_state(TASK_INTERRUPTIBLE); @@ -634,14 +636,13 @@ loop: void io_wq_worker_running(struct task_struct *tsk) { struct io_worker *worker = kthread_data(tsk); - struct io_wqe *wqe = worker->wqe; if (!(worker->flags & IO_WORKER_F_UP)) return; if (worker->flags & IO_WORKER_F_RUNNING) return; worker->flags |= IO_WORKER_F_RUNNING; - io_wqe_inc_running(wqe, worker); + io_wqe_inc_running(worker); } /* @@ -662,7 +663,7 @@ void io_wq_worker_sleeping(struct task_struct *tsk) worker->flags &= ~IO_WORKER_F_RUNNING; raw_spin_lock_irq(&wqe->lock); - io_wqe_dec_running(wqe, worker); + io_wqe_dec_running(worker); raw_spin_unlock_irq(&wqe->lock); } -- cgit v1.2.3 From 3bfe6106693b6b4ba175ad1f929c4660b8f59ca8 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 16 Feb 2021 14:15:30 -0700 Subject: io-wq: fork worker threads from original task Instead of using regular kthread kernel threads, create kernel threads that are like a real thread that the task would create. This ensures that we get all the context that we need, without having to carry that state around. This greatly reduces the code complexity, and the risk of missing state for a given request type. With the move away from kthread, we can also dump everything related to assigned state to the new threads. Signed-off-by: Jens Axboe --- fs/io-wq.c | 301 +++++++++++++++++--------------------------------- fs/io-wq.h | 3 +- fs/io_uring.c | 7 ++ include/linux/sched.h | 3 + 4 files changed, 116 insertions(+), 198 deletions(-) (limited to 'fs') diff --git a/fs/io-wq.c b/fs/io-wq.c index ec7f1106b659..b53f569b5b4e 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -13,12 +13,9 @@ #include #include #include -#include #include -#include -#include -#include #include +#include #include "../kernel/sched/sched.h" #include "io-wq.h" @@ -57,13 +54,6 @@ struct io_worker { spinlock_t lock; struct rcu_head rcu; - struct mm_struct *mm; -#ifdef CONFIG_BLK_CGROUP - struct cgroup_subsys_state *blkcg_css; -#endif - const struct cred *cur_creds; - const struct cred *saved_creds; - struct nsproxy *restore_nsproxy; }; #if BITS_PER_LONG == 64 @@ -122,6 +112,8 @@ struct io_wq { struct completion done; struct hlist_node cpuhp_node; + + pid_t task_pid; }; static enum cpuhp_state io_wq_online; @@ -137,61 +129,6 @@ static void io_worker_release(struct io_worker *worker) wake_up_process(worker->task); } -/* - * Note: drops the wqe->lock if returning true! The caller must re-acquire - * the lock in that case. Some callers need to restart handling if this - * happens, so we can't just re-acquire the lock on behalf of the caller. 
- */ -static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker) -{ - bool dropped_lock = false; - - if (worker->saved_creds) { - revert_creds(worker->saved_creds); - worker->cur_creds = worker->saved_creds = NULL; - } - - if (current->files) { - __acquire(&wqe->lock); - raw_spin_unlock_irq(&wqe->lock); - dropped_lock = true; - - task_lock(current); - current->files = NULL; - current->nsproxy = worker->restore_nsproxy; - task_unlock(current); - } - - if (current->fs) - current->fs = NULL; - - /* - * If we have an active mm, we need to drop the wq lock before unusing - * it. If we do, return true and let the caller retry the idle loop. - */ - if (worker->mm) { - if (!dropped_lock) { - __acquire(&wqe->lock); - raw_spin_unlock_irq(&wqe->lock); - dropped_lock = true; - } - __set_current_state(TASK_RUNNING); - kthread_unuse_mm(worker->mm); - mmput(worker->mm); - worker->mm = NULL; - } - -#ifdef CONFIG_BLK_CGROUP - if (worker->blkcg_css) { - kthread_associate_blkcg(NULL); - worker->blkcg_css = NULL; - } -#endif - if (current->signal->rlim[RLIMIT_FSIZE].rlim_cur != RLIM_INFINITY) - current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; - return dropped_lock; -} - static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe, struct io_wq_work *work) { @@ -237,10 +174,6 @@ static void io_worker_exit(struct io_worker *worker) raw_spin_lock_irq(&wqe->lock); hlist_nulls_del_rcu(&worker->nulls_node); list_del_rcu(&worker->all_list); - if (__io_worker_unuse(wqe, worker)) { - __release(&wqe->lock); - raw_spin_lock_irq(&wqe->lock); - } acct->nr_workers--; raw_spin_unlock_irq(&wqe->lock); @@ -323,14 +256,7 @@ static void io_wqe_dec_running(struct io_worker *worker) static void io_worker_start(struct io_worker *worker) { - allow_kernel_signal(SIGINT); - - current->flags |= PF_IO_WORKER; - current->fs = NULL; - current->files = NULL; - worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING); - worker->restore_nsproxy = current->nsproxy; io_wqe_inc_running(worker); } @@ -387,7 +313,7 @@ static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker) hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list); } - return __io_worker_unuse(wqe, worker); + return false; } static inline unsigned int io_get_work_hash(struct io_wq_work *work) @@ -426,96 +352,23 @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe) return NULL; } -static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work) +static void io_flush_signals(void) { - if (worker->mm) { - kthread_unuse_mm(worker->mm); - mmput(worker->mm); - worker->mm = NULL; + if (unlikely(test_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL))) { + if (current->task_works) + task_work_run(); + clear_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL); } - - if (mmget_not_zero(work->identity->mm)) { - kthread_use_mm(work->identity->mm); - worker->mm = work->identity->mm; - return; - } - - /* failed grabbing mm, ensure work gets cancelled */ - work->flags |= IO_WQ_WORK_CANCEL; -} - -static inline void io_wq_switch_blkcg(struct io_worker *worker, - struct io_wq_work *work) -{ -#ifdef CONFIG_BLK_CGROUP - if (!(work->flags & IO_WQ_WORK_BLKCG)) - return; - if (work->identity->blkcg_css != worker->blkcg_css) { - kthread_associate_blkcg(work->identity->blkcg_css); - worker->blkcg_css = work->identity->blkcg_css; - } -#endif -} - -static void io_wq_switch_creds(struct io_worker *worker, - struct io_wq_work *work) -{ - const struct cred *old_creds = override_creds(work->identity->creds); - - worker->cur_creds = 
work->identity->creds; - if (worker->saved_creds) - put_cred(old_creds); /* creds set by previous switch */ - else - worker->saved_creds = old_creds; -} - -static void io_impersonate_work(struct io_worker *worker, - struct io_wq_work *work) -{ - if ((work->flags & IO_WQ_WORK_FILES) && - current->files != work->identity->files) { - task_lock(current); - current->files = work->identity->files; - current->nsproxy = work->identity->nsproxy; - task_unlock(current); - if (!work->identity->files) { - /* failed grabbing files, ensure work gets cancelled */ - work->flags |= IO_WQ_WORK_CANCEL; - } - } - if ((work->flags & IO_WQ_WORK_FS) && current->fs != work->identity->fs) - current->fs = work->identity->fs; - if ((work->flags & IO_WQ_WORK_MM) && work->identity->mm != worker->mm) - io_wq_switch_mm(worker, work); - if ((work->flags & IO_WQ_WORK_CREDS) && - worker->cur_creds != work->identity->creds) - io_wq_switch_creds(worker, work); - if (work->flags & IO_WQ_WORK_FSIZE) - current->signal->rlim[RLIMIT_FSIZE].rlim_cur = work->identity->fsize; - else if (current->signal->rlim[RLIMIT_FSIZE].rlim_cur != RLIM_INFINITY) - current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; - io_wq_switch_blkcg(worker, work); -#ifdef CONFIG_AUDIT - current->loginuid = work->identity->loginuid; - current->sessionid = work->identity->sessionid; -#endif } static void io_assign_current_work(struct io_worker *worker, struct io_wq_work *work) { if (work) { - /* flush pending signals before assigning new work */ - if (signal_pending(current)) - flush_signals(current); + io_flush_signals(); cond_resched(); } -#ifdef CONFIG_AUDIT - current->loginuid = KUIDT_INIT(AUDIT_UID_UNSET); - current->sessionid = AUDIT_SID_UNSET; -#endif - spin_lock_irq(&worker->lock); worker->cur_work = work; spin_unlock_irq(&worker->lock); @@ -556,7 +409,6 @@ get_next: unsigned int hash = io_get_work_hash(work); next_hashed = wq_next_work(work); - io_impersonate_work(worker, work); wq->do_work(work); io_assign_current_work(worker, NULL); @@ -608,10 +460,11 @@ loop: goto loop; } raw_spin_unlock_irq(&wqe->lock); - if (signal_pending(current)) - flush_signals(current); + io_flush_signals(); if (schedule_timeout(WORKER_IDLE_TIMEOUT)) continue; + if (fatal_signal_pending(current)) + break; /* timed out, exit unless we're the fixed worker */ if (test_bit(IO_WQ_BIT_EXIT, &wq->state) || !(worker->flags & IO_WORKER_F_FIXED)) @@ -635,8 +488,10 @@ loop: */ void io_wq_worker_running(struct task_struct *tsk) { - struct io_worker *worker = kthread_data(tsk); + struct io_worker *worker = tsk->pf_io_worker; + if (!worker) + return; if (!(worker->flags & IO_WORKER_F_UP)) return; if (worker->flags & IO_WORKER_F_RUNNING) @@ -652,9 +507,10 @@ void io_wq_worker_running(struct task_struct *tsk) */ void io_wq_worker_sleeping(struct task_struct *tsk) { - struct io_worker *worker = kthread_data(tsk); - struct io_wqe *wqe = worker->wqe; + struct io_worker *worker = tsk->pf_io_worker; + if (!worker) + return; if (!(worker->flags & IO_WORKER_F_UP)) return; if (!(worker->flags & IO_WORKER_F_RUNNING)) @@ -662,32 +518,27 @@ void io_wq_worker_sleeping(struct task_struct *tsk) worker->flags &= ~IO_WORKER_F_RUNNING; - raw_spin_lock_irq(&wqe->lock); + raw_spin_lock_irq(&worker->wqe->lock); io_wqe_dec_running(worker); - raw_spin_unlock_irq(&wqe->lock); + raw_spin_unlock_irq(&worker->wqe->lock); } -static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index) +static int task_thread(void *data, int index) { + struct io_worker *worker = data; + struct io_wqe *wqe = 
worker->wqe; struct io_wqe_acct *acct = &wqe->acct[index]; - struct io_worker *worker; + struct io_wq *wq = wqe->wq; + char buf[TASK_COMM_LEN]; - worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node); - if (!worker) - return false; + sprintf(buf, "iou-wrk-%d", wq->task_pid); + set_task_comm(current, buf); - refcount_set(&worker->ref, 1); - worker->nulls_node.pprev = NULL; - worker->wqe = wqe; - spin_lock_init(&worker->lock); + current->pf_io_worker = worker; + worker->task = current; - worker->task = kthread_create_on_node(io_wqe_worker, worker, wqe->node, - "io_wqe_worker-%d/%d", index, wqe->node); - if (IS_ERR(worker->task)) { - kfree(worker); - return false; - } - kthread_bind_mask(worker->task, cpumask_of_node(wqe->node)); + set_cpus_allowed_ptr(current, cpumask_of_node(wqe->node)); + current->flags |= PF_NO_SETAFFINITY; raw_spin_lock_irq(&wqe->lock); hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list); @@ -703,8 +554,58 @@ static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index) if (index == IO_WQ_ACCT_UNBOUND) atomic_inc(&wq->user->processes); + io_wqe_worker(data); + do_exit(0); +} + +static int task_thread_bound(void *data) +{ + return task_thread(data, IO_WQ_ACCT_BOUND); +} + +static int task_thread_unbound(void *data) +{ + return task_thread(data, IO_WQ_ACCT_UNBOUND); +} + +static pid_t fork_thread(int (*fn)(void *), void *arg) +{ + unsigned long flags = CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD| + CLONE_IO|SIGCHLD; + struct kernel_clone_args args = { + .flags = ((lower_32_bits(flags) | CLONE_VM | + CLONE_UNTRACED) & ~CSIGNAL), + .exit_signal = (lower_32_bits(flags) & CSIGNAL), + .stack = (unsigned long)fn, + .stack_size = (unsigned long)arg, + }; + + return kernel_clone(&args); +} + +static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index) +{ + struct io_worker *worker; + pid_t pid; + + worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node); + if (!worker) + return false; + + refcount_set(&worker->ref, 1); + worker->nulls_node.pprev = NULL; + worker->wqe = wqe; + spin_lock_init(&worker->lock); + + if (index == IO_WQ_ACCT_BOUND) + pid = fork_thread(task_thread_bound, worker); + else + pid = fork_thread(task_thread_unbound, worker); + if (pid < 0) { + kfree(worker); + return false; + } refcount_inc(&wq->refs); - wake_up_process(worker->task); return true; } @@ -756,12 +657,17 @@ static bool io_wq_worker_wake(struct io_worker *worker, void *data) static int io_wq_manager(void *data) { struct io_wq *wq = data; + char buf[TASK_COMM_LEN]; int node; - refcount_set(&wq->refs, 1); + sprintf(buf, "iou-mgr-%d", wq->task_pid); + set_task_comm(current, buf); + current->flags |= PF_IO_WORKER; + wq->manager = current; + complete(&wq->done); - while (!kthread_should_stop()) { + while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) { for_each_node(node) { struct io_wqe *wqe = wq->wqes[node]; bool fork_worker[2] = { false, false }; @@ -782,11 +688,13 @@ static int io_wq_manager(void *data) } set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(HZ); + if (fatal_signal_pending(current)) + set_bit(IO_WQ_BIT_EXIT, &wq->state); } if (refcount_dec_and_test(&wq->refs)) { complete(&wq->done); - return 0; + do_exit(0); } /* if ERROR is set and we get here, we have workers to wake */ if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) { @@ -795,7 +703,7 @@ static int io_wq_manager(void *data) io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL); rcu_read_unlock(); } - return 0; + do_exit(0); } static bool io_wq_can_queue(struct io_wqe 
*wqe, struct io_wqe_acct *acct, @@ -919,7 +827,7 @@ static bool io_wq_worker_cancel(struct io_worker *worker, void *data) spin_lock_irqsave(&worker->lock, flags); if (worker->cur_work && match->fn(worker->cur_work, match->data)) { - send_sig(SIGINT, worker->task, 1); + set_notify_signal(worker->task); match->nr_running++; } spin_unlock_irqrestore(&worker->lock, flags); @@ -1075,22 +983,21 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data) INIT_LIST_HEAD(&wqe->all_list); } + wq->task_pid = current->pid; init_completion(&wq->done); + refcount_set(&wq->refs, 1); - wq->manager = kthread_create(io_wq_manager, wq, "io_wq_manager"); - if (!IS_ERR(wq->manager)) { - wake_up_process(wq->manager); + current->flags |= PF_IO_WORKER; + ret = fork_thread(io_wq_manager, wq); + current->flags &= ~PF_IO_WORKER; + if (ret >= 0) { wait_for_completion(&wq->done); - if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) { - ret = -ENOMEM; - goto err; - } reinit_completion(&wq->done); return wq; } - ret = PTR_ERR(wq->manager); - complete(&wq->done); + if (refcount_dec_and_test(&wq->refs)) + complete(&wq->done); err: cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node); for_each_node(node) @@ -1110,7 +1017,7 @@ void io_wq_destroy(struct io_wq *wq) set_bit(IO_WQ_BIT_EXIT, &wq->state); if (wq->manager) - kthread_stop(wq->manager); + wake_up_process(wq->manager); rcu_read_lock(); for_each_node(node) diff --git a/fs/io-wq.h b/fs/io-wq.h index d2cf284b4641..83d56adabd16 100644 --- a/fs/io-wq.h +++ b/fs/io-wq.h @@ -137,6 +137,7 @@ static inline void io_wq_worker_running(struct task_struct *tsk) static inline bool io_wq_current_is_worker(void) { - return in_task() && (current->flags & PF_IO_WORKER); + return in_task() && (current->flags & PF_IO_WORKER) && + current->pf_io_worker; } #endif diff --git a/fs/io_uring.c b/fs/io_uring.c index 31402a19fca6..9d22ec9d9406 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1633,6 +1633,9 @@ static struct io_kiocb *__io_queue_async_work(struct io_kiocb *req) struct io_kiocb *link = io_prep_linked_timeout(req); struct io_uring_task *tctx = req->task->io_uring; + BUG_ON(!tctx); + BUG_ON(!tctx->io_wq); + trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req, &req->work, req->flags); io_wq_enqueue(tctx->io_wq, &req->work); @@ -9240,6 +9243,10 @@ static int io_uring_flush(struct file *file, void *data) struct io_uring_task *tctx = current->io_uring; struct io_ring_ctx *ctx = file->private_data; + /* Ignore helper thread files exit */ + if (current->flags & PF_IO_WORKER) + return 0; + if (fatal_signal_pending(current) || (current->flags & PF_EXITING)) { io_uring_cancel_task_requests(ctx, NULL); io_req_caches_free(ctx, current); diff --git a/include/linux/sched.h b/include/linux/sched.h index 26f499810dfa..ef00bb22164c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -895,6 +895,9 @@ struct task_struct { /* CLONE_CHILD_CLEARTID: */ int __user *clear_child_tid; + /* PF_IO_WORKER */ + void *pf_io_worker; + u64 utime; u64 stime; #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME -- cgit v1.2.3 From c6d77d92b7e53b24e8e74a58e6ef2056385cc780 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 15 Feb 2021 13:26:34 -0700 Subject: io-wq: worker idling always returns false Remove the bool return, and the checking for it in the caller. 
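The bool was the last remnant of a "did this helper drop your lock?" protocol: a callee that may release the caller's lock returns true so the caller knows to restart. A generic sketch of that retired convention (struct foo and foo_needs_unuse() are hypothetical):

static bool __foo_idle(struct foo *f)
	__must_hold(f->lock)
{
	if (foo_needs_unuse(f)) {
		spin_unlock_irq(&f->lock);
		/* slow teardown runs without the lock held */
		return true;	/* caller must re-take f->lock and retry */
	}
	return false;		/* lock still held */
}

With __io_worker_unuse() gone, no branch of __io_worker_idle() can return true anymore, so both the return value and the caller's retry branch are dead code.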
Signed-off-by: Jens Axboe --- fs/io-wq.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/io-wq.c b/fs/io-wq.c index b53f569b5b4e..41042119bf0f 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -305,15 +305,13 @@ static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker, * retry the loop in that case (we changed task state), we don't regrab * the lock if we return success. */ -static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker) +static void __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker) __must_hold(wqe->lock) { if (!(worker->flags & IO_WORKER_F_FREE)) { worker->flags |= IO_WORKER_F_FREE; hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list); } - - return false; } static inline unsigned int io_get_work_hash(struct io_wq_work *work) @@ -454,11 +452,7 @@ loop: io_worker_handle_work(worker); goto loop; } - /* drops the lock on success, retry */ - if (__io_worker_idle(wqe, worker)) { - __release(&wqe->lock); - goto loop; - } + __io_worker_idle(wqe, worker); raw_spin_unlock_irq(&wqe->lock); io_flush_signals(); if (schedule_timeout(WORKER_IDLE_TIMEOUT)) -- cgit v1.2.3 From 44526bedc2ff8fcd58552e3c5bae928524b6f13c Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 15 Feb 2021 13:32:18 -0700 Subject: io_uring: remove any grabbing of context The async workers are siblings of the task itself, so by definition we have all the state that we need. Remove any of the state grabbing that we have, and requests flagging what they need. Signed-off-by: Jens Axboe --- fs/io-wq.h | 7 -- fs/io_uring.c | 236 ++++------------------------------------------------------ 2 files changed, 14 insertions(+), 229 deletions(-) (limited to 'fs') diff --git a/fs/io-wq.h b/fs/io-wq.h index 83d56adabd16..bbe05dd54716 100644 --- a/fs/io-wq.h +++ b/fs/io-wq.h @@ -11,13 +11,6 @@ enum { IO_WQ_WORK_UNBOUND = 4, IO_WQ_WORK_CONCURRENT = 16, - IO_WQ_WORK_FILES = 32, - IO_WQ_WORK_FS = 64, - IO_WQ_WORK_MM = 128, - IO_WQ_WORK_CREDS = 256, - IO_WQ_WORK_BLKCG = 512, - IO_WQ_WORK_FSIZE = 1024, - IO_WQ_HASH_SHIFT = 24, /* upper 8 bits are used for hash key */ }; diff --git a/fs/io_uring.c b/fs/io_uring.c index 9d22ec9d9406..6e88295758b5 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -838,7 +838,6 @@ struct io_op_def { unsigned plug : 1; /* size of async data needed, if any */ unsigned short async_size; - unsigned work_flags; }; static const struct io_op_def io_op_defs[] = { @@ -851,7 +850,6 @@ static const struct io_op_def io_op_defs[] = { .needs_async_data = 1, .plug = 1, .async_size = sizeof(struct io_async_rw), - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG, }, [IORING_OP_WRITEV] = { .needs_file = 1, @@ -861,12 +859,9 @@ static const struct io_op_def io_op_defs[] = { .needs_async_data = 1, .plug = 1, .async_size = sizeof(struct io_async_rw), - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG | - IO_WQ_WORK_FSIZE, }, [IORING_OP_FSYNC] = { .needs_file = 1, - .work_flags = IO_WQ_WORK_BLKCG, }, [IORING_OP_READ_FIXED] = { .needs_file = 1, @@ -874,7 +869,6 @@ static const struct io_op_def io_op_defs[] = { .pollin = 1, .plug = 1, .async_size = sizeof(struct io_async_rw), - .work_flags = IO_WQ_WORK_BLKCG | IO_WQ_WORK_MM, }, [IORING_OP_WRITE_FIXED] = { .needs_file = 1, @@ -883,8 +877,6 @@ static const struct io_op_def io_op_defs[] = { .pollout = 1, .plug = 1, .async_size = sizeof(struct io_async_rw), - .work_flags = IO_WQ_WORK_BLKCG | IO_WQ_WORK_FSIZE | - IO_WQ_WORK_MM, }, [IORING_OP_POLL_ADD] = { .needs_file = 1, @@ -893,7 +885,6 @@ 
static const struct io_op_def io_op_defs[] = { [IORING_OP_POLL_REMOVE] = {}, [IORING_OP_SYNC_FILE_RANGE] = { .needs_file = 1, - .work_flags = IO_WQ_WORK_BLKCG, }, [IORING_OP_SENDMSG] = { .needs_file = 1, @@ -901,8 +892,6 @@ static const struct io_op_def io_op_defs[] = { .pollout = 1, .needs_async_data = 1, .async_size = sizeof(struct io_async_msghdr), - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG | - IO_WQ_WORK_FS, }, [IORING_OP_RECVMSG] = { .needs_file = 1, @@ -911,29 +900,23 @@ static const struct io_op_def io_op_defs[] = { .buffer_select = 1, .needs_async_data = 1, .async_size = sizeof(struct io_async_msghdr), - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG | - IO_WQ_WORK_FS, }, [IORING_OP_TIMEOUT] = { .needs_async_data = 1, .async_size = sizeof(struct io_timeout_data), - .work_flags = IO_WQ_WORK_MM, }, [IORING_OP_TIMEOUT_REMOVE] = { /* used by timeout updates' prep() */ - .work_flags = IO_WQ_WORK_MM, }, [IORING_OP_ACCEPT] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollin = 1, - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_FILES, }, [IORING_OP_ASYNC_CANCEL] = {}, [IORING_OP_LINK_TIMEOUT] = { .needs_async_data = 1, .async_size = sizeof(struct io_timeout_data), - .work_flags = IO_WQ_WORK_MM, }, [IORING_OP_CONNECT] = { .needs_file = 1, @@ -941,26 +924,14 @@ static const struct io_op_def io_op_defs[] = { .pollout = 1, .needs_async_data = 1, .async_size = sizeof(struct io_async_connect), - .work_flags = IO_WQ_WORK_MM, }, [IORING_OP_FALLOCATE] = { .needs_file = 1, - .work_flags = IO_WQ_WORK_BLKCG | IO_WQ_WORK_FSIZE, - }, - [IORING_OP_OPENAT] = { - .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG | - IO_WQ_WORK_FS | IO_WQ_WORK_MM, - }, - [IORING_OP_CLOSE] = { - .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG, - }, - [IORING_OP_FILES_UPDATE] = { - .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_MM, - }, - [IORING_OP_STATX] = { - .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_MM | - IO_WQ_WORK_FS | IO_WQ_WORK_BLKCG, }, + [IORING_OP_OPENAT] = {}, + [IORING_OP_CLOSE] = {}, + [IORING_OP_FILES_UPDATE] = {}, + [IORING_OP_STATX] = {}, [IORING_OP_READ] = { .needs_file = 1, .unbound_nonreg_file = 1, @@ -968,7 +939,6 @@ static const struct io_op_def io_op_defs[] = { .buffer_select = 1, .plug = 1, .async_size = sizeof(struct io_async_rw), - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG, }, [IORING_OP_WRITE] = { .needs_file = 1, @@ -976,42 +946,31 @@ static const struct io_op_def io_op_defs[] = { .pollout = 1, .plug = 1, .async_size = sizeof(struct io_async_rw), - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG | - IO_WQ_WORK_FSIZE, }, [IORING_OP_FADVISE] = { .needs_file = 1, - .work_flags = IO_WQ_WORK_BLKCG, - }, - [IORING_OP_MADVISE] = { - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG, }, + [IORING_OP_MADVISE] = {}, [IORING_OP_SEND] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollout = 1, - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG, }, [IORING_OP_RECV] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollin = 1, .buffer_select = 1, - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG, }, [IORING_OP_OPENAT2] = { - .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_FS | - IO_WQ_WORK_BLKCG | IO_WQ_WORK_MM, }, [IORING_OP_EPOLL_CTL] = { .unbound_nonreg_file = 1, - .work_flags = IO_WQ_WORK_FILES, }, [IORING_OP_SPLICE] = { .needs_file = 1, .hash_reg_file = 1, .unbound_nonreg_file = 1, - .work_flags = IO_WQ_WORK_BLKCG, }, [IORING_OP_PROVIDE_BUFFERS] = {}, [IORING_OP_REMOVE_BUFFERS] = {}, @@ -1023,14 +982,8 @@ static const struct io_op_def io_op_defs[] = { [IORING_OP_SHUTDOWN] = { .needs_file = 1, }, - 
[IORING_OP_RENAMEAT] = { - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_FILES | - IO_WQ_WORK_FS | IO_WQ_WORK_BLKCG, - }, - [IORING_OP_UNLINKAT] = { - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_FILES | - IO_WQ_WORK_FS | IO_WQ_WORK_BLKCG, - }, + [IORING_OP_RENAMEAT] = {}, + [IORING_OP_UNLINKAT] = {}, }; static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx, @@ -1141,8 +1094,7 @@ static bool io_match_task(struct io_kiocb *head, continue; if (req->file && req->file->f_op == &io_uring_fops) return true; - if ((req->work.flags & IO_WQ_WORK_FILES) && - req->work.identity->files == files) + if (req->work.identity->files == files) return true; } return false; @@ -1219,20 +1171,15 @@ static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx) static int __io_sq_thread_acquire_mm_files(struct io_ring_ctx *ctx, struct io_kiocb *req) { - const struct io_op_def *def = &io_op_defs[req->opcode]; int ret; - if (def->work_flags & IO_WQ_WORK_MM) { - ret = __io_sq_thread_acquire_mm(ctx); - if (unlikely(ret)) - return ret; - } + ret = __io_sq_thread_acquire_mm(ctx); + if (unlikely(ret)) + return ret; - if (def->needs_file || (def->work_flags & IO_WQ_WORK_FILES)) { - ret = __io_sq_thread_acquire_files(ctx); - if (unlikely(ret)) - return ret; - } + ret = __io_sq_thread_acquire_files(ctx); + if (unlikely(ret)) + return ret; return 0; } @@ -1416,28 +1363,6 @@ static void io_req_clean_work(struct io_kiocb *req) if (!(req->flags & REQ_F_WORK_INITIALIZED)) return; - if (req->work.flags & IO_WQ_WORK_MM) - mmdrop(req->work.identity->mm); -#ifdef CONFIG_BLK_CGROUP - if (req->work.flags & IO_WQ_WORK_BLKCG) - css_put(req->work.identity->blkcg_css); -#endif - if (req->work.flags & IO_WQ_WORK_CREDS) - put_cred(req->work.identity->creds); - if (req->work.flags & IO_WQ_WORK_FS) { - struct fs_struct *fs = req->work.identity->fs; - - spin_lock(&req->work.identity->fs->lock); - if (--fs->users) - fs = NULL; - spin_unlock(&req->work.identity->fs->lock); - if (fs) - free_fs_struct(fs); - } - if (req->work.flags & IO_WQ_WORK_FILES) { - put_files_struct(req->work.identity->files); - put_nsproxy(req->work.identity->nsproxy); - } if (req->flags & REQ_F_INFLIGHT) { struct io_ring_ctx *ctx = req->ctx; struct io_uring_task *tctx = req->task->io_uring; @@ -1452,56 +1377,9 @@ static void io_req_clean_work(struct io_kiocb *req) } req->flags &= ~REQ_F_WORK_INITIALIZED; - req->work.flags &= ~(IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG | IO_WQ_WORK_FS | - IO_WQ_WORK_CREDS | IO_WQ_WORK_FILES); io_put_identity(req->task->io_uring, req); } -/* - * Create a private copy of io_identity, since some fields don't match - * the current context. - */ -static bool io_identity_cow(struct io_kiocb *req) -{ - struct io_uring_task *tctx = current->io_uring; - const struct cred *creds = NULL; - struct io_identity *id; - - if (req->work.flags & IO_WQ_WORK_CREDS) - creds = req->work.identity->creds; - - id = kmemdup(req->work.identity, sizeof(*id), GFP_KERNEL); - if (unlikely(!id)) { - req->work.flags |= IO_WQ_WORK_CANCEL; - return false; - } - - /* - * We can safely just re-init the creds we copied Either the field - * matches the current one, or we haven't grabbed it yet. The only - * exception is ->creds, through registered personalities, so handle - * that one separately. 
- */ - io_init_identity(id); - if (creds) - id->creds = creds; - - /* add one for this request */ - refcount_inc(&id->count); - - /* drop tctx and req identity references, if needed */ - if (tctx->identity != &tctx->__identity && - refcount_dec_and_test(&tctx->identity->count)) - kfree(tctx->identity); - if (req->work.identity != &tctx->__identity && - refcount_dec_and_test(&req->work.identity->count)) - kfree(req->work.identity); - - req->work.identity = id; - tctx->identity = id; - return true; -} - static void io_req_track_inflight(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; @@ -1516,79 +1394,6 @@ static void io_req_track_inflight(struct io_kiocb *req) } } -static bool io_grab_identity(struct io_kiocb *req) -{ - const struct io_op_def *def = &io_op_defs[req->opcode]; - struct io_identity *id = req->work.identity; - - if (def->work_flags & IO_WQ_WORK_FSIZE) { - if (id->fsize != rlimit(RLIMIT_FSIZE)) - return false; - req->work.flags |= IO_WQ_WORK_FSIZE; - } -#ifdef CONFIG_BLK_CGROUP - if (!(req->work.flags & IO_WQ_WORK_BLKCG) && - (def->work_flags & IO_WQ_WORK_BLKCG)) { - rcu_read_lock(); - if (id->blkcg_css != blkcg_css()) { - rcu_read_unlock(); - return false; - } - /* - * This should be rare, either the cgroup is dying or the task - * is moving cgroups. Just punt to root for the handful of ios. - */ - if (css_tryget_online(id->blkcg_css)) - req->work.flags |= IO_WQ_WORK_BLKCG; - rcu_read_unlock(); - } -#endif - if (!(req->work.flags & IO_WQ_WORK_CREDS)) { - if (id->creds != current_cred()) - return false; - get_cred(id->creds); - req->work.flags |= IO_WQ_WORK_CREDS; - } -#ifdef CONFIG_AUDIT - if (!uid_eq(current->loginuid, id->loginuid) || - current->sessionid != id->sessionid) - return false; -#endif - if (!(req->work.flags & IO_WQ_WORK_FS) && - (def->work_flags & IO_WQ_WORK_FS)) { - if (current->fs != id->fs) - return false; - spin_lock(&id->fs->lock); - if (!id->fs->in_exec) { - id->fs->users++; - req->work.flags |= IO_WQ_WORK_FS; - } else { - req->work.flags |= IO_WQ_WORK_CANCEL; - } - spin_unlock(¤t->fs->lock); - } - if (!(req->work.flags & IO_WQ_WORK_FILES) && - (def->work_flags & IO_WQ_WORK_FILES) && - !(req->flags & REQ_F_NO_FILE_TABLE)) { - if (id->files != current->files || - id->nsproxy != current->nsproxy) - return false; - atomic_inc(&id->files->count); - get_nsproxy(id->nsproxy); - req->work.flags |= IO_WQ_WORK_FILES; - io_req_track_inflight(req); - } - if (!(req->work.flags & IO_WQ_WORK_MM) && - (def->work_flags & IO_WQ_WORK_MM)) { - if (id->mm != current->mm) - return false; - mmgrab(id->mm); - req->work.flags |= IO_WQ_WORK_MM; - } - - return true; -} - static void io_prep_async_work(struct io_kiocb *req) { const struct io_op_def *def = &io_op_defs[req->opcode]; @@ -1606,17 +1411,6 @@ static void io_prep_async_work(struct io_kiocb *req) if (def->unbound_nonreg_file) req->work.flags |= IO_WQ_WORK_UNBOUND; } - - /* if we fail grabbing identity, we must COW, regrab, and retry */ - if (io_grab_identity(req)) - return; - - if (!io_identity_cow(req)) - return; - - /* can't fail at this point */ - if (!io_grab_identity(req)) - WARN_ON(1); } static void io_prep_async_link(struct io_kiocb *req) @@ -6583,7 +6377,6 @@ static void __io_queue_sqe(struct io_kiocb *req) int ret; if ((req->flags & REQ_F_WORK_INITIALIZED) && - (req->work.flags & IO_WQ_WORK_CREDS) && req->work.identity->creds != current_cred()) old_creds = override_creds(req->work.identity->creds); @@ -6725,7 +6518,6 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, 
__io_req_init_async(req); get_cred(iod->creds); req->work.identity = iod; - req->work.flags |= IO_WQ_WORK_CREDS; } state = &ctx->submit_state; -- cgit v1.2.3 From 4379bf8bd70b5de6bba7d53015b0c36c57a634ee Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 15 Feb 2021 13:40:22 -0700 Subject: io_uring: remove io_identity We are no longer grabbing state, so no need to maintain an IO identity that we COW if there are changes. Signed-off-by: Jens Axboe --- fs/io-wq.c | 26 ++++++++++++ fs/io-wq.h | 2 +- fs/io_uring.c | 104 ++++++++++++----------------------------------- include/linux/io_uring.h | 19 --------- 4 files changed, 52 insertions(+), 99 deletions(-) (limited to 'fs') diff --git a/fs/io-wq.c b/fs/io-wq.c index 41042119bf0f..acc67ed3a52c 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -53,6 +53,9 @@ struct io_worker { struct io_wq_work *cur_work; spinlock_t lock; + const struct cred *cur_creds; + const struct cred *saved_creds; + struct rcu_head rcu; }; @@ -171,6 +174,11 @@ static void io_worker_exit(struct io_worker *worker) worker->flags = 0; preempt_enable(); + if (worker->saved_creds) { + revert_creds(worker->saved_creds); + worker->cur_creds = worker->saved_creds = NULL; + } + raw_spin_lock_irq(&wqe->lock); hlist_nulls_del_rcu(&worker->nulls_node); list_del_rcu(&worker->all_list); @@ -312,6 +320,10 @@ static void __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker) worker->flags |= IO_WORKER_F_FREE; hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list); } + if (worker->saved_creds) { + revert_creds(worker->saved_creds); + worker->cur_creds = worker->saved_creds = NULL; + } } static inline unsigned int io_get_work_hash(struct io_wq_work *work) @@ -359,6 +371,18 @@ static void io_flush_signals(void) } } +static void io_wq_switch_creds(struct io_worker *worker, + struct io_wq_work *work) +{ + const struct cred *old_creds = override_creds(work->creds); + + worker->cur_creds = work->creds; + if (worker->saved_creds) + put_cred(old_creds); /* creds set by previous switch */ + else + worker->saved_creds = old_creds; +} + static void io_assign_current_work(struct io_worker *worker, struct io_wq_work *work) { @@ -407,6 +431,8 @@ get_next: unsigned int hash = io_get_work_hash(work); next_hashed = wq_next_work(work); + if (work->creds && worker->cur_creds != work->creds) + io_wq_switch_creds(worker, work); wq->do_work(work); io_assign_current_work(worker, NULL); diff --git a/fs/io-wq.h b/fs/io-wq.h index bbe05dd54716..584f0bd5a83d 100644 --- a/fs/io-wq.h +++ b/fs/io-wq.h @@ -78,7 +78,7 @@ static inline void wq_list_del(struct io_wq_work_list *list, struct io_wq_work { struct io_wq_work_node list; - struct io_identity *identity; + const struct cred *creds; unsigned flags; }; diff --git a/fs/io_uring.c b/fs/io_uring.c index 6e88295758b5..6d851033e48d 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1094,7 +1094,7 @@ static bool io_match_task(struct io_kiocb *head, continue; if (req->file && req->file->f_op == &io_uring_fops) return true; - if (req->work.identity->files == files) + if (req->task->files == files) return true; } return false; @@ -1218,31 +1218,6 @@ static inline void req_set_fail_links(struct io_kiocb *req) req->flags |= REQ_F_FAIL_LINK; } -/* - * None of these are dereferenced, they are simply used to check if any of - * them have changed. If we're under current and check they are still the - * same, we're fine to grab references to them for actual out-of-line use. 
- */ -static void io_init_identity(struct io_identity *id) -{ - id->files = current->files; - id->mm = current->mm; -#ifdef CONFIG_BLK_CGROUP - rcu_read_lock(); - id->blkcg_css = blkcg_css(); - rcu_read_unlock(); -#endif - id->creds = current_cred(); - id->nsproxy = current->nsproxy; - id->fs = current->fs; - id->fsize = rlimit(RLIMIT_FSIZE); -#ifdef CONFIG_AUDIT - id->loginuid = current->loginuid; - id->sessionid = current->sessionid; -#endif - refcount_set(&id->count, 1); -} - static inline void __io_req_init_async(struct io_kiocb *req) { memset(&req->work, 0, sizeof(req->work)); @@ -1255,17 +1230,10 @@ static inline void __io_req_init_async(struct io_kiocb *req) */ static inline void io_req_init_async(struct io_kiocb *req) { - struct io_uring_task *tctx = current->io_uring; - if (req->flags & REQ_F_WORK_INITIALIZED) return; __io_req_init_async(req); - - /* Grab a ref if this isn't our static identity */ - req->work.identity = tctx->identity; - if (tctx->identity != &tctx->__identity) - refcount_inc(&req->work.identity->count); } static void io_ring_ctx_ref_free(struct percpu_ref *ref) @@ -1350,19 +1318,15 @@ static bool req_need_defer(struct io_kiocb *req, u32 seq) return false; } -static void io_put_identity(struct io_uring_task *tctx, struct io_kiocb *req) -{ - if (req->work.identity == &tctx->__identity) - return; - if (refcount_dec_and_test(&req->work.identity->count)) - kfree(req->work.identity); -} - static void io_req_clean_work(struct io_kiocb *req) { if (!(req->flags & REQ_F_WORK_INITIALIZED)) return; + if (req->work.creds) { + put_cred(req->work.creds); + req->work.creds = NULL; + } if (req->flags & REQ_F_INFLIGHT) { struct io_ring_ctx *ctx = req->ctx; struct io_uring_task *tctx = req->task->io_uring; @@ -1377,7 +1341,6 @@ static void io_req_clean_work(struct io_kiocb *req) } req->flags &= ~REQ_F_WORK_INITIALIZED; - io_put_identity(req->task->io_uring, req); } static void io_req_track_inflight(struct io_kiocb *req) @@ -1411,6 +1374,8 @@ static void io_prep_async_work(struct io_kiocb *req) if (def->unbound_nonreg_file) req->work.flags |= IO_WQ_WORK_UNBOUND; } + if (!req->work.creds) + req->work.creds = get_current_cred(); } static void io_prep_async_link(struct io_kiocb *req) @@ -6376,9 +6341,9 @@ static void __io_queue_sqe(struct io_kiocb *req) const struct cred *old_creds = NULL; int ret; - if ((req->flags & REQ_F_WORK_INITIALIZED) && - req->work.identity->creds != current_cred()) - old_creds = override_creds(req->work.identity->creds); + if ((req->flags & REQ_F_WORK_INITIALIZED) && req->work.creds && + req->work.creds != current_cred()) + old_creds = override_creds(req->work.creds); ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER); @@ -6508,16 +6473,11 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, id = READ_ONCE(sqe->personality); if (id) { - struct io_identity *iod; - - iod = idr_find(&ctx->personality_idr, id); - if (unlikely(!iod)) - return -EINVAL; - refcount_inc(&iod->count); - __io_req_init_async(req); - get_cred(iod->creds); - req->work.identity = iod; + req->work.creds = idr_find(&ctx->personality_idr, id); + if (unlikely(!req->work.creds)) + return -EINVAL; + get_cred(req->work.creds); } state = &ctx->submit_state; @@ -7936,8 +7896,6 @@ static int io_uring_alloc_task_context(struct task_struct *task, tctx->last = NULL; atomic_set(&tctx->in_idle, 0); tctx->sqpoll = false; - io_init_identity(&tctx->__identity); - tctx->identity = &tctx->__identity; task->io_uring = tctx; spin_lock_init(&tctx->task_lock); 
INIT_WQ_LIST(&tctx->task_list); @@ -7951,9 +7909,6 @@ void __io_uring_free(struct task_struct *tsk) struct io_uring_task *tctx = tsk->io_uring; WARN_ON_ONCE(!xa_empty(&tctx->xa)); - WARN_ON_ONCE(refcount_read(&tctx->identity->count) != 1); - if (tctx->identity != &tctx->__identity) - kfree(tctx->identity); percpu_counter_destroy(&tctx->inflight); kfree(tctx); tsk->io_uring = NULL; @@ -8593,13 +8548,11 @@ static int io_uring_fasync(int fd, struct file *file, int on) static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id) { - struct io_identity *iod; + const struct cred *creds; - iod = idr_remove(&ctx->personality_idr, id); - if (iod) { - put_cred(iod->creds); - if (refcount_dec_and_test(&iod->count)) - kfree(iod); + creds = idr_remove(&ctx->personality_idr, id); + if (creds) { + put_cred(creds); return 0; } @@ -9300,8 +9253,7 @@ out_fput: #ifdef CONFIG_PROC_FS static int io_uring_show_cred(int id, void *p, void *data) { - struct io_identity *iod = p; - const struct cred *cred = iod->creds; + const struct cred *cred = p; struct seq_file *m = data; struct user_namespace *uns = seq_user_ns(m); struct group_info *gi; @@ -9732,21 +9684,15 @@ out: static int io_register_personality(struct io_ring_ctx *ctx) { - struct io_identity *id; + const struct cred *creds; int ret; - id = kmalloc(sizeof(*id), GFP_KERNEL); - if (unlikely(!id)) - return -ENOMEM; - - io_init_identity(id); - id->creds = get_current_cred(); + creds = get_current_cred(); - ret = idr_alloc_cyclic(&ctx->personality_idr, id, 1, USHRT_MAX, GFP_KERNEL); - if (ret < 0) { - put_cred(id->creds); - kfree(id); - } + ret = idr_alloc_cyclic(&ctx->personality_idr, (void *) creds, 1, + USHRT_MAX, GFP_KERNEL); + if (ret < 0) + put_cred(creds); return ret; } diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h index 0e95398998b6..c48fcbdc2ea8 100644 --- a/include/linux/io_uring.h +++ b/include/linux/io_uring.h @@ -5,23 +5,6 @@ #include #include -struct io_identity { - struct files_struct *files; - struct mm_struct *mm; -#ifdef CONFIG_BLK_CGROUP - struct cgroup_subsys_state *blkcg_css; -#endif - const struct cred *creds; - struct nsproxy *nsproxy; - struct fs_struct *fs; - unsigned long fsize; -#ifdef CONFIG_AUDIT - kuid_t loginuid; - unsigned int sessionid; -#endif - refcount_t count; -}; - struct io_wq_work_node { struct io_wq_work_node *next; }; @@ -38,8 +21,6 @@ struct io_uring_task { struct file *last; void *io_wq; struct percpu_counter inflight; - struct io_identity __identity; - struct io_identity *identity; atomic_t in_idle; bool sqpoll; -- cgit v1.2.3 From bf1daa4bfc77a60e58bed392e659c9ddd0174340 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 16 Feb 2021 18:00:55 -0700 Subject: io-wq: only remove worker from free_list, if it was there If the worker isn't on the free_list, don't attempt to delete it. 
Signed-off-by: Jens Axboe --- fs/io-wq.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/io-wq.c b/fs/io-wq.c index acc67ed3a52c..3a506f1c7838 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -155,6 +155,7 @@ static void io_worker_exit(struct io_worker *worker) { struct io_wqe *wqe = worker->wqe; struct io_wqe_acct *acct = io_wqe_get_acct(worker); + unsigned flags; /* * If we're not at zero, someone else is holding a brief reference @@ -167,9 +168,11 @@ static void io_worker_exit(struct io_worker *worker) preempt_disable(); current->flags &= ~PF_IO_WORKER; - if (worker->flags & IO_WORKER_F_RUNNING) + flags = worker->flags; + worker->flags = 0; + if (flags & IO_WORKER_F_RUNNING) atomic_dec(&acct->nr_running); - if (!(worker->flags & IO_WORKER_F_BOUND)) + if (!(flags & IO_WORKER_F_BOUND)) atomic_dec(&wqe->wq->user->processes); worker->flags = 0; preempt_enable(); @@ -180,7 +183,8 @@ static void io_worker_exit(struct io_worker *worker) } raw_spin_lock_irq(&wqe->lock); - hlist_nulls_del_rcu(&worker->nulls_node); + if (flags & IO_WORKER_F_FREE) + hlist_nulls_del_rcu(&worker->nulls_node); list_del_rcu(&worker->all_list); acct->nr_workers--; raw_spin_unlock_irq(&wqe->lock); -- cgit v1.2.3 From 843bbfd49f02caab7186910480a86378bb84e975 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 17 Feb 2021 21:05:41 -0700 Subject: io-wq: make io_wq_fork_thread() available to other users We want to use this in io_uring proper as well, for the SQPOLL thread. Rename it from fork_thread() to io_wq_fork_thread(), and make it available through the io-wq.h header. Signed-off-by: Jens Axboe --- fs/io-wq.c | 8 ++++---- fs/io-wq.h | 2 ++ 2 files changed, 6 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/io-wq.c b/fs/io-wq.c index 3a506f1c7838..b0d09f60200b 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -592,7 +592,7 @@ static int task_thread_unbound(void *data) return task_thread(data, IO_WQ_ACCT_UNBOUND); } -static pid_t fork_thread(int (*fn)(void *), void *arg) +pid_t io_wq_fork_thread(int (*fn)(void *), void *arg) { unsigned long flags = CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD| CLONE_IO|SIGCHLD; @@ -622,9 +622,9 @@ static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index) spin_lock_init(&worker->lock); if (index == IO_WQ_ACCT_BOUND) - pid = fork_thread(task_thread_bound, worker); + pid = io_wq_fork_thread(task_thread_bound, worker); else - pid = fork_thread(task_thread_unbound, worker); + pid = io_wq_fork_thread(task_thread_unbound, worker); if (pid < 0) { kfree(worker); return false; @@ -1012,7 +1012,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data) refcount_set(&wq->refs, 1); current->flags |= PF_IO_WORKER; - ret = fork_thread(io_wq_manager, wq); + ret = io_wq_fork_thread(io_wq_manager, wq); current->flags &= ~PF_IO_WORKER; if (ret >= 0) { wait_for_completion(&wq->done); diff --git a/fs/io-wq.h b/fs/io-wq.h index 584f0bd5a83d..23f6cbd620f8 100644 --- a/fs/io-wq.h +++ b/fs/io-wq.h @@ -106,6 +106,8 @@ void io_wq_destroy(struct io_wq *wq); void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work); void io_wq_hash_work(struct io_wq_work *work, void *val); +pid_t io_wq_fork_thread(int (*fn)(void *), void *arg); + static inline bool io_wq_is_hashed(struct io_wq_work *work) { return work->flags & IO_WQ_WORK_HASHED; -- cgit v1.2.3 From 37d1e2e3642e2380750d7f35279180826f29660e Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 17 Feb 2021 21:03:43 -0700 Subject: io_uring: move SQPOLL thread io-wq forked 
worker Don't use a kthread for SQPOLL, use a forked worker just like the io-wq workers. With that done, we can drop the various context grabbing we do for SQPOLL, it already has everything it needs. Signed-off-by: Jens Axboe --- fs/io_uring.c | 473 ++++++++++++++++++++++------------------------------------ 1 file changed, 181 insertions(+), 292 deletions(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index 6d851033e48d..378cf79e66c9 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -57,7 +57,6 @@ #include #include #include -#include #include #include #include @@ -254,6 +253,11 @@ struct io_restriction { bool registered; }; +enum { + IO_SQ_THREAD_SHOULD_STOP = 0, + IO_SQ_THREAD_SHOULD_PARK, +}; + struct io_sq_data { refcount_t refs; struct mutex lock; @@ -267,6 +271,13 @@ struct io_sq_data { struct wait_queue_head wait; unsigned sq_thread_idle; + int sq_cpu; + pid_t task_pid; + + unsigned long state; + struct completion startup; + struct completion completion; + struct completion exited; }; #define IO_IOPOLL_BATCH 8 @@ -367,18 +378,13 @@ struct io_ring_ctx { struct io_rings *rings; /* - * For SQPOLL usage - we hold a reference to the parent task, so we - * have access to the ->files + * For SQPOLL usage */ struct task_struct *sqo_task; /* Only used for accounting purposes */ struct mm_struct *mm_account; -#ifdef CONFIG_BLK_CGROUP - struct cgroup_subsys_state *sqo_blkcg_css; -#endif - struct io_sq_data *sq_data; /* if using sq thread polling */ struct wait_queue_head sqo_sq_wait; @@ -398,13 +404,6 @@ struct io_ring_ctx { struct user_struct *user; - const struct cred *creds; - -#ifdef CONFIG_AUDIT - kuid_t loginuid; - unsigned int sessionid; -#endif - struct completion ref_comp; struct completion sq_thread_comp; @@ -989,6 +988,7 @@ static const struct io_op_def io_op_defs[] = { static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx, struct task_struct *task, struct files_struct *files); +static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx); static void destroy_fixed_rsrc_ref_node(struct fixed_rsrc_ref_node *ref_node); static struct fixed_rsrc_ref_node *alloc_fixed_rsrc_ref_node( struct io_ring_ctx *ctx); @@ -1100,118 +1100,6 @@ static bool io_match_task(struct io_kiocb *head, return false; } -static void io_sq_thread_drop_mm_files(void) -{ - struct files_struct *files = current->files; - struct mm_struct *mm = current->mm; - - if (mm) { - kthread_unuse_mm(mm); - mmput(mm); - current->mm = NULL; - } - if (files) { - struct nsproxy *nsproxy = current->nsproxy; - - task_lock(current); - current->files = NULL; - current->nsproxy = NULL; - task_unlock(current); - put_files_struct(files); - put_nsproxy(nsproxy); - } -} - -static int __io_sq_thread_acquire_files(struct io_ring_ctx *ctx) -{ - if (!current->files) { - struct files_struct *files; - struct nsproxy *nsproxy; - - task_lock(ctx->sqo_task); - files = ctx->sqo_task->files; - if (!files) { - task_unlock(ctx->sqo_task); - return -EOWNERDEAD; - } - atomic_inc(&files->count); - get_nsproxy(ctx->sqo_task->nsproxy); - nsproxy = ctx->sqo_task->nsproxy; - task_unlock(ctx->sqo_task); - - task_lock(current); - current->files = files; - current->nsproxy = nsproxy; - task_unlock(current); - } - return 0; -} - -static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx) -{ - struct mm_struct *mm; - - if (current->mm) - return 0; - - task_lock(ctx->sqo_task); - mm = ctx->sqo_task->mm; - if (unlikely(!mm || !mmget_not_zero(mm))) - mm = NULL; - task_unlock(ctx->sqo_task); - - if (mm) { - kthread_use_mm(mm); - return 0; - 
} - - return -EFAULT; -} - -static int __io_sq_thread_acquire_mm_files(struct io_ring_ctx *ctx, - struct io_kiocb *req) -{ - int ret; - - ret = __io_sq_thread_acquire_mm(ctx); - if (unlikely(ret)) - return ret; - - ret = __io_sq_thread_acquire_files(ctx); - if (unlikely(ret)) - return ret; - - return 0; -} - -static inline int io_sq_thread_acquire_mm_files(struct io_ring_ctx *ctx, - struct io_kiocb *req) -{ - if (!(ctx->flags & IORING_SETUP_SQPOLL)) - return 0; - return __io_sq_thread_acquire_mm_files(ctx, req); -} - -static void io_sq_thread_associate_blkcg(struct io_ring_ctx *ctx, - struct cgroup_subsys_state **cur_css) - -{ -#ifdef CONFIG_BLK_CGROUP - /* puts the old one when swapping */ - if (*cur_css != ctx->sqo_blkcg_css) { - kthread_associate_blkcg(ctx->sqo_blkcg_css); - *cur_css = ctx->sqo_blkcg_css; - } -#endif -} - -static void io_sq_thread_unassociate_blkcg(void) -{ -#ifdef CONFIG_BLK_CGROUP - kthread_associate_blkcg(NULL); -#endif -} - static inline void req_set_fail_links(struct io_kiocb *req) { if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK) @@ -2132,15 +2020,11 @@ static void __io_req_task_submit(struct io_kiocb *req) /* ctx stays valid until unlock, even if we drop all ours ctx->refs */ mutex_lock(&ctx->uring_lock); - if (!ctx->sqo_dead && !(current->flags & PF_EXITING) && - !io_sq_thread_acquire_mm_files(ctx, req)) + if (!ctx->sqo_dead && !(current->flags & PF_EXITING)) __io_queue_sqe(req); else __io_req_task_cancel(req, -EFAULT); mutex_unlock(&ctx->uring_lock); - - if (ctx->flags & IORING_SETUP_SQPOLL) - io_sq_thread_drop_mm_files(); } static void io_req_task_submit(struct callback_head *cb) @@ -2604,7 +2488,6 @@ static bool io_rw_reissue(struct io_kiocb *req) { #ifdef CONFIG_BLOCK umode_t mode = file_inode(req->file)->i_mode; - int ret; if (!S_ISBLK(mode) && !S_ISREG(mode)) return false; @@ -2613,9 +2496,7 @@ static bool io_rw_reissue(struct io_kiocb *req) lockdep_assert_held(&req->ctx->uring_lock); - ret = io_sq_thread_acquire_mm_files(req->ctx, req); - - if (!ret && io_resubmit_prep(req)) { + if (io_resubmit_prep(req)) { refcount_inc(&req->refs); io_queue_async_work(req); return true; @@ -6461,9 +6342,6 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, if (unlikely(req->opcode >= IORING_OP_LAST)) return -EINVAL; - if (unlikely(io_sq_thread_acquire_mm_files(ctx, req))) - return -EFAULT; - if (unlikely(!io_check_restriction(ctx, req, sqe_flags))) return -EACCES; @@ -6779,71 +6657,97 @@ static void io_sqd_init_new(struct io_sq_data *sqd) io_sqd_update_thread_idle(sqd); } +static bool io_sq_thread_should_stop(struct io_sq_data *sqd) +{ + return test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state); +} + +static bool io_sq_thread_should_park(struct io_sq_data *sqd) +{ + return test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); +} + +static void io_sq_thread_parkme(struct io_sq_data *sqd) +{ + for (;;) { + /* + * TASK_PARKED is a special state; we must serialize against + * possible pending wakeups to avoid store-store collisions on + * task->state. + * + * Such a collision might possibly result in the task state + * changing from TASK_PARKED and us failing the + * wait_task_inactive() in kthread_park(). + */ + set_special_state(TASK_PARKED); + if (!test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state)) + break; + + /* + * Thread is going to call schedule(), do not preempt it, + * or the caller of kthread_park() may spend more time in + * wait_task_inactive().
+ */ + preempt_disable(); + complete(&sqd->completion); + schedule_preempt_disabled(); + preempt_enable(); + } + __set_current_state(TASK_RUNNING); +} + static int io_sq_thread(void *data) { - struct cgroup_subsys_state *cur_css = NULL; - struct files_struct *old_files = current->files; - struct nsproxy *old_nsproxy = current->nsproxy; - const struct cred *old_cred = NULL; struct io_sq_data *sqd = data; struct io_ring_ctx *ctx; unsigned long timeout = 0; + char buf[TASK_COMM_LEN]; DEFINE_WAIT(wait); - task_lock(current); - current->files = NULL; - current->nsproxy = NULL; - task_unlock(current); + sprintf(buf, "iou-sqp-%d", sqd->task_pid); + set_task_comm(current, buf); + sqd->thread = current; + current->pf_io_worker = NULL; + + if (sqd->sq_cpu != -1) + set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu)); + else + set_cpus_allowed_ptr(current, cpu_online_mask); + current->flags |= PF_NO_SETAFFINITY; + + complete(&sqd->completion); - while (!kthread_should_stop()) { + wait_for_completion(&sqd->startup); + + while (!io_sq_thread_should_stop(sqd)) { int ret; bool cap_entries, sqt_spin, needs_sched; /* * Any changes to the sqd lists are synchronized through the - * kthread parking. This synchronizes the thread vs users, + * thread parking. This synchronizes the thread vs users, * the users are synchronized on the sqd->ctx_lock. */ - if (kthread_should_park()) { - kthread_parkme(); - /* - * When sq thread is unparked, in case the previous park operation - * comes from io_put_sq_data(), which means that sq thread is going - * to be stopped, so here needs to have a check. - */ - if (kthread_should_stop()) - break; + if (io_sq_thread_should_park(sqd)) { + io_sq_thread_parkme(sqd); + continue; } - if (unlikely(!list_empty(&sqd->ctx_new_list))) { io_sqd_init_new(sqd); timeout = jiffies + sqd->sq_thread_idle; } - + if (fatal_signal_pending(current)) + break; sqt_spin = false; cap_entries = !list_is_singular(&sqd->ctx_list); list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) { - if (current->cred != ctx->creds) { - if (old_cred) - revert_creds(old_cred); - old_cred = override_creds(ctx->creds); - } - io_sq_thread_associate_blkcg(ctx, &cur_css); -#ifdef CONFIG_AUDIT - current->loginuid = ctx->loginuid; - current->sessionid = ctx->sessionid; -#endif - ret = __io_sq_thread(ctx, cap_entries); if (!sqt_spin && (ret > 0 || !list_empty(&ctx->iopoll_list))) sqt_spin = true; - - io_sq_thread_drop_mm_files(); } if (sqt_spin || !time_after(jiffies, timeout)) { io_run_task_work(); - io_sq_thread_drop_mm_files(); cond_resched(); if (sqt_spin) timeout = jiffies + sqd->sq_thread_idle; @@ -6864,7 +6768,7 @@ static int io_sq_thread(void *data) } } - if (needs_sched && !kthread_should_park()) { + if (needs_sched && !io_sq_thread_should_park(sqd)) { list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) io_ring_set_wakeup_flag(ctx); @@ -6877,22 +6781,21 @@ static int io_sq_thread(void *data) timeout = jiffies + sqd->sq_thread_idle; } - io_run_task_work(); - io_sq_thread_drop_mm_files(); + list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) + io_uring_cancel_sqpoll(ctx); - if (cur_css) - io_sq_thread_unassociate_blkcg(); - if (old_cred) - revert_creds(old_cred); - - task_lock(current); - current->files = old_files; - current->nsproxy = old_nsproxy; - task_unlock(current); + io_run_task_work(); - kthread_parkme(); + /* + * Clear thread under lock so that concurrent parks work correctly + */ + complete_all(&sqd->completion); + mutex_lock(&sqd->lock); + sqd->thread = NULL; + mutex_unlock(&sqd->lock); - return 0; + 
complete(&sqd->exited); + do_exit(0); } struct io_wait_queue { @@ -7182,20 +7085,73 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx) return 0; } +static void io_sq_thread_unpark(struct io_sq_data *sqd) + __releases(&sqd->lock) +{ + if (!sqd->thread) + return; + if (sqd->thread == current) + return; + clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); + wake_up_state(sqd->thread, TASK_PARKED); + mutex_unlock(&sqd->lock); +} + +static bool io_sq_thread_park(struct io_sq_data *sqd) + __acquires(&sqd->lock) +{ + if (sqd->thread == current) + return true; + mutex_lock(&sqd->lock); + if (!sqd->thread) { + mutex_unlock(&sqd->lock); + return false; + } + set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); + wake_up_process(sqd->thread); + wait_for_completion(&sqd->completion); + return true; +} + +static void io_sq_thread_stop(struct io_sq_data *sqd) +{ + if (!sqd->thread) + return; + + set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state); + WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state)); + wake_up_process(sqd->thread); + wait_for_completion(&sqd->exited); +} + static void io_put_sq_data(struct io_sq_data *sqd) { if (refcount_dec_and_test(&sqd->refs)) { - /* - * The park is a bit of a work-around, without it we get - * warning spews on shutdown with SQPOLL set and affinity - * set to a single CPU. - */ + io_sq_thread_stop(sqd); + kfree(sqd); + } +} + +static void io_sq_thread_finish(struct io_ring_ctx *ctx) +{ + struct io_sq_data *sqd = ctx->sq_data; + + if (sqd) { if (sqd->thread) { - kthread_park(sqd->thread); - kthread_stop(sqd->thread); + wait_for_completion(&ctx->sq_thread_comp); + io_sq_thread_park(sqd); } - kfree(sqd); + mutex_lock(&sqd->ctx_lock); + list_del(&ctx->sqd_list); + io_sqd_update_thread_idle(sqd); + mutex_unlock(&sqd->ctx_lock); + + if (sqd->thread) + io_sq_thread_unpark(sqd); + + io_put_sq_data(sqd); + ctx->sq_data = NULL; } } @@ -7242,58 +7198,12 @@ static struct io_sq_data *io_get_sq_data(struct io_uring_params *p) mutex_init(&sqd->ctx_lock); mutex_init(&sqd->lock); init_waitqueue_head(&sqd->wait); + init_completion(&sqd->startup); + init_completion(&sqd->completion); + init_completion(&sqd->exited); return sqd; } -static void io_sq_thread_unpark(struct io_sq_data *sqd) - __releases(&sqd->lock) -{ - if (!sqd->thread) - return; - kthread_unpark(sqd->thread); - mutex_unlock(&sqd->lock); -} - -static void io_sq_thread_park(struct io_sq_data *sqd) - __acquires(&sqd->lock) -{ - if (!sqd->thread) - return; - mutex_lock(&sqd->lock); - kthread_park(sqd->thread); -} - -static void io_sq_thread_stop(struct io_ring_ctx *ctx) -{ - struct io_sq_data *sqd = ctx->sq_data; - - if (sqd) { - if (sqd->thread) { - /* - * We may arrive here from the error branch in - * io_sq_offload_create() where the kthread is created - * without being waked up, thus wake it up now to make - * sure the wait will complete. 
- */ - wake_up_process(sqd->thread); - wait_for_completion(&ctx->sq_thread_comp); - - io_sq_thread_park(sqd); - } - - mutex_lock(&sqd->ctx_lock); - list_del(&ctx->sqd_list); - io_sqd_update_thread_idle(sqd); - mutex_unlock(&sqd->ctx_lock); - - if (sqd->thread) - io_sq_thread_unpark(sqd); - - io_put_sq_data(sqd); - ctx->sq_data = NULL; - } -} - #if defined(CONFIG_UNIX) /* * Ensure the UNIX gc is aware of our file set, so we are certain that @@ -7969,17 +7879,20 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx, if (!cpu_online(cpu)) goto err; - sqd->thread = kthread_create_on_cpu(io_sq_thread, sqd, - cpu, "io_uring-sq"); + sqd->sq_cpu = cpu; } else { - sqd->thread = kthread_create(io_sq_thread, sqd, - "io_uring-sq"); + sqd->sq_cpu = -1; } - if (IS_ERR(sqd->thread)) { - ret = PTR_ERR(sqd->thread); + + sqd->task_pid = current->pid; + current->flags |= PF_IO_WORKER; + ret = io_wq_fork_thread(io_sq_thread, sqd); + current->flags &= ~PF_IO_WORKER; + if (ret < 0) { sqd->thread = NULL; goto err; } + wait_for_completion(&sqd->completion); ret = io_uring_alloc_task_context(sqd->thread, ctx); if (ret) goto err; @@ -7991,7 +7904,7 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx, return 0; err: - io_sq_thread_stop(ctx); + io_sq_thread_finish(ctx); return ret; } @@ -8000,7 +7913,7 @@ static void io_sq_offload_start(struct io_ring_ctx *ctx) struct io_sq_data *sqd = ctx->sq_data; if ((ctx->flags & IORING_SETUP_SQPOLL) && sqd->thread) - wake_up_process(sqd->thread); + complete(&sqd->startup); } static inline void __io_unaccount_mem(struct user_struct *user, @@ -8466,21 +8379,14 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx) mutex_lock(&ctx->uring_lock); mutex_unlock(&ctx->uring_lock); - io_sq_thread_stop(ctx); + io_sq_thread_finish(ctx); io_sqe_buffers_unregister(ctx); - if (ctx->sqo_task) { - put_task_struct(ctx->sqo_task); - ctx->sqo_task = NULL; + if (ctx->mm_account) { mmdrop(ctx->mm_account); ctx->mm_account = NULL; } -#ifdef CONFIG_BLK_CGROUP - if (ctx->sqo_blkcg_css) - css_put(ctx->sqo_blkcg_css); -#endif - mutex_lock(&ctx->uring_lock); io_sqe_files_unregister(ctx); mutex_unlock(&ctx->uring_lock); @@ -8500,7 +8406,6 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx) percpu_ref_exit(&ctx->refs); free_uid(ctx->user); - put_cred(ctx->creds); io_req_caches_free(ctx, NULL); kfree(ctx->cancel_hash); kfree(ctx); @@ -8793,12 +8698,15 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx, struct files_struct *files) { struct task_struct *task = current; + bool did_park = false; if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) { io_disable_sqo_submit(ctx); - task = ctx->sq_data->thread; - atomic_inc(&task->io_uring->in_idle); - io_sq_thread_park(ctx->sq_data); + did_park = io_sq_thread_park(ctx->sq_data); + if (did_park) { + task = ctx->sq_data->thread; + atomic_inc(&task->io_uring->in_idle); + } } io_cancel_defer_files(ctx, task, files); @@ -8807,7 +8715,7 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx, if (!files) io_uring_try_cancel_requests(ctx, task, NULL); - if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) { + if (did_park) { atomic_dec(&task->io_uring->in_idle); io_sq_thread_unpark(ctx->sq_data); } @@ -8907,14 +8815,17 @@ static s64 tctx_inflight(struct io_uring_task *tctx) static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx) { + struct io_sq_data *sqd = ctx->sq_data; struct io_uring_task *tctx; s64 inflight; DEFINE_WAIT(wait); - if (!ctx->sq_data) + if (!sqd) return; - tctx = ctx->sq_data->thread->io_uring; 
io_disable_sqo_submit(ctx); + if (!io_sq_thread_park(sqd)) + return; + tctx = ctx->sq_data->thread->io_uring; atomic_inc(&tctx->in_idle); do { @@ -8935,6 +8846,7 @@ static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx) finish_wait(&tctx->wait, &wait); } while (1); atomic_dec(&tctx->in_idle); + io_sq_thread_unpark(sqd); } /* @@ -9511,12 +9423,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p, ctx->compat = in_compat_syscall(); ctx->limit_mem = !capable(CAP_IPC_LOCK); ctx->user = user; - ctx->creds = get_current_cred(); -#ifdef CONFIG_AUDIT - ctx->loginuid = current->loginuid; - ctx->sessionid = current->sessionid; -#endif - ctx->sqo_task = get_task_struct(current); + ctx->sqo_task = current; /* * This is just grabbed for accounting purposes. When a process exits, @@ -9527,24 +9434,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p, mmgrab(current->mm); ctx->mm_account = current->mm; -#ifdef CONFIG_BLK_CGROUP - /* - * The sq thread will belong to the original cgroup it was inited in. - * If the cgroup goes offline (e.g. disabling the io controller), then - * issued bios will be associated with the closest cgroup later in the - * block layer. - */ - rcu_read_lock(); - ctx->sqo_blkcg_css = blkcg_css(); - ret = css_tryget_online(ctx->sqo_blkcg_css); - rcu_read_unlock(); - if (!ret) { - /* don't init against a dying cgroup, have the user try again */ - ctx->sqo_blkcg_css = NULL; - ret = -ENODEV; - goto err; - } -#endif ret = io_allocate_scq_urings(ctx, p); if (ret) goto err; -- cgit v1.2.3 From 9e8d9e829c2142cf1d7756e9ed2e0b4c7569d84c Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 15 Feb 2021 13:42:18 -0700 Subject: Revert "proc: don't allow async path resolution of /proc/thread-self components" This reverts commit 0d4370cfe36b7f1719123b621a4ec4d9c7a25f89. No longer needed, as the io-wq worker threads have the right identity. Signed-off-by: Jens Axboe --- fs/proc/self.c | 2 +- fs/proc/thread_self.c | 7 ------- 2 files changed, 1 insertion(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/proc/self.c b/fs/proc/self.c index a4012154e109..cc71ce3466dc 100644 --- a/fs/proc/self.c +++ b/fs/proc/self.c @@ -20,7 +20,7 @@ static const char *proc_self_get_link(struct dentry *dentry, * Not currently supported. Once we can inherit all of struct pid, * we can allow this. */ - if (current->flags & PF_IO_WORKER) + if (current->flags & PF_KTHREAD) return ERR_PTR(-EOPNOTSUPP); if (!tgid) diff --git a/fs/proc/thread_self.c b/fs/proc/thread_self.c index d56681d86d28..a553273fbd41 100644 --- a/fs/proc/thread_self.c +++ b/fs/proc/thread_self.c @@ -17,13 +17,6 @@ static const char *proc_thread_self_get_link(struct dentry *dentry, pid_t pid = task_pid_nr_ns(current, ns); char *name; - /* - * Not currently supported. Once we can inherit all of struct pid, - * we can allow this. - */ - if (current->flags & PF_IO_WORKER) - return ERR_PTR(-EOPNOTSUPP); - if (!pid) return ERR_PTR(-ENOENT); name = kmalloc(10 + 6 + 10 + 1, dentry ? GFP_KERNEL : GFP_ATOMIC); -- cgit v1.2.3 From 2587890b5e2892dfecaa5e5126bdac8076a4e6f7 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 15 Feb 2021 13:42:42 -0700 Subject: Revert "proc: don't allow async path resolution of /proc/self components" This reverts commit 8d4c3e76e3be11a64df95ddee52e99092d42fc19. No longer needed, as the io-wq worker threads have the right identity. 
Signed-off-by: Jens Axboe --- fs/proc/self.c | 7 ------- 1 file changed, 7 deletions(-) (limited to 'fs') diff --git a/fs/proc/self.c b/fs/proc/self.c index cc71ce3466dc..72cd69bcaf4a 100644 --- a/fs/proc/self.c +++ b/fs/proc/self.c @@ -16,13 +16,6 @@ static const char *proc_self_get_link(struct dentry *dentry, pid_t tgid = task_tgid_nr_ns(current, ns); char *name; - /* - * Not currently supported. Once we can inherit all of struct pid, - * we can allow this. - */ - if (current->flags & PF_KTHREAD) - return ERR_PTR(-EOPNOTSUPP); - if (!tgid) return ERR_PTR(-ENOENT); /* max length of unsigned int in decimal + NULL term */ -- cgit v1.2.3 From 1c0aa1fae1acb77c5f9917adb0e4cb4500b9f3a6 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sat, 20 Feb 2021 11:55:28 -0700 Subject: io_uring: flag new native workers with IORING_FEAT_NATIVE_WORKERS A few reasons to do this: - The naming of the manager and worker have changed. That's a user visible change, so makes sense to flag it. - Opening certain files that use ->signal (like /proc/self or /dev/tty) now works, and the flag tells the application upfront that this is the case. - Related to the above, using signalfd will now work as well. Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 +- include/uapi/linux/io_uring.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index 378cf79e66c9..cf9a5fa1ad03 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -9467,7 +9467,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p, IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS | IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL | IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED | - IORING_FEAT_EXT_ARG; + IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS; if (copy_to_user(params, p, sizeof(*p))) { ret = -EFAULT; diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index ac4e1738a9af..2514eb6b1cf2 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -262,6 +262,7 @@ struct io_uring_params { #define IORING_FEAT_POLL_32BITS (1U << 6) #define IORING_FEAT_SQPOLL_NONFIXED (1U << 7) #define IORING_FEAT_EXT_ARG (1U << 8) +#define IORING_FEAT_NATIVE_WORKERS (1U << 9) /* * io_uring_register(2) opcodes and arguments -- cgit v1.2.3 From 728f13e730093d0b3a1317d2ada83c2538941f34 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sun, 21 Feb 2021 16:02:53 -0700 Subject: io-wq: remove nr_process accounting We're now just using fork like we would from userspace, so there's no need to try and impose extra restrictions or accounting on the user side of things. That's already being done for us. That also means we don't have to pass in the user_struct anymore, that's correctly inherited through ->creds on fork. 
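The one limit that survives is the per-node cap on unbound workers, and it now comes straight from the task's resource limit at io_wq_create() time rather than from the hand-rolled ->user->processes counter; condensed from the hunk below:

	/* per-node accounting setup, as io_wq_create() does it after this change */
	wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
	wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
				task_rlimit(current, RLIMIT_NPROC);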
Signed-off-by: Jens Axboe --- fs/io-wq.c | 48 +----------------------------------------------- fs/io-wq.h | 2 -- fs/io_uring.c | 1 - 3 files changed, 1 insertion(+), 50 deletions(-) (limited to 'fs') diff --git a/fs/io-wq.c b/fs/io-wq.c index b0d09f60200b..b5ae8080a41e 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -172,8 +172,6 @@ static void io_worker_exit(struct io_worker *worker) worker->flags = 0; if (flags & IO_WORKER_F_RUNNING) atomic_dec(&acct->nr_running); - if (!(flags & IO_WORKER_F_BOUND)) - atomic_dec(&wqe->wq->user->processes); worker->flags = 0; preempt_enable(); @@ -299,12 +297,10 @@ static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker, worker->flags |= IO_WORKER_F_BOUND; wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers--; wqe->acct[IO_WQ_ACCT_BOUND].nr_workers++; - atomic_dec(&wqe->wq->user->processes); } else { worker->flags &= ~IO_WORKER_F_BOUND; wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers++; wqe->acct[IO_WQ_ACCT_BOUND].nr_workers--; - atomic_inc(&wqe->wq->user->processes); } io_wqe_inc_running(worker); } @@ -575,9 +571,6 @@ static int task_thread(void *data, int index) acct->nr_workers++; raw_spin_unlock_irq(&wqe->lock); - if (index == IO_WQ_ACCT_UNBOUND) - atomic_inc(&wq->user->processes); - io_wqe_worker(data); do_exit(0); } @@ -730,29 +723,6 @@ static int io_wq_manager(void *data) do_exit(0); } -static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct, - struct io_wq_work *work) -{ - bool free_worker; - - if (!(work->flags & IO_WQ_WORK_UNBOUND)) - return true; - if (atomic_read(&acct->nr_running)) - return true; - - rcu_read_lock(); - free_worker = !hlist_nulls_empty(&wqe->free_list); - rcu_read_unlock(); - if (free_worker) - return true; - - if (atomic_read(&wqe->wq->user->processes) >= acct->max_workers && - !(capable(CAP_SYS_RESOURCE) || capable(CAP_SYS_ADMIN))) - return false; - - return true; -} - static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe) { struct io_wq *wq = wqe->wq; @@ -790,17 +760,6 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work) int work_flags; unsigned long flags; - /* - * Do early check to see if we need a new unbound worker, and if we do, - * if we're allowed to do so. This isn't 100% accurate as there's a - * gap between this check and incrementing the value, but that's OK. - * It's close enough to not be an issue, fork() has the same delay. 
- */ - if (unlikely(!io_wq_can_queue(wqe, acct, work))) { - io_run_cancel(work, wqe); - return; - } - work_flags = work->flags; raw_spin_lock_irqsave(&wqe->lock, flags); io_wqe_insert_work(wqe, work); @@ -978,9 +937,6 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data) wq->free_work = data->free_work; wq->do_work = data->do_work; - /* caller must already hold a reference to this */ - wq->user = data->user; - ret = -ENOMEM; for_each_node(node) { struct io_wqe *wqe; @@ -995,10 +951,8 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data) wqe->node = alloc_node; wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded; atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0); - if (wq->user) { - wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers = + wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers = task_rlimit(current, RLIMIT_NPROC); - } atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0); wqe->wq = wq; raw_spin_lock_init(&wqe->lock); diff --git a/fs/io-wq.h b/fs/io-wq.h index 23f6cbd620f8..86825673be08 100644 --- a/fs/io-wq.h +++ b/fs/io-wq.h @@ -94,8 +94,6 @@ typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *); typedef void (io_wq_work_fn)(struct io_wq_work *); struct io_wq_data { - struct user_struct *user; - io_wq_work_fn *do_work; free_work_fn *free_work; }; diff --git a/fs/io_uring.c b/fs/io_uring.c index cf9a5fa1ad03..49a636d291cf 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -7767,7 +7767,6 @@ static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx) struct io_wq_data data; unsigned int concurrency; - data.user = ctx->user; data.free_work = io_free_work; data.do_work = io_wq_submit_work; -- cgit v1.2.3 From 62e398be275a6c6efefe117b8960ae4e40e047cd Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sun, 21 Feb 2021 16:19:37 -0700 Subject: io_uring: cleanup ->user usage At this point we're only using it for memory accounting, so there's no need to have an extra ->limit_mem - we can just set ->user if we do the accounting, or leave it at NULL if we don't. 
Signed-off-by: Jens Axboe --- fs/io_uring.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index 49a636d291cf..e62ad6bde569 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -334,7 +334,6 @@ struct io_ring_ctx { struct { unsigned int flags; unsigned int compat: 1; - unsigned int limit_mem: 1; unsigned int cq_overflow_flushed: 1; unsigned int drain_next: 1; unsigned int eventfd_async: 1; @@ -7230,7 +7229,7 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset) skb->sk = sk; nr_files = 0; - fpl->user = get_uid(ctx->user); + fpl->user = get_uid(current_user()); for (i = 0; i < nr; i++) { struct file *file = io_file_from_index(ctx, i + offset); @@ -7942,7 +7941,7 @@ static inline int __io_account_mem(struct user_struct *user, static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages) { - if (ctx->limit_mem) + if (ctx->user) __io_unaccount_mem(ctx->user, nr_pages); if (ctx->mm_account) @@ -7953,7 +7952,7 @@ static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages) { int ret; - if (ctx->limit_mem) { + if (ctx->user) { ret = __io_account_mem(ctx->user, nr_pages); if (ret) return ret; @@ -9370,7 +9369,6 @@ static struct file *io_uring_get_file(struct io_ring_ctx *ctx) static int io_uring_create(unsigned entries, struct io_uring_params *p, struct io_uring_params __user *params) { - struct user_struct *user = NULL; struct io_ring_ctx *ctx; struct file *file; int ret; @@ -9412,16 +9410,12 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p, p->cq_entries = 2 * p->sq_entries; } - user = get_uid(current_user()); - ctx = io_ring_ctx_alloc(p); - if (!ctx) { - free_uid(user); + if (!ctx) return -ENOMEM; - } ctx->compat = in_compat_syscall(); - ctx->limit_mem = !capable(CAP_IPC_LOCK); - ctx->user = user; + if (!capable(CAP_IPC_LOCK)) + ctx->user = get_uid(current_user()); ctx->sqo_task = current; /* -- cgit v1.2.3 From 8a378fb096a7f02943c72a428bbfd0029260efb6 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 23 Feb 2021 12:27:49 -0700 Subject: io_uring: ensure io-wq context is always destroyed for tasks If the task ends up doing no IO, the context list is empty and we don't call into __io_uring_files_cancel() when the task exits. This can cause a leak of the io-wq structures. Ensure we always call __io_uring_files_cancel(), even if the task context list is empty. 
Fixes: 5aa75ed5b93f ("io_uring: tie async worker side to the task context") Signed-off-by: Jens Axboe --- fs/io_uring.c | 7 ++++--- include/linux/io_uring.h | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index e62ad6bde569..0a435a6f265a 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -8800,9 +8800,10 @@ void __io_uring_files_cancel(struct files_struct *files) if (files) { io_uring_remove_task_files(tctx); - } else if (tctx->io_wq && current->flags & PF_EXITING) { - io_wq_destroy(tctx->io_wq); - tctx->io_wq = NULL; + if (tctx->io_wq) { + io_wq_destroy(tctx->io_wq); + tctx->io_wq = NULL; + } } } diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h index c48fcbdc2ea8..51ede771cd99 100644 --- a/include/linux/io_uring.h +++ b/include/linux/io_uring.h @@ -43,7 +43,7 @@ static inline void io_uring_task_cancel(void) } static inline void io_uring_files_cancel(struct files_struct *files) { - if (current->io_uring && !xa_empty(¤t->io_uring->xa)) + if (current->io_uring) __io_uring_files_cancel(files); } static inline void io_uring_free(struct task_struct *tsk) -- cgit v1.2.3 From 8b3e78b5955abb98863832453f5c74eca8f53c3a Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 23 Feb 2021 15:34:06 -0700 Subject: io-wq: fix races around manager/worker creation and task exit These races have always been there, they are just more apparent now that we do early cancel of io-wq when the task exits. Ensure that the io-wq manager sets task state correctly to not miss wakeups for task creation. This is important if we get a wakeup after having marked ourselves as TASK_INTERRUPTIBLE. If we do end up creating workers, then we flip the state back to running, making the subsequent schedule() a no-op. Also increment the wq ref count before forking the thread, to avoid a use-after-free. 
Signed-off-by: Jens Axboe --- fs/io-wq.c | 57 +++++++++++++++++++++++++++++++++++---------------------- 1 file changed, 35 insertions(+), 22 deletions(-) (limited to 'fs') diff --git a/fs/io-wq.c b/fs/io-wq.c index b5ae8080a41e..0ce5057c3bf7 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -605,6 +605,8 @@ static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index) struct io_worker *worker; pid_t pid; + __set_current_state(TASK_RUNNING); + worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node); if (!worker) return false; @@ -614,15 +616,18 @@ static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index) worker->wqe = wqe; spin_lock_init(&worker->lock); + refcount_inc(&wq->refs); + if (index == IO_WQ_ACCT_BOUND) pid = io_wq_fork_thread(task_thread_bound, worker); else pid = io_wq_fork_thread(task_thread_unbound, worker); if (pid < 0) { + if (refcount_dec_and_test(&wq->refs)) + complete(&wq->done); kfree(worker); return false; } - refcount_inc(&wq->refs); return true; } @@ -668,6 +673,30 @@ static bool io_wq_worker_wake(struct io_worker *worker, void *data) return false; } +static void io_wq_check_workers(struct io_wq *wq) +{ + int node; + + for_each_node(node) { + struct io_wqe *wqe = wq->wqes[node]; + bool fork_worker[2] = { false, false }; + + if (!node_online(node)) + continue; + + raw_spin_lock_irq(&wqe->lock); + if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND)) + fork_worker[IO_WQ_ACCT_BOUND] = true; + if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND)) + fork_worker[IO_WQ_ACCT_UNBOUND] = true; + raw_spin_unlock_irq(&wqe->lock); + if (fork_worker[IO_WQ_ACCT_BOUND]) + create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND); + if (fork_worker[IO_WQ_ACCT_UNBOUND]) + create_io_worker(wq, wqe, IO_WQ_ACCT_UNBOUND); + } +} + /* * Manager thread. Tasked with creating new workers, if we need them. */ @@ -684,30 +713,15 @@ static int io_wq_manager(void *data) complete(&wq->done); - while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) { - for_each_node(node) { - struct io_wqe *wqe = wq->wqes[node]; - bool fork_worker[2] = { false, false }; - - if (!node_online(node)) - continue; - - raw_spin_lock_irq(&wqe->lock); - if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND)) - fork_worker[IO_WQ_ACCT_BOUND] = true; - if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND)) - fork_worker[IO_WQ_ACCT_UNBOUND] = true; - raw_spin_unlock_irq(&wqe->lock); - if (fork_worker[IO_WQ_ACCT_BOUND]) - create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND); - if (fork_worker[IO_WQ_ACCT_UNBOUND]) - create_io_worker(wq, wqe, IO_WQ_ACCT_UNBOUND); - } + do { set_current_state(TASK_INTERRUPTIBLE); + io_wq_check_workers(wq); schedule_timeout(HZ); if (fatal_signal_pending(current)) set_bit(IO_WQ_BIT_EXIT, &wq->state); - } + } while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)); + + io_wq_check_workers(wq); if (refcount_dec_and_test(&wq->refs)) { complete(&wq->done); @@ -970,7 +984,6 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data) current->flags &= ~PF_IO_WORKER; if (ret >= 0) { wait_for_completion(&wq->done); - reinit_completion(&wq->done); return wq; } -- cgit v1.2.3 From eb2de9418d56b5e6ebf27bad51dbce3e22ee109b Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 23 Feb 2021 19:59:06 -0700 Subject: io-wq: fix race around io_worker grabbing There's a small window between lookup dropping the reference to the worker and calling wake_up_process() on the worker task, where the worker itself could have exited. 
We ensure that the worker struct itself is valid, but worker->task may very well be gone by the time we issue the wakeup. Fix the race by using a completion triggered by the reference going to zero, and having exit wait for that completion before proceeding. Signed-off-by: Jens Axboe --- fs/io-wq.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/io-wq.c b/fs/io-wq.c index 0ce5057c3bf7..a53df2b3762a 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -56,6 +56,8 @@ struct io_worker { const struct cred *cur_creds; const struct cred *saved_creds; + struct completion ref_done; + struct rcu_head rcu; }; @@ -129,7 +131,7 @@ static bool io_worker_get(struct io_worker *worker) static void io_worker_release(struct io_worker *worker) { if (refcount_dec_and_test(&worker->ref)) - wake_up_process(worker->task); + complete(&worker->ref_done); } static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe, @@ -157,14 +159,9 @@ static void io_worker_exit(struct io_worker *worker) struct io_wqe_acct *acct = io_wqe_get_acct(worker); unsigned flags; - /* - * If we're not at zero, someone else is holding a brief reference - * to the worker. Wait for that to go away. - */ - set_current_state(TASK_INTERRUPTIBLE); - if (!refcount_dec_and_test(&worker->ref)) - schedule(); - __set_current_state(TASK_RUNNING); + if (refcount_dec_and_test(&worker->ref)) + complete(&worker->ref_done); + wait_for_completion(&worker->ref_done); preempt_disable(); current->flags &= ~PF_IO_WORKER; @@ -615,6 +612,7 @@ static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index) worker->nulls_node.pprev = NULL; worker->wqe = wqe; spin_lock_init(&worker->lock); + init_completion(&worker->ref_done); refcount_inc(&wq->refs); @@ -724,6 +722,7 @@ static int io_wq_manager(void *data) io_wq_check_workers(wq); if (refcount_dec_and_test(&wq->refs)) { + wq->manager = NULL; complete(&wq->done); do_exit(0); } @@ -734,6 +733,7 @@ static int io_wq_manager(void *data) io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL); rcu_read_unlock(); } + wq->manager = NULL; do_exit(0); } -- cgit v1.2.3 From e941894eae31b52f0fd9bdb3ce20620afa152f45 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 19 Feb 2021 12:33:30 -0700 Subject: io-wq: make buffered file write hashed work map per-ctx Before the io-wq thread change, we maintained a hash work map and lock per-node per-ring. That wasn't ideal, as we really wanted it to be per ring. But now that we have per-task workers, the hash map ends up being just per-task. That'll work just fine for the normal case of having one task use a ring, but if you share the ring between tasks, then it's considerably worse than it was before. Make the hash map per ctx instead, which provides full per-ctx buffered write serialization on hashed writes. 
Signed-off-by: Jens Axboe --- fs/io-wq.c | 85 +++++++++++++++++++++++++++++++++++++++++++++++++++-------- fs/io-wq.h | 14 ++++++++++ fs/io_uring.c | 19 +++++++++++++ 3 files changed, 107 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/io-wq.c b/fs/io-wq.c index a53df2b3762a..d28ad66b7f16 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -87,7 +87,6 @@ struct io_wqe { struct { raw_spinlock_t lock; struct io_wq_work_list work_list; - unsigned long hash_map; unsigned flags; } ____cacheline_aligned_in_smp; @@ -97,6 +96,8 @@ struct io_wqe { struct hlist_nulls_head free_list; struct list_head all_list; + struct wait_queue_entry wait; + struct io_wq *wq; struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS]; }; @@ -113,6 +114,9 @@ struct io_wq { struct task_struct *manager; struct user_struct *user; + + struct io_wq_hash *hash; + refcount_t refs; struct completion done; @@ -328,14 +332,31 @@ static inline unsigned int io_get_work_hash(struct io_wq_work *work) return work->flags >> IO_WQ_HASH_SHIFT; } +static void io_wait_on_hash(struct io_wqe *wqe, unsigned int hash) +{ + struct io_wq *wq = wqe->wq; + + spin_lock(&wq->hash->wait.lock); + if (list_empty(&wqe->wait.entry)) { + __add_wait_queue(&wq->hash->wait, &wqe->wait); + if (!test_bit(hash, &wq->hash->map)) { + __set_current_state(TASK_RUNNING); + list_del_init(&wqe->wait.entry); + } + } + spin_unlock(&wq->hash->wait.lock); +} + static struct io_wq_work *io_get_next_work(struct io_wqe *wqe) __must_hold(wqe->lock) { struct io_wq_work_node *node, *prev; struct io_wq_work *work, *tail; - unsigned int hash; + unsigned int stall_hash = -1U; wq_list_for_each(node, prev, &wqe->work_list) { + unsigned int hash; + work = container_of(node, struct io_wq_work, list); /* not hashed, can run anytime */ @@ -344,16 +365,26 @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe) return work; } - /* hashed, can run if not already running */ hash = io_get_work_hash(work); - if (!(wqe->hash_map & BIT(hash))) { - wqe->hash_map |= BIT(hash); - /* all items with this hash lie in [work, tail] */ - tail = wqe->hash_tail[hash]; + /* all items with this hash lie in [work, tail] */ + tail = wqe->hash_tail[hash]; + + /* hashed, can run if not already running */ + if (!test_and_set_bit(hash, &wqe->wq->hash->map)) { wqe->hash_tail[hash] = NULL; wq_list_cut(&wqe->work_list, &tail->list, prev); return work; } + if (stall_hash == -1U) + stall_hash = hash; + /* fast forward to a next hash, for-each will fix up @prev */ + node = &tail->list; + } + + if (stall_hash != -1U) { + raw_spin_unlock(&wqe->lock); + io_wait_on_hash(wqe, stall_hash); + raw_spin_lock(&wqe->lock); } return NULL; @@ -421,6 +452,7 @@ get_next: if (!work) break; io_assign_current_work(worker, work); + __set_current_state(TASK_RUNNING); /* handle a whole dependent link */ do { @@ -444,8 +476,10 @@ get_next: io_wqe_enqueue(wqe, linked); if (hash != -1U && !next_hashed) { + clear_bit(hash, &wq->hash->map); + if (wq_has_sleeper(&wq->hash->wait)) + wake_up(&wq->hash->wait); raw_spin_lock_irq(&wqe->lock); - wqe->hash_map &= ~BIT_ULL(hash); wqe->flags &= ~IO_WQE_FLAG_STALLED; /* skip unnecessary unlock-lock wqe->lock */ if (!work) @@ -471,7 +505,6 @@ static int io_wqe_worker(void *data) loop: raw_spin_lock_irq(&wqe->lock); if (io_wqe_run_queue(wqe)) { - __set_current_state(TASK_RUNNING); io_worker_handle_work(worker); goto loop; } @@ -928,6 +961,24 @@ enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel, return IO_WQ_CANCEL_NOTFOUND; } +static int io_wqe_hash_wake(struct 
wait_queue_entry *wait, unsigned mode, + int sync, void *key) +{ + struct io_wqe *wqe = container_of(wait, struct io_wqe, wait); + int ret; + + list_del_init(&wait->entry); + + rcu_read_lock(); + ret = io_wqe_activate_free_worker(wqe); + rcu_read_unlock(); + + if (!ret) + wake_up_process(wqe->wq->manager); + + return 1; +} + struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data) { int ret = -ENOMEM, node; @@ -948,6 +999,8 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data) if (ret) goto err_wqes; + refcount_inc(&data->hash->refs); + wq->hash = data->hash; wq->free_work = data->free_work; wq->do_work = data->do_work; @@ -968,6 +1021,8 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data) wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers = task_rlimit(current, RLIMIT_NPROC); atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0); + wqe->wait.func = io_wqe_hash_wake; + INIT_LIST_HEAD(&wqe->wait.entry); wqe->wq = wq; raw_spin_lock_init(&wqe->lock); INIT_WQ_LIST(&wqe->work_list); @@ -989,6 +1044,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data) if (refcount_dec_and_test(&wq->refs)) complete(&wq->done); + io_wq_put_hash(data->hash); err: cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node); for_each_node(node) @@ -1017,8 +1073,15 @@ void io_wq_destroy(struct io_wq *wq) wait_for_completion(&wq->done); - for_each_node(node) - kfree(wq->wqes[node]); + spin_lock_irq(&wq->hash->wait.lock); + for_each_node(node) { + struct io_wqe *wqe = wq->wqes[node]; + + list_del_init(&wqe->wait.entry); + kfree(wqe); + } + spin_unlock_irq(&wq->hash->wait.lock); + io_wq_put_hash(wq->hash); kfree(wq->wqes); kfree(wq); } diff --git a/fs/io-wq.h b/fs/io-wq.h index 86825673be08..3677b39db015 100644 --- a/fs/io-wq.h +++ b/fs/io-wq.h @@ -1,6 +1,7 @@ #ifndef INTERNAL_IO_WQ_H #define INTERNAL_IO_WQ_H +#include #include struct io_wq; @@ -93,7 +94,20 @@ static inline struct io_wq_work *wq_next_work(struct io_wq_work *work) typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *); typedef void (io_wq_work_fn)(struct io_wq_work *); +struct io_wq_hash { + refcount_t refs; + unsigned long map; + struct wait_queue_head wait; +}; + +static inline void io_wq_put_hash(struct io_wq_hash *hash) +{ + if (refcount_dec_and_test(&hash->refs)) + kfree(hash); +} + struct io_wq_data { + struct io_wq_hash *hash; io_wq_work_fn *do_work; free_work_fn *free_work; }; diff --git a/fs/io_uring.c b/fs/io_uring.c index 0a435a6f265a..fbc85afa9a87 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -360,6 +360,9 @@ struct io_ring_ctx { unsigned cached_cq_overflow; unsigned long sq_check_overflow; + /* hashed buffered write serialization */ + struct io_wq_hash *hash_map; + struct list_head defer_list; struct list_head timeout_list; struct list_head cq_overflow_list; @@ -454,6 +457,8 @@ struct io_ring_ctx { /* exit task_work */ struct callback_head *exit_task_work; + struct wait_queue_head hash_wait; + /* Keep this last, we don't need it for the fast path */ struct work_struct exit_work; }; @@ -7763,9 +7768,21 @@ static struct io_wq_work *io_free_work(struct io_wq_work *work) static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx) { + struct io_wq_hash *hash; struct io_wq_data data; unsigned int concurrency; + hash = ctx->hash_map; + if (!hash) { + hash = kzalloc(sizeof(*hash), GFP_KERNEL); + if (!hash) + return ERR_PTR(-ENOMEM); + refcount_set(&hash->refs, 1); + init_waitqueue_head(&hash->wait); + ctx->hash_map = hash; + } + + data.hash = hash; 
data.free_work = io_free_work; data.do_work = io_wq_submit_work; @@ -8405,6 +8422,8 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx) percpu_ref_exit(&ctx->refs); free_uid(ctx->user); io_req_caches_free(ctx, NULL); + if (ctx->hash_map) + io_wq_put_hash(ctx->hash_map); kfree(ctx->cancel_hash); kfree(ctx); } -- cgit v1.2.3 From eb85890b29e4d7ae1accdcfba35ed8b16ba9fb97 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 25 Feb 2021 10:13:29 -0700 Subject: io_uring: ensure SQPOLL startup is triggered before error shutdown syzbot reports the following hang: INFO: task syz-executor.0:12538 can't die for more than 143 seconds. task:syz-executor.0 state:D stack:28352 pid:12538 ppid: 8423 flags:0x00004004 Call Trace: context_switch kernel/sched/core.c:4324 [inline] __schedule+0x90c/0x21a0 kernel/sched/core.c:5075 schedule+0xcf/0x270 kernel/sched/core.c:5154 schedule_timeout+0x1db/0x250 kernel/time/timer.c:1868 do_wait_for_common kernel/sched/completion.c:85 [inline] __wait_for_common kernel/sched/completion.c:106 [inline] wait_for_common kernel/sched/completion.c:117 [inline] wait_for_completion+0x168/0x270 kernel/sched/completion.c:138 io_sq_thread_finish+0x96/0x580 fs/io_uring.c:7152 io_sq_offload_create fs/io_uring.c:7929 [inline] io_uring_create fs/io_uring.c:9465 [inline] io_uring_setup+0x1fb2/0x2c20 fs/io_uring.c:9550 do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46 entry_SYSCALL_64_after_hwframe+0x44/0xae which is due to exiting after the SQPOLL thread has been created, but hasn't been started yet. Ensure that we always complete the startup side when waiting for it to exit. Reported-by: syzbot+c927c937cba8ef66dd4a@syzkaller.appspotmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index fbc85afa9a87..ef743594d34a 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -7141,6 +7141,7 @@ static void io_sq_thread_finish(struct io_ring_ctx *ctx) struct io_sq_data *sqd = ctx->sq_data; if (sqd) { + complete(&sqd->startup); if (sqd->thread) { wait_for_completion(&ctx->sq_thread_comp); io_sq_thread_park(sqd); @@ -7927,7 +7928,7 @@ static void io_sq_offload_start(struct io_ring_ctx *ctx) { struct io_sq_data *sqd = ctx->sq_data; - if ((ctx->flags & IORING_SETUP_SQPOLL) && sqd->thread) + if (ctx->flags & IORING_SETUP_SQPOLL) complete(&sqd->startup); } -- cgit v1.2.3 From 4fb6ac326204b3ab81e6e7a914ccd44d957c1d2c Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 25 Feb 2021 10:17:09 -0700 Subject: io-wq: improve manager/worker handling over exec exec will cancel any threads, including the ones that io-wq is using. This isn't a problem, in fact we'd prefer it to be that way since it means we know that any async work cancels naturally without having to handle it proactively. But it does mean that we need to setup a new manager, as the manager and workers are gone. Handle this at queue time, and cancel work if we fail. Since the manager can go away without us noticing, ensure that the manager itself holds a reference to the 'wq' as well. Rename io_wq_destroy() to io_wq_put() to reflect that. In the future we can now simplify exec cancelation handling, for now just leave it the same. 
Signed-off-by: Jens Axboe --- fs/io-wq.c | 62 ++++++++++++++++++++++++++++++++++++++++------------------- fs/io-wq.h | 2 +- fs/io_uring.c | 4 ++-- 3 files changed, 45 insertions(+), 23 deletions(-) (limited to 'fs') diff --git a/fs/io-wq.c b/fs/io-wq.c index d28ad66b7f16..a32b81bac8a2 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -189,8 +189,7 @@ static void io_worker_exit(struct io_worker *worker) raw_spin_unlock_irq(&wqe->lock); kfree_rcu(worker, rcu); - if (refcount_dec_and_test(&wqe->wq->refs)) - complete(&wqe->wq->done); + io_wq_put(wqe->wq); } static inline bool io_wqe_run_queue(struct io_wqe *wqe) @@ -654,8 +653,7 @@ static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index) else pid = io_wq_fork_thread(task_thread_unbound, worker); if (pid < 0) { - if (refcount_dec_and_test(&wq->refs)) - complete(&wq->done); + io_wq_put(wq); kfree(worker); return false; } @@ -754,11 +752,6 @@ static int io_wq_manager(void *data) io_wq_check_workers(wq); - if (refcount_dec_and_test(&wq->refs)) { - wq->manager = NULL; - complete(&wq->done); - do_exit(0); - } /* if ERROR is set and we get here, we have workers to wake */ if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) { rcu_read_lock(); @@ -767,6 +760,7 @@ static int io_wq_manager(void *data) rcu_read_unlock(); } wq->manager = NULL; + io_wq_put(wq); do_exit(0); } @@ -801,12 +795,40 @@ append: wq_list_add_after(&work->list, &tail->list, &wqe->work_list); } +static int io_wq_fork_manager(struct io_wq *wq) +{ + int ret; + + if (wq->manager) + return 0; + + clear_bit(IO_WQ_BIT_EXIT, &wq->state); + refcount_inc(&wq->refs); + current->flags |= PF_IO_WORKER; + ret = io_wq_fork_thread(io_wq_manager, wq); + current->flags &= ~PF_IO_WORKER; + if (ret >= 0) { + wait_for_completion(&wq->done); + return 0; + } + + io_wq_put(wq); + return ret; +} + static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work) { struct io_wqe_acct *acct = io_work_get_acct(wqe, work); int work_flags; unsigned long flags; + /* Can only happen if manager creation fails after exec */ + if (unlikely(io_wq_fork_manager(wqe->wq))) { + work->flags |= IO_WQ_WORK_CANCEL; + wqe->wq->do_work(work); + return; + } + work_flags = work->flags; raw_spin_lock_irqsave(&wqe->lock, flags); io_wqe_insert_work(wqe, work); @@ -1034,16 +1056,11 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data) init_completion(&wq->done); refcount_set(&wq->refs, 1); - current->flags |= PF_IO_WORKER; - ret = io_wq_fork_thread(io_wq_manager, wq); - current->flags &= ~PF_IO_WORKER; - if (ret >= 0) { - wait_for_completion(&wq->done); + ret = io_wq_fork_manager(wq); + if (!ret) return wq; - } - if (refcount_dec_and_test(&wq->refs)) - complete(&wq->done); + io_wq_put(wq); io_wq_put_hash(data->hash); err: cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node); @@ -1056,7 +1073,7 @@ err_wq: return ERR_PTR(ret); } -void io_wq_destroy(struct io_wq *wq) +static void io_wq_destroy(struct io_wq *wq) { int node; @@ -1071,8 +1088,6 @@ void io_wq_destroy(struct io_wq *wq) io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL); rcu_read_unlock(); - wait_for_completion(&wq->done); - spin_lock_irq(&wq->hash->wait.lock); for_each_node(node) { struct io_wqe *wqe = wq->wqes[node]; @@ -1084,6 +1099,13 @@ void io_wq_destroy(struct io_wq *wq) io_wq_put_hash(wq->hash); kfree(wq->wqes); kfree(wq); + +} + +void io_wq_put(struct io_wq *wq) +{ + if (refcount_dec_and_test(&wq->refs)) + io_wq_destroy(wq); } static bool io_wq_worker_affinity(struct io_worker *worker, void *data) diff --git 

From 5f3f26f98ae484a3e187411f9ea8c88c00a65ffc Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Thu, 25 Feb 2021 10:17:46 -0700
Subject: io_uring: fix SQPOLL thread handling over exec

Just like the changes for io-wq, ensure that we re-fork the SQPOLL
thread if the owner execs. Mark the ctx sq thread as sqo_exec if it
dies, and the ring as needing a wakeup which will force the task to
enter the kernel. When it does, setup the new thread and proceed as
usual.

Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 35 ++++++++++++++++++++++++++++++++++-
 1 file changed, 34 insertions(+), 1 deletion(-)

(limited to 'fs')

diff --git a/fs/io_uring.c b/fs/io_uring.c
index f66a8137e125..4d79732d7d6b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -339,6 +339,7 @@ struct io_ring_ctx {
 	unsigned int		eventfd_async: 1;
 	unsigned int		restricted: 1;
 	unsigned int		sqo_dead: 1;
+	unsigned int		sqo_exec: 1;
 
 	/*
 	 * Ring buffer of indices into array of io_uring_sqe, which is
@@ -6796,6 +6797,10 @@ static int io_sq_thread(void *data)
 	complete_all(&sqd->completion);
 	mutex_lock(&sqd->lock);
 	sqd->thread = NULL;
+	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
+		ctx->sqo_exec = 1;
+		io_ring_set_wakeup_flag(ctx);
+	}
 	mutex_unlock(&sqd->lock);
 
 	complete(&sqd->exited);
@@ -7840,6 +7845,25 @@ void __io_uring_free(struct task_struct *tsk)
 	tsk->io_uring = NULL;
 }
 
+static int io_sq_thread_fork(struct io_sq_data *sqd, struct io_ring_ctx *ctx)
+{
+	int ret;
+
+	clear_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
+	reinit_completion(&sqd->completion);
+	ctx->sqo_dead = ctx->sqo_exec = 0;
+	sqd->task_pid = current->pid;
+	current->flags |= PF_IO_WORKER;
+	ret = io_wq_fork_thread(io_sq_thread, sqd);
+	current->flags &= ~PF_IO_WORKER;
+	if (ret < 0) {
+		sqd->thread = NULL;
+		return ret;
+	}
+	wait_for_completion(&sqd->completion);
+	return io_uring_alloc_task_context(sqd->thread, ctx);
+}
+
 static int io_sq_offload_create(struct io_ring_ctx *ctx,
 				struct io_uring_params *p)
 {
@@ -9128,6 +9152,12 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 	if (ctx->flags & IORING_SETUP_SQPOLL) {
 		io_cqring_overflow_flush(ctx, false, NULL, NULL);
+		if (unlikely(ctx->sqo_exec)) {
+			ret = io_sq_thread_fork(ctx->sq_data, ctx);
+			if (ret)
+				goto out;
+			ctx->sqo_exec = 0;
+		}
 		ret = -EOWNERDEAD;
 		if (unlikely(ctx->sqo_dead))
 			goto out;
@@ -9229,8 +9259,11 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
 	 */
 	has_lock = mutex_trylock(&ctx->uring_lock);
 
-	if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL))
+	if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
 		sq = ctx->sq_data;
+		if (!sq->thread)
+			sq = NULL;
+	}
 
 	seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
 	seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
-- cgit v1.2.3
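
[Editorial note, not part of the patch mail above] The handshake this patch
adds has three steps: the dying SQPOLL thread marks each ring with sqo_exec
and sets the wakeup flag; the wakeup flag forces the submitting task into the
io_uring_enter() syscall on its next submit; and the syscall re-forks the
thread before using the ring. A minimal userspace model of that lazy-respawn
handshake follows, with invented names and pthreads standing in for kernel
threads; it is a sketch, not the io_uring code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ring {
	atomic_bool sqo_exec;    /* poller died (e.g. exec); needs re-fork */
	atomic_bool need_wakeup; /* forces the owner into the syscall path */
	pthread_t poller;
};

static void *poller_fn(void *arg)
{
	struct ring *r = arg;

	/* ... the submission-queue polling loop would run here ... */

	/* Exit path: flag the ring so the owner re-forks us on next enter. */
	atomic_store(&r->sqo_exec, true);
	atomic_store(&r->need_wakeup, true);
	return NULL;
}

/* Mirrors the io_uring_enter() SQPOLL path: repair before use. */
static int ring_enter(struct ring *r)
{
	if (atomic_load(&r->sqo_exec)) {
		if (pthread_create(&r->poller, NULL, poller_fn, r) != 0)
			return -1;       /* the kernel would return an errno */
		atomic_store(&r->sqo_exec, false);
	}
	if (atomic_exchange(&r->need_wakeup, false)) {
		/* ... wake the poller thread here ... */
	}
	return 0;
}

int main(void)
{
	struct ring r = { .sqo_exec = true };  /* no poller exists yet */

	if (ring_enter(&r) == 0)
		puts("poller forked");
	pthread_join(r.poller, NULL);          /* it exits and flags the ring */
	if (ring_enter(&r) == 0)
		puts("poller transparently re-forked");
	pthread_join(r.poller, NULL);
	return 0;
}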

From d6ce7f6761bf6d669d9c74ec5d3bd1bfe92380c5 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Thu, 25 Feb 2021 10:19:43 -0700
Subject: io-wq: remove now unused IO_WQ_BIT_ERROR

This flag is now dead, remove it.

Fixes: 1cbd9c2bcf02 ("io-wq: don't create any IO workers upfront")
Signed-off-by: Jens Axboe
---
 fs/io-wq.c | 10 ----------
 1 file changed, 10 deletions(-)

(limited to 'fs')

diff --git a/fs/io-wq.c b/fs/io-wq.c
index a32b81bac8a2..44e20248805a 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -32,7 +32,6 @@ enum {
 
 enum {
 	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
-	IO_WQ_BIT_ERROR		= 1,	/* error on setup */
 };
 
 enum {
@@ -733,7 +732,6 @@ static int io_wq_manager(void *data)
 {
 	struct io_wq *wq = data;
 	char buf[TASK_COMM_LEN];
-	int node;
 
 	sprintf(buf, "iou-mgr-%d", wq->task_pid);
 	set_task_comm(current, buf);
@@ -751,14 +749,6 @@ static int io_wq_manager(void *data)
 	} while (!test_bit(IO_WQ_BIT_EXIT, &wq->state));
 
 	io_wq_check_workers(wq);
-
-	/* if ERROR is set and we get here, we have workers to wake */
-	if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
-		rcu_read_lock();
-		for_each_node(node)
-			io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
-		rcu_read_unlock();
-	}
 	wq->manager = NULL;
 	io_wq_put(wq);
 	do_exit(0);
-- cgit v1.2.3