author     Jens Axboe  2023-02-17 08:27:23 -0700
committer  Jens Axboe  2023-02-22 09:57:23 -0700
commit     8d664282a03fec09682f10252d3c785c2513691d
tree       3111af79c32cf40d67591bcc6819ad1666f5934f /io_uring
parent     ce8e04f6e5d3b2d14cd00cc4c0b1cc8cbdcf4d12
io_uring: rename 'in_idle' to 'in_cancel'
This better describes what the counter does: it is incremented while the
task is undergoing a cancelation operation, due to exiting or exec'ing.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
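
To make the renamed counter's role concrete, here is a minimal userspace
sketch of the pattern the commit message describes. This is NOT kernel
code: struct task_ctx, complete_request() and cancel_all_requests() are
illustrative stand-ins for tctx, io_put_task_remote() and
io_uring_cancel_generic(), and the kernel's ordering details (barriers,
percpu counters) are deliberately elided.

    #include <pthread.h>
    #include <stdatomic.h>

    struct task_ctx {
            atomic_int in_cancel;   /* nonzero while the task cancels */
            atomic_long inflight;   /* requests still in flight       */
            pthread_mutex_t lock;
            pthread_cond_t wait;    /* stands in for tctx->wait       */
    };

    /* Completion path, may run on any thread (cf. io_put_task_remote()). */
    static void complete_request(struct task_ctx *tctx)
    {
            atomic_fetch_sub(&tctx->inflight, 1);
            if (atomic_load(&tctx->in_cancel)) {    /* the "unlikely" branch */
                    pthread_mutex_lock(&tctx->lock);
                    pthread_cond_broadcast(&tctx->wait);
                    pthread_mutex_unlock(&tctx->lock);
            }
    }

    /* Owner-only cancel path (cf. io_uring_cancel_generic()). */
    static void cancel_all_requests(struct task_ctx *tctx)
    {
            atomic_fetch_add(&tctx->in_cancel, 1);
            pthread_mutex_lock(&tctx->lock);
            while (atomic_load(&tctx->inflight) > 0)
                    pthread_cond_wait(&tctx->wait, &tctx->lock);
            pthread_mutex_unlock(&tctx->lock);
            /* for a plain exit the counter is deliberately left elevated */
    }

    int main(void)
    {
            struct task_ctx tctx = {
                    .lock = PTHREAD_MUTEX_INITIALIZER,
                    .wait = PTHREAD_COND_INITIALIZER,
            };

            atomic_store(&tctx.inflight, 1);
            complete_request(&tctx);        /* the last request finishes...   */
            cancel_all_requests(&tctx);     /* ...so this returns immediately */
            return 0;
    }

Under the old name "in_idle" the increment read as "task is idle", which
is the opposite of what is happening; "in_cancel" matches the actual
state transition shown above.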
Diffstat (limited to 'io_uring')
-rw-r--r--  io_uring/io_uring.c | 18
-rw-r--r--  io_uring/tctx.c     |  2
2 files changed, 10 insertions, 10 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index cbe06deb84ff..64e07df034d1 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -719,7 +719,7 @@ static void io_put_task_remote(struct task_struct *task, int nr)
 	struct io_uring_task *tctx = task->io_uring;
 
 	percpu_counter_sub(&tctx->inflight, nr);
-	if (unlikely(atomic_read(&tctx->in_idle)))
+	if (unlikely(atomic_read(&tctx->in_cancel)))
 		wake_up(&tctx->wait);
 	put_task_struct_many(task, nr);
 }
@@ -1258,8 +1258,8 @@ void tctx_task_work(struct callback_head *cb)
 
 	ctx_flush_and_put(ctx, &uring_locked);
 
-	/* relaxed read is enough as only the task itself sets ->in_idle */
-	if (unlikely(atomic_read(&tctx->in_idle)))
+	/* relaxed read is enough as only the task itself sets ->in_cancel */
+	if (unlikely(atomic_read(&tctx->in_cancel)))
 		io_uring_drop_tctx_refs(current);
 
 	trace_io_uring_task_work_run(tctx, count, loops);
@@ -1291,7 +1291,7 @@ static void io_req_local_work_add(struct io_kiocb *req)
 	/* needed for the following wake up */
 	smp_mb__after_atomic();
 
-	if (unlikely(atomic_read(&req->task->io_uring->in_idle))) {
+	if (unlikely(atomic_read(&req->task->io_uring->in_cancel))) {
 		io_move_task_work_from_local(ctx);
 		goto put_ref;
 	}
@@ -2937,12 +2937,12 @@ static __cold void io_tctx_exit_cb(struct callback_head *cb)
 
 	work = container_of(cb, struct io_tctx_exit, task_work);
 	/*
-	 * When @in_idle, we're in cancellation and it's racy to remove the
+	 * When @in_cancel, we're in cancellation and it's racy to remove the
 	 * node. It'll be removed by the end of cancellation, just ignore it.
 	 * tctx can be NULL if the queueing of this task_work raced with
 	 * work cancelation off the exec path.
 	 */
-	if (tctx && !atomic_read(&tctx->in_idle))
+	if (tctx && !atomic_read(&tctx->in_cancel))
 		io_uring_del_tctx_node((unsigned long)work->ctx);
 	complete(&work->completion);
 }
@@ -3210,7 +3210,7 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
 	if (tctx->io_wq)
 		io_wq_exit_start(tctx->io_wq);
 
-	atomic_inc(&tctx->in_idle);
+	atomic_inc(&tctx->in_cancel);
 	do {
 		bool loop = false;
 
@@ -3261,9 +3261,9 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
 	if (cancel_all) {
 		/*
 		 * We shouldn't run task_works after cancel, so just leave
-		 * ->in_idle set for normal exit.
+		 * ->in_cancel set for normal exit.
 		 */
-		atomic_dec(&tctx->in_idle);
+		atomic_dec(&tctx->in_cancel);
 		/* for exec all current's requests should be gone, kill tctx */
 		__io_uring_free(current);
 	}
diff --git a/io_uring/tctx.c b/io_uring/tctx.c
index 4324b1cf1f6a..3a8d1dd97e1b 100644
--- a/io_uring/tctx.c
+++ b/io_uring/tctx.c
@@ -83,7 +83,7 @@ __cold int io_uring_alloc_task_context(struct task_struct *task,
 
 	xa_init(&tctx->xa);
 	init_waitqueue_head(&tctx->wait);
-	atomic_set(&tctx->in_idle, 0);
+	atomic_set(&tctx->in_cancel, 0);
 	atomic_set(&tctx->inflight_tracked, 0);
 	task->io_uring = tctx;
 	init_llist_head(&tctx->task_list);
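
A note on the "relaxed read is enough" comments in the hunks above: only
the owning task ever modifies ->in_cancel, so any other reader needs
atomicity but no ordering. A hedged C11 rendering of that single-writer
rule (illustrative names, not the kernel API; the kernel's atomic_read()
behaves as a relaxed load):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int in_cancel;

    /* Owner-only: plays the role of atomic_inc(&tctx->in_cancel). */
    static void owner_enter_cancel(void)
    {
            atomic_fetch_add_explicit(&in_cancel, 1, memory_order_relaxed);
    }

    /* Any thread: a relaxed load still sees a coherent value, because
     * there is exactly one writer; no cross-variable ordering is implied. */
    static int observer_sees_cancel(void)
    {
            return atomic_load_explicit(&in_cancel, memory_order_relaxed) != 0;
    }

    int main(void)
    {
            owner_enter_cancel();
            printf("canceling: %d\n", observer_sees_cancel());
            return 0;
    }

The io_req_local_work_add() hunk is the exception: per the comment kept
in the diff, its read is ordered against the preceding work-list update
by smp_mb__after_atomic(), which relaxed semantics alone would not give.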