path: root/io_uring
author		Jens Axboe	2022-06-21 14:34:15 -0600
committer	Jens Axboe	2022-07-24 18:39:16 -0600
commit		fe991a7688f894a74a6f6b4933bf6cd5fa086c1b (patch)
tree		af05e00dd77c161e15febf822521e320a4f6445a /io_uring
parent		795bbbc8a9a1bbbafce762c706bfb5733c9d0426 (diff)
io_uring: move POLLFREE handling to separate function
We really don't care about this at all in terms of performance. Outside of
having it already be marked unlikely(), shove it into a separate __cold
function.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
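For context, the change follows a common kernel pattern: a branch that is both
unlikely() and irrelevant for performance is factored out of the hot function
into a helper marked __cold, so the compiler can place it out of line and keep
the hot path compact. A minimal sketch of that pattern, with made-up names
(handle_rare_event(), fast_path()) rather than anything from this commit:

#include <linux/compiler.h>	/* unlikely(), __cold (kernel build assumed) */

/* Sketch only: rarely-taken, uninteresting work lives in a __cold helper. */
static __cold int handle_rare_event(int err)
{
	/* expensive cleanup/reporting we don't want inlined into callers */
	return err;
}

static int fast_path(int err)
{
	/* hot path: the rare branch costs only an out-of-line call */
	if (unlikely(err))
		return handle_rare_event(err);
	return 0;
}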
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/poll.c	50
1 file changed, 27 insertions, 23 deletions
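The ordering comment in the new helper, where the smp_store_release() on
poll->head must be the very last access, implies that the reader side observes
poll->head with acquire semantics before touching the waitqueue. A rough,
hypothetical illustration of such a pairing (example_remove_entry() is a
made-up name, not the actual io_uring reader):

/*
 * Hypothetical reader pairing with the smp_store_release() in the diff
 * below: an acquire load of poll->head guarantees that if head is still
 * non-NULL, the waitqueue has not been torn down underneath us.
 */
static void example_remove_entry(struct io_poll *poll)
{
	struct wait_queue_head *head = smp_load_acquire(&poll->head);

	if (head) {
		spin_lock_irq(&head->lock);
		list_del_init(&poll->wait.entry);
		spin_unlock_irq(&head->lock);
	}
}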
diff --git a/io_uring/poll.c b/io_uring/poll.c
index eba767594dee..fa25b88a7b93 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -325,6 +325,31 @@ static void io_poll_cancel_req(struct io_kiocb *req)
#define IO_ASYNC_POLL_COMMON (EPOLLONESHOT | EPOLLPRI)
+static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
+{
+ io_poll_mark_cancelled(req);
+ /* we have to kick tw in case it's not already */
+ io_poll_execute(req, 0);
+
+ /*
+ * If the waitqueue is being freed early but someone already
+ * holds ownership over it, we have to tear down the request as
+ * best we can. That means immediately removing the request from
+ * its waitqueue and preventing all further accesses to the
+ * waitqueue via the request.
+ */
+ list_del_init(&poll->wait.entry);
+
+ /*
+ * Careful: this *must* be the last step, since as soon
+ * as req->head is NULL'ed out, the request can be
+ * completed and freed, since aio_poll_complete_work()
+ * will no longer need to take the waitqueue lock.
+ */
+ smp_store_release(&poll->head, NULL);
+ return 1;
+}
+
static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
void *key)
{
@@ -332,29 +357,8 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
struct io_poll *poll = container_of(wait, struct io_poll, wait);
__poll_t mask = key_to_poll(key);
- if (unlikely(mask & POLLFREE)) {
- io_poll_mark_cancelled(req);
- /* we have to kick tw in case it's not already */
- io_poll_execute(req, 0);
-
- /*
- * If the waitqueue is being freed early but someone is already
- * holds ownership over it, we have to tear down the request as
- * best we can. That means immediately removing the request from
- * its waitqueue and preventing all further accesses to the
- * waitqueue via the request.
- */
- list_del_init(&poll->wait.entry);
-
- /*
- * Careful: this *must* be the last step, since as soon
- * as req->head is NULL'ed out, the request can be
- * completed and freed, since aio_poll_complete_work()
- * will no longer need to take the waitqueue lock.
- */
- smp_store_release(&poll->head, NULL);
- return 1;
- }
+ if (unlikely(mask & POLLFREE))
+ return io_pollfree_wake(req, poll);
/* for instances that support it check for an event match first */
if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))