aboutsummaryrefslogtreecommitdiff
path: root/io_uring
diff options
context:
space:
mode:
author: Jens Axboe, 2022-05-27 10:55:07 -0600
committer: Jens Axboe, 2022-07-24 18:39:12 -0600
commit: b9ba8a4463cd78d0aee520c4bf2569820ac29929 (patch)
tree: 7ffcd2042e9e3bec8f314b0e3b997cca4613b591 /io_uring
parent: d9b57aa3cfc792ccac6858376c017dbea6cb2872 (diff)
io_uring: add support for level triggered poll
By default, the POLL_ADD command does edge triggered poll - if we get a non-zero mask on the initial poll attempt, we complete the request successfully. Support level triggered by always waiting for a notification, regardless of whether or not the initial mask matches the file state. Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'io_uring')
-rw-r--r--io_uring/poll.c15
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/io_uring/poll.c b/io_uring/poll.c
index b80f7fa26123..558dc170468a 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -423,11 +423,13 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
atomic_set(&req->poll_refs, 1);
mask = vfs_poll(req->file, &ipt->pt) & poll->events;
- if (mask && (poll->events & EPOLLONESHOT)) {
+ if (mask &&
+ ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
io_poll_remove_entries(req);
/* no one else has access to the req, forget about the ref */
return mask;
}
+
if (!mask && unlikely(ipt->error || !ipt->nr_entries)) {
io_poll_remove_entries(req);
if (!ipt->error)
@@ -439,7 +441,7 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
io_poll_req_insert(req);
spin_unlock(&ctx->completion_lock);
- if (mask) {
+ if (mask && (poll->events & EPOLLET)) {
/* can't multishot if failed, just queue the event we've got */
if (unlikely(ipt->error || !ipt->nr_entries)) {
poll->events |= EPOLLONESHOT;
@@ -475,7 +477,7 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
struct io_ring_ctx *ctx = req->ctx;
struct async_poll *apoll;
struct io_poll_table ipt;
- __poll_t mask = POLLPRI | POLLERR;
+ __poll_t mask = POLLPRI | POLLERR | EPOLLET;
int ret;
if (!def->pollin && !def->pollout)
@@ -638,7 +640,10 @@ static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
#endif
if (!(flags & IORING_POLL_ADD_MULTI))
events |= EPOLLONESHOT;
- return demangle_poll(events) | (events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
+ if (!(flags & IORING_POLL_ADD_LEVEL))
+ events |= EPOLLET;
+ return demangle_poll(events) |
+ (events & (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET));
}
int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
@@ -679,7 +684,7 @@ int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (sqe->buf_index || sqe->off || sqe->addr)
return -EINVAL;
flags = READ_ONCE(sqe->len);
- if (flags & ~IORING_POLL_ADD_MULTI)
+ if (flags & ~(IORING_POLL_ADD_MULTI|IORING_POLL_ADD_LEVEL))
return -EINVAL;
if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
return -EINVAL;