author		Jens Axboe	2018-11-26 08:21:49 -0700
committer	Jens Axboe	2018-11-26 08:25:40 -0700
commit		1052b8ac5282daf35df331edcbdb645839d17e6a (patch)
tree		416e6bcaea468503613abeaf1e9705fea5aa94f9 /block
parent		1db4909e76f64a85f4aaa187f0f683f5c85a471d (diff)
blk-mq: when polling for IO, look for any completion
If we want to support async IO polling, then we have to allow finding
completions that aren't just for the one we are looking for. Always pass
in -1 to the mq_ops->poll() helper, and have that return how many events
were found in this poll loop.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
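For illustration, a minimal driver-side sketch of the new convention (not part
of this commit): ->poll() now receives -1U instead of a specific tag, so the
driver reaps every pending completion and returns the count. The mydrv_* queue
type and helpers below are hypothetical stand-ins.

	#include <linux/blk-mq.h>

	struct mydrv_queue;				/* hypothetical driver state */

	bool mydrv_cqe_pending(struct mydrv_queue *mq);	/* hypothetical helper */
	void mydrv_complete_one(struct mydrv_queue *mq);	/* hypothetical helper */

	static int mydrv_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
	{
		struct mydrv_queue *mq = hctx->driver_data;
		int found = 0;

		/*
		 * 'tag' is always -1U now: reap everything that has
		 * completed, not just one request, and report how many
		 * completions were found in this pass.
		 */
		while (mydrv_cqe_pending(mq)) {
			mydrv_complete_one(mq);
			found++;
		}

		return found;
	}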
Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	13
-rw-r--r--	block/blk-mq.c	71
2 files changed, 47 insertions, 37 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 04f5be473638..03c4202b69bf 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1273,10 +1273,19 @@ blk_qc_t submit_bio(struct bio *bio)
 }
 EXPORT_SYMBOL(submit_bio);
 
-bool blk_poll(struct request_queue *q, blk_qc_t cookie)
+/**
+ * blk_poll - poll for IO completions
+ * @q:  the queue
+ * @cookie: cookie passed back at IO submission time
+ *
+ * Description:
+ *    Poll for completions on the passed in queue. Returns number of
+ *    completed entries found.
+ */
+int blk_poll(struct request_queue *q, blk_qc_t cookie)
 {
 	if (!q->poll_fn || !blk_qc_t_valid(cookie))
-		return false;
+		return 0;
 
 	if (current->plug)
 		blk_flush_plug_list(current->plug, false);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b16204df65d1..ec6c79578332 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3285,15 +3285,12 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
 		return false;
 
 	/*
-	 * poll_nsec can be:
+	 * If we get here, hybrid polling is enabled. Hence poll_nsec can be:
 	 *
-	 * -1:	don't ever hybrid sleep
 	 *  0:	use half of prev avg
 	 * >0:	use this specific value
 	 */
-	if (q->poll_nsec == -1)
-		return false;
-	else if (q->poll_nsec > 0)
+	if (q->poll_nsec > 0)
 		nsecs = q->poll_nsec;
 	else
 		nsecs = blk_mq_poll_nsecs(q, hctx, rq);
@@ -3330,11 +3327,41 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
 	return true;
 }
 
-static int __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
+static bool blk_mq_poll_hybrid(struct request_queue *q,
+			       struct blk_mq_hw_ctx *hctx, blk_qc_t cookie)
 {
-	struct request_queue *q = hctx->queue;
+	struct request *rq;
+
+	if (q->poll_nsec == -1)
+		return false;
+
+	if (!blk_qc_t_is_internal(cookie))
+		rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
+	else {
+		rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
+		/*
+		 * With scheduling, if the request has completed, we'll
+		 * get a NULL return here, as we clear the sched tag when
+		 * that happens. The request still remains valid, like always,
+		 * so we should be safe with just the NULL check.
+		 */
+		if (!rq)
+			return false;
+	}
+
+	return blk_mq_poll_hybrid_sleep(q, hctx, rq);
+}
+
+static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
+{
+	struct blk_mq_hw_ctx *hctx;
 	long state;
 
+	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
+		return 0;
+
+	hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
+
 	/*
 	 * If we sleep, have the caller restart the poll loop to reset
 	 * the state. Like for the other success return cases, the
@@ -3342,7 +3369,7 @@ static int __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
 	 * the IO isn't complete, we'll get called again and will go
 	 * straight to the busy poll loop.
 	 */
-	if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
+	if (blk_mq_poll_hybrid(q, hctx, cookie))
 		return 1;
 
 	hctx->poll_considered++;
@@ -3353,7 +3380,7 @@ static int __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
 
 		hctx->poll_invoked++;
 
-		ret = q->mq_ops->poll(hctx, rq->tag);
+		ret = q->mq_ops->poll(hctx, -1U);
 		if (ret > 0) {
 			hctx->poll_success++;
 			__set_current_state(TASK_RUNNING);
@@ -3374,32 +3401,6 @@ static int __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
 	return 0;
 }
 
-static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
-{
-	struct blk_mq_hw_ctx *hctx;
-	struct request *rq;
-
-	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
-		return 0;
-
-	hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
-	if (!blk_qc_t_is_internal(cookie))
-		rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
-	else {
-		rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
-		/*
-		 * With scheduling, if the request has completed, we'll
-		 * get a NULL return here, as we clear the sched tag when
-		 * that happens. The request still remains valid, like always,
-		 * so we should be safe with just the NULL check.
-		 */
-		if (!rq)
-			return 0;
-	}
-
-	return __blk_mq_poll(hctx, rq);
-}
-
 unsigned int blk_mq_rq_cpu(struct request *rq)
 {
 	return rq->mq_ctx->cpu;
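For context only (also not from this commit), a hypothetical synchronous
submitter polling on its own cookie: with the int return, any positive value
means completions were reaped, and the caller re-checks whether its own IO
was among them before deciding to sleep. 'done', 'q' and 'cookie' below are
stand-in names.

	/* hypothetical polling loop in a synchronous submitter */
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (READ_ONCE(done))		/* our own IO finished */
			break;
		if (blk_poll(q, cookie) <= 0)	/* nothing reaped this pass */
			io_schedule();
	}
	__set_current_state(TASK_RUNNING);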