author		Christoph Hellwig	2020-06-11 08:44:50 +0200
committer	Jens Axboe		2020-06-24 09:15:57 -0600
commit		40d09b53bfc557af7481b9d80f060a7ac9c7d314 (patch)
tree		9fb094b5a9a6db3728baad590c972c197e72ec6b /block/blk-mq.c
parent		963395269c758641e1cb7208f3bdce6824ea608d (diff)
blk-mq: add a new blk_mq_complete_request_remote API
This is a variant of blk_mq_complete_request that only completes
the request if it needs to be bounced to another CPU or a softirq. If
the request can be completed locally, the function returns false and lets
the driver complete it without requiring an indirect function call.
Reviewed-by: Daniel Wagner <dwagner@suse.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
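
As a usage sketch (hypothetical driver code, not part of this patch; the
mydrv_* names are illustrative), a driver's completion path can now look
like the following. The same handler is still wired up as ->complete in
the driver's blk_mq_ops, so the remote IPI/softirq path reaches it too:

#include <linux/blk-mq.h>

/* Invoked via ->complete for bounced completions, and directly below. */
static void mydrv_complete_rq(struct request *rq)
{
	/* a real driver would translate its own hardware status here */
	blk_mq_end_request(rq, BLK_STS_OK);
}

/* Hypothetical interrupt-handler path that found a finished request. */
static void mydrv_handle_completion(struct request *rq)
{
	/*
	 * blk_mq_complete_request_remote() returns true if the completion
	 * was bounced to another CPU or deferred to a softirq; false means
	 * we may finish the request right here, skipping the indirect
	 * ->complete() call.
	 */
	if (!blk_mq_complete_request_remote(rq))
		mydrv_complete_rq(rq);
}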
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--	block/blk-mq.c	45
1 file changed, 26 insertions(+), 19 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 961635b40999..b8738b3c6d06 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -632,8 +632,11 @@ static int blk_softirq_cpu_dead(unsigned int cpu)
 	return 0;
 }
 
-static void __blk_mq_complete_request(struct request *rq)
+
+static void __blk_mq_complete_request_remote(void *data)
 {
+	struct request *rq = data;
+
 	/*
 	 * For most of single queue controllers, there is only one irq vector
 	 * for handling I/O completion, and the only irq's affinity is set
@@ -649,11 +652,6 @@ static void __blk_mq_complete_request(struct request *rq)
 	rq->q->mq_ops->complete(rq);
 }
 
-static void __blk_mq_complete_request_remote(void *data)
-{
-	__blk_mq_complete_request(data);
-}
-
 static inline bool blk_mq_complete_need_ipi(struct request *rq)
 {
 	int cpu = raw_smp_processor_id();
@@ -672,14 +670,7 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq)
 	return cpu_online(rq->mq_ctx->cpu);
 }
 
-/**
- * blk_mq_complete_request - end I/O on a request
- * @rq:		the request being processed
- *
- * Description:
- *	Complete a request by scheduling the ->complete_rq operation.
- **/
-void blk_mq_complete_request(struct request *rq)
+bool blk_mq_complete_request_remote(struct request *rq)
 {
 	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
 
@@ -687,10 +678,8 @@ void blk_mq_complete_request(struct request *rq)
 	 * For a polled request, always complete locallly, it's pointless
 	 * to redirect the completion.
 	 */
-	if (rq->cmd_flags & REQ_HIPRI) {
-		rq->q->mq_ops->complete(rq);
-		return;
-	}
+	if (rq->cmd_flags & REQ_HIPRI)
+		return false;
 
 	if (blk_mq_complete_need_ipi(rq)) {
 		rq->csd.func = __blk_mq_complete_request_remote;
@@ -698,8 +687,26 @@ void blk_mq_complete_request(struct request *rq)
 		rq->csd.flags = 0;
 		smp_call_function_single_async(rq->mq_ctx->cpu, &rq->csd);
 	} else {
-		__blk_mq_complete_request(rq);
+		if (rq->q->nr_hw_queues > 1)
+			return false;
+		blk_mq_trigger_softirq(rq);
 	}
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);
+
+/**
+ * blk_mq_complete_request - end I/O on a request
+ * @rq:		the request being processed
+ *
+ * Description:
+ *	Complete a request by scheduling the ->complete_rq operation.
+ **/
+void blk_mq_complete_request(struct request *rq)
+{
+	if (!blk_mq_complete_request_remote(rq))
+		rq->q->mq_ops->complete(rq);
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 