From bccf5e26d99c28980bd6ced474422a1b18402263 Mon Sep 17 00:00:00 2001
From: John Garry
Date: Wed, 19 Aug 2020 23:20:26 +0800
Subject: blk-mq: Record nr_active_requests per queue for when using shared sbitmap

The per-hctx nr_active value can no longer be used to fairly assign a
share of tag depth per request queue for when using a shared sbitmap,
as it does not consider that the tags are shared tags over all hctx's.

For this case, record the nr_active_requests per request_queue, and
make the judgement based on that value.

Co-developed-with: Kashyap Desai
Signed-off-by: John Garry
Tested-by: Don Brace #SCSI resv cmds patches used
Tested-by: Douglas Gilbert
Signed-off-by: Jens Axboe
---
 block/blk-core.c |  2 ++
 block/blk-mq.c   |  4 ++--
 block/blk-mq.h   | 26 ++++++++++++++++++++++++--
 3 files changed, 28 insertions(+), 4 deletions(-)

(limited to 'block')

diff --git a/block/blk-core.c b/block/blk-core.c
index 9f2a99abeeb9..093649bd252e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -543,6 +543,8 @@ struct request_queue *blk_alloc_queue(int node_id)
 	q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
 	q->node = node_id;
 
+	atomic_set(&q->nr_active_requests_shared_sbitmap, 0);
+
 	timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
 		    laptop_mode_timer_fn, 0);
 	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 5e8fe795edf1..ffc5ad0c91b7 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -519,7 +519,7 @@ void blk_mq_free_request(struct request *rq)
 
 	ctx->rq_completed[rq_is_sync(rq)]++;
 	if (rq->rq_flags & RQF_MQ_INFLIGHT)
-		atomic_dec(&hctx->nr_active);
+		__blk_mq_dec_active_requests(hctx);
 
 	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
 		laptop_io_completion(q->backing_dev_info);
@@ -1127,7 +1127,7 @@ static bool blk_mq_get_driver_tag(struct request *rq)
 	if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
 			!(rq->rq_flags & RQF_MQ_INFLIGHT)) {
 		rq->rq_flags |= RQF_MQ_INFLIGHT;
-		atomic_inc(&hctx->nr_active);
+		__blk_mq_inc_active_requests(hctx);
 	}
 	hctx->tags->rqs[rq->tag] = rq;
 	return true;
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 56dc37c21908..25ec73078e95 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -199,6 +199,28 @@ static inline bool blk_mq_get_dispatch_budget(struct request_queue *q)
 	return true;
 }
 
+static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
+{
+	if (blk_mq_is_sbitmap_shared(hctx->flags))
+		atomic_inc(&hctx->queue->nr_active_requests_shared_sbitmap);
+	else
+		atomic_inc(&hctx->nr_active);
+}
+
+static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
+{
+	if (blk_mq_is_sbitmap_shared(hctx->flags))
+		atomic_dec(&hctx->queue->nr_active_requests_shared_sbitmap);
+	else
+		atomic_dec(&hctx->nr_active);
+}
+
+static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
+{
+	if (blk_mq_is_sbitmap_shared(hctx->flags))
+		return atomic_read(&hctx->queue->nr_active_requests_shared_sbitmap);
+	return atomic_read(&hctx->nr_active);
+}
 static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
 					   struct request *rq)
 {
@@ -207,7 +229,7 @@ static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
 
 	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
 		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
-		atomic_dec(&hctx->nr_active);
+		__blk_mq_dec_active_requests(hctx);
 	}
 }
 
@@ -287,7 +309,7 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 	 * Allow at least some tags
 	 */
 	depth = max((bt->sb.depth + users - 1) / users, 4U);
-	return atomic_read(&hctx->nr_active) < depth;
+	return __blk_mq_active_requests(hctx) < depth;
 }
--
cgit v1.2.3
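
The sketch below is a minimal standalone model of the fairness check that hctx_may_queue() performs after this patch: when the sbitmap is shared, the active-request count comes from a single per-request_queue counter rather than the per-hctx one, and the tag depth is split evenly across active users with a floor of 4 tags. The "model_" structs and functions and the standalone main() are hypothetical stand-ins for illustration only, not kernel code; the real patch uses atomics updated via __blk_mq_inc_active_requests() / __blk_mq_dec_active_requests(), while this model uses plain ints for brevity.

/*
 * Standalone userspace model (not kernel code) of the fairness check in
 * hctx_may_queue() after this patch: with a shared sbitmap the active-request
 * count is read from a per-request_queue counter instead of the per-hctx one.
 * All "model_" names are made up for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct model_queue {
	int nr_active_requests_shared_sbitmap;	/* per-request_queue counter */
};

struct model_hctx {
	struct model_queue *queue;
	bool shared_sbitmap;	/* stands in for blk_mq_is_sbitmap_shared() */
	int nr_active;		/* per-hctx counter, used when tags are not shared */
};

/* Counterpart of __blk_mq_active_requests(): pick the right counter. */
static int model_active_requests(const struct model_hctx *hctx)
{
	if (hctx->shared_sbitmap)
		return hctx->queue->nr_active_requests_shared_sbitmap;
	return hctx->nr_active;
}

/*
 * Counterpart of the depth-sharing judgement in hctx_may_queue(): give each
 * active user a fair slice of the tag space, but allow at least 4 tags.
 */
static bool model_may_queue(const struct model_hctx *hctx,
			    unsigned int tag_depth, unsigned int users)
{
	unsigned int depth;

	if (!users)
		return true;
	depth = (tag_depth + users - 1) / users;
	if (depth < 4)
		depth = 4;
	return (unsigned int)model_active_requests(hctx) < depth;
}

int main(void)
{
	struct model_queue q = { .nr_active_requests_shared_sbitmap = 40 };
	struct model_hctx hctx = { .queue = &q, .shared_sbitmap = true, .nr_active = 0 };

	/* 128 shared tags split over 4 active queues -> 32 per queue;
	 * 40 active requests on this queue already exceeds its share. */
	printf("may_queue: %s\n", model_may_queue(&hctx, 128, 4) ? "yes" : "no");
	return 0;
}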