author	Bart Van Assche	2019-10-25 09:50:09 -0700
committer	Jens Axboe	2019-10-25 14:12:18 -0600
commit	ac0d6b926e741f328b23c8af0134312af7c032d9 (patch)
tree	86ed35737eabb038778c10c87714d0ac1041fc7e /block
parent	a9a808084d6ac015ac68b4223ac5f6ef31f8076e (diff)
block: Reduce the amount of memory required per request queue
Instead of always allocating at least nr_cpu_ids hardware queues per request
queue, reallocate q->queue_hw_ctx if it has to grow. This patch improves
behavior that was introduced by commit 868f2f0b7206 ("blk-mq: dynamic h/w
context count").

Cc: Keith Busch <keith.busch@intel.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
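The core of the change is a grow-only copy-and-swap of the hctx pointer
array. Below is a minimal userspace sketch of that pattern; all names in it
(struct queue, queue_grow_hctxs) are illustrative rather than kernel API, and
calloc() stands in for kcalloc_node().

/*
 * Sketch of the grow-only reallocation pattern the patch applies to
 * q->queue_hw_ctx. Hypothetical names; not the kernel implementation.
 */
#include <stdlib.h>
#include <string.h>

struct hw_ctx { int id; };

struct queue {
	struct hw_ctx **hctxs;   /* like q->queue_hw_ctx */
	unsigned int nr;         /* like q->nr_hw_queues */
};

static int queue_grow_hctxs(struct queue *q, unsigned int new_nr)
{
	struct hw_ctx **new_hctxs;

	if (q->nr >= new_nr)
		return 0;        /* the array only ever grows */

	new_hctxs = calloc(new_nr, sizeof(*new_hctxs));
	if (!new_hctxs)
		return -1;       /* on failure the old array stays valid */
	if (q->hctxs)
		memcpy(new_hctxs, q->hctxs, q->nr * sizeof(*q->hctxs));
	free(q->hctxs);
	q->hctxs = new_hctxs;    /* swap in the larger array */
	return 0;
}

int main(void)
{
	struct queue q = { 0 };

	if (queue_grow_hctxs(&q, 2))   /* start small, not at nr_cpu_ids */
		return 1;
	q.nr = 2;
	if (queue_grow_hctxs(&q, 8))   /* grow: both live pointers are copied */
		return 1;
	q.nr = 8;
	free(q.hctxs);
	return 0;
}

As in the patch, the copy happens before the swap and the old array is freed
only afterwards, so a failed allocation leaves the queue untouched.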
Diffstat (limited to 'block')
-rw-r--r--	block/blk-mq.c	24
1 file changed, 17 insertions(+), 7 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 7528678ef41f..ba09cda49953 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2761,6 +2761,22 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 	int i, j, end;
 	struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
 
+	if (q->nr_hw_queues < set->nr_hw_queues) {
+		struct blk_mq_hw_ctx **new_hctxs;
+
+		new_hctxs = kcalloc_node(set->nr_hw_queues,
+				       sizeof(*new_hctxs), GFP_KERNEL,
+				       set->numa_node);
+		if (!new_hctxs)
+			return;
+		if (hctxs)
+			memcpy(new_hctxs, hctxs, q->nr_hw_queues *
+			       sizeof(*hctxs));
+		q->queue_hw_ctx = new_hctxs;
+		kfree(hctxs);
+		hctxs = new_hctxs;
+	}
+
 	/* protect against switching io scheduler */
 	mutex_lock(&q->sysfs_lock);
 	for (i = 0; i < set->nr_hw_queues; i++) {
@@ -2848,12 +2864,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	/* init q->mq_kobj and sw queues' kobjects */
 	blk_mq_sysfs_init(q);
 
-	q->queue_hw_ctx = kcalloc_node(nr_hw_queues(set),
-				     sizeof(*(q->queue_hw_ctx)), GFP_KERNEL,
-				     set->numa_node);
-	if (!q->queue_hw_ctx)
-		goto err_sys_init;
-
 	INIT_LIST_HEAD(&q->unused_hctx_list);
 	spin_lock_init(&q->unused_hctx_lock);
@@ -2901,7 +2911,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 err_hctxs:
 	kfree(q->queue_hw_ctx);
-err_sys_init:
+	q->nr_hw_queues = 0;
 	blk_mq_sysfs_deinit(q);
 err_poll:
 	blk_stat_free_callback(q->poll_cb);