author     Bart Van Assche  2022-08-15 10:00:43 -0700
committer  Jens Axboe       2022-08-22 10:07:53 -0600
commit     a4e1d0b76e7b32c0839e72679c530445172a2564 (patch)
tree       cdb61b21f077be9e0ace4ebdb8dc6aa5163c8d97 /block
parent     10b41ea15e81a5597bc5944a7900e9a790bd984a (diff)
block: Change the return type of blk_mq_map_queues() into void
Since blk_mq_map_queues() and the .map_queues() callbacks always return 0,
change their return type into void. Most callers ignore the returned value
anyway.

Cc: Christoph Hellwig <hch@lst.de>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Keith Busch <kbusch@kernel.org>
Cc: Martin K. Petersen <martin.petersen@oracle.com>
Cc: Doug Gilbert <dgilbert@interlog.com>
Cc: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: John Garry <john.garry@huawei.com>
Acked-by: Md Haris Iqbal <haris.iqbal@ionos.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Link: https://lore.kernel.org/r/20220815170043.19489-3-bvanassche@acm.org
[axboe: fold in fix from Bart]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
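For quick reference, a minimal before/after sketch of the affected
prototypes. The struct blk_mq_ops side lives in include/linux/blk-mq.h
and is outside the block/ diffstat shown below, so that part is
reconstructed here from the commit message rather than taken from the
diff:

    /* Before this commit (always returned 0): */
    int blk_mq_map_queues(struct blk_mq_queue_map *qmap);

    /* After this commit: */
    void blk_mq_map_queues(struct blk_mq_queue_map *qmap);

    /* The .map_queues() member of struct blk_mq_ops changes the same
     * way; the callback takes the tag set, as seen in
     * blk_mq_update_queue_map() below: */
    void (*map_queues)(struct blk_mq_tag_set *set);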
Diffstat (limited to 'block')
-rw-r--r--  block/blk-mq-cpumap.c |  4
-rw-r--r--  block/blk-mq-pci.c    |  7
-rw-r--r--  block/blk-mq-rdma.c   |  6
-rw-r--r--  block/blk-mq-virtio.c |  7
-rw-r--r--  block/blk-mq.c        | 10
5 files changed, 15 insertions, 19 deletions
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 3db84d3197f1..9c2fce1a7b50 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -32,7 +32,7 @@ static int get_first_sibling(unsigned int cpu)
return cpu;
}
-int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
+void blk_mq_map_queues(struct blk_mq_queue_map *qmap)
{
unsigned int *map = qmap->mq_map;
unsigned int nr_queues = qmap->nr_queues;
@@ -70,8 +70,6 @@ int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
map[cpu] = map[first_sibling];
}
}
-
- return 0;
}
EXPORT_SYMBOL_GPL(blk_mq_map_queues);
diff --git a/block/blk-mq-pci.c b/block/blk-mq-pci.c
index b595a94c4d16..a90b88fd1332 100644
--- a/block/blk-mq-pci.c
+++ b/block/blk-mq-pci.c
@@ -23,8 +23,8 @@
* that maps a queue to the CPUs that have irq affinity for the corresponding
* vector.
*/
-int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
- int offset)
+void blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
+ int offset)
{
const struct cpumask *mask;
unsigned int queue, cpu;
@@ -38,11 +38,10 @@ int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
qmap->mq_map[cpu] = qmap->queue_offset + queue;
}
- return 0;
+ return;
fallback:
WARN_ON_ONCE(qmap->nr_queues > 1);
blk_mq_clear_mq_map(qmap);
- return 0;
}
EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
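As a usage note, a hypothetical driver-side sketch (not part of this
commit; foo_dev, its tag_set/pdev members, and foo_map_queues are
made-up names) of what a PCI driver's .map_queues callback looks like
under the new void contract:

    /* Hypothetical callback: nothing to return or check any more. */
    static void foo_map_queues(struct blk_mq_tag_set *set)
    {
    	struct foo_dev *foo = container_of(set, struct foo_dev, tag_set);

    	/* The helper falls back to blk_mq_clear_mq_map() internally
    	 * instead of reporting an error, so no status propagates up. */
    	blk_mq_pci_map_queues(&set->map[HCTX_TYPE_DEFAULT], foo->pdev, 0);
    }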
diff --git a/block/blk-mq-rdma.c b/block/blk-mq-rdma.c
index 14f968e58b8f..29c1f4d6eb04 100644
--- a/block/blk-mq-rdma.c
+++ b/block/blk-mq-rdma.c
@@ -21,7 +21,7 @@
* @set->nr_hw_queues, or @dev does not provide an affinity mask for a
* vector, we fallback to the naive mapping.
*/
-int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
+void blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
struct ib_device *dev, int first_vec)
{
const struct cpumask *mask;
@@ -36,9 +36,9 @@ int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
map->mq_map[cpu] = map->queue_offset + queue;
}
- return 0;
+ return;
fallback:
- return blk_mq_map_queues(map);
+ blk_mq_map_queues(map);
}
EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues);
diff --git a/block/blk-mq-virtio.c b/block/blk-mq-virtio.c
index 7b8a42c35102..6589f076a096 100644
--- a/block/blk-mq-virtio.c
+++ b/block/blk-mq-virtio.c
@@ -21,7 +21,7 @@
* that maps a queue to the CPUs that have irq affinity for the corresponding
* vector.
*/
-int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
+void blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
struct virtio_device *vdev, int first_vec)
{
const struct cpumask *mask;
@@ -39,8 +39,9 @@ int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
qmap->mq_map[cpu] = qmap->queue_offset + queue;
}
- return 0;
+ return;
+
fallback:
- return blk_mq_map_queues(qmap);
+ blk_mq_map_queues(qmap);
}
EXPORT_SYMBOL_GPL(blk_mq_virtio_map_queues);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3c1e6b6d991d..4b90d2d8cfb0 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4190,7 +4190,7 @@ static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
return 0;
}
-static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
+static void blk_mq_update_queue_map(struct blk_mq_tag_set *set)
{
/*
* blk_mq_map_queues() and multiple .map_queues() implementations
@@ -4220,10 +4220,10 @@ static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
for (i = 0; i < set->nr_maps; i++)
blk_mq_clear_mq_map(&set->map[i]);
- return set->ops->map_queues(set);
+ set->ops->map_queues(set);
} else {
BUG_ON(set->nr_maps > 1);
- return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
+ blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
}
}
@@ -4322,9 +4322,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
}
- ret = blk_mq_update_queue_map(set);
- if (ret)
- goto out_free_mq_map;
+ blk_mq_update_queue_map(set);
ret = blk_mq_alloc_set_map_and_rqs(set);
if (ret)