author		Linus Torvalds	2022-05-23 13:56:39 -0700
committer	Linus Torvalds	2022-05-23 13:56:39 -0700
commit115cd47132d71bd7e4aa1093e15d861a59e73a94 (patch)
tree42e457126de728c9328e4b9b09b5ca4852a590de /drivers/md
parentf6792c877a1cacc3b3eea7cb5b45857b3c484c51 (diff)
parent2aaf516084184e4e6f80da01b2b3ed882fd20a79 (diff)
Merge tag 'for-5.19/block-2022-05-22' of git://git.kernel.dk/linux-block
Pull block updates from Jens Axboe:
 "Here are the core block changes for 5.19. This contains:

   - blk-throttle accounting fix (Laibin)

   - Series removing redundant assignments (Michal)

   - Expose bio cache via the bio_set, so that DM can use it (Mike)

   - Finish off the bio allocation interface cleanups by dealing with
     the weirdest member of the family. bio_kmalloc combines a kmalloc
     for the bio and bio_vecs with a hidden bio_init call and magic
     cleanup semantics (Christoph)

   - Clean up the block layer API so that APIs consumed by file systems
     are (almost) only struct block_device based, so that file systems
     don't have to poke into block layer internals like the
     request_queue (Christoph)

   - Clean up the blk_execute_rq* API (Christoph)

   - Clean up various loose ends in the blk-cgroup code to make it
     easier to follow in preparation of reworking the blkcg assignment
     for bios (Christoph)

   - Fix use-after-free issues in BFQ when processes with merged queues
     get moved to different cgroups (Jan)

   - BFQ fixes (Jan)

   - Various fixes and cleanups (Bart, Chengming, Fanjun, Julia, Ming,
     Wolfgang, me)"

* tag 'for-5.19/block-2022-05-22' of git://git.kernel.dk/linux-block: (83 commits)
  blk-mq: fix typo in comment
  bfq: Remove bfq_requeue_request_body()
  bfq: Remove superfluous conversion from RQ_BIC()
  bfq: Allow current waker to defend against a tentative one
  bfq: Relax waker detection for shared queues
  blk-cgroup: delete rcu_read_lock_held() WARN_ON_ONCE()
  blk-throttle: Set BIO_THROTTLED when bio has been throttled
  blk-cgroup: Remove unnecessary rcu_read_lock/unlock()
  blk-cgroup: always terminate io.stat lines
  block, bfq: make bfq_has_work() more accurate
  block, bfq: protect 'bfqd->queued' by 'bfqd->lock'
  block: cleanup the VM accounting in submit_bio
  block: Fix the bio.bi_opf comment
  block: reorder the REQ_ flags
  blk-iocost: combine local_stat and desc_stat to stat
  block: improve the error message from bio_check_eod
  block: allow passing a NULL bdev to bio_alloc_clone/bio_init_clone
  block: remove superfluous calls to blkcg_bio_issue_init
  kthread: unexport kthread_blkcg
  blk-cgroup: cleanup blkcg_maybe_throttle_current
  ...
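The bio_kmalloc rework mentioned above is the change that reshapes most of the
md hunks below: bio_kmalloc() now only allocates (taking the vec count first
and the gfp mask second), the caller runs bio_init() against the inline vecs
itself, and teardown becomes bio_uninit() plus kfree() instead of bio_put().
A minimal sketch of the new convention, distilled from the debug.c and
dm-bufio.c hunks in this diff (nr_vecs and bdev are placeholders for the
caller's values):

	struct bio *bio;

	bio = bio_kmalloc(nr_vecs, GFP_NOIO);	/* bare allocation, no init */
	if (!bio)
		return;
	bio_init(bio, bdev, bio->bi_inline_vecs, nr_vecs, REQ_OP_READ);

	/* ... fill, submit, wait ... */

	bio_uninit(bio);	/* kmalloc'ed bios must not be bio_put() */
	kfree(bio);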
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/bcache/alloc.c         2
-rw-r--r--  drivers/md/bcache/debug.c        10
-rw-r--r--  drivers/md/bcache/request.c       4
-rw-r--r--  drivers/md/bcache/super.c         3
-rw-r--r--  drivers/md/bcache/sysfs.c         2
-rw-r--r--  drivers/md/dm-bufio.c             9
-rw-r--r--  drivers/md/dm-cache-target.c      9
-rw-r--r--  drivers/md/dm-clone-target.c      9
-rw-r--r--  drivers/md/dm-io.c                2
-rw-r--r--  drivers/md/dm-log-writes.c        3
-rw-r--r--  drivers/md/dm-raid.c              9
-rw-r--r--  drivers/md/dm-table.c            25
-rw-r--r--  drivers/md/dm-thin.c             15
-rw-r--r--  drivers/md/dm.c                   3
-rw-r--r--  drivers/md/md-linear.c           11
-rw-r--r--  drivers/md/md.c                   5
-rw-r--r--  drivers/md/raid0.c                7
-rw-r--r--  drivers/md/raid1.c               30
-rw-r--r--  drivers/md/raid10.c              41
-rw-r--r--  drivers/md/raid5-cache.c          8
-rw-r--r--  drivers/md/raid5.c               14
21 files changed, 71 insertions, 150 deletions
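The other conversion that runs through every file below swaps request_queue
flag checks for the new struct block_device based helpers, so drivers stop
reaching through bdev_get_queue(). Roughly, the pattern is (a sketch, not a
hunk from this diff):

	/* old: poke at the queue behind the bdev */
	if (blk_queue_discard(bdev_get_queue(bdev)))
		issue_discards();

	/* new: ask the bdev directly; the limit reads 0 when discard is
	 * unsupported, so the same call doubles as the support check */
	if (bdev_max_discard_sectors(bdev))
		issue_discards();

	/* the other bdev-based helpers appearing in this diff */
	bdev_nonrot(bdev);			/* was blk_queue_nonrot(q) */
	bdev_stable_writes(bdev);		/* was blk_queue_stable_writes(q) */
	bdev_max_secure_erase_sectors(bdev);	/* was blk_queue_secure_erase(q) */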
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 097577ae3c47..ce13c272c387 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -336,7 +336,7 @@ static int bch_allocator_thread(void *arg)
mutex_unlock(&ca->set->bucket_lock);
blkdev_issue_discard(ca->bdev,
bucket_to_sector(ca->set, bucket),
- ca->sb.bucket_size, GFP_KERNEL, 0);
+ ca->sb.bucket_size, GFP_KERNEL);
mutex_lock(&ca->set->bucket_lock);
}
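The hunk above also shows blkdev_issue_discard() losing its trailing flags
argument; the same applies to the __blkdev_issue_discard() calls in dm-thin.c,
md.c and raid5-cache.c below. With secure erase split out into its own
operation in 5.19, the plain discard path takes no flags, leaving these
prototypes (reconstructed from the call sites; see include/linux/blkdev.h for
the authoritative versions):

	int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
				 sector_t nr_sects, gfp_t gfp_mask);
	int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
				   sector_t nr_sects, gfp_t gfp_mask,
				   struct bio **biop);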
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 6230dfdd9286..7510d1c983a5 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -107,15 +107,16 @@ void bch_btree_verify(struct btree *b)
void bch_data_verify(struct cached_dev *dc, struct bio *bio)
{
+ unsigned int nr_segs = bio_segments(bio);
struct bio *check;
struct bio_vec bv, cbv;
struct bvec_iter iter, citer = { 0 };
- check = bio_kmalloc(GFP_NOIO, bio_segments(bio));
+ check = bio_kmalloc(nr_segs, GFP_NOIO);
if (!check)
return;
- bio_set_dev(check, bio->bi_bdev);
- check->bi_opf = REQ_OP_READ;
+ bio_init(check, bio->bi_bdev, check->bi_inline_vecs, nr_segs,
+ REQ_OP_READ);
check->bi_iter.bi_sector = bio->bi_iter.bi_sector;
check->bi_iter.bi_size = bio->bi_iter.bi_size;
@@ -146,7 +147,8 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
bio_free_pages(check);
out_put:
- bio_put(check);
+ bio_uninit(check);
+ kfree(check);
}
#endif
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 320fcdfef48e..9c5dde73da88 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -1005,7 +1005,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
bio_get(s->iop.bio);
if (bio_op(bio) == REQ_OP_DISCARD &&
- !blk_queue_discard(bdev_get_queue(dc->bdev)))
+ !bdev_max_discard_sectors(dc->bdev))
goto insert_data;
/* I/O request sent to backing device */
@@ -1115,7 +1115,7 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio,
bio->bi_private = ddip;
if ((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(dc->bdev)))
+ !bdev_max_discard_sectors(dc->bdev))
bio->bi_end_io(bio);
else
submit_bio_noacct(bio);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index bf3de149d3c9..2f49e31142f6 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -973,7 +973,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, d->disk->queue);
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, d->disk->queue);
blk_queue_write_cache(q, true, true);
@@ -2350,7 +2349,7 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
ca->bdev->bd_holder = ca;
ca->sb_disk = sb_disk;
- if (blk_queue_discard(bdev_get_queue(bdev)))
+ if (bdev_max_discard_sectors(bdev))
ca->discard = CACHE_DISCARD(&ca->sb);
ret = cache_alloc(ca);
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index d1029d71ff3b..c6f677059214 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -1151,7 +1151,7 @@ STORE(__bch_cache)
if (attr == &sysfs_discard) {
bool v = strtoul_or_return(buf);
- if (blk_queue_discard(bdev_get_queue(ca->bdev)))
+ if (bdev_max_discard_sectors(ca->bdev))
ca->discard = v;
if (v != CACHE_DISCARD(&ca->sb)) {
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index e9cbc70d5a0e..5ffa1dcf84cf 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -611,7 +611,8 @@ static void bio_complete(struct bio *bio)
{
struct dm_buffer *b = bio->bi_private;
blk_status_t status = bio->bi_status;
- bio_put(bio);
+ bio_uninit(bio);
+ kfree(bio);
b->end_io(b, status);
}
@@ -626,16 +627,14 @@ static void use_bio(struct dm_buffer *b, int rw, sector_t sector,
if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT))
vec_size += 2;
- bio = bio_kmalloc(GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN, vec_size);
+ bio = bio_kmalloc(vec_size, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
if (!bio) {
dmio:
use_dmio(b, rw, sector, n_sectors, offset);
return;
}
-
+ bio_init(bio, b->c->bdev, bio->bi_inline_vecs, vec_size, rw);
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, b->c->bdev);
- bio_set_op_attrs(bio, rw, 0);
bio->bi_end_io = bio_complete;
bio->bi_private = b;
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 780a61bc6cc0..28c5de8eca4a 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -3329,13 +3329,6 @@ static int cache_iterate_devices(struct dm_target *ti,
return r;
}
-static bool origin_dev_supports_discard(struct block_device *origin_bdev)
-{
- struct request_queue *q = bdev_get_queue(origin_bdev);
-
- return blk_queue_discard(q);
-}
-
/*
* If discard_passdown was enabled verify that the origin device
* supports discards. Disable discard_passdown if not.
@@ -3349,7 +3342,7 @@ static void disable_passdown_if_not_supported(struct cache *cache)
if (!cache->features.discard_passdown)
return;
- if (!origin_dev_supports_discard(origin_bdev))
+ if (!bdev_max_discard_sectors(origin_bdev))
reason = "discard unsupported";
else if (origin_limits->max_discard_sectors < cache->sectors_per_block)
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index 128316a73d01..811b0a5379d0 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -2016,13 +2016,6 @@ static void clone_resume(struct dm_target *ti)
do_waker(&clone->waker.work);
}
-static bool bdev_supports_discards(struct block_device *bdev)
-{
- struct request_queue *q = bdev_get_queue(bdev);
-
- return (q && blk_queue_discard(q));
-}
-
/*
* If discard_passdown was enabled verify that the destination device supports
* discards. Disable discard_passdown if not.
@@ -2036,7 +2029,7 @@ static void disable_passdown_if_not_supported(struct clone *clone)
if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags))
return;
- if (!bdev_supports_discards(dest_dev))
+ if (!bdev_max_discard_sectors(dest_dev))
reason = "discard unsupported";
else if (dest_limits->max_discard_sectors < clone->region_size)
reason = "max discard sectors smaller than a region";
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 5762366333a2..e4b95eaeec8c 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -311,7 +311,7 @@ static void do_region(int op, int op_flags, unsigned region,
* Reject unsupported discard and write same requests.
*/
if (op == REQ_OP_DISCARD)
- special_cmd_max_sectors = q->limits.max_discard_sectors;
+ special_cmd_max_sectors = bdev_max_discard_sectors(where->bdev);
else if (op == REQ_OP_WRITE_ZEROES)
special_cmd_max_sectors = q->limits.max_write_zeroes_sectors;
if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) &&
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index c9d036d6bb2e..e194226c89e5 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -866,9 +866,8 @@ static int log_writes_message(struct dm_target *ti, unsigned argc, char **argv,
static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
struct log_writes_c *lc = ti->private;
- struct request_queue *q = bdev_get_queue(lc->dev->bdev);
- if (!q || !blk_queue_discard(q)) {
+ if (!bdev_max_discard_sectors(lc->dev->bdev)) {
lc->device_supports_discard = false;
limits->discard_granularity = lc->sectorsize;
limits->max_discard_sectors = (UINT_MAX >> SECTOR_SHIFT);
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 2b26435a6946..9526ccbedafb 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -2963,13 +2963,8 @@ static void configure_discard_support(struct raid_set *rs)
raid456 = rs_is_raid456(rs);
for (i = 0; i < rs->raid_disks; i++) {
- struct request_queue *q;
-
- if (!rs->dev[i].rdev.bdev)
- continue;
-
- q = bdev_get_queue(rs->dev[i].rdev.bdev);
- if (!q || !blk_queue_discard(q))
+ if (!rs->dev[i].rdev.bdev ||
+ !bdev_max_discard_sectors(rs->dev[i].rdev.bdev))
return;
if (raid456) {
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 03541cfc2317..e7d42f6335a2 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1820,9 +1820,7 @@ static int device_dax_write_cache_enabled(struct dm_target *ti,
static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- struct request_queue *q = bdev_get_queue(dev->bdev);
-
- return !blk_queue_nonrot(q);
+ return !bdev_nonrot(dev->bdev);
}
static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
@@ -1890,9 +1888,7 @@ static bool dm_table_supports_nowait(struct dm_table *t)
static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- struct request_queue *q = bdev_get_queue(dev->bdev);
-
- return !blk_queue_discard(q);
+ return !bdev_max_discard_sectors(dev->bdev);
}
static bool dm_table_supports_discards(struct dm_table *t)
@@ -1924,9 +1920,7 @@ static int device_not_secure_erase_capable(struct dm_target *ti,
struct dm_dev *dev, sector_t start,
sector_t len, void *data)
{
- struct request_queue *q = bdev_get_queue(dev->bdev);
-
- return !blk_queue_secure_erase(q);
+ return !bdev_max_secure_erase_sectors(dev->bdev);
}
static bool dm_table_supports_secure_erase(struct dm_table *t)
@@ -1952,9 +1946,7 @@ static int device_requires_stable_pages(struct dm_target *ti,
struct dm_dev *dev, sector_t start,
sector_t len, void *data)
{
- struct request_queue *q = bdev_get_queue(dev->bdev);
-
- return blk_queue_stable_writes(q);
+ return bdev_stable_writes(dev->bdev);
}
int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
@@ -1974,18 +1966,15 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, q);
if (!dm_table_supports_discards(t)) {
- blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
- /* Must also clear discard limits... */
q->limits.max_discard_sectors = 0;
q->limits.max_hw_discard_sectors = 0;
q->limits.discard_granularity = 0;
q->limits.discard_alignment = 0;
q->limits.discard_misaligned = 0;
- } else
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
+ }
- if (dm_table_supports_secure_erase(t))
- blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
+ if (!dm_table_supports_secure_erase(t))
+ q->limits.max_secure_erase_sectors = 0;
if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
wc = true;
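The dm-table hunk above captures the core of the QUEUE_FLAG_DISCARD removal:
support is no longer a flag to set or clear, it is implied by the limits, so a
table without discard support simply zeroes them, and secure erase gets the
same treatment through its new max_secure_erase_sectors limit. Disabling an
operation thus reduces to (pattern as in the hunk above):

	q->limits.max_discard_sectors = 0;	/* discard now reads as unsupported */
	q->limits.max_hw_discard_sectors = 0;
	q->limits.discard_granularity = 0;

	q->limits.max_secure_erase_sectors = 0;	/* ditto for secure erase */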
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 4d25d0e27031..84c083f76673 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -398,8 +398,8 @@ static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t da
sector_t s = block_to_sectors(tc->pool, data_b);
sector_t len = block_to_sectors(tc->pool, data_e - data_b);
- return __blkdev_issue_discard(tc->pool_dev->bdev, s, len,
- GFP_NOWAIT, 0, &op->bio);
+ return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOWAIT,
+ &op->bio);
}
static void end_discard(struct discard_op *op, int r)
@@ -2802,13 +2802,6 @@ static void requeue_bios(struct pool *pool)
/*----------------------------------------------------------------
* Binding of control targets to a pool object
*--------------------------------------------------------------*/
-static bool data_dev_supports_discard(struct pool_c *pt)
-{
- struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
-
- return blk_queue_discard(q);
-}
-
static bool is_factor(sector_t block_size, uint32_t n)
{
return !sector_div(block_size, n);
@@ -2828,7 +2821,7 @@ static void disable_passdown_if_not_supported(struct pool_c *pt)
if (!pt->adjusted_pf.discard_passdown)
return;
- if (!data_dev_supports_discard(pt))
+ if (!bdev_max_discard_sectors(pt->data_dev->bdev))
reason = "discard unsupported";
else if (data_limits->max_discard_sectors < pool->sectors_per_block)
@@ -4057,8 +4050,6 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
/*
* Must explicitly disallow stacking discard limits otherwise the
* block layer will stack them if pool's data device has support.
- * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the
- * user to see that, so make sure to set all discard limits to 0.
*/
limits->discard_granularity = 0;
return;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 82957bd460e8..39081338ca61 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -955,7 +955,6 @@ void disable_discard(struct mapped_device *md)
/* device doesn't really support DISCARD, disable it */
limits->max_discard_sectors = 0;
- blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
}
void disable_write_zeroes(struct mapped_device *md)
@@ -982,7 +981,7 @@ static void clone_endio(struct bio *bio)
if (unlikely(error == BLK_STS_TARGET)) {
if (bio_op(bio) == REQ_OP_DISCARD &&
- !q->limits.max_discard_sectors)
+ !bdev_max_discard_sectors(bio->bi_bdev))
disable_discard(md);
else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
!q->limits.max_write_zeroes_sectors)
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index 0f55b079371b..138a3b25c5c8 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -64,7 +64,6 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
struct linear_conf *conf;
struct md_rdev *rdev;
int i, cnt;
- bool discard_supported = false;
conf = kzalloc(struct_size(conf, disks, raid_disks), GFP_KERNEL);
if (!conf)
@@ -96,9 +95,6 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
conf->array_sectors += rdev->sectors;
cnt++;
-
- if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
- discard_supported = true;
}
if (cnt != raid_disks) {
pr_warn("md/linear:%s: not enough drives present. Aborting!\n",
@@ -106,11 +102,6 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
goto out;
}
- if (!discard_supported)
- blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
- else
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
-
/*
* Here we calculate the device offsets.
*/
@@ -252,7 +243,7 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
start_sector + data_offset;
if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bio->bi_bdev->bd_disk->queue))) {
+ !bdev_max_discard_sectors(bio->bi_bdev))) {
/* Just ignore it */
bio_endio(bio);
} else {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 309b3af906ad..2587f872c088 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5991,8 +5991,7 @@ int md_run(struct mddev *mddev)
bool nonrot = true;
rdev_for_each(rdev, mddev) {
- if (rdev->raid_disk >= 0 &&
- !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
+ if (rdev->raid_disk >= 0 && !bdev_nonrot(rdev->bdev)) {
nonrot = false;
break;
}
@@ -8585,7 +8584,7 @@ void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
{
struct bio *discard_bio = NULL;
- if (__blkdev_issue_discard(rdev->bdev, start, size, GFP_NOIO, 0,
+ if (__blkdev_issue_discard(rdev->bdev, start, size, GFP_NOIO,
&discard_bio) || !discard_bio)
return;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index b21e101183f4..7231f5e1eaa7 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -399,7 +399,6 @@ static int raid0_run(struct mddev *mddev)
conf = mddev->private;
if (mddev->queue) {
struct md_rdev *rdev;
- bool discard_supported = false;
blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
@@ -412,13 +411,7 @@ static int raid0_run(struct mddev *mddev)
rdev_for_each(rdev, mddev) {
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
- if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
- discard_supported = true;
}
- if (!discard_supported)
- blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
- else
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
}
/* calculate array device size */
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 99d5464a51f8..5aed2c8b746e 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -165,9 +165,10 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
* Allocate bios : 1 for reading, n-1 for writing
*/
for (j = pi->raid_disks ; j-- ; ) {
- bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
+ bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
if (!bio)
goto out_free_bio;
+ bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
r1_bio->bios[j] = bio;
}
/*
@@ -206,8 +207,10 @@ out_free_pages:
resync_free_pages(&rps[j]);
out_free_bio:
- while (++j < pi->raid_disks)
- bio_put(r1_bio->bios[j]);
+ while (++j < pi->raid_disks) {
+ bio_uninit(r1_bio->bios[j]);
+ kfree(r1_bio->bios[j]);
+ }
kfree(rps);
out_free_r1bio:
@@ -225,7 +228,8 @@ static void r1buf_pool_free(void *__r1_bio, void *data)
for (i = pi->raid_disks; i--; ) {
rp = get_resync_pages(r1bio->bios[i]);
resync_free_pages(rp);
- bio_put(r1bio->bios[i]);
+ bio_uninit(r1bio->bios[i]);
+ kfree(r1bio->bios[i]);
}
/* resync pages array stored in the 1st bio's .bi_private */
@@ -704,7 +708,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
/* At least two disks to choose from so failfast is OK */
set_bit(R1BIO_FailFast, &r1_bio->state);
- nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
+ nonrot = bdev_nonrot(rdev->bdev);
has_nonrot_disk |= nonrot;
pending = atomic_read(&rdev->nr_pending);
dist = abs(this_sector - conf->mirrors[disk].head_position);
@@ -802,7 +806,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
if (test_bit(Faulty, &rdev->flags)) {
bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bio->bi_bdev->bd_disk->queue)))
+ !bdev_max_discard_sectors(bio->bi_bdev)))
/* Just ignore it */
bio_endio(bio);
else
@@ -1826,8 +1830,6 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
break;
}
}
- if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
print_conf(conf);
return err;
}
@@ -3106,7 +3108,6 @@ static int raid1_run(struct mddev *mddev)
int i;
struct md_rdev *rdev;
int ret;
- bool discard_supported = false;
if (mddev->level != 1) {
pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
@@ -3141,8 +3142,6 @@ static int raid1_run(struct mddev *mddev)
continue;
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
- if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
- discard_supported = true;
}
mddev->degraded = 0;
@@ -3179,15 +3178,6 @@ static int raid1_run(struct mddev *mddev)
md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
- if (mddev->queue) {
- if (discard_supported)
- blk_queue_flag_set(QUEUE_FLAG_DISCARD,
- mddev->queue);
- else
- blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
- mddev->queue);
- }
-
ret = md_integrity_register(mddev);
if (ret) {
md_unregister_thread(&mddev->thread);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index dfe7d62d3fbd..834eb3ba95a6 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -145,15 +145,17 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
* Allocate bios.
*/
for (j = nalloc ; j-- ; ) {
- bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
+ bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
if (!bio)
goto out_free_bio;
+ bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
r10_bio->devs[j].bio = bio;
if (!conf->have_replacement)
continue;
- bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
+ bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
if (!bio)
goto out_free_bio;
+ bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
r10_bio->devs[j].repl_bio = bio;
}
/*
@@ -197,9 +199,11 @@ out_free_pages:
out_free_bio:
for ( ; j < nalloc; j++) {
if (r10_bio->devs[j].bio)
- bio_put(r10_bio->devs[j].bio);
+ bio_uninit(r10_bio->devs[j].bio);
+ kfree(r10_bio->devs[j].bio);
if (r10_bio->devs[j].repl_bio)
- bio_put(r10_bio->devs[j].repl_bio);
+ bio_uninit(r10_bio->devs[j].repl_bio);
+ kfree(r10_bio->devs[j].repl_bio);
}
kfree(rps);
out_free_r10bio:
@@ -220,12 +224,15 @@ static void r10buf_pool_free(void *__r10_bio, void *data)
if (bio) {
rp = get_resync_pages(bio);
resync_free_pages(rp);
- bio_put(bio);
+ bio_uninit(bio);
+ kfree(bio);
}
bio = r10bio->devs[j].repl_bio;
- if (bio)
- bio_put(bio);
+ if (bio) {
+ bio_uninit(bio);
+ kfree(bio);
+ }
}
/* resync pages array stored in the 1st bio's .bi_private */
@@ -796,7 +803,7 @@ static struct md_rdev *read_balance(struct r10conf *conf,
if (!do_balance)
break;
- nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
+ nonrot = bdev_nonrot(rdev->bdev);
has_nonrot_disk |= nonrot;
pending = atomic_read(&rdev->nr_pending);
if (min_pending > pending && nonrot) {
@@ -888,7 +895,7 @@ static void flush_pending_writes(struct r10conf *conf)
if (test_bit(Faulty, &rdev->flags)) {
bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bio->bi_bdev->bd_disk->queue)))
+ !bdev_max_discard_sectors(bio->bi_bdev)))
/* Just ignore it */
bio_endio(bio);
else
@@ -1083,7 +1090,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
if (test_bit(Faulty, &rdev->flags)) {
bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bio->bi_bdev->bd_disk->queue)))
+ !bdev_max_discard_sectors(bio->bi_bdev)))
/* Just ignore it */
bio_endio(bio);
else
@@ -2144,8 +2151,6 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
rcu_assign_pointer(p->rdev, rdev);
break;
}
- if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
print_conf(conf);
return err;
@@ -4069,7 +4074,6 @@ static int raid10_run(struct mddev *mddev)
sector_t size;
sector_t min_offset_diff = 0;
int first = 1;
- bool discard_supported = false;
if (mddev_init_writes_pending(mddev) < 0)
return -ENOMEM;
@@ -4140,20 +4144,9 @@ static int raid10_run(struct mddev *mddev)
rdev->data_offset << 9);
disk->head_position = 0;
-
- if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
- discard_supported = true;
first = 0;
}
- if (mddev->queue) {
- if (discard_supported)
- blk_queue_flag_set(QUEUE_FLAG_DISCARD,
- mddev->queue);
- else
- blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
- mddev->queue);
- }
/* need to check that every block has at least one working mirror */
if (!enough(conf, -1)) {
pr_err("md/raid10:%s: not enough operational mirrors.\n",
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index a7d50ff9020a..094a4042589e 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1318,7 +1318,7 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log,
r5l_write_super(log, end);
- if (!blk_queue_discard(bdev_get_queue(bdev)))
+ if (!bdev_max_discard_sectors(bdev))
return;
mddev = log->rdev->mddev;
@@ -1344,14 +1344,14 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log,
if (log->last_checkpoint < end) {
blkdev_issue_discard(bdev,
log->last_checkpoint + log->rdev->data_offset,
- end - log->last_checkpoint, GFP_NOIO, 0);
+ end - log->last_checkpoint, GFP_NOIO);
} else {
blkdev_issue_discard(bdev,
log->last_checkpoint + log->rdev->data_offset,
log->device_size - log->last_checkpoint,
- GFP_NOIO, 0);
+ GFP_NOIO);
blkdev_issue_discard(bdev, log->rdev->data_offset, end,
- GFP_NOIO, 0);
+ GFP_NOIO);
}
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 351d341a1ffa..59f91e392a2a 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7242,7 +7242,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
rdev_for_each(rdev, mddev) {
if (test_bit(Journal, &rdev->flags))
continue;
- if (blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
+ if (bdev_nonrot(rdev->bdev)) {
conf->batch_bio_dispatch = false;
break;
}
@@ -7776,14 +7776,10 @@ static int raid5_run(struct mddev *mddev)
* A better idea might be to turn DISCARD into WRITE_ZEROES
* requests, as that is required to be safe.
*/
- if (devices_handle_discard_safely &&
- mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
- mddev->queue->limits.discard_granularity >= stripe)
- blk_queue_flag_set(QUEUE_FLAG_DISCARD,
- mddev->queue);
- else
- blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
- mddev->queue);
+ if (!devices_handle_discard_safely ||
+ mddev->queue->limits.max_discard_sectors < (stripe >> 9) ||
+ mddev->queue->limits.discard_granularity < stripe)
+ blk_queue_max_discard_sectors(mddev->queue, 0);
blk_queue_max_hw_sectors(mddev->queue, UINT_MAX);
}