Diffstat (limited to 'drivers')
-rw-r--r--	drivers/md/raid5.c	45
1 file changed, 44 insertions(+), 1 deletion(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7b820b81d8c2..f787c9e5b10e 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6079,6 +6079,38 @@ out_release:
 	return ret;
 }
 
+/*
+ * If the bio covers multiple data disks, find sector within the bio that has
+ * the lowest chunk offset in the first chunk.
+ */
+static sector_t raid5_bio_lowest_chunk_sector(struct r5conf *conf,
+					      struct bio *bi)
+{
+	int sectors_per_chunk = conf->chunk_sectors;
+	int raid_disks = conf->raid_disks;
+	int dd_idx;
+	struct stripe_head sh;
+	unsigned int chunk_offset;
+	sector_t r_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1);
+	sector_t sector;
+
+	/* We pass in fake stripe_head to get back parity disk numbers */
+	sector = raid5_compute_sector(conf, r_sector, 0, &dd_idx, &sh);
+	chunk_offset = sector_div(sector, sectors_per_chunk);
+	if (sectors_per_chunk - chunk_offset >= bio_sectors(bi))
+		return r_sector;
+	/*
+	 * Bio crosses to the next data disk. Check whether it's in the same
+	 * chunk.
+	 */
+	dd_idx++;
+	while (dd_idx == sh.pd_idx || dd_idx == sh.qd_idx)
+		dd_idx++;
+	if (dd_idx >= raid_disks)
+		return r_sector;
+	return r_sector + sectors_per_chunk - chunk_offset;
+}
+
 static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
 {
 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
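
The helper above leans on raid5_compute_sector() and the parity indices in
struct stripe_head, which exist only inside the kernel. As a rough user-space
sketch of just the modular arithmetic it performs, assuming a stripe-aligned
bio start and ignoring the parity-disk skip, one might write the following
(toy_lowest_chunk_sector is a hypothetical name, not part of the patch):

/*
 * Hypothetical user-space model of the chunk-offset arithmetic in
 * raid5_bio_lowest_chunk_sector(). The raid5_compute_sector() mapping and
 * parity-disk handling are kernel internals and are omitted here.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

static sector_t toy_lowest_chunk_sector(sector_t bio_sector,
					unsigned int bio_sectors,
					unsigned int sectors_per_chunk)
{
	/* Offset of the bio's first sector within its chunk. */
	sector_t chunk_offset = bio_sector % sectors_per_chunk;

	/*
	 * Bio ends within the current chunk: its start already has the
	 * lowest chunk offset it covers.
	 */
	if (sectors_per_chunk - chunk_offset >= bio_sectors)
		return bio_sector;
	/* Otherwise the next chunk boundary (offset 0) is covered too. */
	return bio_sector + sectors_per_chunk - chunk_offset;
}

int main(void)
{
	/*
	 * A 256-sector bio starting 32 sectors into a 128-sector chunk
	 * necessarily covers the next chunk boundary at sector 128.
	 */
	printf("%llu\n", (unsigned long long)
	       toy_lowest_chunk_sector(32, 256, 128));	/* prints 128 */
	return 0;
}

With those toy numbers the search for pending stripes starts at the chunk
boundary rather than at the bio's first sector, which is exactly what the
caller below exploits.
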
@@ -6150,6 +6182,17 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
 	}
 	md_account_bio(mddev, &bi);
 
+	/*
+	 * Let's start with the stripe with the lowest chunk offset in the first
+	 * chunk. That has the best chances of creating IOs adjacent to
+	 * previous IOs in case of sequential IO and thus creates the most
+	 * sequential IO pattern. We don't bother with the optimization when
+	 * reshaping as the performance benefit is not worth the complexity.
+	 */
+	if (likely(conf->reshape_progress == MaxSector))
+		logical_sector = raid5_bio_lowest_chunk_sector(conf, bi);
+	s = (logical_sector - ctx.first_sector) >> RAID5_STRIPE_SHIFT(conf);
+
 	add_wait_queue(&conf->wait_for_overlap, &wait);
 	while (1) {
 		res = make_stripe_request(mddev, conf, &ctx, logical_sector,
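
Before entering the loop, the patch seeds s with the bit index of the chosen
stripe inside the ctx.sectors_to_do bitmap. A hypothetical standalone sketch
of that index arithmetic, reusing the toy numbers from the previous example
and assuming a fixed shift of 3 (8-sector, 4 KiB stripes) in place of
RAID5_STRIPE_SHIFT(conf):

#include <stdint.h>
#include <stdio.h>

/* Assumed value for illustration only. */
#define TOY_STRIPE_SHIFT 3

int main(void)
{
	uint64_t first_sector = 32;	/* ctx.first_sector: bio's first stripe */
	uint64_t logical_sector = 128;	/* lowest-chunk-offset sector from above */

	/* Bit index of that stripe within the sectors_to_do bitmap. */
	unsigned int s = (unsigned int)
		((logical_sector - first_sector) >> TOY_STRIPE_SHIFT);
	printf("start at bit %u\n", s);	/* (128 - 32) / 8 = 12 */
	return 0;
}
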
@@ -6178,7 +6221,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
 			continue;
 		}
 
-		s = find_first_bit(ctx.sectors_to_do, stripe_cnt);
+		s = find_next_bit_wrap(ctx.sectors_to_do, stripe_cnt, s);
 		if (s == stripe_cnt)
 			break;
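
find_first_bit() restarted every scan at bit 0, which would immediately throw
away the carefully chosen starting stripe; find_next_bit_wrap() instead
resumes the scan at s and wraps around, so all pending stripes are still
visited, just in an order that keeps sequential IO sequential. A hedged
single-word model of the wrap-around semantics (the kernel's
find_next_bit_wrap() operates on arbitrary-length unsigned long bitmaps;
toy_find_next_bit_wrap is a hypothetical stand-in):

#include <stdint.h>
#include <stdio.h>

/*
 * Scan for a set bit in [offset, size), then wrap to [0, offset);
 * return size if no bit is set anywhere.
 */
static unsigned int toy_find_next_bit_wrap(uint64_t map, unsigned int size,
					   unsigned int offset)
{
	unsigned int i;

	for (i = offset; i < size; i++)
		if ((map >> i) & 1)
			return i;
	for (i = 0; i < offset; i++)
		if ((map >> i) & 1)
			return i;
	return size;
}

int main(void)
{
	uint64_t sectors_to_do = 0x0f;	/* stripes 0..3 still pending */

	/* Starting at bit 2 finds bit 2; starting at bit 5 wraps to bit 0. */
	printf("%u %u\n", toy_find_next_bit_wrap(sectors_to_do, 64, 2),
	       toy_find_next_bit_wrap(sectors_to_do, 64, 5));
	return 0;
}

Note how a start offset past the last set bit wraps to bit 0 rather than
reporting failure; size is returned only when the bitmap is empty, which is
what the "if (s == stripe_cnt) break;" test above relies on.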