author     Qu Wenruo          2022-04-01 19:23:26 +0800
committer  David Sterba       2022-05-16 17:03:15 +0200
commit     6346f6bf16a0fd76f5f50077c469fa8e88724996 (patch)
tree       3759482324c21ac7e6a832fdb4cb7c54fe55c0c7 /fs
parent     f77183dc1f53b2f924e9ba3bd1602b585097ec3d (diff)
btrfs: raid56: make raid56_add_scrub_pages() subpage compatible
This requires one extra parameter @pgoff for the function.

In the current code base, scrub still works one page per sector, so the new parameter will always be 0. The extra subpage scrub optimization code is needed to take full advantage of it.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Diffstat (limited to 'fs')
-rw-r--r--	fs/btrfs/raid56.c	10
-rw-r--r--	fs/btrfs/raid56.h	2
-rw-r--r--	fs/btrfs/scrub.c	6
3 files changed, 12 insertions, 6 deletions
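
As background for the change below, here is a minimal standalone C sketch (userspace only; the constants and names are illustrative, not btrfs code) of why subpage support needs a per-sector index plus a page offset instead of a bare page index:

/*
 * Sketch only: with subpage support a 64K page can hold several 4K
 * sectors, so a sector must be addressed by (index, offset in page)
 * rather than by page index alone.
 */
#include <stdio.h>
#include <stdint.h>

#define SIM_PAGE_SIZE	65536u	/* e.g. 64K pages on some arm64/ppc64 configs */
#define SIM_SECTORSIZE	 4096u	/* typical btrfs sectorsize */

int main(void)
{
	/* Byte offset of one sector from the start of the full stripe. */
	uint64_t stripe_offset = 3 * SIM_PAGE_SIZE + 5 * SIM_SECTORSIZE;

	/* Old scheme: one page per sector, indexed by page. */
	uint64_t old_index = stripe_offset / SIM_PAGE_SIZE;

	/* New scheme: index by sector and remember where it sits in its page. */
	uint64_t sector_index = stripe_offset / SIM_SECTORSIZE;
	uint32_t pgoff = stripe_offset % SIM_PAGE_SIZE;

	printf("old page index: %llu\n", (unsigned long long)old_index);
	printf("sector index:   %llu\n", (unsigned long long)sector_index);
	printf("pgoff in page:  %u\n", pgoff);
	return 0;
}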
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 84eb4890eea1..1f310bd381a8 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -2381,17 +2381,19 @@ struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
 /* Used for both parity scrub and missing. */
 void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
-			    u64 logical)
+			    unsigned int pgoff, u64 logical)
 {
+	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
 	int stripe_offset;
 	int index;
 
 	ASSERT(logical >= rbio->bioc->raid_map[0]);
-	ASSERT(logical + PAGE_SIZE <= rbio->bioc->raid_map[0] +
+	ASSERT(logical + sectorsize <= rbio->bioc->raid_map[0] +
 	       rbio->stripe_len * rbio->nr_data);
 	stripe_offset = (int)(logical - rbio->bioc->raid_map[0]);
-	index = stripe_offset >> PAGE_SHIFT;
-	rbio->bio_pages[index] = page;
+	index = stripe_offset / sectorsize;
+	rbio->bio_sectors[index].page = page;
+	rbio->bio_sectors[index].pgoff = pgoff;
 }
 
 /*
diff --git a/fs/btrfs/raid56.h b/fs/btrfs/raid56.h
index 006b4741e5c1..aaad08aefd7d 100644
--- a/fs/btrfs/raid56.h
+++ b/fs/btrfs/raid56.h
@@ -35,7 +35,7 @@ int raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc,
 int raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc, u32 stripe_len);
 
 void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
-			    u64 logical);
+			    unsigned int pgoff, u64 logical);
 
 struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
 		struct btrfs_io_context *bioc, u32 stripe_len,
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index b79a3221d7af..6ac711fa793c 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -2205,7 +2205,11 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
 	for (i = 0; i < sblock->sector_count; i++) {
 		struct scrub_sector *sector = sblock->sectors[i];
 
-		raid56_add_scrub_pages(rbio, sector->page, sector->logical);
+		/*
+		 * For now, our scrub is still one page per sector, so pgoff
+		 * is always 0.
+		 */
+		raid56_add_scrub_pages(rbio, sector->page, 0, sector->logical);
 	}
 
 	btrfs_init_work(&sblock->work, scrub_missing_raid56_worker, NULL, NULL);
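
For reference, a rough userspace sketch of the bookkeeping the updated raid56_add_scrub_pages() performs; the structure and helper names below are simplified stand-ins for illustration rather than the kernel types, and the caller passes pgoff 0 exactly as scrub does for now:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the per-sector slot that now stores a page and an offset. */
struct sim_sector {
	void		*page;	/* stand-in for struct page * */
	unsigned int	 pgoff;	/* byte offset of the sector inside that page */
};

#define SIM_NR_SECTORS 16u

static struct sim_sector bio_sectors[SIM_NR_SECTORS];

static void sim_add_scrub_sector(void *page, unsigned int pgoff,
				 uint64_t logical, uint64_t raid_map0,
				 uint32_t sectorsize)
{
	/* Mirror of the patch: index by sector, not by page. */
	uint64_t stripe_offset = logical - raid_map0;
	size_t index = stripe_offset / sectorsize;

	assert(index < SIM_NR_SECTORS);
	bio_sectors[index].page = page;
	bio_sectors[index].pgoff = pgoff;
}

int main(void)
{
	static char fake_page[4096];

	/* Second 4K sector of the stripe; scrub still passes pgoff 0 because
	 * each sector currently occupies its own page. */
	sim_add_scrub_sector(fake_page, 0, /* logical */ 1048576 + 4096,
			     /* raid_map0 */ 1048576, /* sectorsize */ 4096);
	printf("sector 1 stored with pgoff %u\n", bio_sectors[1].pgoff);
	return 0;
}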