author | Ritesh Harjani (IBM) | 2023-07-10 14:12:21 -0700
---|---|---
committer | Ritesh Harjani (IBM) | 2023-07-25 10:55:54 +0530
commit | 3ea5c76cadeedcc0e40e180d2c4e37dc289fee22 | (patch)
tree | 0739cb8b643a8bca7e1ac3fbb2617a73aba0edf5 | /fs/iomap
parent | 04f52c4e6f8001e2d81006027dac08badddafde8 | (diff)
iomap: Drop ifs argument from iomap_set_range_uptodate()
iomap_folio_state (ifs) can be derived directly from the folio, making it
unnecessary to pass "ifs" as an argument to iomap_set_range_uptodate().
This patch eliminates the "ifs" argument from the iomap_set_range_uptodate()
function.
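For illustration only (not part of the patch), a minimal sketch of the call
pattern before and after, mirroring the helpers in the diff below:

	/* Before: each caller had to look up and pass the ifs pointer. */
	struct iomap_folio_state *ifs = folio->private;
	iomap_set_range_uptodate(folio, ifs, poff, plen);

	/* After: the helper derives ifs from folio->private internally. */
	iomap_set_range_uptodate(folio, poff, plen);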
Also, the definitions of the iomap_set_range_uptodate() and
ifs_set_range_uptodate() functions are moved above ifs_alloc().
In upcoming patches, we plan to introduce additional helper routines for
handling dirty state, with the intention of consolidating all of the "ifs"
state handling routines in one place.
Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Diffstat (limited to 'fs/iomap')
-rw-r--r-- | fs/iomap/buffered-io.c | 67
1 file changed, 33 insertions, 34 deletions
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 0ae34b5ba441..6fff25f955e8 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -36,6 +36,33 @@ struct iomap_folio_state {
 
 static struct bio_set iomap_ioend_bioset;
 
+static void ifs_set_range_uptodate(struct folio *folio,
+		struct iomap_folio_state *ifs, size_t off, size_t len)
+{
+	struct inode *inode = folio->mapping->host;
+	unsigned int first_blk = off >> inode->i_blkbits;
+	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
+	unsigned int nr_blks = last_blk - first_blk + 1;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ifs->state_lock, flags);
+	bitmap_set(ifs->state, first_blk, nr_blks);
+	if (bitmap_full(ifs->state, i_blocks_per_folio(inode, folio)))
+		folio_mark_uptodate(folio);
+	spin_unlock_irqrestore(&ifs->state_lock, flags);
+}
+
+static void iomap_set_range_uptodate(struct folio *folio, size_t off,
+		size_t len)
+{
+	struct iomap_folio_state *ifs = folio->private;
+
+	if (ifs)
+		ifs_set_range_uptodate(folio, ifs, off, len);
+	else
+		folio_mark_uptodate(folio);
+}
+
 static struct iomap_folio_state *ifs_alloc(struct inode *inode,
 		struct folio *folio, unsigned int flags)
 {
@@ -137,30 +164,6 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
 	*lenp = plen;
 }
 
-static void ifs_set_range_uptodate(struct folio *folio,
-		struct iomap_folio_state *ifs, size_t off, size_t len)
-{
-	struct inode *inode = folio->mapping->host;
-	unsigned first = off >> inode->i_blkbits;
-	unsigned last = (off + len - 1) >> inode->i_blkbits;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ifs->state_lock, flags);
-	bitmap_set(ifs->state, first, last - first + 1);
-	if (bitmap_full(ifs->state, i_blocks_per_folio(inode, folio)))
-		folio_mark_uptodate(folio);
-	spin_unlock_irqrestore(&ifs->state_lock, flags);
-}
-
-static void iomap_set_range_uptodate(struct folio *folio,
-		struct iomap_folio_state *ifs, size_t off, size_t len)
-{
-	if (ifs)
-		ifs_set_range_uptodate(folio, ifs, off, len);
-	else
-		folio_mark_uptodate(folio);
-}
-
 static void iomap_finish_folio_read(struct folio *folio, size_t offset,
 		size_t len, int error)
 {
@@ -170,7 +173,7 @@ static void iomap_finish_folio_read(struct folio *folio, size_t offset,
 		folio_clear_uptodate(folio);
 		folio_set_error(folio);
 	} else {
-		iomap_set_range_uptodate(folio, ifs, offset, len);
+		iomap_set_range_uptodate(folio, offset, len);
 	}
 
 	if (!ifs || atomic_sub_and_test(len, &ifs->read_bytes_pending))
@@ -206,7 +209,6 @@ struct iomap_readpage_ctx {
 static int iomap_read_inline_data(const struct iomap_iter *iter,
 		struct folio *folio)
 {
-	struct iomap_folio_state *ifs;
 	const struct iomap *iomap = iomap_iter_srcmap(iter);
 	size_t size = i_size_read(iter->inode) - iomap->offset;
 	size_t poff = offset_in_page(iomap->offset);
@@ -224,15 +226,13 @@ static int iomap_read_inline_data(const struct iomap_iter *iter,
 	if (WARN_ON_ONCE(size > iomap->length))
 		return -EIO;
 	if (offset > 0)
-		ifs = ifs_alloc(iter->inode, folio, iter->flags);
-	else
-		ifs = folio->private;
+		ifs_alloc(iter->inode, folio, iter->flags);
 
 	addr = kmap_local_folio(folio, offset);
 	memcpy(addr, iomap->inline_data, size);
 	memset(addr + size, 0, PAGE_SIZE - poff - size);
 	kunmap_local(addr);
-	iomap_set_range_uptodate(folio, ifs, offset, PAGE_SIZE - poff);
+	iomap_set_range_uptodate(folio, offset, PAGE_SIZE - poff);
 	return 0;
 }
 
@@ -269,7 +269,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
 
 	if (iomap_block_needs_zeroing(iter, pos)) {
 		folio_zero_range(folio, poff, plen);
-		iomap_set_range_uptodate(folio, ifs, poff, plen);
+		iomap_set_range_uptodate(folio, poff, plen);
 		goto done;
 	}
 
@@ -578,7 +578,7 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
 			if (status)
 				return status;
 		}
-		iomap_set_range_uptodate(folio, ifs, poff, plen);
+		iomap_set_range_uptodate(folio, poff, plen);
 	} while ((block_start += plen) < block_end);
 
 	return 0;
@@ -685,7 +685,6 @@ out_unlock:
 static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
 		size_t copied, struct folio *folio)
 {
-	struct iomap_folio_state *ifs = folio->private;
 	flush_dcache_folio(folio);
 
 	/*
@@ -701,7 +700,7 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
 	 */
 	if (unlikely(copied < len && !folio_test_uptodate(folio)))
 		return 0;
-	iomap_set_range_uptodate(folio, ifs, offset_in_folio(folio, pos), len);
+	iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
 	filemap_dirty_folio(inode->i_mapping, folio);
 	return copied;
 }
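As an aside (not part of the patch), a small self-contained C sketch of the
block-range arithmetic ifs_set_range_uptodate() performs, using hypothetical
example values (1 KiB blocks, i.e. i_blkbits == 10):

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical example values, not taken from the kernel code. */
		unsigned int blkbits = 10;             /* 1 KiB block size */
		unsigned long off = 1024, len = 2048;  /* byte range within the folio */

		unsigned int first_blk = off >> blkbits;             /* 1024 >> 10 == 1 */
		unsigned int last_blk = (off + len - 1) >> blkbits;  /* 3071 >> 10 == 2 */
		unsigned int nr_blks = last_blk - first_blk + 1;     /* 2 blocks in range */

		/* The kernel helper then calls bitmap_set(ifs->state, first_blk, nr_blks). */
		printf("first_blk=%u last_blk=%u nr_blks=%u\n", first_blk, last_blk, nr_blks);
		return 0;
	}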