author	Matthew Wilcox (Oracle)	2021-12-07 14:15:07 -0500
committer	Matthew Wilcox (Oracle)	2022-01-08 00:28:41 -0500
commit	51dcbdac28d4dde915f78adf08bb3fac87f516e9 (patch)
tree	c61ff3d6a485dd854e45daf3316a96bfc860a289	/mm/shmem.c
parent	0e499ed3d7a216706e02eeded562627d3e69dcfd (diff)
mm: Convert find_lock_entries() to use a folio_batch
find_lock_entries() already only returned the head page of folios, so
convert it to return a folio_batch instead of a pagevec.  That cascades
through converting truncate_inode_pages_range() to
delete_from_page_cache_batch() and page_cache_delete_batch().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
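For readers outside mm/, the consumer-side pattern this patch converts
shmem_undo_range() to looks roughly like the sketch below. This is an
illustrative fragment, not part of the patch: find_lock_entries() is
mm-internal (its signature here matches the call site in the diff), and
the wrapper name drop_range_sketch() is invented for the example.

	#include <linux/pagevec.h>	/* struct folio_batch, PAGEVEC_SIZE */
	#include <linux/pagemap.h>
	#include <linux/sched.h>	/* cond_resched() */
	#include "internal.h"		/* find_lock_entries() (mm-internal) */

	/* Hypothetical helper showing the folio_batch consumer pattern. */
	static void drop_range_sketch(struct address_space *mapping,
				      pgoff_t start, pgoff_t end)
	{
		struct folio_batch fbatch;
		pgoff_t indices[PAGEVEC_SIZE];
		pgoff_t index = start;
		unsigned int i;

		folio_batch_init(&fbatch);	/* empty batch, no references held */
		while (index < end && find_lock_entries(mapping, index, end - 1,
				&fbatch, indices)) {
			for (i = 0; i < folio_batch_count(&fbatch); i++) {
				/* Typed access: no cast from struct page needed. */
				struct folio *folio = fbatch.folios[i];

				index = indices[i];
				if (xa_is_value(folio))
					continue;	/* shadow/swap entry, never locked */

				/* ... operate on the locked folio ... */
				folio_unlock(folio);
			}
			folio_batch_remove_exceptionals(&fbatch);
			folio_batch_release(&fbatch);	/* drop the batch's folio refs */
			cond_resched();
			index++;
		}
	}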
Diffstat (limited to 'mm/shmem.c')
-rw-r--r--	mm/shmem.c	14
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index e909c163fb38..bbfa2d05e787 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -919,7 +919,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
 	unsigned int partial_start = lstart & (PAGE_SIZE - 1);
 	unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
-	struct pagevec pvec;
 	struct folio_batch fbatch;
 	pgoff_t indices[PAGEVEC_SIZE];
 	long nr_swaps_freed = 0;
@@ -932,12 +931,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 	if (info->fallocend > start && info->fallocend <= end && !unfalloc)
 		info->fallocend = start;
 
-	pagevec_init(&pvec);
+	folio_batch_init(&fbatch);
 	index = start;
 	while (index < end && find_lock_entries(mapping, index, end - 1,
-			&pvec, indices)) {
-		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct folio *folio = (struct folio *)pvec.pages[i];
+			&fbatch, indices)) {
+		for (i = 0; i < folio_batch_count(&fbatch); i++) {
+			struct folio *folio = fbatch.folios[i];
 
 			index = indices[i];
 
 			if (xa_is_value(folio)) {
@@ -954,8 +953,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 			truncate_inode_folio(mapping, folio);
 			folio_unlock(folio);
 		}
-		pagevec_remove_exceptionals(&pvec);
-		pagevec_release(&pvec);
+		folio_batch_remove_exceptionals(&fbatch);
+		folio_batch_release(&fbatch);
 		cond_resched();
 		index++;
 	}
@@ -988,7 +987,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 	if (start >= end)
 		return;
 
-	folio_batch_init(&fbatch);
 	index = start;
 	while (index < end) {
 		cond_resched();
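A note on the fbatch.folios[i] conversion in the second hunk: the old
code had to cast pvec.pages[i] because a pagevec carries struct page
pointers, while a folio_batch carries struct folio pointers directly.
As of this series, the batch in include/linux/pagevec.h looks
approximately like this (sketched from memory; treat the exact field
list as an assumption):

	struct folio_batch {
		unsigned char nr;			/* entries in folios[] */
		bool percpu_pvec_drained;
		struct folio *folios[PAGEVEC_SIZE];	/* typed, unlike pagevec's pages[] */
	};

Because the batch reuses PAGEVEC_SIZE, the indices[PAGEVEC_SIZE] array
in shmem_undo_range() can stay the same size after the conversion.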