author		Matthew Wilcox (Oracle)	2022-06-04 16:39:04 -0400
committer	Matthew Wilcox (Oracle)	2022-06-29 08:51:06 -0400
commit		1508062ecd5515bcd50e76ceda20d626e8bfe939 (patch)
tree		362992f6027d05864de163e161fafc4167c71295 /fs/hugetlbfs
parent		bbfe4f66002a1a0816e2f5dd3c87dd76dbfd41e6 (diff)
hugetlbfs: Convert remove_inode_hugepages() to use filemap_get_folios()
Use folios throughout this function. That removes the last caller of
huge_pagevec_release(), so delete that too.
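For background, the new loop is the stock folio_batch walk. Below is a
minimal sketch of that pattern, assuming a kernel where
filemap_get_folios() exists (5.19+); drop_range_folios() is a
hypothetical name used for illustration, not a function added by this
patch:

#include <linux/pagemap.h>	/* filemap_get_folios() */
#include <linux/pagevec.h>	/* struct folio_batch */
#include <linux/sched.h>	/* cond_resched() */

/* Hypothetical helper showing the batched folio walk (not in this patch). */
static void drop_range_folios(struct address_space *mapping,
			      pgoff_t start, pgoff_t end)
{
	struct folio_batch fbatch;
	unsigned int i;

	folio_batch_init(&fbatch);
	/*
	 * filemap_get_folios() fills fbatch with up to 15 folios in
	 * [start, end] (end inclusive), takes a reference on each,
	 * advances start past the last folio found, and returns the
	 * batch count; 0 means the range is exhausted.
	 */
	while (filemap_get_folios(mapping, &start, end, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			folio_lock(folio);
			/* ... per-folio work goes here ... */
			folio_unlock(folio);
		}
		/* Drops the lookup references and reinitialises the batch. */
		folio_batch_release(&fbatch);
		cond_resched();
	}
}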
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Christian Brauner (Microsoft) <brauner@kernel.org>
Diffstat (limited to 'fs/hugetlbfs')
-rw-r--r--	fs/hugetlbfs/inode.c	44
1 file changed, 14 insertions(+), 30 deletions(-)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index ae2524480f23..14d33f725e05 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -108,16 +108,6 @@ static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
 }
 #endif
 
-static void huge_pagevec_release(struct pagevec *pvec)
-{
-	int i;
-
-	for (i = 0; i < pagevec_count(pvec); ++i)
-		put_page(pvec->pages[i]);
-
-	pagevec_reinit(pvec);
-}
-
 /*
  * Mask used when checking the page offset value passed in via system
  * calls. This value will be converted to a loff_t which is signed.
@@ -480,25 +470,19 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 	struct address_space *mapping = &inode->i_data;
 	const pgoff_t start = lstart >> huge_page_shift(h);
 	const pgoff_t end = lend >> huge_page_shift(h);
-	struct pagevec pvec;
+	struct folio_batch fbatch;
 	pgoff_t next, index;
 	int i, freed = 0;
 	bool truncate_op = (lend == LLONG_MAX);
 
-	pagevec_init(&pvec);
+	folio_batch_init(&fbatch);
 	next = start;
-	while (next < end) {
-		/*
-		 * When no more pages are found, we are done.
-		 */
-		if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
-			break;
-
-		for (i = 0; i < pagevec_count(&pvec); ++i) {
-			struct page *page = pvec.pages[i];
+	while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) {
+		for (i = 0; i < folio_batch_count(&fbatch); ++i) {
+			struct folio *folio = fbatch.folios[i];
 			u32 hash = 0;
 
-			index = page->index;
+			index = folio->index;
 			if (!truncate_op) {
 				/*
 				 * Only need to hold the fault mutex in the
@@ -511,15 +495,15 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 			}
 
 			/*
-			 * If page is mapped, it was faulted in after being
+			 * If folio is mapped, it was faulted in after being
 			 * unmapped in caller. Unmap (again) now after taking
 			 * the fault mutex. The mutex will prevent faults
-			 * until we finish removing the page.
+			 * until we finish removing the folio.
 			 *
 			 * This race can only happen in the hole punch case.
 			 * Getting here in a truncate operation is a bug.
 			 */
-			if (unlikely(page_mapped(page))) {
+			if (unlikely(folio_mapped(folio))) {
 				BUG_ON(truncate_op);
 
 				mutex_unlock(&hugetlb_fault_mutex_table[hash]);
@@ -532,7 +516,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 				i_mmap_unlock_write(mapping);
 			}
 
-			lock_page(page);
+			folio_lock(folio);
 			/*
 			 * We must free the huge page and remove from page
 			 * cache (remove_huge_page) BEFORE removing the
@@ -542,8 +526,8 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 			 * the subpool and global reserve usage count can need
 			 * to be adjusted.
 			 */
-			VM_BUG_ON(HPageRestoreReserve(page));
-			remove_huge_page(page);
+			VM_BUG_ON(HPageRestoreReserve(&folio->page));
+			remove_huge_page(&folio->page);
 			freed++;
 			if (!truncate_op) {
 				if (unlikely(hugetlb_unreserve_pages(inode,
@@ -551,11 +535,11 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 					hugetlb_fix_reserve_counts(inode);
 			}
 
-			unlock_page(page);
+			folio_unlock(folio);
 			if (!truncate_op)
 				mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 		}
-		huge_pagevec_release(&pvec);
+		folio_batch_release(&fbatch);
 		cond_resched();
 	}
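The symmetry above is what lets huge_pagevec_release() go entirely:
filemap_get_folios() takes a reference on each folio it returns, and
folio_batch_release() drops those references and reinitialises the
batch, the same work the deleted helper did by hand with put_page()
and pagevec_reinit().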