author    | Matthew Wilcox (Oracle) | 2021-08-16 23:36:31 -0400
committer | Matthew Wilcox (Oracle) | 2022-01-04 13:15:33 -0500
commit    | 9f2b04a25a41b1f41b3cead4f56854a4192ec5b0 (patch)
tree      | 27f4d1ed8d4626e3bf9938c1e768eefdc4e4bfd9 /mm/migrate.c
parent    | 5bf34d7c7ffe773c3b3c1b6ebf39e0f34a2436ec (diff)
filemap: Add folio_put_wait_locked()
Convert all three callers of put_and_wait_on_page_locked() to
folio_put_wait_locked(). This shrinks the kernel overall by 19 bytes.
filemap_update_page() shrinks by 19 bytes while __migration_entry_wait()
is unchanged. folio_put_wait_locked() is 14 bytes smaller than
put_and_wait_on_page_locked(), but pmd_migration_entry_wait() grows by
14 bytes. It removes the assumption from pmd_migration_entry_wait()
that pages cannot be larger than a PMD (which is true today, but
may be interesting to explore in the future).
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
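The mm/filemap.c side of the change (the helper this commit adds) falls outside the diffstat below, which is limited to mm/migrate.c. For reference, a minimal sketch of its likely shape, assuming it funnels into folio_wait_bit_common() with DROP semantics the way the old page-based put_and_wait_on_page_locked() did:

/*
 * Sketch only -- the authoritative body lives in mm/filemap.c.
 * Assumption: DROP tells folio_wait_bit_common() to put the caller's
 * reference before sleeping on PG_locked, so the folio can be freed
 * (or its migration completed) while we wait.
 */
int folio_put_wait_locked(struct folio *folio, int state)
{
        return folio_wait_bit_common(folio, PG_locked, state, DROP);
}

After this returns the caller must not touch the folio again, which is why both call sites in mm/migrate.c below return immediately.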
Diffstat (limited to 'mm/migrate.c')
-rw-r--r-- | mm/migrate.c | 21
1 file changed, 10 insertions(+), 11 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index cf25b00f03c8..311638177536 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -291,7 +291,7 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
 {
        pte_t pte;
        swp_entry_t entry;
-       struct page *page;
+       struct folio *folio;

        spin_lock(ptl);
        pte = *ptep;
@@ -302,18 +302,17 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
        if (!is_migration_entry(entry))
                goto out;

-       page = pfn_swap_entry_to_page(entry);
-       page = compound_head(page);
+       folio = page_folio(pfn_swap_entry_to_page(entry));

        /*
         * Once page cache replacement of page migration started, page_count
-        * is zero; but we must not call put_and_wait_on_page_locked() without
-        * a ref. Use get_page_unless_zero(), and just fault again if it fails.
+        * is zero; but we must not call folio_put_wait_locked() without
+        * a ref. Use folio_try_get(), and just fault again if it fails.
         */
-       if (!get_page_unless_zero(page))
+       if (!folio_try_get(folio))
                goto out;
        pte_unmap_unlock(ptep, ptl);
-       put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
+       folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE);
        return;
 out:
        pte_unmap_unlock(ptep, ptl);
@@ -338,16 +337,16 @@ void migration_entry_wait_huge(struct vm_area_struct *vma,
 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
 {
        spinlock_t *ptl;
-       struct page *page;
+       struct folio *folio;

        ptl = pmd_lock(mm, pmd);
        if (!is_pmd_migration_entry(*pmd))
                goto unlock;
-       page = pfn_swap_entry_to_page(pmd_to_swp_entry(*pmd));
-       if (!get_page_unless_zero(page))
+       folio = page_folio(pfn_swap_entry_to_page(pmd_to_swp_entry(*pmd)));
+       if (!folio_try_get(folio))
                goto unlock;
        spin_unlock(ptl);
-       put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
+       folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE);
        return;
 unlock:
        spin_unlock(ptl);
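The commit message's final point is easiest to see in the second hunk: pmd_migration_entry_wait() previously used the page straight out of the migration entry, implicitly treating it as a head page. Side by side, using the identifiers from that hunk:

        /* Before: assumes the entry's page is a head page, which only
         * holds while no folio is larger than a PMD. */
        page = pfn_swap_entry_to_page(pmd_to_swp_entry(*pmd));
        if (!get_page_unless_zero(page))
                goto unlock;

        /* After: page_folio() maps head and tail pages alike to their
         * folio, so the size assumption is gone. */
        folio = page_folio(pfn_swap_entry_to_page(pmd_to_swp_entry(*pmd)));
        if (!folio_try_get(folio))
                goto unlock;

Note the pattern both hunks share: a speculative reference is taken under the page-table lock, the lock is dropped, and only then is the reference handed to folio_put_wait_locked(), so the waiter does not keep pinning the folio while migration completes.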