author     Matthew Wilcox (Oracle)  2022-06-17 18:50:03 +0100
committer  akpm                     2022-07-03 18:08:45 -0700
commit     7a3dbfe8a52b5d7a1639aa0bf7b3a3271d9e6e05
tree       5d93f5f29e562871402bf817ce4f86b41b224c8c
parent     70dea5346ea327499f9a71e77bec2732e4d422ed
mm/swap: convert lru_deactivate_file to a folio_batch
Use a folio throughout lru_deactivate_file_fn(), removing many hidden
calls to compound_head(). Shrinks the kernel by 864 bytes of text.
Link: https://lkml.kernel.org/r/20220617175020.717127-6-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 mm/swap.c | 82
 1 file changed, 39 insertions(+), 43 deletions(-)
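The 864-byte shrink claimed in the commit message comes from dropping hidden head-page lookups: legacy page-flag helpers such as PageActive() call compound_head() on every test because they may be handed a tail page, whereas folio helpers such as folio_test_active() operate on a head page by definition. The standalone C sketch below models that difference; my_page, my_folio, my_compound_head() and PG_ACTIVE are illustrative stand-ins, not the kernel's real definitions.

#include <stdbool.h>
#include <stdio.h>

#define PG_ACTIVE (1UL << 0)

struct my_page {
	unsigned long flags;
	struct my_page *head;	/* tail pages point at their head page */
};

/* Legacy model: every flag test must first resolve the head page. */
static struct my_page *my_compound_head(struct my_page *page)
{
	return page->head ? page->head : page;
}

static bool my_page_active(struct my_page *page)
{
	return my_compound_head(page)->flags & PG_ACTIVE;
}

/* Folio model: a folio is never a tail page, so no lookup is needed. */
struct my_folio {
	unsigned long flags;
};

static bool my_folio_active(const struct my_folio *folio)
{
	return folio->flags & PG_ACTIVE;
}

int main(void)
{
	struct my_page head = { .flags = PG_ACTIVE, .head = NULL };
	struct my_page tail = { .flags = 0, .head = &head };
	struct my_folio folio = { .flags = PG_ACTIVE };

	/* The page path pays for my_compound_head() on every call ... */
	printf("page active:  %d\n", my_page_active(&tail));
	/* ... the folio path does not; repeated across many helpers,
	 * this is where the patch's text-size saving comes from. */
	printf("folio active: %d\n", my_folio_active(&folio));
	return 0;
}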
diff --git a/mm/swap.c b/mm/swap.c
index cb7669bea85b..75c72d235479 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -62,7 +62,7 @@ static DEFINE_PER_CPU(struct lru_rotate, lru_rotate) = {
 struct lru_pvecs {
 	local_lock_t lock;
 	struct folio_batch lru_add;
-	struct pagevec lru_deactivate_file;
+	struct folio_batch lru_deactivate_file;
 	struct pagevec lru_deactivate;
 	struct pagevec lru_lazyfree;
 #ifdef CONFIG_SMP
@@ -562,56 +562,57 @@ void lru_cache_add_inactive_or_unevictable(struct page *page,
 }
 
 /*
- * If the page can not be invalidated, it is moved to the
+ * If the folio cannot be invalidated, it is moved to the
  * inactive list to speed up its reclaim.  It is moved to the
  * head of the list, rather than the tail, to give the flusher
  * threads some time to write it out, as this is much more
  * effective than the single-page writeout from reclaim.
  *
- * If the page isn't page_mapped and dirty/writeback, the page
- * could reclaim asap using PG_reclaim.
+ * If the folio isn't mapped and dirty/writeback, the folio
+ * could be reclaimed asap using the reclaim flag.
  *
- * 1. active, mapped page -> none
- * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
- * 3. inactive, mapped page -> none
- * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
+ * 1. active, mapped folio -> none
+ * 2. active, dirty/writeback folio -> inactive, head, reclaim
+ * 3. inactive, mapped folio -> none
+ * 4. inactive, dirty/writeback folio -> inactive, head, reclaim
  * 5. inactive, clean -> inactive, tail
  * 6. Others -> none
  *
- * In 4, why it moves inactive's head, the VM expects the page would
- * be write it out by flusher threads as this is much more effective
+ * In 4, it moves to the head of the inactive list so the folio is
+ * written out by flusher threads as this is much more efficient
  * than the single-page writeout from reclaim.
  */
-static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
+static void lru_deactivate_file_fn(struct lruvec *lruvec, struct folio *folio)
 {
-	bool active = PageActive(page);
-	int nr_pages = thp_nr_pages(page);
+	bool active = folio_test_active(folio);
+	long nr_pages = folio_nr_pages(folio);
 
-	if (PageUnevictable(page))
+	if (folio_test_unevictable(folio))
 		return;
 
-	/* Some processes are using the page */
-	if (page_mapped(page))
+	/* Some processes are using the folio */
+	if (folio_mapped(folio))
 		return;
 
-	del_page_from_lru_list(page, lruvec);
-	ClearPageActive(page);
-	ClearPageReferenced(page);
+	lruvec_del_folio(lruvec, folio);
+	folio_clear_active(folio);
+	folio_clear_referenced(folio);
 
-	if (PageWriteback(page) || PageDirty(page)) {
+	if (folio_test_writeback(folio) || folio_test_dirty(folio)) {
 		/*
-		 * PG_reclaim could be raced with end_page_writeback
-		 * It can make readahead confusing.  But race window
-		 * is _really_ small and it's non-critical problem.
+		 * Setting the reclaim flag could race with
+		 * folio_end_writeback() and confuse readahead.  But the
+		 * race window is _really_ small and it's not a critical
+		 * problem.
 		 */
-		add_page_to_lru_list(page, lruvec);
-		SetPageReclaim(page);
+		lruvec_add_folio(lruvec, folio);
+		folio_set_reclaim(folio);
 	} else {
 		/*
-		 * The page's writeback ends up during pagevec
-		 * We move that page into tail of inactive.
+		 * The folio's writeback ended while it was in the batch.
+		 * We move that folio to the tail of the inactive list.
 		 */
-		add_page_to_lru_list_tail(page, lruvec);
+		lruvec_add_folio_tail(lruvec, folio);
 		__count_vm_events(PGROTATED, nr_pages);
 	}
 
@@ -685,9 +686,9 @@ void lru_add_drain_cpu(int cpu)
 		local_unlock_irqrestore(&lru_rotate.lock, flags);
 	}
 
-	pvec = &per_cpu(lru_pvecs.lru_deactivate_file, cpu);
-	if (pagevec_count(pvec))
-		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn);
+	fbatch = &per_cpu(lru_pvecs.lru_deactivate_file, cpu);
+	if (folio_batch_count(fbatch))
+		folio_batch_move_lru(fbatch, lru_deactivate_file_fn);
 
 	pvec = &per_cpu(lru_pvecs.lru_deactivate, cpu);
 	if (pagevec_count(pvec))
@@ -701,32 +702,27 @@ void lru_add_drain_cpu(int cpu)
 }
 
 /**
- * deactivate_file_folio() - Forcefully deactivate a file folio.
+ * deactivate_file_folio() - Deactivate a file folio.
  * @folio: Folio to deactivate.
  *
  * This function hints to the VM that @folio is a good reclaim candidate,
  * for example if its invalidation fails due to the folio being dirty
  * or under writeback.
  *
- * Context: Caller holds a reference on the page.
+ * Context: Caller holds a reference on the folio.
  */
 void deactivate_file_folio(struct folio *folio)
 {
-	struct pagevec *pvec;
+	struct folio_batch *fbatch;
 
-	/*
-	 * In a workload with many unevictable pages such as mprotect,
-	 * unevictable folio deactivation for accelerating reclaim is pointless.
-	 */
+	/* Deactivating an unevictable folio will not accelerate reclaim */
 	if (folio_test_unevictable(folio))
 		return;
 
 	folio_get(folio);
 	local_lock(&lru_pvecs.lock);
-	pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);
-
-	if (pagevec_add_and_need_flush(pvec, &folio->page))
-		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn);
+	fbatch = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);
+	folio_batch_add_and_move(fbatch, folio, lru_deactivate_file_fn);
 	local_unlock(&lru_pvecs.lock);
 }
 
@@ -899,7 +895,7 @@ static inline void __lru_add_drain_all(bool force_all_cpus)
 		if (folio_batch_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
 		    data_race(folio_batch_count(&per_cpu(lru_rotate.fbatch, cpu))) ||
-		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
+		    folio_batch_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
 		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
 		    pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
 		    need_activate_page_drain(cpu) ||
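For context on the folio_batch_add_and_move() pattern adopted above: folios queue in a small fixed-size per-CPU array, and the move function is applied to the whole batch only when it fills up (or when the CPU is drained), so LRU-lock traffic is paid once per batch rather than once per folio. Below is a minimal userspace model of that pattern; my_batch, my_batch_add_and_move() and BATCH_SIZE are simplified stand-ins, not the kernel's actual folio_batch implementation.

#include <stdio.h>

#define BATCH_SIZE 15	/* folio_batch also held 15 entries at the time */

struct my_folio { int id; };

struct my_batch {
	unsigned int nr;
	struct my_folio *folios[BATCH_SIZE];
};

typedef void (*move_fn_t)(struct my_folio *folio);

/* Flush: apply the move function to every queued folio, then reset. */
static void my_batch_move(struct my_batch *b, move_fn_t fn)
{
	for (unsigned int i = 0; i < b->nr; i++)
		fn(b->folios[i]);
	b->nr = 0;
}

static void my_batch_add_and_move(struct my_batch *b,
				  struct my_folio *folio, move_fn_t fn)
{
	b->folios[b->nr++] = folio;
	/* Only pay the (modelled) LRU-lock cost once per full batch. */
	if (b->nr == BATCH_SIZE)
		my_batch_move(b, fn);
}

static void deactivate(struct my_folio *folio)
{
	printf("moving folio %d to the inactive list\n", folio->id);
}

int main(void)
{
	static struct my_batch batch;	/* stands in for the per-CPU batch */
	struct my_folio folios[20];

	for (int i = 0; i < 20; i++) {
		folios[i].id = i;
		my_batch_add_and_move(&batch, &folios[i], deactivate);
	}
	my_batch_move(&batch, deactivate);	/* drain the remainder */
	return 0;
}

In the patch itself the same split is visible: deactivate_file_folio() only adds to the per-CPU batch, while lru_add_drain_cpu() drains whatever is left, mirroring the final my_batch_move() call here.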