author    | Matthew Wilcox (Oracle) | 2024-02-27 17:42:45 +0000
committer | Andrew Morton           | 2024-03-04 17:01:25 -0800
commit    | bc2ff4cbc3294c01f29449405c42ee26ee0e1f59 (patch)
tree      | 551f9a02091ff7a5c94fafae743618d3ab4eaffb /mm/vmscan.c
parent    | f77171d241e379ea93448a53d58104191e02135c (diff)
mm: free folios in a batch in shrink_folio_list()
Use free_unref_page_batch() to free the folios. This may increase the
number of IPIs from calling try_to_unmap_flush() more often, but that's
going to be very workload-dependent. It may even reduce the number of
IPIs as we now batch-free large folios instead of freeing them one at a
time.
Link: https://lkml.kernel.org/r/20240227174254.710559-12-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: David Hildenbrand <david@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r-- | mm/vmscan.c | 20
1 file changed, 9 insertions(+), 11 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index dcfbe617e9ef..144879d5bb89 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1006,14 +1006,15 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 		struct pglist_data *pgdat, struct scan_control *sc,
 		struct reclaim_stat *stat, bool ignore_references)
 {
+	struct folio_batch free_folios;
 	LIST_HEAD(ret_folios);
-	LIST_HEAD(free_folios);
 	LIST_HEAD(demote_folios);
 	unsigned int nr_reclaimed = 0;
 	unsigned int pgactivate = 0;
 	bool do_demote_pass;
 	struct swap_iocb *plug = NULL;
 
+	folio_batch_init(&free_folios);
 	memset(stat, 0, sizeof(*stat));
 	cond_resched();
 	do_demote_pass = can_demote(pgdat->node_id, sc);
@@ -1412,14 +1413,11 @@ free_it:
 		 */
 		nr_reclaimed += nr_pages;
 
-		/*
-		 * Is there need to periodically free_folio_list? It would
-		 * appear not as the counts should be low
-		 */
-		if (unlikely(folio_test_large(folio)))
-			destroy_large_folio(folio);
-		else
-			list_add(&folio->lru, &free_folios);
+		if (folio_batch_add(&free_folios, folio) == 0) {
+			mem_cgroup_uncharge_folios(&free_folios);
+			try_to_unmap_flush();
+			free_unref_folios(&free_folios);
+		}
 
 		continue;
 activate_locked_split:
@@ -1483,9 +1481,9 @@ keep:
 	pgactivate = stat->nr_activate[0] + stat->nr_activate[1];
 
-	mem_cgroup_uncharge_list(&free_folios);
+	mem_cgroup_uncharge_folios(&free_folios);
 	try_to_unmap_flush();
-	free_unref_page_list(&free_folios);
+	free_unref_folios(&free_folios);
 
 	list_splice(&ret_folios, folio_list);
 	count_vm_events(PGACTIVATE, pgactivate);
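For readers less familiar with the folio_batch idiom the patch adopts, the following is a minimal sketch, not part of the commit: the helper names free_folio_batch() and batch_free_example() are invented for illustration, while the kernel calls themselves (folio_batch_init(), folio_batch_add(), mem_cgroup_uncharge_folios(), try_to_unmap_flush(), free_unref_folios()) are exactly the ones visible in the diff. folio_batch_add() returns the number of free slots remaining, so a return value of zero means the batch just filled and should be drained; the final drain after the loop covers a partially filled batch, which is why the last hunk keeps the uncharge/flush/free sequence at the end of shrink_folio_list().

/*
 * Minimal sketch of the fill-and-drain folio_batch pattern used above.
 * Not part of the patch: free_folio_batch() and batch_free_example() are
 * hypothetical names; free_unref_folios() and try_to_unmap_flush() are
 * declared in mm/internal.h, so this would only build inside mm/.
 */
#include <linux/pagevec.h>	/* struct folio_batch, folio_batch_init/add() */
#include <linux/memcontrol.h>	/* mem_cgroup_uncharge_folios() */
#include "internal.h"		/* free_unref_folios(), try_to_unmap_flush() */

static void free_folio_batch(struct folio_batch *fbatch)
{
	mem_cgroup_uncharge_folios(fbatch);	/* drop memcg charges for every queued folio */
	try_to_unmap_flush();			/* issue any deferred TLB-flush IPIs */
	free_unref_folios(fbatch);		/* batched free; also resets the batch, which is
						 * what lets the diff keep adding after a drain */
}

static void batch_free_example(struct list_head *folio_list)
{
	struct folio_batch free_folios;
	struct folio *folio, *next;

	folio_batch_init(&free_folios);

	list_for_each_entry_safe(folio, next, folio_list, lru) {
		list_del(&folio->lru);
		/*
		 * folio_batch_add() returns the number of slots still
		 * available, so 0 means the batch is now full and must
		 * be drained before the next folio can be queued.
		 */
		if (folio_batch_add(&free_folios, folio) == 0)
			free_folio_batch(&free_folios);
	}

	/* Drain whatever remains in a partially filled batch. */
	free_folio_batch(&free_folios);
}

The ordering inside the drain matters: any deferred TLB flushes must be sent before the folios go back to the page allocator, which is the source of the extra try_to_unmap_flush() calls (and hence the possible extra IPIs) that the commit message discusses.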