path: root/mm/memremap.c
author    Matthew Wilcox (Oracle)    2024-04-05 16:32:27 +0100
committer Andrew Morton              2024-04-25 20:56:44 -0700
commit    9f100e3b37590828ae23b0210ee634d14b28b8e8 (patch)
tree      18da8c0270f6ef2122375fb821e1a1827fe03353 /mm/memremap.c
parent    79a48287515848c18a49d75c1fdf176c82bb13cf (diff)
mm: convert free_zone_device_page to free_zone_device_folio
Both callers already have a folio; pass it in and save a few calls to
compound_head().

Link: https://lkml.kernel.org/r/20240405153228.2563754-6-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
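For illustration only, here is a minimal caller-side sketch of what this conversion enables; the function name and surrounding logic are hypothetical and are not taken from the patch series:

/*
 * Hypothetical caller sketch: a caller that already holds a folio can
 * pass it straight to free_zone_device_folio() instead of passing
 * &folio->page and having the callee redo compound_head().
 */
static void example_free_device_folio(struct folio *folio)
{
	if (folio_is_zone_device(folio))
		free_zone_device_folio(folio);	/* was: free_zone_device_page(&folio->page) */
}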
Diffstat (limited to 'mm/memremap.c')
-rw-r--r--	mm/memremap.c	30
1 file changed, 16 insertions(+), 14 deletions(-)
diff --git a/mm/memremap.c b/mm/memremap.c
index 9e9fb1972fff..e1776693e2ea 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -456,21 +456,23 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);
-void free_zone_device_page(struct page *page)
+void free_zone_device_folio(struct folio *folio)
{
- if (WARN_ON_ONCE(!page->pgmap->ops || !page->pgmap->ops->page_free))
+ if (WARN_ON_ONCE(!folio->page.pgmap->ops ||
+ !folio->page.pgmap->ops->page_free))
return;
- mem_cgroup_uncharge(page_folio(page));
+ mem_cgroup_uncharge(folio);
/*
* Note: we don't expect anonymous compound pages yet. Once supported
* and we could PTE-map them similar to THP, we'd have to clear
* PG_anon_exclusive on all tail pages.
*/
- VM_BUG_ON_PAGE(PageAnon(page) && PageCompound(page), page);
- if (PageAnon(page))
- __ClearPageAnonExclusive(page);
+ if (folio_test_anon(folio)) {
+ VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
+ __ClearPageAnonExclusive(folio_page(folio, 0));
+ }
/*
* When a device managed page is freed, the folio->mapping field
@@ -481,20 +483,20 @@ void free_zone_device_page(struct page *page)
*
* For other types of ZONE_DEVICE pages, migration is either
* handled differently or not done at all, so there is no need
- * to clear page->mapping.
+ * to clear folio->mapping.
*/
- page->mapping = NULL;
- page->pgmap->ops->page_free(page);
+ folio->mapping = NULL;
+ folio->page.pgmap->ops->page_free(folio_page(folio, 0));
- if (page->pgmap->type != MEMORY_DEVICE_PRIVATE &&
- page->pgmap->type != MEMORY_DEVICE_COHERENT)
+ if (folio->page.pgmap->type != MEMORY_DEVICE_PRIVATE &&
+ folio->page.pgmap->type != MEMORY_DEVICE_COHERENT)
/*
- * Reset the page count to 1 to prepare for handing out the page
+ * Reset the refcount to 1 to prepare for handing out the page
* again.
*/
- set_page_count(page, 1);
+ folio_set_count(folio, 1);
else
- put_dev_pagemap(page->pgmap);
+ put_dev_pagemap(folio->page.pgmap);
}
void zone_device_page_init(struct page *page)
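
As background to the refcount-reset branch above (not part of this patch, and only a sketch under assumptions): MEMORY_DEVICE_PRIVATE and MEMORY_DEVICE_COHERENT pages drop their pgmap reference when freed, and a driver is expected to reinitialise such a page with zone_device_page_init() before handing it out again; my_pick_free_page() below is a hypothetical driver helper.

/*
 * Illustrative sketch: reusing a device-private page that was earlier
 * released through free_zone_device_folio().  zone_device_page_init()
 * restores the refcount to 1 and re-takes the pgmap reference so the
 * page can be handed out again.
 */
static struct page *example_alloc_device_private_page(struct dev_pagemap *pgmap)
{
	struct page *page = my_pick_free_page(pgmap);	/* hypothetical free-list lookup */

	if (page)
		zone_device_page_init(page);
	return page;
}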