author     Sidhartha Kumar    2023-01-13 16:30:50 -0600
committer  Andrew Morton      2023-02-13 15:54:26 -0800
commit     6aa3a920125e9f58891e2b5dc2efd4d0c1ff05a6 (patch)
tree       bb27f079f73e04726ec3191cbb0422b34daa09f0
parent     f528260b1a7d52140dfeb58857e13fc98ac193ef (diff)
mm/hugetlb: convert isolate_hugetlb to folios
Patch series "continue hugetlb folio conversion", v3.

This series continues the conversion of core hugetlb functions to use
folios. It converts many helper functions in the hugetlb fault path, in
preparation for another series converting the hugetlb fault code paths
themselves to operate on folios.

This patch (of 8):

Convert isolate_hugetlb() to take in a folio and convert its callers to
pass a folio. Using page_folio() to convert the callers to pass a folio
is safe because isolate_hugetlb() operates on a head page.

Link: https://lkml.kernel.org/r/20230113223057.173292-1-sidhartha.kumar@oracle.com
Link: https://lkml.kernel.org/r/20230113223057.173292-2-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
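For illustration, a minimal sketch of the new calling convention follows. The
helper name example_isolate_hugetlb_page() is hypothetical and not part of this
patch; the sketch assumes the isolate_hugetlb() declaration from
<linux/hugetlb.h> as changed by the diff below.

    /*
     * Hypothetical caller sketch: with the new signature, a caller that
     * still holds a struct page first obtains the head folio with
     * page_folio(), then passes the folio to isolate_hugetlb().
     */
    static int example_isolate_hugetlb_page(struct page *page,
                                            struct list_head *list)
    {
            struct folio *folio = page_folio(page); /* always the head folio */

            /*
             * isolate_hugetlb() returns 0 on success (the folio is moved to
             * @list with an extra reference) or -EBUSY if it cannot be
             * isolated.
             */
            return isolate_hugetlb(folio, list);
    }

This mirrors the caller conversions in the diff: callers that already have a
folio pass it directly, while callers that only have a struct page wrap it in
page_folio().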
-rw-r--r--  include/linux/hugetlb.h    4
-rw-r--r--  mm/gup.c                   2
-rw-r--r--  mm/hugetlb.c              16
-rw-r--r--  mm/memory-failure.c        2
-rw-r--r--  mm/memory_hotplug.c        2
-rw-r--r--  mm/mempolicy.c             2
-rw-r--r--  mm/migrate.c               2
7 files changed, 15 insertions, 15 deletions
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index a51e6daacac6..6e38a019f654 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -171,7 +171,7 @@ bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
long freed);
-int isolate_hugetlb(struct page *page, struct list_head *list);
+int isolate_hugetlb(struct folio *folio, struct list_head *list);
int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison);
int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
bool *migratable_cleared);
@@ -413,7 +413,7 @@ static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
return NULL;
}
-static inline int isolate_hugetlb(struct page *page, struct list_head *list)
+static inline int isolate_hugetlb(struct folio *folio, struct list_head *list)
{
return -EBUSY;
}
diff --git a/mm/gup.c b/mm/gup.c
index 25e4a3d923d6..b0885f70579c 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1930,7 +1930,7 @@ static unsigned long collect_longterm_unpinnable_pages(
continue;
if (folio_test_hugetlb(folio)) {
- isolate_hugetlb(&folio->page, movable_page_list);
+ isolate_hugetlb(folio, movable_page_list);
continue;
}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ab35b1cc9927..0c1e1ce113c8 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2925,7 +2925,7 @@ retry:
* Fail with -EBUSY if not possible.
*/
spin_unlock_irq(&hugetlb_lock);
- ret = isolate_hugetlb(&old_folio->page, list);
+ ret = isolate_hugetlb(old_folio, list);
spin_lock_irq(&hugetlb_lock);
goto free_new;
} else if (!folio_test_hugetlb_freed(old_folio)) {
@@ -3000,7 +3000,7 @@ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
if (hstate_is_gigantic(h))
return -ENOMEM;
- if (folio_ref_count(folio) && !isolate_hugetlb(&folio->page, list))
+ if (folio_ref_count(folio) && !isolate_hugetlb(folio, list))
ret = 0;
else if (!folio_ref_count(folio))
ret = alloc_and_dissolve_hugetlb_folio(h, folio, list);
@@ -7250,19 +7250,19 @@ __weak unsigned long hugetlb_mask_last_page(struct hstate *h)
* These functions are overwritable if your architecture needs its own
* behavior.
*/
-int isolate_hugetlb(struct page *page, struct list_head *list)
+int isolate_hugetlb(struct folio *folio, struct list_head *list)
{
int ret = 0;
spin_lock_irq(&hugetlb_lock);
- if (!PageHeadHuge(page) ||
- !HPageMigratable(page) ||
- !get_page_unless_zero(page)) {
+ if (!folio_test_hugetlb(folio) ||
+ !folio_test_hugetlb_migratable(folio) ||
+ !folio_try_get(folio)) {
ret = -EBUSY;
goto unlock;
}
- ClearHPageMigratable(page);
- list_move_tail(&page->lru, list);
+ folio_clear_hugetlb_migratable(folio);
+ list_move_tail(&folio->lru, list);
unlock:
spin_unlock_irq(&hugetlb_lock);
return ret;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index b4b30d9b0782..db85c2d37f70 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -2508,7 +2508,7 @@ static bool isolate_page(struct page *page, struct list_head *pagelist)
bool isolated = false;
if (PageHuge(page)) {
- isolated = !isolate_hugetlb(page, pagelist);
+ isolated = !isolate_hugetlb(page_folio(page), pagelist);
} else {
bool lru = !__PageMovable(page);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index fd40f7e9f176..a1e8c3e9ab08 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1641,7 +1641,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
if (PageHuge(page)) {
pfn = page_to_pfn(head) + compound_nr(head) - 1;
- isolate_hugetlb(head, &source);
+ isolate_hugetlb(folio, &source);
continue;
} else if (PageTransHuge(page))
pfn = page_to_pfn(head) + thp_nr_pages(page) - 1;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index dd5ca942256f..fc034b070645 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -602,7 +602,7 @@ static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
if (flags & (MPOL_MF_MOVE_ALL) ||
(flags & MPOL_MF_MOVE && page_mapcount(page) == 1 &&
!hugetlb_pmd_shared(pte))) {
- if (isolate_hugetlb(page, qp->pagelist) &&
+ if (isolate_hugetlb(page_folio(page), qp->pagelist) &&
(flags & MPOL_MF_STRICT))
/*
* Failed to isolate page but allow migrating pages
diff --git a/mm/migrate.c b/mm/migrate.c
index 206fcdbe67f3..f6464bce7678 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1773,7 +1773,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
if (PageHuge(page)) {
if (PageHead(page)) {
- err = isolate_hugetlb(page, pagelist);
+ err = isolate_hugetlb(page_folio(page), pagelist);
if (!err)
err = 1;
}