author     Wei Yang    2020-01-30 22:14:32 -0800
committer  Linus Torvalds    2020-01-31 10:30:39 -0800
commit     a8803e6c177f490fe5624397b07f4eb05a15f532 (patch)
tree       2f4391a350de79f0e7d08facaa2f46f01821015d /mm
parent     cb829624867b5ab10bc6a7036d183b1b82bfe9f8 (diff)
mm/huge_memory.c: use head to emphasize the purpose of page
During huge page split, split_huge_page_to_list() checks several properties of the page. Currently the checks are done on a mix of page and head, without emphasizing that they apply to the compound page. When the page passed to split_huge_page_to_list() is a tail page, the reader has to stop and work out whether a given check is on the compound page or on the tail page itself.

To make this explicit, use head instead of page for those checks. After this change it is clear that the checks are on the compound page, while page is only used to perform the split and to dump an error message if the split fails.

Link: http://lkml.kernel.org/r/20200110032610.26499-2-richardw.yang@linux.intel.com
Signed-off-by: Wei Yang <richardw.yang@linux.intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
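As an illustration of the pattern the patch enforces, here is a minimal user-space C sketch (not kernel code; struct my_page, my_compound_head() and my_split() are made-up names for illustration only). A function that may receive a tail page resolves the compound head first and performs all compound-level checks on the head, keeping the original pointer only for the operation itself:

/*
 * User-space sketch of the "check on head, operate on page" pattern.
 * All names here are hypothetical and only mimic the kernel helpers.
 */
#include <stdbool.h>
#include <stdio.h>

struct my_page {
	struct my_page *compound_head;	/* NULL if this page is the head itself */
	bool locked;
	bool writeback;
};

static struct my_page *my_compound_head(struct my_page *page)
{
	return page->compound_head ? page->compound_head : page;
}

static int my_split(struct my_page *page)
{
	struct my_page *head = my_compound_head(page);

	/* Compound-level state lives on the head, so check it there. */
	if (!head->locked) {
		fprintf(stderr, "split: head not locked\n");
		return -1;
	}
	if (head->writeback)
		return -1;	/* mirrors the -EBUSY case in the patch */

	/* ... the split itself would still operate relative to 'page' ... */
	return 0;
}

int main(void)
{
	struct my_page head = { .compound_head = NULL, .locked = true };
	struct my_page tail = { .compound_head = &head };

	/* Passing the tail page still checks the head's state. */
	return my_split(&tail);
}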
Diffstat (limited to 'mm')
-rw-r--r--	mm/huge_memory.c	16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index ee1dafedd8fe..4439e91f8443 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2704,7 +2704,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
{
struct page *head = compound_head(page);
struct pglist_data *pgdata = NODE_DATA(page_to_nid(head));
- struct deferred_split *ds_queue = get_deferred_split_queue(page);
+ struct deferred_split *ds_queue = get_deferred_split_queue(head);
struct anon_vma *anon_vma = NULL;
struct address_space *mapping = NULL;
int count, mapcount, extra_pins, ret;
@@ -2713,10 +2713,10 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
pgoff_t end;
VM_BUG_ON_PAGE(is_huge_zero_page(head), head);
- VM_BUG_ON_PAGE(!PageLocked(page), page);
- VM_BUG_ON_PAGE(!PageCompound(page), page);
+ VM_BUG_ON_PAGE(!PageLocked(head), head);
+ VM_BUG_ON_PAGE(!PageCompound(head), head);
- if (PageWriteback(page))
+ if (PageWriteback(head))
return -EBUSY;
if (PageAnon(head)) {
@@ -2767,7 +2767,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
goto out_unlock;
}
- mlocked = PageMlocked(page);
+ mlocked = PageMlocked(head);
unmap_page(head);
VM_BUG_ON_PAGE(compound_mapcount(head), head);
@@ -2800,10 +2800,10 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
list_del(page_deferred_list(head));
}
if (mapping) {
- if (PageSwapBacked(page))
- __dec_node_page_state(page, NR_SHMEM_THPS);
+ if (PageSwapBacked(head))
+ __dec_node_page_state(head, NR_SHMEM_THPS);
else
- __dec_node_page_state(page, NR_FILE_THPS);
+ __dec_node_page_state(head, NR_FILE_THPS);
}
spin_unlock(&ds_queue->split_queue_lock);