From 7cb1d7ef667716a9ff4e692e7ba1c3817d872222 Mon Sep 17 00:00:00 2001
From: Peter Xu <peterx@redhat.com>
Date: Fri, 3 Mar 2023 10:12:18 -0500
Subject: mm/khugepaged: cleanup memcg uncharge for failure path

Explicit memcg uncharging is not needed when the memcg accounting has
the same lifespan as the page/folio.  That is now the case for
khugepaged after Yang & Zach's recent rework, since the hpage is
allocated fresh for each collapse rather than being cached.

Clean up the explicit memcg uncharge in the khugepaged failure paths
and leave that to put_page().

Link: https://lkml.kernel.org/r/20230303151218.311015-1-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Suggested-by: Zach O'Keefe <zokeefe@google.com>
Reviewed-by: Zach O'Keefe <zokeefe@google.com>
Reviewed-by: Yang Shi <shy828301@gmail.com>
Cc: David Stevens <stevensd@chromium.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 mm/khugepaged.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 17562c692546..074ea534f786 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1135,10 +1135,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 out_up_write:
 	mmap_write_unlock(mm);
 out_nolock:
-	if (hpage) {
-		mem_cgroup_uncharge(page_folio(hpage));
+	if (hpage)
 		put_page(hpage);
-	}
 	trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
 	return result;
 }
@@ -2137,10 +2135,8 @@ xa_unlocked:
 	unlock_page(hpage);
 out:
 	VM_BUG_ON(!list_empty(&pagelist));
-	if (hpage) {
-		mem_cgroup_uncharge(page_folio(hpage));
+	if (hpage)
 		put_page(hpage);
-	}
 	trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file,
 					  nr, result);
 	return result;
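
Note on why put_page() alone suffices here: after the rework, the hpage
is charged to the memcg exactly once, at allocation time, and that
charge lives as long as the folio.  When the failure path drops the
last reference with put_page(), the free path uncharges the memcg
itself (in kernels around this commit, via free_compound_page() /
__folio_put(), though the exact call chain varies by release), so an
explicit mem_cgroup_uncharge() beforehand only duplicates work.  Below
is a minimal userspace sketch of that lifecycle -- not kernel code:
struct folio here, alloc_charged_folio() and folio_put() are simplified
stand-ins invented for illustration.

/*
 * Minimal userspace model of the lifecycle argument above -- NOT
 * kernel code.  The point is only that once the final reference drop
 * uncharges for us, an explicit uncharge on the failure path is
 * redundant.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct folio {
	int refcount;
	bool charged;		/* memcg charge lives as long as the folio */
};

static struct folio *alloc_charged_folio(void)
{
	struct folio *f = malloc(sizeof(*f));

	if (!f)
		return NULL;
	f->refcount = 1;	/* caller holds the only reference */
	f->charged = true;	/* charged once, at allocation time */
	return f;
}

/* Models put_page(): the final reference drop uncharges, then frees. */
static void folio_put(struct folio *f)
{
	if (--f->refcount > 0)
		return;
	if (f->charged) {	/* the free path does the uncharge */
		f->charged = false;
		printf("uncharged on final put\n");
	}
	free(f);
}

int main(void)
{
	struct folio *hpage = alloc_charged_folio();

	assert(hpage != NULL);
	/*
	 * Failure path after the patch: no explicit uncharge; this
	 * put_page() equivalent is the last reference drop, and the
	 * uncharge happens inside it.
	 */
	folio_put(hpage);
	return 0;
}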