author	Shachar Raindel <raindel@mellanox.com>	2015-04-14 15:46:29 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-04-14 16:49:03 -0700
commit	28766805275c12c2298883cece3f98505ac764b4 (patch)
tree	332f2b844899b5c2f86500bdd837bff047e01eb1
parent	4e047f897771222215ee572e1c0b25e9417376eb (diff)
mm: refactor do_wp_page - rewrite the unlock flow
When do_wp_page() is ending, in several cases it needs to unlock the pages and ptls it was accessing. Currently, this logic is reached by a goto jump, which makes the control flow of the function harder to follow. Readability is further hampered by the unlock case containing a large amount of logic needed in only one of the three cases.

Using goto for cleanup is generally fine; however, moving the trivial unlocking flows to the relevant call sites allows deeper refactoring in the next patch.

Signed-off-by: Shachar Raindel <raindel@mellanox.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Haggai Eran <haggaie@mellanox.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Peter Feiner <pfeiner@google.com>
Cc: Michel Lespinasse <walken@google.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
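To illustrate the pattern the patch moves toward (a minimal, self-contained sketch with made-up take_lock(), drop_lock() and stale_pte() helpers, not the kernel code itself):

#include <stdio.h>

static void take_lock(const char *s) { printf("lock %s\n", s); }
static void drop_lock(const char *s) { printf("unlock %s\n", s); }
static int stale_pte(void)           { return 1; }

/* Before: every early exit funnels through one shared cleanup label,
 * so the reader must jump ahead to see what "goto unlock" undoes. */
static int wp_before(void)
{
	take_lock("page");
	take_lock("ptl");
	if (stale_pte())
		goto unlock;
	/* ... the interesting work ... */
unlock:
	drop_lock("ptl");
	drop_lock("page");
	return 0;
}

/* After: the trivial exit unlocks inline and returns immediately;
 * the shared tail only serves the path that actually needs it. */
static int wp_after(void)
{
	take_lock("page");
	take_lock("ptl");
	if (stale_pte()) {
		drop_lock("ptl");
		drop_lock("page");
		return 0;
	}
	/* ... the interesting work ... */
	drop_lock("ptl");
	drop_lock("page");
	return 0;
}

int main(void)
{
	return wp_before() + wp_after();
}

The cost is a few duplicated unlock calls at each early exit; the payoff, as the message says, is control flow that reads top to bottom and leaves the shared tail simple enough for the next patch to refactor further.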
-rw-r--r--	mm/memory.c	21 ++++++++++++---------
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index e70685f3e836..0e28fddafdaf 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2066,7 +2066,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	struct page *old_page, *new_page = NULL;
 	pte_t entry;
-	int ret = 0;
+	int page_copied = 0;
 	unsigned long mmun_start = 0;	/* For mmu_notifiers */
 	unsigned long mmun_end = 0;	/* For mmu_notifiers */
 	struct mem_cgroup *memcg;
@@ -2101,7 +2101,9 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 							 &ptl);
 			if (!pte_same(*page_table, orig_pte)) {
 				unlock_page(old_page);
-				goto unlock;
+				pte_unmap_unlock(page_table, ptl);
+				page_cache_release(old_page);
+				return 0;
 			}
 			page_cache_release(old_page);
 		}
@@ -2148,7 +2150,9 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 							 &ptl);
 			if (!pte_same(*page_table, orig_pte)) {
 				unlock_page(old_page);
-				goto unlock;
+				pte_unmap_unlock(page_table, ptl);
+				page_cache_release(old_page);
+				return 0;
 			}
 			page_mkwrite = 1;
 		}
@@ -2246,29 +2250,28 @@ gotten:
 
 		/* Free the old page.. */
 		new_page = old_page;
-		ret |= VM_FAULT_WRITE;
+		page_copied = 1;
 	} else
 		mem_cgroup_cancel_charge(new_page, memcg);
 
 	if (new_page)
 		page_cache_release(new_page);
-unlock:
+
 	pte_unmap_unlock(page_table, ptl);
-	if (mmun_end > mmun_start)
-		mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 	if (old_page) {
 		/*
 		 * Don't let another task, with possibly unlocked vma,
 		 * keep the mlocked page.
 		 */
-		if ((ret & VM_FAULT_WRITE) && (vma->vm_flags & VM_LOCKED)) {
+		if (page_copied && (vma->vm_flags & VM_LOCKED)) {
 			lock_page(old_page);	/* LRU manipulation */
 			munlock_vma_page(old_page);
 			unlock_page(old_page);
 		}
 		page_cache_release(old_page);
 	}
-	return ret;
+	return page_copied ? VM_FAULT_WRITE : 0;
 oom_free_new:
 	page_cache_release(new_page);
 oom:
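One consequence visible in the last hunk: the "if (mmun_end > mmun_start)" guard around mmu_notifier_invalidate_range_end() goes away. It was only needed because the old "unlock:" label was also reachable from paths that never called mmu_notifier_invalidate_range_start(), where mmun_start and mmun_end were still both 0; with those paths now returning at their call sites, every flow reaching the tail has started the notifier range. A rough stand-alone sketch of that invariant, with invented range_start()/range_end() stubs:

#include <stdio.h>

static void range_start(void) { puts("range_start"); }
static void range_end(void)   { puts("range_end"); }

/* Old shape: the shared tail was reachable both from paths that had
 * called range_start() and from paths that had not, so the end call
 * needed a sentinel check (end > start only once the range was set). */
static void tail_old(unsigned long start, unsigned long end)
{
	if (end > start)
		range_end();
}

/* New shape: the no-copy paths return early and never get here, so
 * the tail can pair range_start()/range_end() unconditionally. */
static void tail_new(void)
{
	range_end();
}

int main(void)
{
	tail_old(0, 0);		/* early-exit path: nothing to end */
	range_start();
	tail_new();		/* copy path: start is always paired */
	return 0;
}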