Diffstat (limited to 'mm/userfaultfd.c')
-rw-r--r-- | mm/userfaultfd.c | 31
1 file changed, 14 insertions, 17 deletions
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 512576e171ce..b80419320c7d 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -56,7 +56,6 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
 			    struct page **pagep,
 			    bool wp_copy)
 {
-	struct mem_cgroup *memcg;
 	pte_t _dst_pte, *dst_pte;
 	spinlock_t *ptl;
 	void *page_kaddr;
@@ -77,7 +76,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
 				     PAGE_SIZE);
 		kunmap_atomic(page_kaddr);
 
-		/* fallback to copy_from_user outside mmap_sem */
+		/* fallback to copy_from_user outside mmap_lock */
 		if (unlikely(ret)) {
 			ret = -ENOENT;
 			*pagep = page;
@@ -97,7 +96,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
 	__SetPageUptodate(page);
 
 	ret = -ENOMEM;
-	if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg, false))
+	if (mem_cgroup_charge(page, dst_mm, GFP_KERNEL))
 		goto out_release;
 
 	_dst_pte = pte_mkdirty(mk_pte(page, dst_vma->vm_page_prot));
@@ -124,7 +123,6 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
 
 	inc_mm_counter(dst_mm, MM_ANONPAGES);
 	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
-	mem_cgroup_commit_charge(page, memcg, false, false);
 	lru_cache_add_active_or_unevictable(page, dst_vma);
 
 	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
@@ -138,7 +136,6 @@ out:
 	return ret;
 out_release_uncharge_unlock:
 	pte_unmap_unlock(dst_pte, ptl);
-	mem_cgroup_cancel_charge(page, memcg, false);
 out_release:
 	put_page(page);
 	goto out;
@@ -203,7 +200,7 @@ static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
 #ifdef CONFIG_HUGETLB_PAGE
 /*
  * __mcopy_atomic processing for HUGETLB vmas. Note that this routine is
- * called with mmap_sem held, it will release mmap_sem before returning.
+ * called with mmap_lock held, it will release mmap_lock before returning.
  */
 static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
 					      struct vm_area_struct *dst_vma,
@@ -231,7 +228,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
 	 * feature is not supported.
	 */
 	if (zeropage) {
-		up_read(&dst_mm->mmap_sem);
+		mmap_read_unlock(dst_mm);
 		return -EINVAL;
 	}
 
@@ -250,7 +247,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
 
 retry:
 	/*
-	 * On routine entry dst_vma is set. If we had to drop mmap_sem and
+	 * On routine entry dst_vma is set. If we had to drop mmap_lock and
 	 * retry, dst_vma will be set to NULL and we must lookup again.
 	 */
 	if (!dst_vma) {
@@ -318,7 +315,7 @@ retry:
 		cond_resched();
 
 		if (unlikely(err == -ENOENT)) {
-			up_read(&dst_mm->mmap_sem);
+			mmap_read_unlock(dst_mm);
 			BUG_ON(!page);
 
 			err = copy_huge_page_from_user(page,
@@ -329,7 +326,7 @@ retry:
 				err = -EFAULT;
 				goto out;
 			}
-			down_read(&dst_mm->mmap_sem);
+			mmap_read_lock(dst_mm);
 
 			dst_vma = NULL;
 			goto retry;
@@ -349,7 +346,7 @@ retry:
 	}
 
 out_unlock:
-	up_read(&dst_mm->mmap_sem);
+	mmap_read_unlock(dst_mm);
 out:
 	if (page) {
 		/*
@@ -360,7 +357,7 @@ out:
 		 * private and shared mappings. See the routine
 		 * restore_reserve_on_error for details. Unfortunately, we
 		 * can not call restore_reserve_on_error now as it would
-		 * require holding mmap_sem.
+		 * require holding mmap_lock.
 		 *
 		 * If a reservation for the page existed in the reservation
 		 * map of a private mapping, the map was modified to indicate
@@ -488,7 +485,7 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
 	copied = 0;
 	page = NULL;
 retry:
-	down_read(&dst_mm->mmap_sem);
+	mmap_read_lock(dst_mm);
 
 	/*
 	 * If memory mappings are changing because of non-cooperative
@@ -586,7 +583,7 @@ retry:
 		if (unlikely(err == -ENOENT)) {
 			void *page_kaddr;
 
-			up_read(&dst_mm->mmap_sem);
+			mmap_read_unlock(dst_mm);
 			BUG_ON(!page);
 
 			page_kaddr = kmap(page);
@@ -615,7 +612,7 @@ retry:
 	}
 
 out_unlock:
-	up_read(&dst_mm->mmap_sem);
+	mmap_read_unlock(dst_mm);
 out:
 	if (page)
 		put_page(page);
@@ -655,7 +652,7 @@ int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
 	/* Does the address range wrap, or is the span zero-sized? */
 	BUG_ON(start + len <= start);
 
-	down_read(&dst_mm->mmap_sem);
+	mmap_read_lock(dst_mm);
 
 	/*
 	 * If memory mappings are changing because of non-cooperative
@@ -689,6 +686,6 @@ int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
 	err = 0;
 
 out_unlock:
-	up_read(&dst_mm->mmap_sem);
+	mmap_read_unlock(dst_mm);
 	return err;
 }
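For reference, the diff applies two mechanical conversions: the mmap_sem read-lock calls become the mmap_lock wrapper API (mmap_read_lock()/mmap_read_unlock()), and the three-step mem_cgroup_try_charge()/mem_cgroup_commit_charge()/mem_cgroup_cancel_charge() sequence collapses into a single mem_cgroup_charge() call. The sketch below only illustrates the resulting pattern; the function name and label are invented for the example and do not appear in the patch.

/*
 * Illustration only, not part of the patch: the call pattern the
 * conversion produces.  example_charge_under_mmap_lock() is a made-up
 * name; the real call sites are in mm/userfaultfd.c.
 */
#include <linux/mm.h>
#include <linux/mmap_lock.h>
#include <linux/memcontrol.h>

static int example_charge_under_mmap_lock(struct mm_struct *mm,
					  struct page *page)
{
	int ret = -ENOMEM;

	mmap_read_lock(mm);		/* was: down_read(&mm->mmap_sem) */

	/*
	 * One call charges the page to mm's memcg; the old
	 * try_charge/commit_charge/cancel_charge triple (and the
	 * struct mem_cgroup *memcg local it needed) is gone.
	 */
	if (mem_cgroup_charge(page, mm, GFP_KERNEL))
		goto out_unlock;
	ret = 0;

out_unlock:
	mmap_read_unlock(mm);		/* was: up_read(&mm->mmap_sem) */
	return ret;
}

The wrappers keep call sites independent of how the mmap lock is implemented, which is the point of the mmap_lock API; the memcg change reflects mem_cgroup_charge() committing the charge internally, which is why the error path in the diff now just does put_page() instead of an explicit cancel.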