 fs/proc/task_mmu.c             |  2 +-
 include/linux/mm.h             |  2 +-
 include/linux/mm_types.h       | 10 ----------
 include/linux/sched/coredump.h |  8 ++++++++
 kernel/fork.c                  |  1 -
 mm/gup.c                       | 19 +++++++++++++-----
 6 files changed, 25 insertions(+), 17 deletions(-)
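
In short: the dedicated atomic_t has_pinned in struct mm_struct is folded into a sticky bit, MMF_HAS_PINNED, in the existing mm->flags word. A minimal before/after sketch of the check sites touched below (names taken from the hunks themselves; mm stands for the mm_struct at hand):

	/* Before: dedicated atomic_t field in struct mm_struct. */
	if (!atomic_read(&mm->has_pinned))
		return false;

	/* After: bit 28 of the existing mm->flags bitmap. */
	if (!test_bit(MMF_HAS_PINNED, &mm->flags))
		return false;
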
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index fc9784544b24..66965ad88d8b 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1047,7 +1047,7 @@ static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr,
return false;
if (!is_cow_mapping(vma->vm_flags))
return false;
- if (likely(!atomic_read(&vma->vm_mm->has_pinned)))
+ if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)))
return false;
page = vm_normal_page(vma, addr, pte);
if (!page)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e39ed497578b..79f32962d7ae 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1341,7 +1341,7 @@ static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
if (!is_cow_mapping(vma->vm_flags))
return false;

- if (!atomic_read(&vma->vm_mm->has_pinned))
+ if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
return false;

return page_maybe_dma_pinned(page);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 8f0fb62e8975..b66d0225414e 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -435,16 +435,6 @@ struct mm_struct {
*/
atomic_t mm_count;

- /**
- * @has_pinned: Whether this mm has pinned any pages. This can
- * be either replaced in the future by @pinned_vm when it
- * becomes stable, or grow into a counter on its own. We're
- * aggresive on this bit now - even if the pinned pages were
- * unpinned later on, we'll still keep this bit set for the
- * lifecycle of this mm just for simplicity.
- */
- atomic_t has_pinned;
-
#ifdef CONFIG_MMU
atomic_long_t pgtables_bytes; /* PTE page table pages */
#endif
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
index dfd82eab2902..4d9e3a656875 100644
--- a/include/linux/sched/coredump.h
+++ b/include/linux/sched/coredump.h
@@ -73,6 +73,14 @@ static inline int get_dumpable(struct mm_struct *mm)
#define MMF_OOM_VICTIM 25 /* mm is the oom victim */
#define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */
#define MMF_MULTIPROCESS 27 /* mm is shared between processes */
+/*
+ * MMF_HAS_PINNED: Whether this mm has pinned any pages. This can be either
+ * replaced in the future by mm.pinned_vm when it becomes stable, or grow into
+ * a counter on its own. We're aggressive on this bit for now: even if the
+ * pinned pages were unpinned later on, we'll still keep this bit set for the
+ * lifecycle of this mm, just for simplicity.
+ */
+#define MMF_HAS_PINNED 28 /* FOLL_PIN has run, never cleared */
#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)

#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
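
A hedged illustration of the "never cleared" semantics described in the new comment, as a hypothetical kernel-context caller (pin/unpin API from mm/gup.c; error handling elided):

	struct page *page;

	pin_user_pages_fast(addr, 1, 0, &page);	/* sets MMF_HAS_PINNED */
	unpin_user_page(page);			/* bit stays set */
	WARN_ON(!test_bit(MMF_HAS_PINNED, &current->mm->flags));
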
diff --git a/kernel/fork.c b/kernel/fork.c
index a070caed5c8e..c6747d556ef9 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1029,7 +1029,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
mm_pgtables_bytes_init(mm);
mm->map_count = 0;
mm->locked_vm = 0;
- atomic_set(&mm->has_pinned, 0);
atomic64_set(&mm->pinned_vm, 0);
memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
spin_lock_init(&mm->page_table_lock);
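
The deleted reset needs no test_bit()/clear_bit() replacement: mm->flags is initialized from the parent's flags masked by MMF_INIT_MASK, which does not include MMF_HAS_PINNED, so every new mm starts with the bit clear. A simplified sketch of that initialization (paraphrased from mm_init() in kernel/fork.c of this era; not part of this diff):

	if (current->mm)
		mm->flags = current->mm->flags & MMF_INIT_MASK;	/* bit 28 masked off */
	else
		mm->flags = default_dump_filter;
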
diff --git a/mm/gup.c b/mm/gup.c
index a6c20a7b3c49..8651309f8ec3 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -420,6 +420,17 @@ void unpin_user_pages(struct page **pages, unsigned long npages)
}
EXPORT_SYMBOL(unpin_user_pages);

+/*
+ * Set MMF_HAS_PINNED if it is not set yet; once set, the bit stays for the
+ * mm's lifetime. Avoid setting the bit unconditionally, since that can cause
+ * write cache bouncing on large SMP machines under concurrent pinned gups.
+ */
+static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
+{
+ if (!test_bit(MMF_HAS_PINNED, mm_flags))
+ set_bit(MMF_HAS_PINNED, mm_flags);
+}
+
#ifdef CONFIG_MMU
static struct page *no_page_table(struct vm_area_struct *vma,
unsigned int flags)
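
Why the helper reads before writing: an unconditional set_bit() is a locked read-modify-write that dirties the cache line holding mm->flags on every pinned gup, whereas the test_bit() fast path leaves the line shared once the bit is set. A standalone C11 sketch of the same pattern (hypothetical userspace code, not part of the patch):

	#include <stdatomic.h>

	static void set_sticky_bit(atomic_ulong *flags, unsigned int bit)
	{
		/* Plain load on the common path: once the bit is set,
		 * callers only read and the cache line can stay shared. */
		if (!(atomic_load_explicit(flags, memory_order_relaxed) & (1UL << bit)))
			atomic_fetch_or(flags, 1UL << bit);	/* RMW only while unset */
	}
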
@@ -1320,8 +1331,8 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
BUG_ON(*locked != 1);
}

- if ((flags & FOLL_PIN) && !atomic_read(&mm->has_pinned))
- atomic_set(&mm->has_pinned, 1);
+ if (flags & FOLL_PIN)
+ mm_set_has_pinned_flag(&mm->flags);

/*
* FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
@@ -2641,8 +2652,8 @@ static int internal_get_user_pages_fast(unsigned long start,
FOLL_FAST_ONLY)))
return -EINVAL;

- if ((gup_flags & FOLL_PIN) && !atomic_read(&current->mm->has_pinned))
- atomic_set(&current->mm->has_pinned, 1);
+ if (gup_flags & FOLL_PIN)
+ mm_set_has_pinned_flag(&current->mm->flags);

if (!(gup_flags & FOLL_FAST_ONLY))
might_lock_read(&current->mm->mmap_lock);
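
For context, a hedged sketch of how a pin request reaches the new helper, simplified from the pin_user_pages_fast() of this period (the FOLL_GET sanity check is an assumption about the surrounding code, not shown in this diff):

	int pin_user_pages_fast(unsigned long start, int nr_pages,
				unsigned int gup_flags, struct page **pages)
	{
		/* FOLL_PIN makes internal_get_user_pages_fast() call
		 * mm_set_has_pinned_flag(&current->mm->flags) before pinning. */
		if (WARN_ON_ONCE(gup_flags & FOLL_GET))
			return -EINVAL;

		gup_flags |= FOLL_PIN;
		return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
	}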