author		Linus Torvalds	2022-12-10 17:10:52 -0800
committer	Linus Torvalds	2022-12-10 17:10:52 -0800
commit		4cee37b3a4e68c42b867c87a6218e11bc571ba66 (patch)
tree		52cd494ae1dc635f6a0f6d4fa4629b5d6dfe4328 /mm
parent		296a7b7eb79246912de31ee799cb85220931231a (diff)
parent		4a7ba45b1a435e7097ca0f79a847d0949d0eb088 (diff)
Merge tag 'mm-hotfixes-stable-2022-12-10-1' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull misc fixes from Andrew Morton:
 "Nine hotfixes. Six for MM, three for other areas. Four of these
  patches address post-6.0 issues."

* tag 'mm-hotfixes-stable-2022-12-10-1' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  memcg: fix possible use-after-free in memcg_write_event_control()
  MAINTAINERS: update Muchun Song's email
  mm/gup: fix gup_pud_range() for dax
  mmap: fix do_brk_flags() modifying obviously incorrect VMAs
  mm/swap: fix SWP_PFN_BITS with CONFIG_PHYS_ADDR_T_64BIT on 32bit
  tmpfs: fix data loss from failed fallocate
  kselftests: cgroup: update kmem test precision tolerance
  mm: do not BUG_ON missing brk mapping, because userspace can unmap it
  mailmap: update Matti Vaittinen's email address
Diffstat (limited to 'mm')
-rw-r--r--	mm/gup.c	2
-rw-r--r--	mm/mmap.c	14
-rw-r--r--	mm/shmem.c	11
3 files changed, 16 insertions, 11 deletions
diff --git a/mm/gup.c b/mm/gup.c
index fe195d47de74..3b7bc2c1fd44 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2852,7 +2852,7 @@ static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned lo
 		next = pud_addr_end(addr, end);
 		if (unlikely(!pud_present(pud)))
 			return 0;
-		if (unlikely(pud_huge(pud))) {
+		if (unlikely(pud_huge(pud) || pud_devmap(pud))) {
 			if (!gup_huge_pud(pud, pudp, addr, next, flags,
 					  pages, nr))
 				return 0;
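
Context for the one-liner above: with DAX, a PUD-sized entry can be pud_devmap() without pud_huge() being true, so the old check let fast GUP fall through and try to walk a PMD table that does not exist. Below is a minimal userspace model of the dispatch, not kernel code; struct pud and classify() are invented stand-ins for the real page-table predicates:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of a PUD entry; the real predicates are pud_present(),
 * pud_huge() and pud_devmap() on a hardware page-table entry. */
struct pud { bool present, huge, devmap; };

static const char *classify(struct pud pud)
{
	if (!pud.present)
		return "bail out: not present";
	if (pud.huge || pud.devmap)	/* the fixed check */
		return "handled as a huge PUD (gup_huge_pud path)";
	return "descends to the PMD level (gup_pmd_range path)";
}

int main(void)
{
	struct pud dax = { .present = true, .huge = false, .devmap = true };
	struct pud hug = { .present = true, .huge = true,  .devmap = false };

	/* Before the fix only .huge was tested, so the devmap-only DAX
	 * entry incorrectly took the "descend to PMD" branch. */
	printf("devmap-only PUD: %s\n", classify(dax));
	printf("hugetlb PUD:     %s\n", classify(hug));
	return 0;
}

Compiled with any C compiler, it shows the devmap-only entry now taking the huge-PUD path.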
diff --git a/mm/mmap.c b/mm/mmap.c
index a5eb2f175da0..54abd46e6007 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -226,8 +226,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 		/* Search one past newbrk */
 		mas_set(&mas, newbrk);
 		brkvma = mas_find(&mas, oldbrk);
-		BUG_ON(brkvma == NULL);
-		if (brkvma->vm_start >= oldbrk)
+		if (!brkvma || brkvma->vm_start >= oldbrk)
 			goto out; /* mapping intersects with an existing non-brk vma. */
 		/*
 		 * mm->brk must be protected by write mmap_lock.
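
The deleted BUG_ON() treated a missing brk VMA as a kernel invariant, but userspace can legally munmap() the brk mapping and later shrink the break, which makes a NULL lookup a reachable, valid state. A hedged repro sketch in plain C (assumes glibc sbrk() semantics) that would have tripped the assertion on an unpatched kernel:

#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	uintptr_t brk0 = (uintptr_t)sbrk(0);
	/* Round up to the next page boundary inside the brk area. */
	uintptr_t page = (brk0 + psz - 1) & ~((uintptr_t)psz - 1);

	/* Grow the heap so it contains at least one whole page. */
	if (sbrk((intptr_t)((page + psz) - brk0)) == (void *)-1)
		return 1;

	/* Perfectly legal: unmap the page we just obtained via brk. */
	if (munmap((void *)page, psz) != 0)
		return 1;

	/* Shrinking the break now finds no brk VMA.  A patched kernel
	 * bails out gracefully; the old one hit BUG_ON(brkvma == NULL). */
	sbrk(-(intptr_t)psz);
	printf("break after shrink attempt: %p\n", sbrk(0));
	return 0;
}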
@@ -2946,9 +2945,9 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma,
 	 * Expand the existing vma if possible; Note that singular lists do not
 	 * occur after forking, so the expand will only happen on new VMAs.
 	 */
-	if (vma &&
-	    (!vma->anon_vma || list_is_singular(&vma->anon_vma_chain)) &&
-	    ((vma->vm_flags & ~VM_SOFTDIRTY) == flags)) {
+	if (vma && vma->vm_end == addr && !vma_policy(vma) &&
+	    can_vma_merge_after(vma, flags, NULL, NULL,
+				addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL)) {
 		mas_set_range(mas, vma->vm_start, addr + len - 1);
 		if (mas_preallocate(mas, vma, GFP_KERNEL))
 			return -ENOMEM;
@@ -3035,11 +3034,6 @@ int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
 		goto munmap_failed;
 
 	vma = mas_prev(&mas, 0);
-	if (!vma || vma->vm_end != addr || vma_policy(vma) ||
-	    !can_vma_merge_after(vma, flags, NULL, NULL,
-				 addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL))
-		vma = NULL;
-
 	ret = do_brk_flags(&mas, vma, addr, len, flags);
 	populate = ((mm->def_flags & VM_LOCKED) != 0);
 	mmap_write_unlock(mm);
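
Together, these two hunks move the merge-eligibility test (vma->vm_end == addr, no mempolicy, can_vma_merge_after()) out of vm_brk_flags() and into do_brk_flags(), so the expand path can no longer be handed a VMA it must not modify. The intended behaviour is observable from userspace: eligible brk growth extends a single [heap] VMA instead of stacking new ones. A small observation sketch, assuming a Linux /proc filesystem:

#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Two separate brk extensions... */
	if (sbrk(4096) == (void *)-1 || sbrk(4096) == (void *)-1)
		return 1;

	/* ...should still show up as one [heap] line when do_brk_flags()
	 * judged the existing brk VMA merge-eligible and expanded it. */
	FILE *maps = fopen("/proc/self/maps", "r");
	char line[512];
	if (!maps)
		return 1;
	while (fgets(line, sizeof(line), maps))
		if (strstr(line, "[heap]"))
			fputs(line, stdout);
	fclose(maps);
	return 0;
}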
diff --git a/mm/shmem.c b/mm/shmem.c
index c1d8b8a1aa3b..82911fefc2d5 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -948,6 +948,15 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 		index++;
 	}
 
+	/*
+	 * When undoing a failed fallocate, we want none of the partial folio
+	 * zeroing and splitting below, but shall want to truncate the whole
+	 * folio when !uptodate indicates that it was added by this fallocate,
+	 * even when [lstart, lend] covers only a part of the folio.
+	 */
+	if (unfalloc)
+		goto whole_folios;
+
 	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
 	folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
 	if (folio) {
@@ -973,6 +982,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 		folio_put(folio);
 	}
 
+whole_folios:
+
 	index = start;
 	while (index < end) {
 		cond_resched();
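
The whole_folios label short-circuits the partial-folio zeroing and splitting when shmem_undo_range() is undoing a failed fallocate: folios that are !uptodate were added by that very fallocate and must be truncated outright, otherwise previously written bytes sharing a (large) folio with the failed range could be thrown away. A hedged repro sketch, assuming /tmp is a tmpfs small enough that the oversized request fails with ENOSPC; the path and sizes are illustrative, and the original data loss additionally needed the failing range to share a folio with live data:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/falloc-test", O_RDWR | O_CREAT | O_TRUNC, 0600);
	char buf[5] = { 0 };

	if (fd < 0)
		return 1;
	pwrite(fd, "data", 4, 0);

	/* Request far more than the tmpfs can back, so fallocate fails
	 * partway and shmem_undo_range() must clean up what it added. */
	if (fallocate(fd, 0, 4, (off_t)1 << 40) != 0)
		perror("fallocate (expected to fail)");

	pread(fd, buf, 4, 0);
	printf("bytes written before the failed fallocate: \"%s\"\n", buf);

	close(fd);
	unlink("/tmp/falloc-test");
	return 0;
}

On a fixed kernel the final read prints "data" intact even though the fallocate failed.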