author	Joel Fernandes (Google)	2023-09-04 18:08:04 +0000
committer	Greg Kroah-Hartman	2023-09-13 09:43:00 +0200
commit	4245ca8f4051459beebb70f189329b670efa32a1 (patch)
tree	cb648b23e07eb56f8843cc3d20fa49b0c5d75dfb /mm
parent	157c46360cf3afc32c3864543301c6c3dd00b3a5 (diff)
mm/vmalloc: add a safer version of find_vm_area() for debug
commit 0818e739b5c061b0251c30152380600fb9b84c0c upstream.

It is unsafe to dump vmalloc area information when trying to do so from
some contexts.  Add a safer trylock version of the same function to do a
best-effort VMA finding and use it from vmalloc_dump_obj().

[applied test robot feedback on unused function fix.]
[applied Uladzislau feedback on locking.]

Link: https://lkml.kernel.org/r/20230904180806.1002832-1-joel@joelfernandes.org
Fixes: 98f180837a89 ("mm: Make mem_dump_obj() handle vmalloc() memory")
Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Reported-by: Zhen Lei <thunder.leizhen@huaweicloud.com>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Zqiang <qiang.zhang1211@gmail.com>
Cc: <stable@vger.kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
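[Editorial illustration, not part of the commit: the best-effort idea the message describes can be sketched outside the kernel. The following minimal userspace C sketch uses pthread_mutex_trylock as a stand-in for spin_trylock on vmap_area_lock; the structure, lookup helper, and names are hypothetical, not kernel code.]

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for vmap_area_lock and the vmap area tree. */
	struct area_info {
		unsigned long addr;
		unsigned int nr_pages;
		const char *caller;
	};

	static pthread_mutex_t area_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct area_info example_area = { 0x1000, 4, "example_alloc" };

	/* Toy lookup: a real implementation would walk a tree under the lock. */
	static struct area_info *lookup_area(unsigned long addr)
	{
		return (addr == example_area.addr) ? &example_area : NULL;
	}

	/*
	 * Best-effort dump: if the lock cannot be taken (the caller may already
	 * hold it, or be in a context where waiting is unsafe), give up rather
	 * than deadlock.  Snapshot the fields under the lock and print only
	 * after dropping it.
	 */
	static bool dump_area(unsigned long addr)
	{
		struct area_info *a;
		unsigned long start;
		unsigned int nr_pages;
		const char *caller;

		if (pthread_mutex_trylock(&area_lock) != 0)
			return false;	/* lock busy: best effort, bail out */

		a = lookup_area(addr);
		if (!a) {
			pthread_mutex_unlock(&area_lock);
			return false;
		}

		start = a->addr;
		nr_pages = a->nr_pages;
		caller = a->caller;
		pthread_mutex_unlock(&area_lock);

		printf("%u-page region starting at %#lx allocated at %s\n",
		       nr_pages, start, caller);
		return true;
	}

	int main(void)
	{
		dump_area(0x1000);	/* prints the example area */
		dump_area(0x2000);	/* no such area: silently returns false */
		return 0;
	}

[The point mirrored from the patch is that the fields are copied while the lock is held and the printing happens only after the lock is released, so the reporting path never runs with the lock held.]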
Diffstat (limited to 'mm')
-rw-r--r--	mm/vmalloc.c	26
1 files changed, 22 insertions, 4 deletions
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 80bd104a4d42..67a10a04df04 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -4041,14 +4041,32 @@ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
#ifdef CONFIG_PRINTK
bool vmalloc_dump_obj(void *object)
{
-	struct vm_struct *vm;
	void *objp = (void *)PAGE_ALIGN((unsigned long)object);
+	const void *caller;
+	struct vm_struct *vm;
+	struct vmap_area *va;
+	unsigned long addr;
+	unsigned int nr_pages;
+
+	if (!spin_trylock(&vmap_area_lock))
+		return false;
+	va = __find_vmap_area((unsigned long)objp, &vmap_area_root);
+	if (!va) {
+		spin_unlock(&vmap_area_lock);
+		return false;
+	}

-	vm = find_vm_area(objp);
-	if (!vm)
+	vm = va->vm;
+	if (!vm) {
+		spin_unlock(&vmap_area_lock);
		return false;
+	}
+	addr = (unsigned long)vm->addr;
+	caller = vm->caller;
+	nr_pages = vm->nr_pages;
+	spin_unlock(&vmap_area_lock);
	pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
-		vm->nr_pages, (unsigned long)vm->addr, vm->caller);
+		nr_pages, addr, caller);
	return true;
}
#endif