From bdc1c5fac982845a58d28690cdb56db8c88a530d Mon Sep 17 00:00:00 2001
From: Carlos Llamas
Date: Fri, 5 May 2023 20:30:20 +0000
Subject: binder: fix UAF caused by faulty buffer cleanup

In binder_transaction_buffer_release() the 'failed_at' offset indicates
the number of objects to clean up. However, this function was changed
by commit 44d8047f1d87 ("binder: use standard functions to allocate
fds"), to release all the objects in the buffer when 'failed_at' is
zero.

This introduced an issue when a transaction buffer is released without
any objects having been processed so far. In this case, 'failed_at' is
indeed zero yet it is misinterpreted as releasing the entire buffer.

This leads to use-after-free errors where nodes are incorrectly freed
and subsequently accessed. Such is the case in the following KASAN
report:

==================================================================
BUG: KASAN: slab-use-after-free in binder_thread_read+0xc40/0x1f30
Read of size 8 at addr ffff4faf037cfc58 by task poc/474

CPU: 6 PID: 474 Comm: poc Not tainted 6.3.0-12570-g7df047b3f0aa #5
Hardware name: linux,dummy-virt (DT)
Call trace:
 dump_backtrace+0x94/0xec
 show_stack+0x18/0x24
 dump_stack_lvl+0x48/0x60
 print_report+0xf8/0x5b8
 kasan_report+0xb8/0xfc
 __asan_load8+0x9c/0xb8
 binder_thread_read+0xc40/0x1f30
 binder_ioctl+0xd9c/0x1768
 __arm64_sys_ioctl+0xd4/0x118
 invoke_syscall+0x60/0x188
 [...]

Allocated by task 474:
 kasan_save_stack+0x3c/0x64
 kasan_set_track+0x2c/0x40
 kasan_save_alloc_info+0x24/0x34
 __kasan_kmalloc+0xb8/0xbc
 kmalloc_trace+0x48/0x5c
 binder_new_node+0x3c/0x3a4
 binder_transaction+0x2b58/0x36f0
 binder_thread_write+0x8e0/0x1b78
 binder_ioctl+0x14a0/0x1768
 __arm64_sys_ioctl+0xd4/0x118
 invoke_syscall+0x60/0x188
 [...]

Freed by task 475:
 kasan_save_stack+0x3c/0x64
 kasan_set_track+0x2c/0x40
 kasan_save_free_info+0x38/0x5c
 __kasan_slab_free+0xe8/0x154
 __kmem_cache_free+0x128/0x2bc
 kfree+0x58/0x70
 binder_dec_node_tmpref+0x178/0x1fc
 binder_transaction_buffer_release+0x430/0x628
 binder_transaction+0x1954/0x36f0
 binder_thread_write+0x8e0/0x1b78
 binder_ioctl+0x14a0/0x1768
 __arm64_sys_ioctl+0xd4/0x118
 invoke_syscall+0x60/0x188
 [...]
==================================================================

In order to avoid these issues, let's always calculate the intended
'failed_at' offset beforehand. This is renamed and wrapped in a helper
function to make it clear and convenient.
Fixes: 32e9f56a96d8 ("binder: don't detect sender/target during buffer cleanup")
Reported-by: Zi Fan Tan
Cc: stable@vger.kernel.org
Signed-off-by: Carlos Llamas
Acked-by: Todd Kjos
Link: https://lore.kernel.org/r/20230505203020.4101154-1-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman
---
 drivers/android/binder.c | 28 +++++++++++++++++++++-------
 1 file changed, 21 insertions(+), 7 deletions(-)

(limited to 'drivers/android')

diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index fb56bfc45096..8fb7672021ee 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1934,24 +1934,23 @@ static void binder_deferred_fd_close(int fd)
 static void binder_transaction_buffer_release(struct binder_proc *proc,
                                               struct binder_thread *thread,
                                               struct binder_buffer *buffer,
-                                              binder_size_t failed_at,
+                                              binder_size_t off_end_offset,
                                               bool is_failure)
 {
         int debug_id = buffer->debug_id;
-        binder_size_t off_start_offset, buffer_offset, off_end_offset;
+        binder_size_t off_start_offset, buffer_offset;
 
         binder_debug(BINDER_DEBUG_TRANSACTION,
                      "%d buffer release %d, size %zd-%zd, failed at %llx\n",
                      proc->pid, buffer->debug_id,
                      buffer->data_size, buffer->offsets_size,
-                     (unsigned long long)failed_at);
+                     (unsigned long long)off_end_offset);
 
         if (buffer->target_node)
                 binder_dec_node(buffer->target_node, 1, 0);
 
         off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
-        off_end_offset = is_failure && failed_at ? failed_at :
-                                off_start_offset + buffer->offsets_size;
+
         for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
              buffer_offset += sizeof(binder_size_t)) {
                 struct binder_object_header *hdr;
@@ -2111,6 +2110,21 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
         }
 }
 
+/* Clean up all the objects in the buffer */
+static inline void binder_release_entire_buffer(struct binder_proc *proc,
+                                                struct binder_thread *thread,
+                                                struct binder_buffer *buffer,
+                                                bool is_failure)
+{
+        binder_size_t off_end_offset;
+
+        off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
+        off_end_offset += buffer->offsets_size;
+
+        binder_transaction_buffer_release(proc, thread, buffer,
+                                          off_end_offset, is_failure);
+}
+
 static int binder_translate_binder(struct flat_binder_object *fp,
                                    struct binder_transaction *t,
                                    struct binder_thread *thread)
@@ -2806,7 +2820,7 @@ static int binder_proc_transaction(struct binder_transaction *t,
                 t_outdated->buffer = NULL;
                 buffer->transaction = NULL;
                 trace_binder_transaction_update_buffer_release(buffer);
-                binder_transaction_buffer_release(proc, NULL, buffer, 0, 0);
+                binder_release_entire_buffer(proc, NULL, buffer, false);
                 binder_alloc_free_buf(&proc->alloc, buffer);
                 kfree(t_outdated);
                 binder_stats_deleted(BINDER_STAT_TRANSACTION);
@@ -3775,7 +3789,7 @@ binder_free_buf(struct binder_proc *proc,
                 binder_node_inner_unlock(buf_node);
         }
         trace_binder_transaction_buffer_release(buffer);
-        binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
+        binder_release_entire_buffer(proc, thread, buffer, is_failure);
         binder_alloc_free_buf(&proc->alloc, buffer);
 }
 
-- 
cgit v1.2.3

From b15655b12ddca7ade09807f790bafb6fab61b50a Mon Sep 17 00:00:00 2001
From: Carlos Llamas
Date: Tue, 2 May 2023 20:12:17 +0000
Subject: Revert "binder_alloc: add missing mmap_lock calls when using the VMA"

This reverts commit 44e602b4e52f70f04620bbbf4fe46ecb40170bde.

This caused a performance regression particularly when pages are
getting reclaimed. We don't need to acquire the mmap_lock to determine
when the binder buffer has been fully initialized.
A subsequent patch will bring back the lockless approach for this.

[cmllamas: resolved trivial conflicts with renaming of alloc->mm]

Fixes: 44e602b4e52f ("binder_alloc: add missing mmap_lock calls when using the VMA")
Cc: Liam Howlett
Cc: Suren Baghdasaryan
Cc: stable@vger.kernel.org
Signed-off-by: Carlos Llamas
Link: https://lore.kernel.org/r/20230502201220.1756319-1-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman
---
 drivers/android/binder_alloc.c | 31 ++++++++++---------------------
 1 file changed, 10 insertions(+), 21 deletions(-)

(limited to 'drivers/android')

diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 55a3c3c2409f..92c814ec44fe 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -380,15 +380,12 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
         size_t size, data_offsets_size;
         int ret;
 
-        mmap_read_lock(alloc->mm);
         if (!binder_alloc_get_vma(alloc)) {
-                mmap_read_unlock(alloc->mm);
                 binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                    "%d: binder_alloc_buf, no vma\n",
                                    alloc->pid);
                 return ERR_PTR(-ESRCH);
         }
-        mmap_read_unlock(alloc->mm);
 
         data_offsets_size = ALIGN(data_size, sizeof(void *)) +
                 ALIGN(offsets_size, sizeof(void *));
@@ -916,25 +913,17 @@ void binder_alloc_print_pages(struct seq_file *m,
          * Make sure the binder_alloc is fully initialized, otherwise we might
          * read inconsistent state.
          */
-
-        mmap_read_lock(alloc->mm);
-        if (binder_alloc_get_vma(alloc) == NULL) {
-                mmap_read_unlock(alloc->mm);
-                goto uninitialized;
-        }
-
-        mmap_read_unlock(alloc->mm);
-        for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
-                page = &alloc->pages[i];
-                if (!page->page_ptr)
-                        free++;
-                else if (list_empty(&page->lru))
-                        active++;
-                else
-                        lru++;
+        if (binder_alloc_get_vma(alloc) != NULL) {
+                for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+                        page = &alloc->pages[i];
+                        if (!page->page_ptr)
+                                free++;
+                        else if (list_empty(&page->lru))
+                                active++;
+                        else
+                                lru++;
+                }
         }
-
-uninitialized:
         mutex_unlock(&alloc->mutex);
         seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
         seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
-- 
cgit v1.2.3

From c0fd2101781ef761b636769b2f445351f71c3626 Mon Sep 17 00:00:00 2001
From: Carlos Llamas
Date: Tue, 2 May 2023 20:12:18 +0000
Subject: Revert "android: binder: stop saving a pointer to the VMA"

This reverts commit a43cfc87caaf46710c8027a8c23b8a55f1078f19.

This patch fixed an issue reported by syzkaller in [1]. However, this
turned out to be only a band-aid in binder. The root cause, as bisected
by syzkaller, was fixed by commit 5789151e48ac ("mm/mmap: undo ->mmap()
when mas_preallocate() fails"). We no longer need the patch for binder.

Reverting this patch allows us to have lockless access to alloc->vma in
specific cases where the mmap_lock is not required. This approach
avoids the contention that caused a performance regression.
[1] https://lore.kernel.org/all/0000000000004a0dbe05e1d749e0@google.com

[cmllamas: resolved conflicts with rework of alloc->mm and removal of
 binder_alloc_set_vma() also fixed comment section]

Fixes: a43cfc87caaf ("android: binder: stop saving a pointer to the VMA")
Cc: Liam Howlett
Cc: Suren Baghdasaryan
Cc: stable@vger.kernel.org
Signed-off-by: Carlos Llamas
Link: https://lore.kernel.org/r/20230502201220.1756319-2-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman
---
 drivers/android/binder_alloc.c          | 17 +++++++++--------
 drivers/android/binder_alloc.h          |  4 ++--
 drivers/android/binder_alloc_selftest.c |  2 +-
 3 files changed, 12 insertions(+), 11 deletions(-)

(limited to 'drivers/android')

diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 92c814ec44fe..eb082b33115b 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -213,7 +213,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 
         if (mm) {
                 mmap_read_lock(mm);
-                vma = vma_lookup(mm, alloc->vma_addr);
+                vma = alloc->vma;
         }
 
         if (!vma && need_mm) {
@@ -314,9 +314,11 @@ static inline struct vm_area_struct *binder_alloc_get_vma(
 {
         struct vm_area_struct *vma = NULL;
 
-        if (alloc->vma_addr)
-                vma = vma_lookup(alloc->mm, alloc->vma_addr);
-
+        if (alloc->vma) {
+                /* Look at description in binder_alloc_set_vma */
+                smp_rmb();
+                vma = alloc->vma;
+        }
         return vma;
 }
 
@@ -775,7 +777,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
         buffer->free = 1;
         binder_insert_free_buffer(alloc, buffer);
         alloc->free_async_space = alloc->buffer_size / 2;
-        alloc->vma_addr = vma->vm_start;
+        alloc->vma = vma;
 
         return 0;
 
@@ -805,8 +807,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 
         buffers = 0;
         mutex_lock(&alloc->mutex);
-        BUG_ON(alloc->vma_addr &&
-               vma_lookup(alloc->mm, alloc->vma_addr));
+        BUG_ON(alloc->vma);
 
         while ((n = rb_first(&alloc->allocated_buffers))) {
                 buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -958,7 +959,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
  */
 void binder_alloc_vma_close(struct binder_alloc *alloc)
 {
-        alloc->vma_addr = 0;
+        alloc->vma = 0;
 }
 
 /**
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 0f811ac4bcff..138d1d5af9ce 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -75,7 +75,7 @@ struct binder_lru_page {
 /**
  * struct binder_alloc - per-binder proc state for binder allocator
  * @mutex:              protects binder_alloc fields
- * @vma_addr:           vm_area_struct->vm_start passed to mmap_handler
+ * @vma:                vm_area_struct passed to mmap_handler
  *                      (invariant after mmap)
  * @mm:                 copy of task->mm (invariant after open)
  * @buffer:             base of per-proc address space mapped via mmap
@@ -99,7 +99,7 @@ struct binder_lru_page {
  */
 struct binder_alloc {
         struct mutex mutex;
-        unsigned long vma_addr;
+        struct vm_area_struct *vma;
         struct mm_struct *mm;
         void __user *buffer;
         struct list_head buffers;
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
index 43a881073a42..c2b323bc3b3a 100644
--- a/drivers/android/binder_alloc_selftest.c
+++ b/drivers/android/binder_alloc_selftest.c
@@ -287,7 +287,7 @@ void binder_selftest_alloc(struct binder_alloc *alloc)
         if (!binder_selftest_run)
                 return;
         mutex_lock(&binder_selftest_lock);
-        if (!binder_selftest_run || !alloc->vma_addr)
+        if (!binder_selftest_run || !alloc->vma)
                 goto done;
         pr_info("STARTED\n");
         binder_selftest_alloc_offset(alloc, end_offset, 0);
-- 
cgit v1.2.3

From 0fa53349c3acba0239369ba4cd133740a408d246 Mon Sep 17 00:00:00 2001
From: Carlos Llamas
Date: Tue, 2 May 2023 20:12:19 +0000
Subject: binder: add lockless binder_alloc_(set|get)_vma()

Bring back the original lockless design in binder_alloc to determine
whether the buffer setup has been completed by the ->mmap() handler.
However, this time use smp_load_acquire() and smp_store_release() to
wrap all the ordering in a single macro call.

Also, add comments to make it evident that binder uses alloc->vma to
determine when the binder_alloc has been fully initialized. In these
scenarios acquiring the mmap_lock is not required.

Fixes: a43cfc87caaf ("android: binder: stop saving a pointer to the VMA")
Cc: Liam Howlett
Cc: Suren Baghdasaryan
Cc: stable@vger.kernel.org
Signed-off-by: Carlos Llamas
Link: https://lore.kernel.org/r/20230502201220.1756319-3-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman
---
 drivers/android/binder_alloc.c | 24 ++++++++++++++----------
 1 file changed, 14 insertions(+), 10 deletions(-)

(limited to 'drivers/android')

diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index eb082b33115b..e7c9d466f8e8 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -309,17 +309,18 @@ err_no_vma:
         return vma ? -ENOMEM : -ESRCH;
 }
 
+static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
+                                        struct vm_area_struct *vma)
+{
+        /* pairs with smp_load_acquire in binder_alloc_get_vma() */
+        smp_store_release(&alloc->vma, vma);
+}
+
 static inline struct vm_area_struct *binder_alloc_get_vma(
                 struct binder_alloc *alloc)
 {
-        struct vm_area_struct *vma = NULL;
-
-        if (alloc->vma) {
-                /* Look at description in binder_alloc_set_vma */
-                smp_rmb();
-                vma = alloc->vma;
-        }
-        return vma;
+        /* pairs with smp_store_release in binder_alloc_set_vma() */
+        return smp_load_acquire(&alloc->vma);
 }
 
 static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
@@ -382,6 +383,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
         size_t size, data_offsets_size;
         int ret;
 
+        /* Check binder_alloc is fully initialized */
         if (!binder_alloc_get_vma(alloc)) {
                 binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                    "%d: binder_alloc_buf, no vma\n",
@@ -777,7 +779,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
         buffer->free = 1;
         binder_insert_free_buffer(alloc, buffer);
         alloc->free_async_space = alloc->buffer_size / 2;
-        alloc->vma = vma;
+
+        /* Signal binder_alloc is fully initialized */
+        binder_alloc_set_vma(alloc, vma);
 
         return 0;
 
@@ -959,7 +963,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
  */
 void binder_alloc_vma_close(struct binder_alloc *alloc)
 {
-        alloc->vma = 0;
+        binder_alloc_set_vma(alloc, NULL);
 }
 
 /**
-- 
cgit v1.2.3

From d1d8875c8c13517f6fd1ff8d4d3e1ac366a17e07 Mon Sep 17 00:00:00 2001
From: Carlos Llamas
Date: Fri, 19 May 2023 19:59:49 +0000
Subject: binder: fix UAF of alloc->vma in race with munmap()

[ cmllamas: clean forward port from commit 015ac18be7de ("binder: fix
  UAF of alloc->vma in race with munmap()") in 5.10 stable. It is
  needed in mainline after the revert of commit a43cfc87caaf ("android:
  binder: stop saving a pointer to the VMA") as pointed out by Liam.
  The commit log and tags have been tweaked to reflect this. ]

In commit 720c24192404 ("ANDROID: binder: change down_write to
down_read") binder assumed the mmap read lock is sufficient to protect
alloc->vma inside binder_update_page_range().
This used to be accurate until commit dd2283f2605e ("mm: mmap: zap
pages with read mmap_sem in munmap"), which now downgrades the
mmap_lock after detaching the vma from the rbtree in munmap(). Then it
proceeds to tear down and free the vma with only the read lock held.

This means that accesses to alloc->vma in binder_update_page_range()
will now race with vm_area_free() in munmap() and can cause a UAF as
shown in the following KASAN trace:

==================================================================
BUG: KASAN: use-after-free in vm_insert_page+0x7c/0x1f0
Read of size 8 at addr ffff16204ad00600 by task server/558

CPU: 3 PID: 558 Comm: server Not tainted 5.10.150-00001-gdc8dcf942daa #1
Hardware name: linux,dummy-virt (DT)
Call trace:
 dump_backtrace+0x0/0x2a0
 show_stack+0x18/0x2c
 dump_stack+0xf8/0x164
 print_address_description.constprop.0+0x9c/0x538
 kasan_report+0x120/0x200
 __asan_load8+0xa0/0xc4
 vm_insert_page+0x7c/0x1f0
 binder_update_page_range+0x278/0x50c
 binder_alloc_new_buf+0x3f0/0xba0
 binder_transaction+0x64c/0x3040
 binder_thread_write+0x924/0x2020
 binder_ioctl+0x1610/0x2e5c
 __arm64_sys_ioctl+0xd4/0x120
 el0_svc_common.constprop.0+0xac/0x270
 do_el0_svc+0x38/0xa0
 el0_svc+0x1c/0x2c
 el0_sync_handler+0xe8/0x114
 el0_sync+0x180/0x1c0

Allocated by task 559:
 kasan_save_stack+0x38/0x6c
 __kasan_kmalloc.constprop.0+0xe4/0xf0
 kasan_slab_alloc+0x18/0x2c
 kmem_cache_alloc+0x1b0/0x2d0
 vm_area_alloc+0x28/0x94
 mmap_region+0x378/0x920
 do_mmap+0x3f0/0x600
 vm_mmap_pgoff+0x150/0x17c
 ksys_mmap_pgoff+0x284/0x2dc
 __arm64_sys_mmap+0x84/0xa4
 el0_svc_common.constprop.0+0xac/0x270
 do_el0_svc+0x38/0xa0
 el0_svc+0x1c/0x2c
 el0_sync_handler+0xe8/0x114
 el0_sync+0x180/0x1c0

Freed by task 560:
 kasan_save_stack+0x38/0x6c
 kasan_set_track+0x28/0x40
 kasan_set_free_info+0x24/0x4c
 __kasan_slab_free+0x100/0x164
 kasan_slab_free+0x14/0x20
 kmem_cache_free+0xc4/0x34c
 vm_area_free+0x1c/0x2c
 remove_vma+0x7c/0x94
 __do_munmap+0x358/0x710
 __vm_munmap+0xbc/0x130
 __arm64_sys_munmap+0x4c/0x64
 el0_svc_common.constprop.0+0xac/0x270
 do_el0_svc+0x38/0xa0
 el0_svc+0x1c/0x2c
 el0_sync_handler+0xe8/0x114
 el0_sync+0x180/0x1c0

[...]
==================================================================

To prevent the race above, revert to taking the mmap write lock inside
binder_update_page_range(). One might expect an increase in mmap_lock
contention. However, binder already serializes these calls via the top
level alloc->mutex. Also, there was no performance impact shown when
running the binder benchmark tests.
Fixes: c0fd2101781e ("Revert "android: binder: stop saving a pointer to the VMA"")
Fixes: dd2283f2605e ("mm: mmap: zap pages with read mmap_sem in munmap")
Reported-by: Jann Horn
Closes: https://lore.kernel.org/all/20230518144052.xkj6vmddccq4v66b@revolver
Cc: stable@vger.kernel.org
Cc: Minchan Kim
Cc: Yang Shi
Cc: Liam Howlett
Signed-off-by: Carlos Llamas
Acked-by: Todd Kjos
Link: https://lore.kernel.org/r/20230519195950.1775656-1-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman
---
 drivers/android/binder_alloc.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'drivers/android')

diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index e7c9d466f8e8..662a2a2e2e84 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -212,7 +212,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
         mm = alloc->mm;
 
         if (mm) {
-                mmap_read_lock(mm);
+                mmap_write_lock(mm);
                 vma = alloc->vma;
         }
 
@@ -270,7 +270,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
                 trace_binder_alloc_page_end(alloc, index);
         }
         if (mm) {
-                mmap_read_unlock(mm);
+                mmap_write_unlock(mm);
                 mmput(mm);
         }
         return 0;
@@ -303,7 +303,7 @@ err_page_ptr_cleared:
 }
 err_no_vma:
         if (mm) {
-                mmap_read_unlock(mm);
+                mmap_write_unlock(mm);
                 mmput(mm);
         }
         return vma ? -ENOMEM : -ESRCH;
-- 
cgit v1.2.3
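
Note on the pattern used by the last three patches above: the ->mmap() handler
fully initializes the binder_alloc state and only then publishes alloc->vma
with smp_store_release(), while readers use smp_load_acquire() so that a
non-NULL vma guarantees the earlier initialization is visible. The stand-alone
sketch below is only an illustrative user-space analogue of that handshake,
not code from this series; the struct, the set_vma()/get_vma() names, and the
use of C11 atomics in place of the kernel's smp_store_release() and
smp_load_acquire() macros are all assumptions made for the example.

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

struct vma { unsigned long start, end; };

static struct vma mapping;              /* payload that must be seen fully initialized */
static _Atomic(struct vma *) published; /* analogue of alloc->vma */

/* analogue of binder_alloc_set_vma(): publish only after init is complete */
static void set_vma(struct vma *v)
{
        atomic_store_explicit(&published, v, memory_order_release);
}

/* analogue of binder_alloc_get_vma(): a non-NULL result implies full init */
static struct vma *get_vma(void)
{
        return atomic_load_explicit(&published, memory_order_acquire);
}

static void *writer(void *arg)
{
        (void)arg;
        mapping.start = 0x1000;
        mapping.end   = 0x9000;  /* initialization done ... */
        set_vma(&mapping);       /* ... then publish */
        return NULL;
}

static void *reader(void *arg)
{
        struct vma *v = get_vma();

        (void)arg;
        if (v)  /* if non-NULL, start/end are guaranteed visible */
                printf("vma %#lx-%#lx\n", v->start, v->end);
        return NULL;
}

int main(void)
{
        pthread_t w, r;

        pthread_create(&w, NULL, writer, NULL);
        pthread_create(&r, NULL, reader, NULL);
        pthread_join(w, NULL);
        pthread_join(r, NULL);
        return 0;
}

If the store were a plain assignment instead of a release store, a reader
could observe the published pointer before the fields it points to, which is
the ordering problem the acquire/release pairing in binder_alloc_set_vma()
and binder_alloc_get_vma() avoids.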