| author | Vlastimil Babka | 2023-11-14 22:12:47 +0100 |
| committer | Vlastimil Babka | 2023-12-28 19:18:18 +0100 |
| commit | 782f8906f8057efc7151b4b98b0a0280a71d005f (patch) |
| tree | 2ecddbe03bf673832d2e8f7f80124a5fca09ba07 /mm |
| parent | 284f17ac13fe34ae9eecbe57bb91553374d9b855 (diff) |
mm/slub: free KFENCE objects in slab_free_hook()
When freeing an object that was allocated from KFENCE, we do so in the
slowpath __slab_free(), relying on the fact that a KFENCE "slab" can
never be the cpu slab, so the fastpath has to fall back to the slowpath.
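(Illustration, not kernel code: a minimal userspace sketch of that dispatch. struct slab, kfence_free_stub() and the other names here are invented stand-ins for the real SLUB internals.)

```c
#include <stdbool.h>
#include <stdio.h>

struct slab { int id; };

static struct slab cpu_slab = { .id = 1 };     /* the per-cpu active slab */
static struct slab kfence_area = { .id = 99 }; /* stand-in for a KFENCE page */

/* Stand-in for kfence_free(): true if the object belonged to KFENCE. */
static bool kfence_free_stub(struct slab *s)
{
	return s == &kfence_area;
}

/* Old scheme: the KFENCE check sat in the __slab_free() slowpath. */
static void slab_free_old(struct slab *s)
{
	if (s == &cpu_slab) {		/* fastpath: object is on the cpu slab */
		printf("fastpath free\n");
		return;
	}
	if (kfence_free_stub(s)) {	/* only reachable via the slowpath */
		printf("freed to KFENCE in slowpath\n");
		return;
	}
	printf("slowpath free\n");
}

int main(void)
{
	slab_free_old(&cpu_slab);	/* takes the fastpath */
	slab_free_old(&kfence_area);	/* never matches the cpu slab */
	return 0;
}
```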
This optimization doesn't help much though, because is_kfence_address()
is checked earlier anyway during free hook processing or detached
freelist building. We can thus simplify the code by having
slab_free_hook() free the KFENCE object immediately, similarly to the
KASAN quarantine.
In slab_free_hook() we can place kfence_free() above the init
processing, as callers have been making sure to set init to false for
KFENCE objects; this simplifies slab_free(). It also places the check
above kasan_slab_free(), which is fine as KASAN skips KFENCE objects
anyway.
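Taken together, the hook ends up checking in the order below. This is a simplified compilable model of that ordering, with kfence_free_stub() and kasan_quarantine_stub() as hypothetical stand-ins for the real helpers:

```c
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Stand-ins for the real helpers; both return "object was consumed". */
static bool kfence_free_stub(void *x) { (void)x; return false; }
static bool kasan_quarantine_stub(void *x) { (void)x; return false; }

/*
 * Order of checks in slab_free_hook() after this patch: returns true
 * if the caller may proceed to actually free the object.
 */
static bool slab_free_hook_sketch(void *x, size_t size, bool init)
{
	/* 1. KFENCE objects are handed back immediately ... */
	if (kfence_free_stub(x))
		return false;

	/* 2. ... so the init-on-free memset never touches them ... */
	if (init)
		memset(x, 0, size);

	/* 3. ... and neither does KASAN, which skips KFENCE anyway. */
	if (kasan_quarantine_stub(x))
		return false;

	return true;
}

int main(void)
{
	char obj[32];
	return slab_free_hook_sketch(obj, sizeof(obj), true) ? 0 : 1;
}
```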
While at it, also determine the init value in slab_free_freelist_hook()
outside of the loop.
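That is plain loop-invariant hoisting: slab_want_init_on_free() depends only on the cache, not the object, so one call covers the whole detached freelist. A miniature sketch, with struct cache_sketch and free_one() invented for the example:

```c
#include <stdbool.h>
#include <string.h>

struct cache_sketch { bool init_on_free; };	/* invented, minimal "cache" */

static bool want_init_on_free(const struct cache_sketch *c)
{
	return c->init_on_free;	/* per-cache property, loop-invariant */
}

static void free_one(void *obj, size_t size, bool init)
{
	if (init)
		memset(obj, 0, size);
	/* ... hand the object back to the freelist ... */
}

static void free_list_sketch(const struct cache_sketch *c,
			     void **objs, size_t n, size_t size)
{
	bool init = want_init_on_free(c);	/* computed once, not per object */

	for (size_t i = 0; i < n; i++)
		free_one(objs[i], size, init);
}
```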
This change will also make it easier to introduce per-cpu array caches.
Tested-by: Marco Elver <elver@google.com>
Reviewed-by: Chengming Zhou <zhouchengming@bytedance.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Diffstat (limited to 'mm')
| -rw-r--r-- | mm/slub.c | 22 |
1 file changed, 10 insertions, 12 deletions
diff --git a/mm/slub.c b/mm/slub.c
index af8c8fc9e799..ccd57636b739 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2053,7 +2053,7 @@ void memcg_slab_alloc_error_hook(struct kmem_cache *s, int objects,
  * production configuration these hooks all should produce no code at all.
  *
  * Returns true if freeing of the object can proceed, false if its reuse
- * was delayed by KASAN quarantine.
+ * was delayed by KASAN quarantine, or it was returned to KFENCE.
  */
 static __always_inline
 bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
@@ -2071,6 +2071,9 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
 		__kcsan_check_access(x, s->object_size,
 				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
 
+	if (kfence_free(x))
+		return false;
+
 	/*
 	 * As memory initialization might be integrated into KASAN,
 	 * kasan_slab_free and initialization memset's must be
@@ -2100,23 +2103,25 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
 	void *object;
 	void *next = *head;
 	void *old_tail = *tail;
+	bool init;
 
 	if (is_kfence_address(next)) {
 		slab_free_hook(s, next, false);
-		return true;
+		return false;
 	}
 
 	/* Head and tail of the reconstructed freelist */
 	*head = NULL;
 	*tail = NULL;
 
+	init = slab_want_init_on_free(s);
+
 	do {
 		object = next;
 		next = get_freepointer(s, object);
 
 		/* If object's reuse doesn't have to be delayed */
-		if (likely(slab_free_hook(s, object,
-					  slab_want_init_on_free(s)))) {
+		if (likely(slab_free_hook(s, object, init))) {
 			/* Move object to the new freelist */
 			set_freepointer(s, object, *head);
 			*head = object;
@@ -4117,9 +4122,6 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
 
 	stat(s, FREE_SLOWPATH);
 
-	if (kfence_free(head))
-		return;
-
 	if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
 		free_to_partial_list(s, slab, head, tail, cnt, addr);
 		return;
@@ -4304,13 +4306,9 @@ static __fastpath_inline
 void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
 	       unsigned long addr)
 {
-	bool init;
-
 	memcg_slab_free_hook(s, slab, &object, 1);
 
-	init = !is_kfence_address(object) && slab_want_init_on_free(s);
-
-	if (likely(slab_free_hook(s, object, init)))
+	if (likely(slab_free_hook(s, object, slab_want_init_on_free(s))))
 		do_slab_free(s, slab, object, object, 1, addr);
 }
 