author      Vlastimil Babka   2022-11-11 09:08:18 +0100
committer   Vlastimil Babka   2022-11-21 10:36:09 +0100
commit      90e9b23a60d5b4c8317f58e01ed05d3bdf063440 (patch)
tree        b87f0bd1d2c0ab251fafcbc4aebd5e3c73799397 /mm/slab.h
parent      76537db3b95cbf5d0189ce185c16db9f93017021 (diff)
parent      946fa0dbf2d8923a587f7348adf16563d59f1b3d (diff)
Merge branch 'slab/for-6.2/kmalloc_redzone' into slab/for-next
kmalloc() redzone improvements by Feng Tang
From the cover letter [1]:
The kmalloc() API family is critical for mm, and one of its characteristics is that
it rounds the requested size up to a fixed bucket size (mostly powers of 2).
When a user requests '2^n + 1' bytes, 2^(n+1) bytes may actually be
allocated, so there is extra space beyond what was originally
requested.
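
As a rough illustration of that rounding, here is a minimal userspace sketch, not kernel code; the 8-byte minimum bucket and pure power-of-2 progression are simplifying assumptions (real kmalloc size classes also include 96 and 192 bytes):

#include <stdio.h>

/* Round a request up to the next bucket, assuming pure power-of-2
 * size classes starting at 8 bytes (a simplification of kmalloc). */
static size_t bucket_size(size_t request)
{
	size_t bucket = 8;

	while (bucket < request)
		bucket <<= 1;
	return bucket;
}

int main(void)
{
	size_t req = 33;	/* 2^5 + 1 bytes requested */

	/* A 33-byte request is served from the 64-byte bucket,
	 * leaving 31 bytes the caller never asked for. */
	printf("request %zu -> bucket %zu, slack %zu\n",
	       req, bucket_size(req), bucket_size(req) - req);
	return 0;
}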
This patchset extends the redzone sanity check to cover the extra
kmalloc'ed space beyond the requested size, to better detect illegitimate
access to it (depends on SLAB_STORE_USER & SLAB_RED_ZONE).
[1] https://lore.kernel.org/all/20221021032405.1825078-1-feng.tang@intel.com/
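
The idea can be sketched in plain C as follows; this is a hypothetical model, not the SLUB implementation, and the fill byte, helper names and free-time check are illustrative assumptions. The slack between orig_size and object_size is painted with a known pattern at allocation time and verified later, so a write past the requested size is caught even though it stays inside the allocated object:

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define TAIL_REDZONE	0xcc	/* illustrative fill byte, not necessarily SLUB's */

/* Paint the bytes between the requested size and the bucket size. */
static void redzone_tail(unsigned char *obj, size_t orig_size, size_t object_size)
{
	memset(obj + orig_size, TAIL_REDZONE, object_size - orig_size);
}

/* Return 0 if the slack is untouched, -1 if something wrote into it. */
static int check_tail(const unsigned char *obj, size_t orig_size, size_t object_size)
{
	for (size_t i = orig_size; i < object_size; i++)
		if (obj[i] != TAIL_REDZONE)
			return -1;
	return 0;
}

int main(void)
{
	size_t orig_size = 33, object_size = 64;
	unsigned char *obj = malloc(object_size);

	redzone_tail(obj, orig_size, object_size);
	obj[40] = 'X';		/* out-of-bounds write into the slack */
	assert(check_tail(obj, orig_size, object_size) == -1);
	free(obj);
	return 0;
}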
Diffstat (limited to 'mm/slab.h')
-rw-r--r--   mm/slab.h   22
1 file changed, 20 insertions, 2 deletions
diff --git a/mm/slab.h b/mm/slab.h
index 8c4aafb00bd6..e3b3231af742 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -730,13 +730,27 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
 
 static inline void slab_post_alloc_hook(struct kmem_cache *s,
 					struct obj_cgroup *objcg, gfp_t flags,
-					size_t size, void **p, bool init)
+					size_t size, void **p, bool init,
+					unsigned int orig_size)
 {
+	unsigned int zero_size = s->object_size;
 	size_t i;
 
 	flags &= gfp_allowed_mask;
 
 	/*
+	 * For kmalloc object, the allocated memory size(object_size) is likely
+	 * larger than the requested size(orig_size). If redzone check is
+	 * enabled for the extra space, don't zero it, as it will be redzoned
+	 * soon. The redzone operation for this extra space could be seen as a
+	 * replacement of current poisoning under certain debug option, and
+	 * won't break other sanity checks.
+	 */
+	if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
+	    (s->flags & SLAB_KMALLOC))
+		zero_size = orig_size;
+
+	/*
 	 * As memory initialization might be integrated into KASAN,
 	 * kasan_slab_alloc and initialization memset must be
 	 * kept together to avoid discrepancies in behavior.
@@ -746,7 +760,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
 	for (i = 0; i < size; i++) {
 		p[i] = kasan_slab_alloc(s, p[i], flags, init);
 		if (p[i] && init && !kasan_has_integrated_init())
-			memset(p[i], 0, s->object_size);
+			memset(p[i], 0, zero_size);
 		kmemleak_alloc_recursive(p[i], s->object_size, 1,
 					 s->flags, flags);
 		kmsan_slab_alloc(s, p[i], flags);
@@ -881,4 +895,8 @@ void __check_heap_object(const void *ptr, unsigned long n,
 }
 #endif
 
+#ifdef CONFIG_SLUB_DEBUG
+void skip_orig_size_check(struct kmem_cache *s, const void *object);
+#endif
+
 #endif /* MM_SLAB_H */
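
The zero_size selection added above can be read as the following standalone sketch; the struct and field names are stand-ins, not the kernel's definitions. When the cache is a kmalloc cache and the relevant debug flags are set, only the bytes the caller requested are zeroed, and the slack is left for the redzone code to fill and later verify:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in for struct kmem_cache with just the fields this decision needs. */
struct cache_model {
	size_t object_size;	/* rounded-up bucket size */
	bool debug_redzone;	/* models SLAB_STORE_USER | SLAB_RED_ZONE being set */
	bool is_kmalloc;	/* models the SLAB_KMALLOC flag */
};

/* Mirror of the zero_size logic in the hunk above. */
static size_t pick_zero_size(const struct cache_model *s, size_t orig_size)
{
	if (s->debug_redzone && s->is_kmalloc)
		return orig_size;	/* zero only what was requested */
	return s->object_size;		/* otherwise zero the whole object */
}

int main(void)
{
	struct cache_model kmalloc_64 = { .object_size = 64,
					  .debug_redzone = true,
					  .is_kmalloc = true };

	/* A kmalloc(33) served from the 64-byte bucket zeroes only 33 bytes;
	 * the remaining 31 bytes carry the redzone pattern instead. */
	printf("zero %zu of %zu bytes\n",
	       pick_zero_size(&kmalloc_64, 33), kmalloc_64.object_size);
	return 0;
}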