author      Hyeonggon Yoo    2022-08-17 19:18:19 +0900
committer   Vlastimil Babka  2022-08-24 16:11:41 +0200
commit      d6a71648dbc0ca5520cba16a8fdce8d37ae74218 (patch)
tree        b15d1f620d72682e4c169024d798307f5792a62d /mm/slub.c
parent      c4cab557521a73bd803e5c6f613b4e00bd3c4662 (diff)
mm/slab: kmalloc: pass requests larger than order-1 page to page allocator
There is not much benefit to serving large objects from kmalloc()'s
slab caches. Let's pass large requests to the page allocator, as SLUB
already does, for better maintenance of the common code.
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
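
For context, the dispatch this change establishes looks roughly like the
following. This is a simplified sketch, not the kernel's verbatim code:
KMALLOC_MAX_CACHE_SIZE, kmalloc_large(), and kmalloc_slab() are real kernel
symbols, but the wrapper function and its exact flow here are illustrative
only.

/*
 * Illustrative sketch only -- not the kernel's verbatim code.
 * Requests larger than an order-1 page bypass the slab caches
 * and go straight to the page allocator.
 */
static void *kmalloc_sketch(size_t size, gfp_t flags)
{
        if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
                return kmalloc_large(size, flags);      /* page allocator path */

        /* small request: serve from the matching kmalloc slab cache */
        return kmem_cache_alloc(kmalloc_slab(size, flags), flags);
}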
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  19
1 file changed, 0 insertions(+), 19 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 165fe87af204..a659874c5d44 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1704,12 +1704,6 @@ static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
  * Hooks for other subsystems that check memory allocations. In a typical
  * production configuration these hooks all should produce no code at all.
  */
-static __always_inline void kfree_hook(void *x)
-{
-        kmemleak_free(x);
-        kasan_kfree_large(x);
-}
-
 static __always_inline bool slab_free_hook(struct kmem_cache *s,
                                                 void *x, bool init)
 {
@@ -3550,19 +3544,6 @@ struct detached_freelist {
         struct kmem_cache *s;
 };
 
-static inline void free_large_kmalloc(struct folio *folio, void *object)
-{
-        unsigned int order = folio_order(folio);
-
-        if (WARN_ON_ONCE(order == 0))
-                pr_warn_once("object pointer: 0x%p\n", object);
-
-        kfree_hook(object);
-        mod_lruvec_page_state(folio_page(folio, 0), NR_SLAB_UNRECLAIMABLE_B,
-                              -(PAGE_SIZE << order));
-        __free_pages(folio_page(folio, 0), order);
-}
-
 /*
  * This function progressively scans the array with free objects (with
  * a limited look ahead) and extract objects belonging to the same
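
The hunks above drop SLUB's private copies of kfree_hook() and
free_large_kmalloc(); with both allocators now handling large requests the
same way, a single copy can live in the common slab code. At free time, a
large kmalloc object is recognizable because its backing folio is not marked
as a slab. A minimal sketch of that free-side dispatch follows (virt_to_folio,
folio_test_slab, and free_large_kmalloc are real kernel symbols; the wrapper
and flow are simplified for illustration):

/*
 * Sketch of the free-side dispatch (simplified, for illustration).
 * A folio that lacks the slab flag was handed out by the page
 * allocator, so it takes the large-kmalloc free path.
 */
static void kfree_sketch(const void *object)
{
        struct folio *folio = virt_to_folio(object);

        if (unlikely(!folio_test_slab(folio))) {
                free_large_kmalloc(folio, (void *)object);
                return;
        }
        /* otherwise: a slab object, freed back to its kmem_cache */
}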