author     Linus Torvalds    2022-01-18 06:40:47 +0200
committer  Linus Torvalds    2022-01-18 06:40:47 +0200
commit     e3a8b6a1e70c37702054ae3c7c07ed828435d8ee (patch)
tree       f1e15d987dcfd84b61f370b9ab71f833513c5429
parent     62b488875c0551822ac3b961d04800d4c7a655d9 (diff)
parent     07f910f9b7295b6a28b337fedb56e612684c5659 (diff)
Merge tag 'slab-for-5.17-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab
Pull more slab updates from Vlastimil Babka:
 "Finish the conversion to struct slab by removing slab-specific fields
  from struct page.

  The first slab update (see merge commit ca1a46d6f506) did most of the
  conversion, but there was also series in iommu tree removing the
  iommu's usage of struct page 'freelist' field, blocking the final
  struct page cleanup. Now that the iommu changes have been merged, we
  can finish the job"

* tag 'slab-for-5.17-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  mm: Remove slab from struct page
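The removal below is safe because struct slab, introduced by the earlier series, overlays struct page, and the SLAB_MATCH() checks that remain in mm/slab.h (last hunk of this diff) pin every field the two layouts still share to the same offset at compile time. The following is a minimal stand-alone sketch of that pattern, not kernel code: the struct layouts and field names are simplified stand-ins.

/*
 * Sketch of the SLAB_MATCH() idea: two overlaid layouts, with
 * _Static_assert(offsetof(...) == offsetof(...)) catching any drift
 * between them at compile time.  Stand-in types, not the kernel's.
 */
#include <stddef.h>

struct fake_page {
	unsigned long flags;
	unsigned long compound_head;
	unsigned int _refcount;
};

struct fake_slab {
	unsigned long __page_flags;
	unsigned long slab_list;
	unsigned int __page_refcount;
};

#define SLAB_MATCH(pg, sl)						\
	_Static_assert(offsetof(struct fake_page, pg) ==		\
		       offsetof(struct fake_slab, sl),			\
		       "struct fake_slab no longer overlays struct fake_page")

SLAB_MATCH(flags, __page_flags);
SLAB_MATCH(compound_head, slab_list);	/* bit zero must stay clear */
SLAB_MATCH(_refcount, __page_refcount);

int main(void)
{
	return 0;	/* nothing to run; the checks happen at compile time */
}

With any C11 compiler, moving or resizing one of the shared fields fails the build immediately, which is what makes it possible to delete the slab-specific fields from struct page itself without silently breaking the overlay.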
-rw-r--r--  include/linux/mm_types.h    | 28
-rw-r--r--  include/linux/page-flags.h  | 37
-rw-r--r--  mm/slab.h                   |  6
3 files changed, 0 insertions, 71 deletions
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 3764c1b51b02..9db36dc5d4cf 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -119,31 +119,6 @@ struct page {
 				atomic_long_t pp_frag_count;
 			};
 		};
-		struct {	/* slab, slob and slub */
-			union {
-				struct list_head slab_list;
-				struct {	/* Partial pages */
-					struct page *next;
-#ifdef CONFIG_64BIT
-					int pages;	/* Nr of pages left */
-#else
-					short int pages;
-#endif
-				};
-			};
-			struct kmem_cache *slab_cache; /* not slob */
-			/* Double-word boundary */
-			void *freelist;		/* first free object */
-			union {
-				void *s_mem;	/* slab: first object */
-				unsigned long counters;		/* SLUB */
-				struct {			/* SLUB */
-					unsigned inuse:16;
-					unsigned objects:15;
-					unsigned frozen:1;
-				};
-			};
-		};
 		struct {	/* Tail pages of compound page */
 			unsigned long compound_head;	/* Bit zero is set */
@@ -207,9 +182,6 @@ struct page {
 		 * which are currently stored here.
 		 */
 		unsigned int page_type;
-
-		unsigned int active;		/* SLAB */
-		int units;			/* SLOB */
 	};

 	/* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 129421002443..1c3b6e5c8bfd 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -920,43 +920,6 @@ extern bool is_free_buddy_page(struct page *page);
 __PAGEFLAG(Isolated, isolated, PF_ANY);
-/*
- * If network-based swap is enabled, sl*b must keep track of whether pages
- * were allocated from pfmemalloc reserves.
- */
-static inline int PageSlabPfmemalloc(struct page *page)
-{
-	VM_BUG_ON_PAGE(!PageSlab(page), page);
-	return PageActive(page);
-}
-
-/*
- * A version of PageSlabPfmemalloc() for opportunistic checks where the page
- * might have been freed under us and not be a PageSlab anymore.
- */
-static inline int __PageSlabPfmemalloc(struct page *page)
-{
-	return PageActive(page);
-}
-
-static inline void SetPageSlabPfmemalloc(struct page *page)
-{
-	VM_BUG_ON_PAGE(!PageSlab(page), page);
-	SetPageActive(page);
-}
-
-static inline void __ClearPageSlabPfmemalloc(struct page *page)
-{
-	VM_BUG_ON_PAGE(!PageSlab(page), page);
-	__ClearPageActive(page);
-}
-
-static inline void ClearPageSlabPfmemalloc(struct page *page)
-{
-	VM_BUG_ON_PAGE(!PageSlab(page), page);
-	ClearPageActive(page);
-}
-
 #ifdef CONFIG_MMU
 #define __PG_MLOCKED		(1UL << PG_mlocked)
 #else
diff --git a/mm/slab.h b/mm/slab.h
index 7edb7d23f141..c7f2abc2b154 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -67,14 +67,8 @@ struct slab {
 	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
 SLAB_MATCH(flags, __page_flags);
 SLAB_MATCH(compound_head, slab_list);	/* Ensure bit 0 is clear */
-SLAB_MATCH(slab_list, slab_list);
 #ifndef CONFIG_SLOB
 SLAB_MATCH(rcu_head, rcu_head);
-SLAB_MATCH(slab_cache, slab_cache);
-#endif
-#ifdef CONFIG_SLAB
-SLAB_MATCH(s_mem, s_mem);
-SLAB_MATCH(active, active);
 #endif
 SLAB_MATCH(_refcount, __page_refcount);
 #ifdef CONFIG_MEMCG