author	Dave Hansen	2014-04-08 13:44:27 -0700
committer	Pekka Enberg	2014-04-11 10:06:06 +0300
commit	34bf6ef94a835a8f1d8abd3e7d38c6c08d205867 (patch)
tree	a5e285e441036ed1d78033192b7eaf74300f4984
parent	5f0985bb1123b48bbfc632006bdbe76d3dfea76b (diff)
mm: slab/slub: use page->lru consistently instead of page->list
'struct page' has two list_head fields: 'lru' and 'list'.  Conveniently,
they are unioned together.  This means that code can use them
interchangeably, which gets horribly confusing like with this nugget
from slab.c:

>	list_del(&page->lru);
>	if (page->active == cachep->num)
>		list_add(&page->list, &n->slabs_full);

This patch makes the slab and slub code use page->lru universally instead
of mixing ->list and ->lru.

So, the new rule is: page->lru is what you use if you want to keep your
page on a list.  Don't like the fact that it's not called ->list?  Too
bad.

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
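To see why the mixed usage above "worked" at all: a minimal userspace
sketch (not part of this patch; 'struct page' and list_head are pared
down to stand-ins) showing that the unioned members share storage, so
&page->lru and &page->list are one and the same pointer:

	#include <stdio.h>

	struct list_head {
		struct list_head *next, *prev;
	};

	/* Simplified stand-in for the relevant corner of 'struct page' */
	struct page {
		union {
			struct list_head lru;	/* the name this patch standardizes on */
			struct list_head list;	/* the alias being removed */
		};
	};

	int main(void)
	{
		struct page page;

		/* The union gives both names the same address ... */
		printf("&page.lru  = %p\n", (void *)&page.lru);
		printf("&page.list = %p\n", (void *)&page.list);

		/* ... so list_del(&page->lru) followed by
		 * list_add(&page->list, ...) touches one and the same
		 * list node: correct at runtime, confusing to read. */
		return 0;
	}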
-rw-r--r--	include/linux/mm_types.h	3
-rw-r--r--	mm/slab.c	4
-rw-r--r--	mm/slob.c	10
3 files changed, 9 insertions, 8 deletions
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 290901a8c1de..84b74080beb7 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -124,6 +124,8 @@ struct page {
 	union {
 		struct list_head lru;	/* Pageout list, eg. active_list
 					 * protected by zone->lru_lock !
+					 * Can be used as a generic list
+					 * by the page owner.
 					 */
 		struct {	/* slub per cpu partial pages */
 			struct page *next;	/* Next partial slab */
@@ -136,7 +138,6 @@ struct page {
 #endif
 		};
 
-		struct list_head list;	/* slobs list of pages */
 		struct slab *slab_page; /* slab fields */
 		struct rcu_head rcu_head;	/* Used by SLAB
 						 * when destroying via RCU
diff --git a/mm/slab.c b/mm/slab.c
index 8dd8e0875e4c..f6718197cdd0 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2922,9 +2922,9 @@ retry:
 		/* move slabp to correct slabp list: */
 		list_del(&page->lru);
 		if (page->active == cachep->num)
-			list_add(&page->list, &n->slabs_full);
+			list_add(&page->lru, &n->slabs_full);
 		else
-			list_add(&page->list, &n->slabs_partial);
+			list_add(&page->lru, &n->slabs_partial);
 	}
 
 must_grow:
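For illustration only (not part of the patch): the requeue logic in the
hunk above, as a self-contained userspace sketch. The list helpers mirror
the semantics of the kernel's <linux/list.h>; requeue_slab_page() and
objs_per_slab are invented names standing in for the slab internals.

	#include <stdio.h>

	struct list_head {
		struct list_head *next, *prev;
	};

	static void INIT_LIST_HEAD(struct list_head *h)
	{
		h->next = h->prev = h;
	}

	static void list_add(struct list_head *new, struct list_head *head)
	{
		new->next = head->next;
		new->prev = head;
		head->next->prev = new;
		head->next = new;
	}

	static void list_del(struct list_head *entry)
	{
		entry->prev->next = entry->next;
		entry->next->prev = entry->prev;
	}

	struct page {
		struct list_head lru;	/* the one list hook, post-patch */
		unsigned int active;	/* objects in use on this slab page */
	};

	/* Unlink a slab page, then requeue it on the full list if every
	 * object is allocated, or on the partial list otherwise -- the
	 * same move as the hunk above, spelled with ->lru throughout. */
	static void requeue_slab_page(struct page *page, unsigned int objs_per_slab,
				      struct list_head *slabs_full,
				      struct list_head *slabs_partial)
	{
		list_del(&page->lru);
		if (page->active == objs_per_slab)
			list_add(&page->lru, slabs_full);
		else
			list_add(&page->lru, slabs_partial);
	}

	int main(void)
	{
		struct list_head slabs_full, slabs_partial;
		struct page page = { .active = 3 };

		INIT_LIST_HEAD(&slabs_full);
		INIT_LIST_HEAD(&slabs_partial);
		INIT_LIST_HEAD(&page.lru);
		list_add(&page.lru, &slabs_partial);

		page.active = 4;	/* the slab just became full */
		requeue_slab_page(&page, 4, &slabs_full, &slabs_partial);
		printf("on full list: %s\n",
		       slabs_full.next == &page.lru ? "yes" : "no");
		return 0;
	}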
diff --git a/mm/slob.c b/mm/slob.c
index 4bf8809dfcce..730cad45d4be 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -111,13 +111,13 @@ static inline int slob_page_free(struct page *sp)
 
 static void set_slob_page_free(struct page *sp, struct list_head *list)
 {
-	list_add(&sp->list, list);
+	list_add(&sp->lru, list);
 	__SetPageSlobFree(sp);
 }
 
 static inline void clear_slob_page_free(struct page *sp)
 {
-	list_del(&sp->list);
+	list_del(&sp->lru);
 	__ClearPageSlobFree(sp);
 }
@@ -282,7 +282,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 
 	spin_lock_irqsave(&slob_lock, flags);
 	/* Iterate through each partially free page, try to find room */
-	list_for_each_entry(sp, slob_list, list) {
+	list_for_each_entry(sp, slob_list, lru) {
 #ifdef CONFIG_NUMA
 		/*
 		 * If there's a node specification, search for a partial
@@ -296,7 +296,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 			continue;
 
 		/* Attempt to alloc */
-		prev = sp->list.prev;
+		prev = sp->lru.prev;
 		b = slob_page_alloc(sp, size, align);
 		if (!b)
 			continue;
@@ -322,7 +322,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 	spin_lock_irqsave(&slob_lock, flags);
 	sp->units = SLOB_UNITS(PAGE_SIZE);
 	sp->freelist = b;
-	INIT_LIST_HEAD(&sp->list);
+	INIT_LIST_HEAD(&sp->lru);
 	set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
 	set_slob_page_free(sp, slob_list);
 	b = slob_page_alloc(sp, size, align);
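Again for illustration (not part of the patch): after these changes every
slob list operation goes through page->lru, matching the "generic list by
the page owner" comment added to mm_types.h above. A self-contained
userspace sketch of that contract; container_of() and the list helpers are
simplified reimplementations, and list_for_each_entry() here mirrors the
shape of the kernel macro using the typeof() extension.

	#include <stdio.h>
	#include <stddef.h>

	struct list_head {
		struct list_head *next, *prev;
	};

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* Walk a list whose nodes are embedded at 'member' in each entry,
	 * like the kernel's list_for_each_entry() (typeof() is a GCC
	 * extension, also relied on by the kernel version). */
	#define list_for_each_entry(pos, head, member)				\
		for (pos = container_of((head)->next, typeof(*pos), member);	\
		     &pos->member != (head);					\
		     pos = container_of(pos->member.next, typeof(*pos), member))

	static void INIT_LIST_HEAD(struct list_head *h)
	{
		h->next = h->prev = h;
	}

	static void list_add(struct list_head *new, struct list_head *head)
	{
		new->next = head->next;
		new->prev = head;
		head->next->prev = new;
		head->next = new;
	}

	struct page {
		struct list_head lru;	/* generic list hook for the page owner */
		int units;		/* free units left on this slob page */
	};

	int main(void)
	{
		struct list_head slob_list;
		struct page a = { .units = 5 }, b = { .units = 2 };
		struct page *sp;

		INIT_LIST_HEAD(&slob_list);
		list_add(&a.lru, &slob_list);
		list_add(&b.lru, &slob_list);

		/* Walk the partially free pages via ->lru, as slob_alloc()
		 * does after this patch. */
		list_for_each_entry(sp, &slob_list, lru)
			printf("page with %d free units\n", sp->units);
		return 0;
	}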