-rw-r--r--   include/linux/slab.h   | 33
-rw-r--r--   mm/slab.h              |  6
-rw-r--r--   mm/slab_common.c       | 25
3 files changed, 40 insertions, 24 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 4c5363566815..1f611ba00f1d 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -545,22 +545,37 @@ struct memcg_cache_array {
* array to be accessed without taking any locks, on relocation we free the old
* version only after a grace period.
*
- * Child caches will hold extra metadata needed for its operation. Fields are:
+ * Root and child caches hold different metadata.
*
- * @memcg: pointer to the memcg this cache belongs to
- * @root_cache: pointer to the global, root cache, this cache was derived from
+ * @root_cache: Common to root and child caches. NULL for root, pointer to
+ * the root cache for children.
*
- * Both root and child caches of the same kind are linked into a list chained
- * through @list.
+ * The following fields are specific to root caches.
+ *
+ * @memcg_caches: kmemcg ID indexed table of child caches. This table is
+ *			used to index child caches during allocation and cleared
+ * early during shutdown.
+ *
+ * @children: List of all child caches. While the child caches are also
+ * reachable through @memcg_caches, a child cache remains on
+ * this list until it is actually destroyed.
+ *
+ * The following fields are specific to child caches.
+ *
+ * @memcg: Pointer to the memcg this cache belongs to.
+ *
+ * @children_node: List node for @root_cache->children list.
*/
struct memcg_cache_params {
- bool is_root_cache;
- struct list_head list;
+ struct kmem_cache *root_cache;
union {
- struct memcg_cache_array __rcu *memcg_caches;
+ struct {
+ struct memcg_cache_array __rcu *memcg_caches;
+ struct list_head children;
+ };
struct {
struct mem_cgroup *memcg;
- struct kmem_cache *root_cache;
+ struct list_head children_node;
};
};
};
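
The reorganized struct drops the explicit is_root_cache flag: a cache is a root iff @root_cache is NULL, and the root-only and child-only fields overlay each other in a union of anonymous structs. A minimal userspace sketch of the same pattern (the toy_* names are made up for illustration; this is not the kernel code):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Toy model of the reorganized memcg_cache_params layout: one
 * discriminating pointer, then a union of root-only and child-only
 * fields held in anonymous structs. */
struct toy_cache;

struct toy_params {
	struct toy_cache *root_cache;	/* NULL for a root cache */
	union {
		struct {		/* root-only fields */
			void *caches_table;
			int nr_children;
		};
		struct {		/* child-only fields */
			void *owner_memcg;
			int child_id;
		};
	};
};

struct toy_cache {
	struct toy_params params;
};

static bool toy_is_root(const struct toy_cache *c)
{
	return !c->params.root_cache;	/* mirrors the new is_root_cache() */
}

int main(void)
{
	struct toy_cache root  = { .params = { .root_cache = NULL  } };
	struct toy_cache child = { .params = { .root_cache = &root } };

	printf("root? %d %d\n", toy_is_root(&root), toy_is_root(&child));
	return 0;
}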
diff --git a/mm/slab.h b/mm/slab.h
index d07563f37f33..3ed3336883ed 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -206,12 +206,12 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
* slab_mutex.
*/
#define for_each_memcg_cache(iter, root) \
- list_for_each_entry(iter, &(root)->memcg_params.list, \
- memcg_params.list)
+ list_for_each_entry(iter, &(root)->memcg_params.children, \
+ memcg_params.children_node)
static inline bool is_root_cache(struct kmem_cache *s)
{
- return s->memcg_params.is_root_cache;
+ return !s->memcg_params.root_cache;
}
static inline bool slab_equal_or_root(struct kmem_cache *s,
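
The reworked for_each_memcg_cache() is a plain list_for_each_entry() walk over @children, recovering each child cache from its embedded @children_node; per the comment above the macro, the kernel version must run under slab_mutex. Below is a rough userspace model of that intrusive-list walk (all names are invented for illustration, not the kernel API):

#include <stddef.h>
#include <stdio.h>

/* Minimal model of the pattern behind list_for_each_entry(). */
struct list_head {
	struct list_head *next, *prev;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

struct toy_child {
	int id;
	struct list_head children_node;	/* embedded node, like memcg_params.children_node */
};

/* Walk every child hanging off @head, recovering the enclosing struct
 * from the embedded node with container_of(). */
#define for_each_toy_child(pos, head) \
	for (pos = container_of((head)->next, struct toy_child, children_node); \
	     &pos->children_node != (head); \
	     pos = container_of(pos->children_node.next, struct toy_child, children_node))

int main(void)
{
	struct list_head children;
	struct toy_child a = { .id = 1 }, b = { .id = 2 };
	struct toy_child *c;

	list_init(&children);
	list_add_tail(&a.children_node, &children);
	list_add_tail(&b.children_node, &children);

	for_each_toy_child(c, &children)
		printf("child %d\n", c->id);
	return 0;
}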
diff --git a/mm/slab_common.c b/mm/slab_common.c
index ac469c815587..c3885032dbce 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -140,9 +140,9 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
void slab_init_memcg_params(struct kmem_cache *s)
{
- s->memcg_params.is_root_cache = true;
- INIT_LIST_HEAD(&s->memcg_params.list);
+ s->memcg_params.root_cache = NULL;
RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
+ INIT_LIST_HEAD(&s->memcg_params.children);
}
static int init_memcg_params(struct kmem_cache *s,
@@ -150,10 +150,10 @@ static int init_memcg_params(struct kmem_cache *s,
{
struct memcg_cache_array *arr;
- if (memcg) {
- s->memcg_params.is_root_cache = false;
- s->memcg_params.memcg = memcg;
+ if (root_cache) {
s->memcg_params.root_cache = root_cache;
+ s->memcg_params.memcg = memcg;
+ INIT_LIST_HEAD(&s->memcg_params.children_node);
return 0;
}
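
Because the root-only and child-only members now live in one union, each init path may only touch the half matching the role selected by @root_cache. A small sketch (illustrative names, not the kernel definitions) showing that the two halves really do share storage:

#include <stdio.h>
#include <stddef.h>

struct node { struct node *next, *prev; };

struct params {
	void *root_cache;		/* NULL selects the "root" half */
	union {
		struct {		/* root-only fields */
			void *memcg_caches;
			struct node children;
		};
		struct {		/* child-only fields */
			void *memcg;
			struct node children_node;
		};
	};
};

int main(void)
{
	/* Both halves start at the same offset, so they overlap byte for byte. */
	printf("memcg_caches/memcg alias:     %d\n",
	       offsetof(struct params, memcg_caches) == offsetof(struct params, memcg));
	printf("children/children_node alias: %d\n",
	       offsetof(struct params, children) == offsetof(struct params, children_node));
	printf("total size: %zu bytes\n", sizeof(struct params));
	return 0;
}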
@@ -223,7 +223,7 @@ int memcg_update_all_caches(int num_memcgs)
static void unlink_memcg_cache(struct kmem_cache *s)
{
- list_del(&s->memcg_params.list);
+ list_del(&s->memcg_params.children_node);
}
#else
static inline int init_memcg_params(struct kmem_cache *s,
@@ -594,7 +594,8 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
goto out_unlock;
}
- list_add(&s->memcg_params.list, &root_cache->memcg_params.list);
+ list_add(&s->memcg_params.children_node,
+ &root_cache->memcg_params.children);
/*
* Since readers won't lock (see cache_from_memcg_idx()), we need a
@@ -690,7 +691,7 @@ static int shutdown_memcg_caches(struct kmem_cache *s)
* list so as not to try to destroy it for a second
* time while iterating over inactive caches below.
*/
- list_move(&c->memcg_params.list, &busy);
+ list_move(&c->memcg_params.children_node, &busy);
else
/*
* The cache is empty and will be destroyed soon. Clear
@@ -705,17 +706,17 @@ static int shutdown_memcg_caches(struct kmem_cache *s)
* Second, shutdown all caches left from memory cgroups that are now
* offline.
*/
- list_for_each_entry_safe(c, c2, &s->memcg_params.list,
- memcg_params.list)
+ list_for_each_entry_safe(c, c2, &s->memcg_params.children,
+ memcg_params.children_node)
shutdown_cache(c);
- list_splice(&busy, &s->memcg_params.list);
+ list_splice(&busy, &s->memcg_params.children);
/*
* A cache being destroyed must be empty. In particular, this means
* that all per memcg caches attached to it must be empty too.
*/
- if (!list_empty(&s->memcg_params.list))
+ if (!list_empty(&s->memcg_params.children))
return -EBUSY;
return 0;
}
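
shutdown_memcg_caches() now treats @children as the authoritative list: busy children are first parked on a local list, the remaining empty ones are destroyed, the busy ones are spliced back, and destruction of the root is refused (-EBUSY) while the list is non-empty. A compact userspace model of that move/splice flow follows; the list helpers mimic, but are not, the kernel's <linux/list.h>, and all other names are invented:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_init(struct list_head *h) { h->next = h->prev = h; }
static bool list_empty(const struct list_head *h) { return h->next == h; }

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next; n->prev = h;
	h->next->prev = n; h->next = n;
}

/* Unlink @n from wherever it is and re-add it at @h. */
static void list_move(struct list_head *n, struct list_head *h)
{
	list_del(n);
	list_add(n, h);
}

/* Insert everything on @from right after @to. */
static void list_splice(struct list_head *from, struct list_head *to)
{
	struct list_head *first = from->next, *last = from->prev, *at = to->next;

	if (list_empty(from))
		return;
	first->prev = to; to->next = first;
	last->next = at;  at->prev = last;
}

struct child {
	const char *name;
	bool busy;			/* stands in for "still has live objects" */
	struct list_head node;
};

static int shutdown_children(struct list_head *children)
{
	struct list_head busy, *pos, *tmp;

	list_init(&busy);

	/* Pass 1: park busy children so pass 2 does not try to destroy them. */
	for (pos = children->next; pos != children; pos = tmp) {
		tmp = pos->next;	/* grab next before the node moves */
		if (container_of(pos, struct child, node)->busy)
			list_move(pos, &busy);
	}

	/* Pass 2: everything still on @children is empty; destroy it. */
	for (pos = children->next; pos != children; pos = tmp) {
		tmp = pos->next;
		list_del(pos);
		printf("destroyed %s\n", container_of(pos, struct child, node)->name);
	}

	/* Put survivors back; the root can only go away once this is empty. */
	list_splice(&busy, children);
	return list_empty(children) ? 0 : -1 /* stands in for -EBUSY */;
}

int main(void)
{
	struct list_head children;
	struct child a = { "a", false }, b = { "b", true }, c = { "c", false };

	list_init(&children);
	list_add(&a.node, &children);
	list_add(&b.node, &children);
	list_add(&c.node, &children);

	printf("shutdown -> %d\n", shutdown_children(&children));
	return 0;
}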