Diffstat (limited to 'mm/slab_common.c')
-rw-r--r-- | mm/slab_common.c | 90
1 file changed, 54 insertions(+), 36 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 0b7bb399b0e4..8e40321da091 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -171,13 +171,26 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
 			struct kmem_cache *parent_cache)
 {
 	struct kmem_cache *s = NULL;
-	int err = 0;
+	int err;
 
 	get_online_cpus();
 	mutex_lock(&slab_mutex);
 
-	if (!kmem_cache_sanity_check(memcg, name, size) == 0)
-		goto out_locked;
+	err = kmem_cache_sanity_check(memcg, name, size);
+	if (err)
+		goto out_unlock;
+
+	if (memcg) {
+		/*
+		 * Since per-memcg caches are created asynchronously on first
+		 * allocation (see memcg_kmem_get_cache()), several threads can
+		 * try to create the same cache, but only one of them may
+		 * succeed. Therefore if we get here and see the cache has
+		 * already been created, we silently return NULL.
+		 */
+		if (cache_from_memcg_idx(parent_cache, memcg_cache_id(memcg)))
+			goto out_unlock;
+	}
 
 	/*
 	 * Some allocators will constraint the set of valid flags to a subset
@@ -189,45 +202,45 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
 
 	s = __kmem_cache_alias(memcg, name, size, align, flags, ctor);
 	if (s)
-		goto out_locked;
+		goto out_unlock;
 
+	err = -ENOMEM;
 	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
-	if (s) {
-		s->object_size = s->size = size;
-		s->align = calculate_alignment(flags, align, size);
-		s->ctor = ctor;
+	if (!s)
+		goto out_unlock;
 
-		if (memcg_register_cache(memcg, s, parent_cache)) {
-			kmem_cache_free(kmem_cache, s);
-			err = -ENOMEM;
-			goto out_locked;
-		}
+	s->object_size = s->size = size;
+	s->align = calculate_alignment(flags, align, size);
+	s->ctor = ctor;
 
-		s->name = kstrdup(name, GFP_KERNEL);
-		if (!s->name) {
-			kmem_cache_free(kmem_cache, s);
-			err = -ENOMEM;
-			goto out_locked;
-		}
+	s->name = kstrdup(name, GFP_KERNEL);
+	if (!s->name)
+		goto out_free_cache;
 
-		err = __kmem_cache_create(s, flags);
-		if (!err) {
-			s->refcount = 1;
-			list_add(&s->list, &slab_caches);
-			memcg_cache_list_add(memcg, s);
-		} else {
-			kfree(s->name);
-			kmem_cache_free(kmem_cache, s);
-		}
-	} else
-		err = -ENOMEM;
+	err = memcg_alloc_cache_params(memcg, s, parent_cache);
+	if (err)
+		goto out_free_cache;
+
+	err = __kmem_cache_create(s, flags);
+	if (err)
+		goto out_free_cache;
 
-out_locked:
+	s->refcount = 1;
+	list_add(&s->list, &slab_caches);
+	memcg_register_cache(s);
+
+out_unlock:
 	mutex_unlock(&slab_mutex);
 	put_online_cpus();
 
-	if (err) {
-
+	/*
+	 * There is no point in flooding logs with warnings or especially
+	 * crashing the system if we fail to create a cache for a memcg. In
+	 * this case we will be accounting the memcg allocation to the root
+	 * cgroup until we succeed to create its own cache, but it isn't that
+	 * critical.
+	 */
+	if (err && !memcg) {
 		if (flags & SLAB_PANIC)
 			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
 				name, err);
@@ -236,11 +249,15 @@ out_locked:
 				name, err);
 			dump_stack();
 		}
 
-		return NULL;
 	}
-
 	return s;
+
+out_free_cache:
+	memcg_free_cache_params(s);
+	kfree(s->name);
+	kmem_cache_free(kmem_cache, s);
+	goto out_unlock;
 }
 
 struct kmem_cache *
@@ -263,11 +280,12 @@ void kmem_cache_destroy(struct kmem_cache *s)
 
 		list_del(&s->list);
 		if (!__kmem_cache_shutdown(s)) {
+			memcg_unregister_cache(s);
 			mutex_unlock(&slab_mutex);
 			if (s->flags & SLAB_DESTROY_BY_RCU)
 				rcu_barrier();
 
-			memcg_release_cache(s);
+			memcg_free_cache_params(s);
 			kfree(s->name);
 			kmem_cache_free(kmem_cache, s);
 		} else {
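
The patch replaces the old nested success/failure branches with the kernel's conventional single-exit goto style: preset err before a fallible step, jump to out_free_cache on failure, unwind in reverse order of initialization, and rejoin the common out_unlock path. The stand-alone sketch below distills that pattern in plain userspace C under stated assumptions: struct cache, cache_create(), and the field names are hypothetical stand-ins for illustration, not the kernel API.

/*
 * A minimal userspace sketch of the error-handling style the patch
 * converts kmem_cache_create_memcg() to: preset "err" before a step,
 * jump to a cleanup label on failure, unwind in reverse order, and
 * funnel every path through one exit point. struct cache and
 * cache_create() are hypothetical stand-ins, not kernel API.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cache {
	char *name;
	size_t size;
};

static struct cache *cache_create(const char *name, size_t size)
{
	struct cache *c;
	int err;

	err = -ENOMEM;		/* covers both allocations below */
	c = calloc(1, sizeof(*c));
	if (!c)
		goto out;

	c->size = size;
	c->name = strdup(name);
	if (!c->name)
		goto out_free_cache;

	err = 0;
out:
	/* Single exit point: report failure once, not at every call site. */
	if (err)
		fprintf(stderr, "cache_create(%s) failed with error %d\n",
			name, err);
	return err ? NULL : c;

out_free_cache:
	/*
	 * Reverse-order unwind, then rejoin the common exit path, mirroring
	 * the new out_free_cache label in the patch (free(NULL) is a no-op,
	 * just as kfree(NULL) is in the kernel).
	 */
	free(c->name);
	free(c);
	goto out;
}

int main(void)
{
	struct cache *c = cache_create("demo", 64);

	if (c) {
		printf("created '%s' (%zu bytes)\n", c->name, c->size);
		free(c->name);
		free(c);
	}
	return 0;
}

The same shape appears twice in the diff: the out_free_cache label absorbs the duplicated kfree(s->name)/kmem_cache_free() failure branches of the old code, and the single if (err && !memcg) check at the common exit replaces per-site error reporting while deliberately staying silent for per-memcg caches, where a failed creation merely means the allocation stays accounted to the root cgroup.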