author		KAMEZAWA Hiroyuki	2008-04-29 01:00:24 -0700
committer	Linus Torvalds		2008-04-29 08:06:11 -0700
commit		33327948782bcef89c78eb47af86b6a2df9fd4a5 (patch)
tree		c1045eaee1b63a6be37a09c969e2433a5c595c33 /mm
parent		4a56d02e34baedbea5eb1fd558f2b856b8c7db1e (diff)
memcgroup: use vmalloc for mem_cgroup allocation
On ia64, this kmalloc() requires order-4 pages.  But the mem_cgroup does not
need to be physically contiguous.  For a big mem_cgroup, vmalloc is better.
For small ones, kmalloc is still used.

[akpm@linux-foundation.org: simplification]
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Pavel Emelyanov <xemul@openvz.org>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
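For context, the core of the change is a size-threshold allocation helper: an
object smaller than a page keeps using the slab allocator, while anything
larger falls back to vmalloc(), which does not require physically contiguous
pages and so avoids high-order allocations.  Below is a minimal kernel-style
sketch of that pattern, not part of this patch; the helper names
big_obj_alloc()/big_obj_free() and the explicit size parameter are
illustrative, and the real patch hard-codes sizeof(struct mem_cgroup) in
mem_cgroup_alloc()/mem_cgroup_free() instead.

/*
 * Illustrative sketch only (not from the patch): the same
 * "small object -> kmalloc, big object -> vmalloc" decision that
 * mem_cgroup_alloc()/mem_cgroup_free() implement below.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

static void *big_obj_alloc(size_t size)
{
	void *obj;

	if (size < PAGE_SIZE)		/* fits in a single slab object */
		obj = kmalloc(size, GFP_KERNEL);
	else				/* no need to be physically contiguous */
		obj = vmalloc(size);

	if (obj)			/* zero it, like kzalloc() would */
		memset(obj, 0, size);
	return obj;
}

static void big_obj_free(void *obj, size_t size)
{
	/* the free path must mirror the allocation decision above */
	if (size < PAGE_SIZE)
		kfree(obj);
	else
		vfree(obj);
}

Because struct mem_cgroup grows with the number of possible nodes/zones, large
NUMA configurations (such as ia64) take the vmalloc path, while small
configurations keep the cheaper kmalloc path.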
Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c | 35
1 file changed, 29 insertions(+), 6 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c5285afe2048..15aa34b11e88 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -31,6 +31,7 @@
 #include <linux/spinlock.h>
 #include <linux/fs.h>
 #include <linux/seq_file.h>
+#include <linux/vmalloc.h>
 #include <asm/uaccess.h>
@@ -983,6 +984,29 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
 	kfree(mem->info.nodeinfo[node]);
 }
+static struct mem_cgroup *mem_cgroup_alloc(void)
+{
+	struct mem_cgroup *mem;
+
+	if (sizeof(*mem) < PAGE_SIZE)
+		mem = kmalloc(sizeof(*mem), GFP_KERNEL);
+	else
+		mem = vmalloc(sizeof(*mem));
+
+	if (mem)
+		memset(mem, 0, sizeof(*mem));
+	return mem;
+}
+
+static void mem_cgroup_free(struct mem_cgroup *mem)
+{
+	if (sizeof(*mem) < PAGE_SIZE)
+		kfree(mem);
+	else
+		vfree(mem);
+}
+
+
 static struct cgroup_subsys_state *
 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 {
@@ -993,12 +1017,11 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 		mem = &init_mem_cgroup;
 		page_cgroup_cache = KMEM_CACHE(page_cgroup, SLAB_PANIC);
 	} else {
-		mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);
+		mem = mem_cgroup_alloc();
+		if (!mem)
+			return ERR_PTR(-ENOMEM);
 	}
-	if (mem == NULL)
-		return ERR_PTR(-ENOMEM);
-
 	res_counter_init(&mem->res);
 	memset(&mem->info, 0, sizeof(mem->info));
@@ -1012,7 +1035,7 @@ free_out:
 	for_each_node_state(node, N_POSSIBLE)
 		free_mem_cgroup_per_zone_info(mem, node);
 	if (cont->parent != NULL)
-		kfree(mem);
+		mem_cgroup_free(mem);
 	return ERR_PTR(-ENOMEM);
 }
@@ -1032,7 +1055,7 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss,
 	for_each_node_state(node, N_POSSIBLE)
 		free_mem_cgroup_per_zone_info(mem, node);
-	kfree(mem_cgroup_from_cont(cont));
+	mem_cgroup_free(mem_cgroup_from_cont(cont));
 }
 static int mem_cgroup_populate(struct cgroup_subsys *ss,