author      Gavin Shan <shangw@linux.vnet.ibm.com>             2012-05-29 15:06:50 -0700
committer   Linus Torvalds <torvalds@linux-foundation.org>     2012-05-29 16:22:24 -0700
commit      181eb39425f2b9275afcb015eaa547d11f71a02f (patch)
tree        0ce0f09b5a86931e580a0a6b88e546831c7318ed
parent      4e2f07750d9a94e8f23e86408df5ab95be88bf11 (diff)
mm/memblock: fix memory leak on extending regions
The overall memblock is organized into memory regions and reserved regions.
Initially, both kinds of regions are stored in predetermined arrays of
"struct memblock_region". The arrays may need to be enlarged when newly added
regions no longer fit, and the policy is to create a double-sized array with
either the slab allocator or the memblock allocator. Unfortunately, the old
array was never freed, even when it had previously been allocated through the
slab allocator, which caused a memory leak.
The patch introduces two variables that track whether the memory and reserved
region arrays come from the slab allocator or from memblock. An array that was
allocated by the slab allocator is now released with kfree() when it is
replaced, which fixes the memory leak.
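
To make the pattern concrete, here is a minimal, self-contained user-space
sketch of the same bookkeeping idea (the names regions, regions_in_heap and
double_array are illustrative only and do not exist in the kernel): a flag
records which allocator produced the current array, so the matching release
routine is used when the array is replaced.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct region { unsigned long base, size; };

/* Illustrative stand-ins for the static init array and its bookkeeping. */
static struct region init_regions[4];
static struct region *regions = init_regions;
static size_t max_regions = 4;
static int regions_in_heap = 0;    /* analogous to memblock_*_in_slab */

/* Double the array, remembering which allocator owns the new copy. */
static int double_array(void)
{
        size_t new_max = max_regions * 2;
        struct region *new_array = malloc(new_max * sizeof(*new_array));

        if (!new_array)
                return -1;

        memcpy(new_array, regions, max_regions * sizeof(*regions));

        /* Free the old array only if it came from the heap;
         * the static initial array is never freed. */
        if (regions_in_heap)
                free(regions);

        regions = new_array;
        max_regions = new_max;
        regions_in_heap = 1;       /* update the provenance flag */
        return 0;
}

int main(void)
{
        double_array();            /* static -> heap: nothing to free */
        double_array();            /* heap -> heap: old copy freed, no leak */
        printf("max_regions = %zu\n", max_regions);
        free(regions);
        return 0;
}

In the kernel patch the flag is kept per region type (memblock_memory_in_slab
and memblock_reserved_in_slab), and the static initial arrays are likewise
never freed.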
Signed-off-by: Gavin Shan <shangw@linux.vnet.ibm.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--   mm/memblock.c   37
1 file changed, 24 insertions(+), 13 deletions(-)
diff --git a/mm/memblock.c b/mm/memblock.c
index eae06ea3aa50..952123eba433 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -37,6 +37,8 @@ struct memblock memblock __initdata_memblock = {
 
 int memblock_debug __initdata_memblock;
 static int memblock_can_resize __initdata_memblock;
+static int memblock_memory_in_slab __initdata_memblock = 0;
+static int memblock_reserved_in_slab __initdata_memblock = 0;
 
 /* inline so we don't get a warning when pr_debug is compiled out */
 static inline const char *memblock_type_name(struct memblock_type *type)
@@ -187,6 +189,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
         struct memblock_region *new_array, *old_array;
         phys_addr_t old_size, new_size, addr;
         int use_slab = slab_is_available();
+        int *in_slab;
 
         /* We don't allow resizing until we know about the reserved regions
          * of memory that aren't suitable for allocation
@@ -198,6 +201,12 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
         old_size = type->max * sizeof(struct memblock_region);
         new_size = old_size << 1;
 
+        /* Retrieve the slab flag */
+        if (type == &memblock.memory)
+                in_slab = &memblock_memory_in_slab;
+        else
+                in_slab = &memblock_reserved_in_slab;
+
         /* Try to find some space for it.
          *
          * WARNING: We assume that either slab_is_available() and we use it or
@@ -235,22 +244,24 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
         type->regions = new_array;
         type->max <<= 1;
 
-        /* If we use SLAB that's it, we are done */
-        if (use_slab)
-                return 0;
-
-        /* Add the new reserved region now. Should not fail ! */
-        BUG_ON(memblock_reserve(addr, new_size));
-
-        /* If the array wasn't our static init one, then free it. We only do
-         * that before SLAB is available as later on, we don't know whether
-         * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
-         * anyways
+        /* Free old array. We needn't free it if the array is the
+         * static one
          */
-        if (old_array != memblock_memory_init_regions &&
-            old_array != memblock_reserved_init_regions)
+        if (*in_slab)
+                kfree(old_array);
+        else if (old_array != memblock_memory_init_regions &&
+                 old_array != memblock_reserved_init_regions)
                 memblock_free(__pa(old_array), old_size);
 
+        /* Reserve the new array if that comes from the memblock.
+         * Otherwise, we needn't do it
+         */
+        if (!use_slab)
+                BUG_ON(memblock_reserve(addr, new_size));
+
+        /* Update slab flag */
+        *in_slab = use_slab;
+
         return 0;
 }