path: root/mm
author    Trond Myklebust    2012-05-21 10:12:39 -0400
committer Trond Myklebust    2012-05-21 10:12:39 -0400
commit    b3f87b98aa3dc22cc58f970140113b270015cddb (patch)
tree      c4aec5996567ad96310f61e9244e799f41bf2625 /mm
parent    041245c88a29273788e8eff1353bc6e1f56c61df (diff)
parent    1afeaf5c29aa07db25760d2fbed5c08a3aec3498 (diff)
Merge branch 'bugfixes' into nfs-for-next
Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c       3
-rw-r--r--  mm/memcontrol.c    6
-rw-r--r--  mm/mempolicy.c    11
-rw-r--r--  mm/migrate.c      16
-rw-r--r--  mm/nobootmem.c     3
-rw-r--r--  mm/page_alloc.c    2
-rw-r--r--  mm/percpu.c       22
-rw-r--r--  mm/vmscan.c       11
-rw-r--r--  mm/vmstat.c        4
9 files changed, 51 insertions, 27 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cd65cb19c941..ae8f708e3d75 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -532,7 +532,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
struct vm_area_struct *vma,
unsigned long address, int avoid_reserve)
{
- struct page *page;
+ struct page *page = NULL;
struct mempolicy *mpol;
nodemask_t *nodemask;
struct zonelist *zonelist;
@@ -2498,7 +2498,6 @@ retry_avoidcopy:
if (outside_reserve) {
BUG_ON(huge_pte_none(pte));
if (unmap_ref_private(mm, vma, old_page, address)) {
- BUG_ON(page_count(old_page) != 1);
BUG_ON(huge_pte_none(pte));
spin_lock(&mm->page_table_lock);
ptep = huge_pte_offset(mm, address & huge_page_mask(h));
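
The first hugetlb.c hunk fixes an uninitialized-pointer bug: dequeue_huge_page_vma() can leave its zonelist walk without ever assigning page, and the old code would then return whatever happened to be on the stack. The second hunk drops a BUG_ON() that asserted a page count which, with other references in flight, is not actually guaranteed at that point. A minimal userspace sketch of the uninitialized-pointer hazard (function and names are illustrative, not kernel code):

    #include <stdio.h>
    #include <stddef.h>

    /* A pointer assigned only inside a conditional loop must be
     * initialized, or the "no match" path returns stack garbage. */
    static int *find_first_even(int *v, size_t n)
    {
            int *hit = NULL;        /* the fix: start from a known value */
            size_t i;

            for (i = 0; i < n; i++) {
                    if (v[i] % 2 == 0) {
                            hit = &v[i];
                            break;
                    }
            }
            return hit;             /* NULL cleanly signals "nothing found" */
    }

    int main(void)
    {
            int v[] = { 1, 3, 5 };

            printf("%p\n", (void *)find_first_even(v, sizeof(v) / sizeof(v[0])));
            return 0;
    }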
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 31ab9c3f0178..b659260c56ad 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4507,6 +4507,12 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
swap_buffers:
/* Swap primary and spare array */
thresholds->spare = thresholds->primary;
+ /* If all events are unregistered, free the spare array */
+ if (!new) {
+ kfree(thresholds->spare);
+ thresholds->spare = NULL;
+ }
+
rcu_assign_pointer(thresholds->primary, new);
/* To be sure that nobody uses thresholds */
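
The memcontrol.c hunk plugs a leak in the threshold double-buffering scheme: on each update the outgoing primary array becomes the spare for the next rewrite, but when the last event is unregistered there is no next rewrite, so the spare must be freed. A hedged userspace sketch of that swap-and-free pattern (struct and names are made up; the kernel uses rcu_assign_pointer() where the plain store appears):

    #include <stdlib.h>

    struct thresholds {
            int *primary;   /* array readers currently use */
            int *spare;     /* scratch buffer for the next rewrite */
    };

    static void swap_buffers(struct thresholds *t, int *new)
    {
            t->spare = t->primary;  /* old primary becomes the spare */
            if (!new) {             /* last event gone: nothing will reuse it */
                    free(t->spare);
                    t->spare = NULL;
            }
            t->primary = new;       /* the kernel uses rcu_assign_pointer() */
    }

    int main(void)
    {
            struct thresholds t = { malloc(sizeof(int)), NULL };

            swap_buffers(&t, NULL); /* unregister the only event */
            return (t.primary || t.spare) ? 1 : 0;
    }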
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index cfb6c8678754..b19569137529 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1361,11 +1361,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
mm = get_task_mm(task);
put_task_struct(task);
- if (mm)
- err = do_migrate_pages(mm, old, new,
- capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
- else
+
+ if (!mm) {
err = -EINVAL;
+ goto out;
+ }
+
+ err = do_migrate_pages(mm, old, new,
+ capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
mmput(mm);
out:
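
This hunk and the migrate.c one below make the same structural change: an if/else that nested the common path is replaced by a guard clause that handles the error first, keeping the do_migrate_pages() call at one indent level and the cleanup path easy to follow. A compact sketch of the shape (names hypothetical):

    #include <errno.h>
    #include <stddef.h>

    static int migrate_sketch(void *mm)
    {
            int err;

            if (!mm) {              /* guard clause: fail fast */
                    err = -EINVAL;
                    goto out;
            }

            err = 0;                /* un-nested common path; the real code
                                     * calls do_migrate_pages() here */
            /* mmput(mm) would go here: only reached when mm was valid */
    out:
            return err;
    }

    int main(void)
    {
            return migrate_sketch(NULL) == -EINVAL ? 0 : 1;
    }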
diff --git a/mm/migrate.c b/mm/migrate.c
index 51c08a0c6f68..11072383ae12 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1388,14 +1388,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
mm = get_task_mm(task);
put_task_struct(task);
- if (mm) {
- if (nodes)
- err = do_pages_move(mm, task_nodes, nr_pages, pages,
- nodes, status, flags);
- else
- err = do_pages_stat(mm, nr_pages, pages, status);
- } else
- err = -EINVAL;
+ if (!mm)
+ return -EINVAL;
+
+ if (nodes)
+ err = do_pages_move(mm, task_nodes, nr_pages, pages,
+ nodes, status, flags);
+ else
+ err = do_pages_stat(mm, nr_pages, pages, status);
mmput(mm);
return err;
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index e53bb8a256b1..1983fb1c7026 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -82,8 +82,7 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size)
static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
- int i;
- unsigned long start_aligned, end_aligned;
+ unsigned long i, start_aligned, end_aligned;
int order = ilog2(BITS_PER_LONG);
start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
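
Besides folding the declarations together, this hunk widens i from int to unsigned long so page counts past INT_MAX cannot truncate. The alignment arithmetic in the surrounding context deserves a worked example; assuming BITS_PER_LONG == 64 and that the companion end_aligned rounds down the same way:

    #include <stdio.h>

    #define BITS_PER_LONG 64UL

    int main(void)
    {
            unsigned long start = 100, end = 300;

            /* round start up, and end down, to a BITS_PER_LONG boundary */
            unsigned long start_aligned =
                    (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
            unsigned long end_aligned = end & ~(BITS_PER_LONG - 1);

            /* prints "128 256": pages 100..127 and 256..299 get freed
             * one at a time, 128..255 as two order-6 (64-page) blocks */
            printf("%lu %lu\n", start_aligned, end_aligned);
            return 0;
    }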
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a712fb9e04ce..918330f71dba 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5203,7 +5203,7 @@ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
int ret;
ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
- if (!write || (ret == -EINVAL))
+ if (!write || (ret < 0))
return ret;
for_each_populated_zone(zone) {
for_each_possible_cpu(cpu) {
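
The page_alloc.c tweak broadens the error check: proc_dointvec_minmax() can fail for reasons other than -EINVAL (a bad user buffer yields -EFAULT, for instance), and testing ret < 0 covers them all. A userspace sketch with a hypothetical parser standing in for the sysctl helper:

    #include <errno.h>
    #include <stdio.h>

    /* stands in for proc_dointvec_minmax(): can fail in several ways */
    static int parse_nonneg(const char *s, int *out)
    {
            int v;

            if (sscanf(s, "%d", &v) != 1)
                    return -EINVAL; /* not a number */
            if (v < 0)
                    return -ERANGE; /* a different errno entirely */
            *out = v;
            return 0;
    }

    int main(void)
    {
            int v, ret = parse_nonneg("-5", &v);

            if (ret < 0)    /* catches -EINVAL and -ERANGE alike */
                    return 0;
            return 1;
    }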
diff --git a/mm/percpu.c b/mm/percpu.c
index f47af9123af7..bb4be7435ce3 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1132,20 +1132,20 @@ static void pcpu_dump_alloc_info(const char *lvl,
for (alloc_end += gi->nr_units / upa;
alloc < alloc_end; alloc++) {
if (!(alloc % apl)) {
- printk("\n");
+ printk(KERN_CONT "\n");
printk("%spcpu-alloc: ", lvl);
}
- printk("[%0*d] ", group_width, group);
+ printk(KERN_CONT "[%0*d] ", group_width, group);
for (unit_end += upa; unit < unit_end; unit++)
if (gi->cpu_map[unit] != NR_CPUS)
- printk("%0*d ", cpu_width,
+ printk(KERN_CONT "%0*d ", cpu_width,
gi->cpu_map[unit]);
else
- printk("%s ", empty_str);
+ printk(KERN_CONT "%s ", empty_str);
}
}
- printk("\n");
+ printk(KERN_CONT "\n");
}
/**
@@ -1650,6 +1650,16 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
areas[group] = ptr;
base = min(ptr, base);
+ }
+
+ /*
+ * Copy data and free unused parts. This should happen after all
+ * allocations are complete; otherwise, we may end up with
+ * overlapping groups.
+ */
+ for (group = 0; group < ai->nr_groups; group++) {
+ struct pcpu_group_info *gi = &ai->groups[group];
+ void *ptr = areas[group];
for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
if (gi->cpu_map[i] == NR_CPUS) {
@@ -1885,6 +1895,8 @@ void __init setup_per_cpu_areas(void)
fc = __alloc_bootmem(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
if (!ai || !fc)
panic("Failed to allocate memory for percpu areas.");
+ /* kmemleak tracks the percpu allocations separately */
+ kmemleak_free(fc);
ai->dyn_size = unit_size;
ai->unit_size = unit_size;
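
Three independent fixes land in percpu.c. The KERN_CONT annotations in the first hunk mark printk() fragments as continuations of the current line rather than new messages, and the kmemleak_free() in the last hunk tells kmemleak to stop tracking a bootmem block that the percpu allocator accounts for itself. The subtle one is the middle hunk: pcpu_embed_first_chunk() used to copy data and free unused tails inside the same loop that allocated each group's area, so memory trimmed from one group could be reissued by the allocator for a later group, yielding overlapping areas. The fix defers trimming until every group is allocated. A userspace sketch of just that ordering (allocator and sizes illustrative):

    #include <stdlib.h>
    #include <string.h>

    #define NR_GROUPS 4
    #define AREA_SIZE 4096

    int main(void)
    {
            void *areas[NR_GROUPS];
            int g;

            /* pass 1: reserve every group's area before freeing anything,
             * so no later allocation can land inside an earlier group */
            for (g = 0; g < NR_GROUPS; g++)
                    areas[g] = malloc(AREA_SIZE);

            /* pass 2: only now populate areas and return unused parts */
            for (g = 0; g < NR_GROUPS; g++)
                    if (areas[g])
                            memset(areas[g], 0, AREA_SIZE);

            for (g = 0; g < NR_GROUPS; g++)
                    free(areas[g]);
            return 0;
    }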
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1a518684a32f..33dc256033b5 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1568,9 +1568,14 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
reclaim_stat->recent_scanned[0] += nr_anon;
reclaim_stat->recent_scanned[1] += nr_file;
- if (current_is_kswapd())
- __count_vm_events(KSWAPD_STEAL, nr_reclaimed);
- __count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
+ if (global_reclaim(sc)) {
+ if (current_is_kswapd())
+ __count_zone_vm_events(PGSTEAL_KSWAPD, zone,
+ nr_reclaimed);
+ else
+ __count_zone_vm_events(PGSTEAL_DIRECT, zone,
+ nr_reclaimed);
+ }
putback_inactive_pages(mz, &page_list);
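
The vmscan.c hunk splits reclaim accounting by source: the old aggregate PGSTEAL counter (and the separate KSWAPD_STEAL event) give way to PGSTEAL_KSWAPD and PGSTEAL_DIRECT, bumped only for global reclaim so memcg-limit reclaim no longer inflates the system-wide numbers. A plain-C sketch of the split, without the per-zone and per-cpu machinery:

    #include <stdio.h>

    enum steal_source { STEAL_KSWAPD, STEAL_DIRECT, NR_SOURCES };

    static unsigned long pgsteal[NR_SOURCES];

    /* attribute reclaimed pages to whichever path reclaimed them */
    static void count_steal(int is_kswapd, unsigned long nr_reclaimed)
    {
            pgsteal[is_kswapd ? STEAL_KSWAPD : STEAL_DIRECT] += nr_reclaimed;
    }

    int main(void)
    {
            count_steal(1, 32);     /* kswapd reclaimed 32 pages */
            count_steal(0, 8);      /* direct reclaim got 8 */
            printf("kswapd=%lu direct=%lu\n",
                   pgsteal[STEAL_KSWAPD], pgsteal[STEAL_DIRECT]);
            return 0;
    }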
diff --git a/mm/vmstat.c b/mm/vmstat.c
index f600557a7659..7db1b9bab492 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -738,7 +738,8 @@ const char * const vmstat_text[] = {
"pgmajfault",
TEXTS_FOR_ZONES("pgrefill")
- TEXTS_FOR_ZONES("pgsteal")
+ TEXTS_FOR_ZONES("pgsteal_kswapd")
+ TEXTS_FOR_ZONES("pgsteal_direct")
TEXTS_FOR_ZONES("pgscan_kswapd")
TEXTS_FOR_ZONES("pgscan_direct")
@@ -747,7 +748,6 @@ const char * const vmstat_text[] = {
#endif
"pginodesteal",
"slabs_scanned",
- "kswapd_steal",
"kswapd_inodesteal",
"kswapd_low_wmark_hit_quickly",
"kswapd_high_wmark_hit_quickly",