author	David S. Miller	2012-11-17 22:00:43 -0500
committer	David S. Miller	2012-11-17 22:00:43 -0500
commit	67f4efdce7d85282fbd5832cddc80a07eb89b6d6 (patch)
tree	9a1771ef13b27abdf8cf172e5b7556ab93e5c48c /mm/vmscan.c
parent	c53aa5058ad5ca8876a47d6639ad4d4f2c5ed584 (diff)
parent	f4a75d2eb7b1e2206094b901be09adb31ba63681 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Minor line offset auto-merges.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	27
1 file changed, 2 insertions(+), 25 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2624edcfb420..48550c66f1f2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1760,28 +1760,6 @@ static bool in_reclaim_compaction(struct scan_control *sc)
 	return false;
 }
 
-#ifdef CONFIG_COMPACTION
-/*
- * If compaction is deferred for sc->order then scale the number of pages
- * reclaimed based on the number of consecutive allocation failures
- */
-static unsigned long scale_for_compaction(unsigned long pages_for_compaction,
-			struct lruvec *lruvec, struct scan_control *sc)
-{
-	struct zone *zone = lruvec_zone(lruvec);
-
-	if (zone->compact_order_failed <= sc->order)
-		pages_for_compaction <<= zone->compact_defer_shift;
-	return pages_for_compaction;
-}
-#else
-static unsigned long scale_for_compaction(unsigned long pages_for_compaction,
-			struct lruvec *lruvec, struct scan_control *sc)
-{
-	return pages_for_compaction;
-}
-#endif
-
 /*
  * Reclaim/compaction is used for high-order allocation requests. It reclaims
  * order-0 pages before compacting the zone. should_continue_reclaim() returns
@@ -1829,9 +1807,6 @@ static inline bool should_continue_reclaim(struct lruvec *lruvec,
 	 * inactive lists are large enough, continue reclaiming
 	 */
 	pages_for_compaction = (2UL << sc->order);
-
-	pages_for_compaction = scale_for_compaction(pages_for_compaction,
-						lruvec, sc);
 	inactive_lru_pages = get_lru_size(lruvec, LRU_INACTIVE_FILE);
 	if (nr_swap_pages > 0)
 		inactive_lru_pages += get_lru_size(lruvec, LRU_INACTIVE_ANON);
@@ -3017,6 +2992,8 @@ static int kswapd(void *p)
 						&balanced_classzone_idx);
 		}
 	}
+
+	current->reclaim_state = NULL;
 	return 0;
 }
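
For reference, after this revert should_continue_reclaim() compares reclaim progress against a fixed target of 2UL << sc->order pages, with no scaling by the zone's compaction deferral state. Below is a minimal userspace sketch of that comparison, not kernel code; the wrapper function and the values in main() are invented for illustration.

/*
 * Sketch of the post-revert continuation check: reclaim keeps going
 * only while fewer than 2UL << order pages have been reclaimed and
 * the inactive LRU lists still hold more than that many pages.
 */
#include <stdbool.h>
#include <stdio.h>

static bool should_continue(unsigned int order,
			    unsigned long nr_reclaimed,
			    unsigned long inactive_lru_pages)
{
	/* Fixed target: twice the size of the requested allocation. */
	unsigned long pages_for_compaction = 2UL << order;

	return nr_reclaimed < pages_for_compaction &&
	       inactive_lru_pages > pages_for_compaction;
}

int main(void)
{
	/* An order-3 request gives a threshold of 2UL << 3 = 16 pages. */
	printf("%d\n", should_continue(3, 10, 64));	/* 1: keep reclaiming */
	printf("%d\n", should_continue(3, 20, 64));	/* 0: target met */
	return 0;
}

With the removed scale_for_compaction() in place, the same target could grow by zone->compact_defer_shift doublings after repeated compaction failures; the revert returns to the fixed twofold target.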