author    Johannes Weiner    2009-03-31 15:23:12 -0700
committer Linus Torvalds     2009-04-01 08:59:13 -0700
commit2443462b0a04ef0f82ad48f4fd0ef4ac5b24c4b7 (patch)
tree3be4cdc338bf004f7b8c6c70baf7fb78ad252844 /mm
parent2584e517320bd48dc8d20e38a2621a2dbe58fade (diff)
mm: move pagevec stripping to save unlock-relock
In shrink_active_list() after the deactivation loop, we strip buffer heads
from the potentially remaining pages in the pagevec.

Currently, this drops the zone's lru lock for stripping, only to reacquire
it again afterwards to update statistics.  It is not necessary to strip the
pages before updating the stats, so move the whole thing out of the
protected region and save the extra locking.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: MinChan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
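The change below is easier to read as a general pattern: finish all the work
that actually needs the lock, drop the lock exactly once, then do the
lock-independent work outside the critical section.  A minimal user-space
sketch of that pattern follows; it is illustrative only, not kernel code --
stats_lock, strip_buffers() and update_stats_then_work() are made-up names,
and a pthread spinlock stands in for zone->lru_lock.

/*
 * Sketch of the locking pattern this patch applies (assumed/illustrative
 * names, not kernel API).  Build with: cc -pthread sketch.c
 *
 * Before the patch, the work that does not need the lock sat in the middle
 * of the critical section, forcing an unlock/relock pair.  After the patch,
 * the locked updates finish first and the independent work runs unlocked.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t stats_lock;   /* stands in for zone->lru_lock */
static long deactivated_stat;           /* stands in for the zone counters */

static void strip_buffers(int *pages, int n)
{
	/* placeholder for pagevec_strip(): needs no caller-held lock */
	for (int i = 0; i < n; i++)
		pages[i] = 0;
}

static void update_stats_then_work(int *pages, int n, int over_limit)
{
	pthread_spin_lock(&stats_lock);
	deactivated_stat += n;              /* all locked updates happen here */
	pthread_spin_unlock(&stats_lock);   /* drop the lock exactly once */

	if (over_limit)
		strip_buffers(pages, n);    /* unlocked, as in the patched code */
}

int main(void)
{
	int pages[4] = { 1, 1, 1, 1 };

	pthread_spin_init(&stats_lock, PTHREAD_PROCESS_PRIVATE);
	update_stats_then_work(pages, 4, 1);
	printf("deactivated: %ld\n", deactivated_stat);
	pthread_spin_destroy(&stats_lock);
	return 0;
}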
Diffstat (limited to 'mm')
-rw-r--r--  mm/vmscan.c  |  7 ++-----
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 51f2df04d7cf..988aef933016 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1298,14 +1298,11 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	}
 	__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
 	pgdeactivate += pgmoved;
-	if (buffer_heads_over_limit) {
-		spin_unlock_irq(&zone->lru_lock);
-		pagevec_strip(&pvec);
-		spin_lock_irq(&zone->lru_lock);
-	}
 	__count_zone_vm_events(PGREFILL, zone, pgscanned);
 	__count_vm_events(PGDEACTIVATE, pgdeactivate);
 	spin_unlock_irq(&zone->lru_lock);
+	if (buffer_heads_over_limit)
+		pagevec_strip(&pvec);
 	if (vm_swap_full())
 		pagevec_swap_free(&pvec);