| author | Dave Chinner | 2011-07-08 14:14:37 +1000 |
| --- | --- | --- |
| committer | Al Viro | 2011-07-20 01:44:32 -0400 |
| commit | e9299f5058595a655c3b207cda9635e28b9197e6 | |
| tree | b31a4dc5cab98ee1701313f45e92e583c2d76f63 /mm/vmscan.c | |
| parent | 3567b59aa80ac4417002bf58e35dce5c777d4164 | |
vmscan: add customisable shrinker batch size
For shrinkers that have their own cond_resched* calls, having
shrink_slab break the work down into small batches is not
particularly efficient. Add a customisable batch size field to
struct shrinker so that shrinkers can use a larger batch size if
they desire.
A value of zero (uninitialised) means "use the default", so
behaviour is unchanged by this patch.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
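
To make the new field concrete, here is a minimal sketch of a shrinker that opts into a larger batch size. The example_* names, the fake object counter, and the value 1024 are illustrative assumptions, not part of this patch; the ->shrink callback signature, DEFAULT_SEEKS and register_shrinker() are the pre-existing interfaces of this era.

```c
/*
 * Minimal sketch of a shrinker using the new .batch field.  The cache
 * itself is faked with a counter; only .batch is what this patch adds.
 * Assumes struct shrinker and register_shrinker() are declared via
 * linux/mm.h, as in kernels of this vintage.
 */
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/atomic.h>

static atomic_long_t example_nr_cached = ATOMIC_LONG_INIT(0);

static int example_cache_shrink(struct shrinker *shrink,
				struct shrink_control *sc)
{
	/* nr_to_scan == 0 is a query: report how many objects are freeable */
	if (sc->nr_to_scan == 0)
		return atomic_long_read(&example_nr_cached);

	/*
	 * Free up to sc->nr_to_scan objects.  A real shrinker would walk
	 * its LRU here and call cond_resched() as it goes - exactly the
	 * case where a larger batch size pays off.
	 */
	atomic_long_sub(min_t(long, sc->nr_to_scan,
			      atomic_long_read(&example_nr_cached)),
			&example_nr_cached);

	return atomic_long_read(&example_nr_cached);
}

static struct shrinker example_shrinker = {
	.shrink	= example_cache_shrink,
	.seeks	= DEFAULT_SEEKS,
	.batch	= 1024,	/* scan in 1024-object chunks; 0 means SHRINK_BATCH */
};

/* Somewhere in cache/module init: register_shrinker(&example_shrinker); */
```

A shrinker that leaves .batch at zero is still driven in SHRINK_BATCH (128-object) chunks by the loop changed below, so existing shrinkers see no change in behaviour.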
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r-- | mm/vmscan.c | 11 |
1 file changed, 6 insertions(+), 5 deletions(-)
```diff
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 387422470c95..febbc044e792 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -253,6 +253,8 @@ unsigned long shrink_slab(struct shrink_control *shrink,
 		int shrink_ret = 0;
 		long nr;
 		long new_nr;
+		long batch_size = shrinker->batch ? shrinker->batch
+						  : SHRINK_BATCH;
 
 		/*
 		 * copy the current shrinker scan count into a local variable
@@ -303,19 +305,18 @@ unsigned long shrink_slab(struct shrink_control *shrink,
 					nr_pages_scanned, lru_pages,
 					max_pass, delta, total_scan);
 
-		while (total_scan >= SHRINK_BATCH) {
-			long this_scan = SHRINK_BATCH;
+		while (total_scan >= batch_size) {
 			int nr_before;
 
 			nr_before = do_shrinker_shrink(shrinker, shrink, 0);
 			shrink_ret = do_shrinker_shrink(shrinker, shrink,
-							this_scan);
+							batch_size);
 			if (shrink_ret == -1)
 				break;
 			if (shrink_ret < nr_before)
 				ret += nr_before - shrink_ret;
-			count_vm_events(SLABS_SCANNED, this_scan);
-			total_scan -= this_scan;
+			count_vm_events(SLABS_SCANNED, batch_size);
+			total_scan -= batch_size;
 
 			cond_resched();
 		}
```