author | Minchan Kim | 2015-09-08 15:04:49 -0700
---|---|---
committer | Linus Torvalds | 2015-09-08 15:35:28 -0700
commit | 6cbf16b3b66a61b9c6df8f2ed4ac346cb427f28a (patch)
tree | 51eb943fa8a3dbaa304226ae57c3275514d0ec06 /mm
parent | ad9d5e175a77a253f52a7259a7c918b8351d99f1 (diff)
zsmalloc: use class->pages_per_zspage
There is no need to recalculate pages_per_zspage at runtime. Just use
class->pages_per_zspage to avoid unnecessary runtime overhead.
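For context, a minimal standalone sketch of the pattern this change relies on: pages_per_zspage is computed once when a size class is created and the cached field is reused on hot paths, instead of re-running the search loop on every call. The recompute helper below mirrors zsmalloc's get_pages_per_zspage(), but the struct layout, init function, and constants are simplified stand-ins, not the actual mm/zsmalloc.c code.

```c
#include <stdio.h>

#define PAGE_SIZE		4096
#define ZS_MAX_PAGES_PER_ZSPAGE	4

/* Simplified stand-in for zsmalloc's struct size_class. */
struct size_class {
	int size;		/* object size served by this class */
	int pages_per_zspage;	/* cached once at class-init time */
};

/*
 * Stand-in for get_pages_per_zspage(): pick the zspage order
 * (1..ZS_MAX_PAGES_PER_ZSPAGE pages) that wastes the least space
 * for objects of class_size bytes.
 */
static int get_pages_per_zspage(int class_size)
{
	int i, max_usedpc = 0;
	int max_usedpc_order = 1;

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int zspage_size = i * PAGE_SIZE;
		int waste = zspage_size % class_size;
		int usedpc = (zspage_size - waste) * 100 / zspage_size;

		if (usedpc > max_usedpc) {
			max_usedpc = usedpc;
			max_usedpc_order = i;
		}
	}

	return max_usedpc_order;
}

/* Hypothetical init helper: compute the value exactly once here ... */
static void size_class_init(struct size_class *class, int size)
{
	class->size = size;
	class->pages_per_zspage = get_pages_per_zspage(size);
}

int main(void)
{
	struct size_class class;

	size_class_init(&class, 208);
	/*
	 * ... and read the cached field everywhere else, which is what
	 * the patch below does in zs_can_compact() and __zs_compact().
	 */
	printf("pages_per_zspage = %d\n", class.pages_per_zspage);
	return 0;
}
```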
Signed-off-by: Minchan Kim <minchan@kernel.org>
Acked-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/zsmalloc.c | 5
1 file changed, 2 insertions(+), 3 deletions(-)
```diff
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index c10885ca87a4..ce08d043becd 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1723,7 +1723,7 @@ static unsigned long zs_can_compact(struct size_class *class)
 	obj_wasted /= get_maxobj_per_zspage(class->size,
 			class->pages_per_zspage);
 
-	return obj_wasted * get_pages_per_zspage(class->size);
+	return obj_wasted * class->pages_per_zspage;
 }
 
 static void __zs_compact(struct zs_pool *pool, struct size_class *class)
@@ -1761,8 +1761,7 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
 
 		putback_zspage(pool, class, dst_page);
 		if (putback_zspage(pool, class, src_page) == ZS_EMPTY)
-			pool->stats.pages_compacted +=
-				get_pages_per_zspage(class->size);
+			pool->stats.pages_compacted += class->pages_per_zspage;
 		spin_unlock(&class->lock);
 		cond_resched();
 		spin_lock(&class->lock);
```