From 6cbf16b3b66a61b9c6df8f2ed4ac346cb427f28a Mon Sep 17 00:00:00 2001
From: Minchan Kim
Date: Tue, 8 Sep 2015 15:04:49 -0700
Subject: zsmalloc: use class->pages_per_zspage

There is no need to recalculate pages_per_zspage at runtime.  Just use
class->pages_per_zspage to avoid unnecessary runtime overhead.

Signed-off-by: Minchan Kim
Acked-by: Sergey Senozhatsky
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/zsmalloc.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

(limited to 'mm')

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index c10885ca87a4..ce08d043becd 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1723,7 +1723,7 @@ static unsigned long zs_can_compact(struct size_class *class)
 	obj_wasted /= get_maxobj_per_zspage(class->size,
 			class->pages_per_zspage);
 
-	return obj_wasted * get_pages_per_zspage(class->size);
+	return obj_wasted * class->pages_per_zspage;
 }
 
 static void __zs_compact(struct zs_pool *pool, struct size_class *class)
@@ -1761,8 +1761,7 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
 
 		putback_zspage(pool, class, dst_page);
 		if (putback_zspage(pool, class, src_page) == ZS_EMPTY)
-			pool->stats.pages_compacted +=
-				get_pages_per_zspage(class->size);
+			pool->stats.pages_compacted += class->pages_per_zspage;
 		spin_unlock(&class->lock);
 		cond_resched();
 		spin_lock(&class->lock);
-- 
cgit v1.2.3
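
The change works because pages_per_zspage is a pure function of the class
size and is already computed once per size class when the pool is set up,
so re-deriving it on every compaction pass only burns cycles.  The
user-space sketch below illustrates that caching pattern under stated
assumptions: the simplified get_pages_per_zspage() helper, init_size_class(),
and the constants are illustrative stand-ins, not copies of the kernel code.

/*
 * Minimal user-space sketch of the pattern the patch relies on:
 * pages_per_zspage is derived from the class size once, at class-init
 * time, and cached in struct size_class; hot paths such as
 * zs_can_compact() then read the cached field instead of calling the
 * helper again.  The helper here is a simplified stand-in for the
 * kernel's get_pages_per_zspage(), not a copy of it.
 */
#include <limits.h>
#include <stdio.h>

#ifndef PAGE_SIZE
#define PAGE_SIZE		4096
#endif
#define MAX_PAGES_PER_ZSPAGE	4	/* illustrative limit */

struct size_class {
	int size;		/* object size served by this class */
	int pages_per_zspage;	/* computed once at init, then reused */
};

/* Simplified stand-in: pick the page count that wastes the least space. */
static int get_pages_per_zspage(int class_size)
{
	int i, best = 1, best_waste = INT_MAX;

	for (i = 1; i <= MAX_PAGES_PER_ZSPAGE; i++) {
		int waste = (i * PAGE_SIZE) % class_size;

		if (waste < best_waste) {
			best_waste = waste;
			best = i;
		}
	}
	return best;
}

/* Analogous to pool creation filling in class->pages_per_zspage once. */
static void init_size_class(struct size_class *class, int size)
{
	class->size = size;
	class->pages_per_zspage = get_pages_per_zspage(size);
}

int main(void)
{
	struct size_class class;

	init_size_class(&class, 3264);

	/* Hot path: no recomputation, just read the cached value. */
	printf("class size %d -> %d page(s) per zspage\n",
	       class.size, class.pages_per_zspage);
	return 0;
}

Because the cached field and a fresh call to the helper are identical by
construction, the patch only drops redundant work: zs_can_compact() and the
pages_compacted statistic report the same values as before.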