author | Sergey Senozhatsky | 2023-06-24 14:12:15 +0900
---|---|---
committer | Andrew Morton | 2023-08-18 10:12:08 -0700
commit | 4ce36584da19ff6a5d171a317c6c34c567d4628e (patch) |
tree | 8e75322ec1721303411241d25dc3d8ef35643c45 /mm/zsmalloc.c |
parent | df9cd3cbf209b9357f2939ba63afa427026e954b (diff) |
zsmalloc: move migration destination zspage inuse check
The destination zspage fullness check needs to be done after zs_object_copy(),
because that is where source and destination zspage fullness changes.
Checking destination zspage fullness before zs_object_copy() may cause
migration to loop through the source zspage sub-pages scanning for allocated
objects, only to find out at the end that the destination zspage is full.
Link: https://lkml.kernel.org/r/20230624053120.643409-3-senozhatsky@chromium.org
Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Acked-by: Minchan Kim <minchan@kernel.org>
Cc: Alexey Romanov <avromanov@sberdevices.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/zsmalloc.c')
-rw-r--r-- | mm/zsmalloc.c | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 5d60eaedc3b7..4a84f7877669 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1620,10 +1620,6 @@ static void migrate_zspage(struct zs_pool *pool, struct size_class *class,
 			continue;
 		}
 
-		/* Stop if there is no more space */
-		if (zspage_full(class, get_zspage(d_page)))
-			break;
-
 		used_obj = handle_to_obj(handle);
 		free_obj = obj_malloc(pool, get_zspage(d_page), handle);
 		zs_object_copy(class, free_obj, used_obj);
@@ -1631,6 +1627,10 @@ static void migrate_zspage(struct zs_pool *pool, struct size_class *class,
 		record_obj(handle, free_obj);
 		obj_free(class->size, used_obj);
 
+		/* Stop if there is no more space */
+		if (zspage_full(class, get_zspage(d_page)))
+			break;
+
 		/* Stop if there are no more objects to migrate */
 		if (zspage_empty(get_zspage(s_page)))
 			break;
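
For readability, here is a condensed sketch of how the body of the migration loop in migrate_zspage() reads once this patch is applied. It is assembled from the two hunks above; the object-lookup lines at the top of the loop and the obj_idx bookkeeping fall outside the hunks and are reconstructed from the surrounding function, so treat this as an illustration rather than verbatim kernel source.

	/*
	 * Condensed view of the migrate_zspage() loop after this patch.
	 * Lines not shown in the hunks above (find_alloced_obj() and the
	 * obj_idx handling) are reconstructed from surrounding context.
	 */
	while (1) {
		/* Find the next allocated object in the source zspage. */
		handle = find_alloced_obj(class, s_page, &obj_idx);
		if (!handle) {
			s_page = get_next_page(s_page);
			if (!s_page)
				break;
			obj_idx = 0;
			continue;
		}

		used_obj = handle_to_obj(handle);
		free_obj = obj_malloc(pool, get_zspage(d_page), handle);
		zs_object_copy(class, free_obj, used_obj);
		obj_idx++;
		record_obj(handle, free_obj);
		obj_free(class->size, used_obj);

		/*
		 * Stop if there is no more space: checked only after
		 * zs_object_copy(), the point at which the destination
		 * zspage's fullness actually changes.
		 */
		if (zspage_full(class, get_zspage(d_page)))
			break;

		/* Stop if there are no more objects to migrate */
		if (zspage_empty(get_zspage(s_page)))
			break;
	}

With this ordering, a destination zspage that becomes full is detected immediately after the copy that filled it, rather than only on the next iteration after another (now pointless) scan of the source sub-pages for an allocated object.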