author		Vitaly Wool	2017-10-03 16:14:47 -0700
committer	Linus Torvalds	2017-10-03 17:54:24 -0700
commit		d5567c9df1ef001b2a7e6684b3b3498371ee4cae (patch)
tree		b3ce7486314ee88b61cb34f010947c6b40f8548f /mm
parent		d9d73e81fe82fdf4ee65a48c26531edc04108349 (diff)
z3fold: fix potential race in z3fold_reclaim_page
It is possible that on a (partially) unsuccessful page reclaim, kref_put()
called in z3fold_reclaim_page() does not yield page release, but the page
is released shortly afterwards by another thread.  Then z3fold_reclaim_page()
would try to list_add() that (released) page again, which is obviously a bug.

To avoid that, spin_lock() has to be taken earlier, before the kref_put()
call mentioned earlier.

Link: http://lkml.kernel.org/r/20170913162937.bfff21c7d12b12a5f47639fd@gmail.com
Signed-off-by: Vitaly Wool <vitalywool@gmail.com>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: <Oleksiy.Avramchenko@sony.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
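For readers unfamiliar with the pattern, the sketch below illustrates the
ordering the patch adopts.  It is not taken from mm/z3fold.c: the demo_*
structures and helpers are hypothetical and the real z3fold code differs in
detail.  The point is only that the lock protecting the LRU list is acquired
before kref_put(), so a put that does not drop the last reference and the
subsequent list_add() form one critical section with respect to any
concurrent release of the same page.

/*
 * Illustrative only -- these demo_* names are not part of mm/z3fold.c.
 * Every reference drop and every list manipulation happens under
 * pool->lock, so a page can never be freed between a failed final
 * kref_put() and the list_add() that puts it back on the LRU.
 */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_pool {
	spinlock_t lock;	/* protects lru and serializes releases */
	struct list_head lru;
};

struct demo_page {
	struct list_head lru;
	struct kref refcount;
};

/* Release callback; in this sketch it always runs with pool->lock held. */
static void demo_release(struct kref *ref)
{
	struct demo_page *page = container_of(ref, struct demo_page, refcount);

	list_del_init(&page->lru);
	kfree(page);
}

/* Another thread dropping its reference: also under pool->lock. */
static void demo_put(struct demo_pool *pool, struct demo_page *page)
{
	spin_lock(&pool->lock);
	kref_put(&page->refcount, demo_release);
	spin_unlock(&pool->lock);
}

/* Reclaim failed: put the page back on the LRU without racing a release. */
static void demo_requeue(struct demo_pool *pool, struct demo_page *page)
{
	spin_lock(&pool->lock);		/* taken before kref_put(), as in the patch */
	if (kref_put(&page->refcount, demo_release)) {
		/* We dropped the last reference; the page is gone. */
		spin_unlock(&pool->lock);
		return;
	}
	/* Other references remain and the lock is held: re-adding is safe. */
	list_add(&page->lru, &pool->lru);
	spin_unlock(&pool->lock);
}

With the buggy ordering (lock taken only after the kref_put()), another
thread could drop the final reference in the window between the put and the
lock acquisition, freeing the page that is then re-added to the LRU.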
Diffstat (limited to 'mm')
-rw-r--r--	mm/z3fold.c	4
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 486550df32be..b04fa3ba1bf2 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -875,16 +875,18 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 				goto next;
 		}
 next:
+		spin_lock(&pool->lock);
 		if (test_bit(PAGE_HEADLESS, &page->private)) {
 			if (ret == 0) {
+				spin_unlock(&pool->lock);
 				free_z3fold_page(page);
 				return 0;
 			}
 		} else if (kref_put(&zhdr->refcount, release_z3fold_page)) {
 			atomic64_dec(&pool->pages_nr);
+			spin_unlock(&pool->lock);
 			return 0;
 		}
-		spin_lock(&pool->lock);
 		/*
 		 * Add to the beginning of LRU.