author	Daniel Rosenberg	2018-07-09 20:32:42 -0700
committer	Jaegeuk Kim	2018-08-01 11:52:36 -0700
commit	36b877af7992893b6d1ddbe96971cab5ab9e50eb (patch)
tree	b513221beae092b1c0a90a713312bcc52fcf1b93 /fs/f2fs
parent	20ee4382322cd9cf6ecfcf4f429ed108c617fb4a (diff)
f2fs: Keep alloc_valid_block_count in sync
If we attempt to request more blocks than we have room for, we instead request as many as we can; however, alloc_valid_block_count is not decremented to match the reduced value, allowing it to drift higher until the next checkpoint. This patch always decrements it when the requested amount cannot be fully granted.

Signed-off-by: Daniel Rosenberg <drosen@google.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
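To make the accounting concrete, below is a minimal userspace sketch of the bookkeeping described above. It is not f2fs code: total_blocks, total_valid and alloc_valid are illustrative stand-ins for the fs-wide block limit, sbi->total_valid_block_count and the sbi->alloc_valid_block_count percpu counter, and only the partial-grant path that this patch corrects is modeled.

/*
 * Minimal userspace sketch (not f2fs code) of the accounting this patch
 * fixes.  total_blocks, total_valid and alloc_valid are illustrative
 * stand-ins for the fs-wide limit, sbi->total_valid_block_count and the
 * sbi->alloc_valid_block_count percpu counter.
 */
#include <assert.h>
#include <stdio.h>

static long long total_blocks = 100;	/* blocks the filesystem can hand out */
static long long total_valid  = 90;	/* blocks already accounted as in use  */
static long long alloc_valid  = 90;	/* optimistically bumped on allocation */

/* Try to grab *count blocks; trim the request when space runs short. */
static int inc_valid_block_count(long long *count)
{
	long long release = 0;

	alloc_valid += *count;		/* optimistic bump, as f2fs does */
	total_valid += *count;

	if (total_valid > total_blocks) {
		long long diff = total_valid - total_blocks;

		*count      -= diff;	/* grant only what actually fits */
		release      = diff;
		total_valid -= diff;
	}

	/*
	 * The fix: the portion of the optimistic bump that was not granted
	 * must be subtracted again, otherwise alloc_valid drifts upward
	 * until the next checkpoint resynchronizes it.
	 */
	alloc_valid -= release;

	return *count ? 0 : -1;		/* -ENOSPC when nothing fits */
}

int main(void)
{
	long long want = 25;		/* only 10 blocks are actually free */

	inc_valid_block_count(&want);
	printf("granted %lld, alloc_valid %lld, total_valid %lld\n",
	       want, alloc_valid, total_valid);
	assert(alloc_valid == total_valid);	/* stays in sync after the fix */
	return 0;
}

Without the "alloc_valid -= release;" line this toy model ends with alloc_valid at 115 while only 100 blocks are accounted for, which is exactly the drift the commit message describes.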
Diffstat (limited to 'fs/f2fs')
-rw-r--r--	fs/f2fs/f2fs.h	6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index ed9a1135d56c..9e6b27596b26 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1686,18 +1686,20 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
 		sbi->total_valid_block_count -= diff;
 		if (!*count) {
 			spin_unlock(&sbi->stat_lock);
-			percpu_counter_sub(&sbi->alloc_valid_block_count, diff);
 			goto enospc;
 		}
 	}
 	spin_unlock(&sbi->stat_lock);
 
-	if (unlikely(release))
+	if (unlikely(release)) {
+		percpu_counter_sub(&sbi->alloc_valid_block_count, release);
 		dquot_release_reservation_block(inode, release);
+	}
 	f2fs_i_blocks_write(inode, *count, true, true);
 	return 0;
 
 enospc:
+	percpu_counter_sub(&sbi->alloc_valid_block_count, release);
 	dquot_release_reservation_block(inode, release);
 	return -ENOSPC;
 }
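For readers who prefer plain source to a unified diff, this is the tail of inc_valid_block_count() with the patch applied. The context is reconstructed only from the hunk above, so any lines of the full function outside that hunk are omitted.

		sbi->total_valid_block_count -= diff;
		if (!*count) {
			spin_unlock(&sbi->stat_lock);
			goto enospc;
		}
	}
	spin_unlock(&sbi->stat_lock);

	if (unlikely(release)) {
		/* hand back the part of the request that was trimmed away */
		percpu_counter_sub(&sbi->alloc_valid_block_count, release);
		dquot_release_reservation_block(inode, release);
	}
	f2fs_i_blocks_write(inode, *count, true, true);
	return 0;

enospc:
	/* nothing could be granted; release the whole reservation */
	percpu_counter_sub(&sbi->alloc_valid_block_count, release);
	dquot_release_reservation_block(inode, release);
	return -ENOSPC;
}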