author     Linus Torvalds  2020-01-30 15:39:24 -0800
committer  Linus Torvalds  2020-01-30 15:39:24 -0800
commit     6e135baed8e70b00b88f7608f6b041461a5270bc (patch)
tree       5a57809af84b83db9427f502119efb567c48ea58 /fs/f2fs/data.c
parent     0196be12aab2dc3a3e44824045229b0e539be8fd (diff)
parent     80f2388afa6ef985f9c5c228e36705c4d4db4756 (diff)
Merge tag 'f2fs-for-5.6' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs
Pull f2fs updates from Jaegeuk Kim:
 "In this series, we've implemented transparent compression experimentally. It supports LZO and LZ4, and more algorithms will be added later as we gain field experience.

 At this point, the feature doesn't expose compressed space to the user directly, in order to guarantee that later data updates to that space remain possible. Instead, the main goal is to reduce data writes to the flash disk as much as possible, extending disk lifetime as well as relaxing IO congestion. Alternatively, we're also considering an ioctl() to reclaim compressed space and show it to the user after setting the immutable bit.

 Enhancements:
  - add compression support
  - avoid unnecessary locks in quota ops
  - harden power-cut scenario for zoned block devices
  - use private bio_set to avoid IO congestion
  - replace GC mutex with rwsem to serialize callers

 Bug fixes:
  - fix dentry consistency and memory corruption in rename()'s error case
  - fix wrong swap extent reports
  - fix casefolding bugs
  - change lock coverage to avoid deadlock
  - avoid GFP_KERNEL under f2fs_lock_op

 And we've cleaned up sysfs entries in preparation for removing debugfs."

* tag 'f2fs-for-5.6' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (31 commits)
  f2fs: fix race conditions in ->d_compare() and ->d_hash()
  f2fs: fix dcache lookup of !casefolded directories
  f2fs: Add f2fs stats to sysfs
  f2fs: delete duplicate information on sysfs nodes
  f2fs: change to use rwsem for gc_mutex
  f2fs: update f2fs document regarding to fsync_mode
  f2fs: add a way to turn off ipu bio cache
  f2fs: code cleanup for f2fs_statfs_project()
  f2fs: fix miscounted block limit in f2fs_statfs_project()
  f2fs: show the CP_PAUSE reason in checkpoint traces
  f2fs: fix deadlock allocating bio_post_read_ctx from mempool
  f2fs: remove unneeded check for error allocating bio_post_read_ctx
  f2fs: convert inline_dir early before starting rename
  f2fs: fix memleak of kobject
  f2fs: fix to add swap extent correctly
  f2fs: run fsck when getting bad inode during GC
  f2fs: support data compression
  f2fs: free sysfs kobject
  f2fs: declare nested quota_sem and remove unnecessary sems
  f2fs: don't put new_page twice in f2fs_rename
  ...
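The compression policy described in the pull message (store the compressed form only when it actually saves space, otherwise keep the cluster raw) can be sketched with the userspace LZ4 API. This is a conceptual illustration, not the kernel implementation in fs/f2fs/compress.c, which uses the in-kernel lz4/lzo wrappers:

/* Conceptual sketch of "compress only if it saves space" using the
 * userspace LZ4 library (build with -llz4); the kernel code in
 * fs/f2fs/compress.c uses the in-kernel lz4 wrappers instead. */
#include <lz4.h>
#include <stdlib.h>
#include <string.h>

/* Returns a freshly allocated buffer: compressed data when that is
 * strictly smaller than the input, otherwise a raw copy. */
static char *maybe_compress(const char *in, int in_len, int *out_len)
{
        int bound = LZ4_compressBound(in_len);
        char *buf = malloc(bound);
        int clen;

        if (!buf)
                return NULL;
        clen = LZ4_compress_default(in, buf, in_len, bound);
        if (clen > 0 && clen < in_len) {
                *out_len = clen;        /* worth storing compressed */
                return buf;
        }
        free(buf);                      /* incompressible: store raw */
        buf = malloc(in_len);
        if (buf)
                memcpy(buf, in, in_len);
        *out_len = in_len;
        return buf;
}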
Diffstat (limited to 'fs/f2fs/data.c')
-rw-r--r--  fs/f2fs/data.c | 734 ++++++++++++++++++++++++++++++++++++-------
1 file changed, 634 insertions(+), 100 deletions(-)
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 0fa356e94ef5..8bd9afa81c54 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -31,6 +31,47 @@
static struct kmem_cache *bio_post_read_ctx_cache;
static struct kmem_cache *bio_entry_slab;
static mempool_t *bio_post_read_ctx_pool;
+static struct bio_set f2fs_bioset;
+
+#define F2FS_BIO_POOL_SIZE NR_CURSEG_TYPE
+
+int __init f2fs_init_bioset(void)
+{
+ if (bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
+ 0, BIOSET_NEED_BVECS))
+ return -ENOMEM;
+ return 0;
+}
+
+void f2fs_destroy_bioset(void)
+{
+ bioset_exit(&f2fs_bioset);
+}
+
+static inline struct bio *__f2fs_bio_alloc(gfp_t gfp_mask,
+ unsigned int nr_iovecs)
+{
+ return bio_alloc_bioset(gfp_mask, nr_iovecs, &f2fs_bioset);
+}
+
+struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, int npages, bool no_fail)
+{
+ struct bio *bio;
+
+ if (no_fail) {
+ /* No failure on bio allocation */
+ bio = __f2fs_bio_alloc(GFP_NOIO, npages);
+ if (!bio)
+ bio = __f2fs_bio_alloc(GFP_NOIO | __GFP_NOFAIL, npages);
+ return bio;
+ }
+ if (time_to_inject(sbi, FAULT_ALLOC_BIO)) {
+ f2fs_show_injection_info(sbi, FAULT_ALLOC_BIO);
+ return NULL;
+ }
+
+ return __f2fs_bio_alloc(GFP_KERNEL, npages);
+}
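f2fs_bio_alloc() above tries a plain GFP_NOIO allocation first and only then falls back to GFP_NOIO | __GFP_NOFAIL, which may block but cannot return NULL. A minimal userspace analogue of that two-step fallback (illustrative only, since gfp semantics do not exist outside the kernel):

/* Two-step allocation mirroring the GFP_NOIO -> GFP_NOIO |
 * __GFP_NOFAIL sequence above: one fast attempt that may fail,
 * then a fallback loop that never returns NULL. */
#include <sched.h>
#include <stdlib.h>

static void *alloc_no_fail(size_t size)
{
        void *p = malloc(size);         /* fast path: may fail */

        while (!p) {                    /* no-fail fallback: keep trying */
                sched_yield();          /* let other work make progress */
                p = malloc(size);
        }
        return p;
}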
static bool __is_cp_guaranteed(struct page *page)
{
@@ -41,6 +82,9 @@ static bool __is_cp_guaranteed(struct page *page)
if (!mapping)
return false;
+ if (f2fs_is_compressed_page(page))
+ return false;
+
inode = mapping->host;
sbi = F2FS_I_SB(inode);
@@ -73,19 +117,19 @@ static enum count_type __read_io_type(struct page *page)
/* postprocessing steps for read bios */
enum bio_post_read_step {
- STEP_INITIAL = 0,
STEP_DECRYPT,
+ STEP_DECOMPRESS,
STEP_VERITY,
};
struct bio_post_read_ctx {
struct bio *bio;
+ struct f2fs_sb_info *sbi;
struct work_struct work;
- unsigned int cur_step;
unsigned int enabled_steps;
};
-static void __read_end_io(struct bio *bio)
+static void __read_end_io(struct bio *bio, bool compr, bool verity)
{
struct page *page;
struct bio_vec *bv;
@@ -94,6 +138,13 @@ static void __read_end_io(struct bio *bio)
bio_for_each_segment_all(bv, bio, iter_all) {
page = bv->bv_page;
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (compr && f2fs_is_compressed_page(page)) {
+ f2fs_decompress_pages(bio, page, verity);
+ continue;
+ }
+#endif
+
/* PG_error was set if any post_read step failed */
if (bio->bi_status || PageError(page)) {
ClearPageUptodate(page);
@@ -105,31 +156,107 @@ static void __read_end_io(struct bio *bio)
dec_page_count(F2FS_P_SB(page), __read_io_type(page));
unlock_page(page);
}
- if (bio->bi_private)
- mempool_free(bio->bi_private, bio_post_read_ctx_pool);
- bio_put(bio);
+}
+
+static void f2fs_release_read_bio(struct bio *bio);
+static void __f2fs_read_end_io(struct bio *bio, bool compr, bool verity)
+{
+ if (!compr)
+ __read_end_io(bio, false, verity);
+ f2fs_release_read_bio(bio);
+}
+
+static void f2fs_decompress_bio(struct bio *bio, bool verity)
+{
+ __read_end_io(bio, true, verity);
}
static void bio_post_read_processing(struct bio_post_read_ctx *ctx);
-static void decrypt_work(struct work_struct *work)
+static void f2fs_decrypt_work(struct bio_post_read_ctx *ctx)
+{
+ fscrypt_decrypt_bio(ctx->bio);
+}
+
+static void f2fs_decompress_work(struct bio_post_read_ctx *ctx)
+{
+ f2fs_decompress_bio(ctx->bio, ctx->enabled_steps & (1 << STEP_VERITY));
+}
+
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+static void f2fs_verify_pages(struct page **rpages, unsigned int cluster_size)
+{
+ f2fs_decompress_end_io(rpages, cluster_size, false, true);
+}
+
+static void f2fs_verify_bio(struct bio *bio)
+{
+ struct page *page = bio_first_page_all(bio);
+ struct decompress_io_ctx *dic =
+ (struct decompress_io_ctx *)page_private(page);
+
+ f2fs_verify_pages(dic->rpages, dic->cluster_size);
+ f2fs_free_dic(dic);
+}
+#endif
+
+static void f2fs_verity_work(struct work_struct *work)
{
struct bio_post_read_ctx *ctx =
container_of(work, struct bio_post_read_ctx, work);
+ struct bio *bio = ctx->bio;
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ unsigned int enabled_steps = ctx->enabled_steps;
+#endif
- fscrypt_decrypt_bio(ctx->bio);
+ /*
+ * fsverity_verify_bio() may call readpages() again, and while verity
+ * will be disabled for this, decryption may still be needed, resulting
+ * in another bio_post_read_ctx being allocated. So to prevent
+ * deadlocks we need to release the current ctx to the mempool first.
+ * This assumes that verity is the last post-read step.
+ */
+ mempool_free(ctx, bio_post_read_ctx_pool);
+ bio->bi_private = NULL;
+
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ /* previous step is decompression */
+ if (enabled_steps & (1 << STEP_DECOMPRESS)) {
+ f2fs_verify_bio(bio);
+ f2fs_release_read_bio(bio);
+ return;
+ }
+#endif
- bio_post_read_processing(ctx);
+ fsverity_verify_bio(bio);
+ __f2fs_read_end_io(bio, false, false);
}
-static void verity_work(struct work_struct *work)
+static void f2fs_post_read_work(struct work_struct *work)
{
struct bio_post_read_ctx *ctx =
container_of(work, struct bio_post_read_ctx, work);
- fsverity_verify_bio(ctx->bio);
+ if (ctx->enabled_steps & (1 << STEP_DECRYPT))
+ f2fs_decrypt_work(ctx);
- bio_post_read_processing(ctx);
+ if (ctx->enabled_steps & (1 << STEP_DECOMPRESS))
+ f2fs_decompress_work(ctx);
+
+ if (ctx->enabled_steps & (1 << STEP_VERITY)) {
+ INIT_WORK(&ctx->work, f2fs_verity_work);
+ fsverity_enqueue_verify_work(&ctx->work);
+ return;
+ }
+
+ __f2fs_read_end_io(ctx->bio,
+ ctx->enabled_steps & (1 << STEP_DECOMPRESS), false);
+}
+
+static void f2fs_enqueue_post_read_work(struct f2fs_sb_info *sbi,
+ struct work_struct *work)
+{
+ queue_work(sbi->post_read_wq, work);
}
static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
@@ -139,31 +266,26 @@ static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
* verity may require reading metadata pages that need decryption, and
* we shouldn't recurse to the same workqueue.
*/
- switch (++ctx->cur_step) {
- case STEP_DECRYPT:
- if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
- INIT_WORK(&ctx->work, decrypt_work);
- fscrypt_enqueue_decrypt_work(&ctx->work);
- return;
- }
- ctx->cur_step++;
- /* fall-through */
- case STEP_VERITY:
- if (ctx->enabled_steps & (1 << STEP_VERITY)) {
- INIT_WORK(&ctx->work, verity_work);
- fsverity_enqueue_verify_work(&ctx->work);
- return;
- }
- ctx->cur_step++;
- /* fall-through */
- default:
- __read_end_io(ctx->bio);
+
+ if (ctx->enabled_steps & (1 << STEP_DECRYPT) ||
+ ctx->enabled_steps & (1 << STEP_DECOMPRESS)) {
+ INIT_WORK(&ctx->work, f2fs_post_read_work);
+ f2fs_enqueue_post_read_work(ctx->sbi, &ctx->work);
+ return;
}
+
+ if (ctx->enabled_steps & (1 << STEP_VERITY)) {
+ INIT_WORK(&ctx->work, f2fs_verity_work);
+ fsverity_enqueue_verify_work(&ctx->work);
+ return;
+ }
+
+ __f2fs_read_end_io(ctx->bio, false, false);
}
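With this change, bio_post_read_processing() dispatches on the enabled_steps bitmask instead of walking the old cur_step state machine: decryption and decompression go to the filesystem's private workqueue, while verity is queued on the fsverity workqueue because it may recurse into readpages(). A self-contained sketch of that bitmask dispatch (step values and messages are illustrative stand-ins, not the kernel symbols):

/* Sketch of bitmask-driven post-read dispatch; the step bits mirror
 * the enum above, the queue names are illustrative stand-ins. */
#include <stdio.h>

enum {
        PR_DECRYPT      = 1 << 0,
        PR_DECOMPRESS   = 1 << 1,
        PR_VERITY       = 1 << 2,
};

static void dispatch(unsigned int enabled_steps)
{
        if (enabled_steps & (PR_DECRYPT | PR_DECOMPRESS)) {
                puts("queue work on the private post-read workqueue");
                return;
        }
        if (enabled_steps & PR_VERITY) {
                puts("queue work on the fsverity workqueue");
                return;
        }
        puts("no steps pending: complete the read immediately");
}

int main(void)
{
        dispatch(PR_DECRYPT | PR_VERITY);       /* private workqueue */
        dispatch(PR_VERITY);                    /* fsverity workqueue */
        dispatch(0);                            /* immediate completion */
        return 0;
}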
static bool f2fs_bio_post_read_required(struct bio *bio)
{
- return bio->bi_private && !bio->bi_status;
+ return bio->bi_private;
}
static void f2fs_read_end_io(struct bio *bio)
@@ -178,12 +300,11 @@ static void f2fs_read_end_io(struct bio *bio)
if (f2fs_bio_post_read_required(bio)) {
struct bio_post_read_ctx *ctx = bio->bi_private;
- ctx->cur_step = STEP_INITIAL;
bio_post_read_processing(ctx);
return;
}
- __read_end_io(bio);
+ __f2fs_read_end_io(bio, false, false);
}
static void f2fs_write_end_io(struct bio *bio)
@@ -214,6 +335,13 @@ static void f2fs_write_end_io(struct bio *bio)
fscrypt_finalize_bounce_page(&page);
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (f2fs_is_compressed_page(page)) {
+ f2fs_compress_write_end_io(bio, page);
+ continue;
+ }
+#endif
+
if (unlikely(bio->bi_status)) {
mapping_set_error(page->mapping, -EIO);
if (type == F2FS_WB_CP_DATA)
@@ -358,6 +486,12 @@ submit_io:
submit_bio(bio);
}
+void f2fs_submit_bio(struct f2fs_sb_info *sbi,
+ struct bio *bio, enum page_type type)
+{
+ __submit_bio(sbi, bio, type);
+}
+
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
struct f2fs_io_info *fio = &io->fio;
@@ -380,7 +514,6 @@ static bool __has_merged_page(struct bio *bio, struct inode *inode,
struct page *page, nid_t ino)
{
struct bio_vec *bvec;
- struct page *target;
struct bvec_iter_all iter_all;
if (!bio)
@@ -390,10 +523,18 @@ static bool __has_merged_page(struct bio *bio, struct inode *inode,
return true;
bio_for_each_segment_all(bvec, bio, iter_all) {
- target = bvec->bv_page;
- if (fscrypt_is_bounce_page(target))
+ struct page *target = bvec->bv_page;
+
+ if (fscrypt_is_bounce_page(target)) {
target = fscrypt_pagecache_page(target);
+ if (IS_ERR(target))
+ continue;
+ }
+ if (f2fs_is_compressed_page(target)) {
+ target = f2fs_compress_control_page(target);
+ if (IS_ERR(target))
+ continue;
+ }
if (inode && inode == target->mapping->host)
return true;
@@ -588,7 +729,8 @@ static int add_ipu_page(struct f2fs_sb_info *sbi, struct bio **bio,
found = true;
- if (bio_add_page(*bio, page, PAGE_SIZE, 0) == PAGE_SIZE) {
+ if (bio_add_page(*bio, page, PAGE_SIZE, 0) ==
+ PAGE_SIZE) {
ret = 0;
break;
}
@@ -728,7 +870,12 @@ next:
verify_fio_blkaddr(fio);
- bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
+ if (fio->encrypted_page)
+ bio_page = fio->encrypted_page;
+ else if (fio->compressed_page)
+ bio_page = fio->compressed_page;
+ else
+ bio_page = fio->page;
/* set submitted = true as a return value */
fio->submitted = true;
@@ -797,17 +944,16 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
if (f2fs_encrypted_file(inode))
post_read_steps |= 1 << STEP_DECRYPT;
-
+ if (f2fs_compressed_file(inode))
+ post_read_steps |= 1 << STEP_DECOMPRESS;
if (f2fs_need_verity(inode, first_idx))
post_read_steps |= 1 << STEP_VERITY;
if (post_read_steps) {
+ /* Due to the mempool, this never fails. */
ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
- if (!ctx) {
- bio_put(bio);
- return ERR_PTR(-ENOMEM);
- }
ctx->bio = bio;
+ ctx->sbi = sbi;
ctx->enabled_steps = post_read_steps;
bio->bi_private = ctx;
}
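The "this never fails" comment above depends on a mempool property: allocating from bio_post_read_ctx_pool with a sleepable gfp mask falls back to a preallocated reserve and, if that is exhausted, waits for a mempool_free() rather than returning NULL. A userspace analogue of such a reserve-backed allocator (a sketch; the kernel mempool additionally tries the underlying slab cache first):

/* Reserve-backed allocator in userspace: pool_take() blocks until an
 * element is available, so, like mempool_alloc() with a sleepable
 * mask, it never returns NULL. */
#include <pthread.h>
#include <stddef.h>

#define POOL_SIZE 128

static struct pr_ctx { int in_use; char payload[64]; } pool[POOL_SIZE];
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t pool_cond = PTHREAD_COND_INITIALIZER;

static struct pr_ctx *pool_take(void)
{
        struct pr_ctx *c = NULL;

        pthread_mutex_lock(&pool_lock);
        while (!c) {
                size_t i;

                for (i = 0; i < POOL_SIZE; i++) {
                        if (!pool[i].in_use) {
                                pool[i].in_use = 1;
                                c = &pool[i];
                                break;
                        }
                }
                if (!c)         /* reserve exhausted: wait for pool_put() */
                        pthread_cond_wait(&pool_cond, &pool_lock);
        }
        pthread_mutex_unlock(&pool_lock);
        return c;
}

static void pool_put(struct pr_ctx *c)
{
        pthread_mutex_lock(&pool_lock);
        c->in_use = 0;
        pthread_cond_signal(&pool_cond);
        pthread_mutex_unlock(&pool_lock);
}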
@@ -815,6 +961,13 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
return bio;
}
+static void f2fs_release_read_bio(struct bio *bio)
+{
+ if (bio->bi_private)
+ mempool_free(bio->bi_private, bio_post_read_ctx_pool);
+ bio_put(bio);
+}
+
/* This can handle encryption stuffs */
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
block_t blkaddr)
@@ -1180,19 +1333,6 @@ int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
int err = 0;
bool direct_io = iocb->ki_flags & IOCB_DIRECT;
- /* convert inline data for Direct I/O*/
- if (direct_io) {
- err = f2fs_convert_inline_inode(inode);
- if (err)
- return err;
- }
-
- if (direct_io && allow_outplace_dio(inode, iocb, from))
- return 0;
-
- if (is_inode_flag_set(inode, FI_NO_PREALLOC))
- return 0;
-
map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
if (map.m_len > map.m_lblk)
@@ -1872,6 +2012,144 @@ out:
return ret;
}
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
+ unsigned nr_pages, sector_t *last_block_in_bio,
+ bool is_readahead)
+{
+ struct dnode_of_data dn;
+ struct inode *inode = cc->inode;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct bio *bio = *bio_ret;
+ unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
+ sector_t last_block_in_file;
+ const unsigned blkbits = inode->i_blkbits;
+ const unsigned blocksize = 1 << blkbits;
+ struct decompress_io_ctx *dic = NULL;
+ int i;
+ int ret = 0;
+
+ f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
+
+ last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
+
+ /* get rid of pages beyond EOF */
+ for (i = 0; i < cc->cluster_size; i++) {
+ struct page *page = cc->rpages[i];
+
+ if (!page)
+ continue;
+ if ((sector_t)page->index >= last_block_in_file) {
+ zero_user_segment(page, 0, PAGE_SIZE);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
+ } else if (!PageUptodate(page)) {
+ continue;
+ }
+ unlock_page(page);
+ cc->rpages[i] = NULL;
+ cc->nr_rpages--;
+ }
+
+ /* we are done since all pages are beyond EOF */
+ if (f2fs_cluster_is_empty(cc))
+ goto out;
+
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
+ if (ret)
+ goto out;
+
+ /* cluster was overwritten as normal cluster */
+ if (dn.data_blkaddr != COMPRESS_ADDR)
+ goto out;
+
+ for (i = 1; i < cc->cluster_size; i++) {
+ block_t blkaddr;
+
+ blkaddr = datablock_addr(dn.inode, dn.node_page,
+ dn.ofs_in_node + i);
+
+ if (!__is_valid_data_blkaddr(blkaddr))
+ break;
+
+ if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
+ ret = -EFAULT;
+ goto out_put_dnode;
+ }
+ cc->nr_cpages++;
+ }
+
+ /* nothing to decompress */
+ if (cc->nr_cpages == 0) {
+ ret = 0;
+ goto out_put_dnode;
+ }
+
+ dic = f2fs_alloc_dic(cc);
+ if (IS_ERR(dic)) {
+ ret = PTR_ERR(dic);
+ goto out_put_dnode;
+ }
+
+ for (i = 0; i < dic->nr_cpages; i++) {
+ struct page *page = dic->cpages[i];
+ block_t blkaddr;
+
+ blkaddr = datablock_addr(dn.inode, dn.node_page,
+ dn.ofs_in_node + i + 1);
+
+ if (bio && !page_is_mergeable(sbi, bio,
+ *last_block_in_bio, blkaddr)) {
+submit_and_realloc:
+ __submit_bio(sbi, bio, DATA);
+ bio = NULL;
+ }
+
+ if (!bio) {
+ bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
+ is_readahead ? REQ_RAHEAD : 0,
+ page->index);
+ if (IS_ERR(bio)) {
+ ret = PTR_ERR(bio);
+ bio = NULL;
+ dic->failed = true;
+ if (refcount_sub_and_test(dic->nr_cpages - i,
+ &dic->ref))
+ f2fs_decompress_end_io(dic->rpages,
+ cc->cluster_size, true,
+ false);
+ f2fs_free_dic(dic);
+ f2fs_put_dnode(&dn);
+ *bio_ret = bio;
+ return ret;
+ }
+ }
+
+ f2fs_wait_on_block_writeback(inode, blkaddr);
+
+ if (bio_add_page(bio, page, blocksize, 0) < blocksize)
+ goto submit_and_realloc;
+
+ inc_page_count(sbi, F2FS_RD_DATA);
+ ClearPageError(page);
+ *last_block_in_bio = blkaddr;
+ }
+
+ f2fs_put_dnode(&dn);
+
+ *bio_ret = bio;
+ return 0;
+
+out_put_dnode:
+ f2fs_put_dnode(&dn);
+out:
+ f2fs_decompress_end_io(cc->rpages, cc->cluster_size, true, false);
+ *bio_ret = bio;
+ return ret;
+}
+#endif
+
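In the error path of f2fs_read_multi_pages() above, the references held for all not-yet-submitted compressed pages are dropped in a single refcount_sub_and_test() call, and the decompress_io_ctx is freed only when the count reaches zero. The same batched-release pattern in portable C11 atomics (a sketch, not the kernel's refcount_t, which adds saturation and overflow checks):

/* Batched reference drop: subtract N in one step and free on zero,
 * like the refcount_sub_and_test() call in the error path above. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct dic_sketch {
        atomic_int ref;                 /* one ref per in-flight page */
        /* ... decompression state ... */
};

static bool dic_sub_and_test(struct dic_sketch *d, int n)
{
        /* atomic_fetch_sub() returns the value before subtraction */
        return atomic_fetch_sub(&d->ref, n) == n;
}

static void dic_put_many(struct dic_sketch *d, int n)
{
        if (dic_sub_and_test(d, n))
                free(d);                /* last reference is gone */
}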
/*
* This function was originally taken from fs/mpage.c, and customized for f2fs.
* Major change was from block_size == page_size in f2fs by default.
@@ -1889,6 +2167,19 @@ int f2fs_mpage_readpages(struct address_space *mapping,
sector_t last_block_in_bio = 0;
struct inode *inode = mapping->host;
struct f2fs_map_blocks map;
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ struct compress_ctx cc = {
+ .inode = inode,
+ .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
+ .cluster_size = F2FS_I(inode)->i_cluster_size,
+ .cluster_idx = NULL_CLUSTER,
+ .rpages = NULL,
+ .cpages = NULL,
+ .nr_rpages = 0,
+ .nr_cpages = 0,
+ };
+#endif
+ unsigned max_nr_pages = nr_pages;
int ret = 0;
map.m_pblk = 0;
@@ -1912,9 +2203,41 @@ int f2fs_mpage_readpages(struct address_space *mapping,
goto next_page;
}
- ret = f2fs_read_single_page(inode, page, nr_pages, &map, &bio,
- &last_block_in_bio, is_readahead);
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (f2fs_compressed_file(inode)) {
+ /* there are remaining compressed pages, submit them */
+ if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
+ ret = f2fs_read_multi_pages(&cc, &bio,
+ max_nr_pages,
+ &last_block_in_bio,
+ is_readahead);
+ f2fs_destroy_compress_ctx(&cc);
+ if (ret)
+ goto set_error_page;
+ }
+ ret = f2fs_is_compressed_cluster(inode, page->index);
+ if (ret < 0)
+ goto set_error_page;
+ else if (!ret)
+ goto read_single_page;
+
+ ret = f2fs_init_compress_ctx(&cc);
+ if (ret)
+ goto set_error_page;
+
+ f2fs_compress_ctx_add_page(&cc, page);
+
+ goto next_page;
+ }
+read_single_page:
+#endif
+
+ ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
+ &bio, &last_block_in_bio, is_readahead);
if (ret) {
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+set_error_page:
+#endif
SetPageError(page);
zero_user_segment(page, 0, PAGE_SIZE);
unlock_page(page);
@@ -1922,6 +2245,19 @@ int f2fs_mpage_readpages(struct address_space *mapping,
next_page:
if (pages)
put_page(page);
+
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (f2fs_compressed_file(inode)) {
+ /* last page */
+ if (nr_pages == 1 && !f2fs_cluster_is_empty(&cc)) {
+ ret = f2fs_read_multi_pages(&cc, &bio,
+ max_nr_pages,
+ &last_block_in_bio,
+ is_readahead);
+ f2fs_destroy_compress_ctx(&cc);
+ }
+ }
+#endif
}
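The compression branches added to f2fs_mpage_readpages() implement a simple batching scheme: consecutive pages of one cluster accumulate in the compress_ctx, the batch is flushed whenever the next page cannot merge, and the final partial batch is flushed after the loop (the "last page" case above). The skeleton of that scheme, reduced to bare page indices (illustrative):

/* Skeleton of the cluster batching in f2fs_mpage_readpages():
 * accumulate indices that share a cluster, flush when the next one
 * cannot merge, then flush the leftover batch after the loop. */
#include <stdio.h>

#define LOG_CLUSTER_SIZE 2              /* 4 pages per cluster here */

static void flush_cluster(const unsigned long *batch, int n)
{
        printf("submit %d page(s) of cluster %lu\n",
               n, batch[0] >> LOG_CLUSTER_SIZE);
}

int main(void)
{
        unsigned long pages[] = { 0, 1, 2, 3, 4, 5, 8, 9 };
        unsigned long batch[1 << LOG_CLUSTER_SIZE];
        int i, n = 0;

        for (i = 0; i < 8; i++) {
                if (n && (batch[0] >> LOG_CLUSTER_SIZE) !=
                         (pages[i] >> LOG_CLUSTER_SIZE)) {
                        flush_cluster(batch, n);        /* cannot merge */
                        n = 0;
                }
                batch[n++] = pages[i];
        }
        if (n)
                flush_cluster(batch, n);        /* last, partial cluster */
        return 0;
}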
BUG_ON(pages && !list_empty(pages));
if (bio)
@@ -1936,6 +2272,11 @@ static int f2fs_read_data_page(struct file *file, struct page *page)
trace_f2fs_readpage(page, DATA);
+ if (!f2fs_is_compress_backend_ready(inode)) {
+ unlock_page(page);
+ return -EOPNOTSUPP;
+ }
+
/* If the file has inline data, try to read it directly */
if (f2fs_has_inline_data(inode))
ret = f2fs_read_inline_data(inode, page);
@@ -1954,6 +2295,9 @@ static int f2fs_read_data_pages(struct file *file,
trace_f2fs_readpages(inode, page, nr_pages);
+ if (!f2fs_is_compress_backend_ready(inode))
+ return 0;
+
/* If the file has inline data, skip readpages */
if (f2fs_has_inline_data(inode))
return 0;
@@ -1961,22 +2305,23 @@ static int f2fs_read_data_pages(struct file *file,
return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages, true);
}
-static int encrypt_one_page(struct f2fs_io_info *fio)
+int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
{
struct inode *inode = fio->page->mapping->host;
- struct page *mpage;
+ struct page *mpage, *page;
gfp_t gfp_flags = GFP_NOFS;
if (!f2fs_encrypted_file(inode))
return 0;
+ page = fio->compressed_page ? fio->compressed_page : fio->page;
+
/* wait for GCed page writeback via META_MAPPING */
f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
retry_encrypt:
- fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(fio->page,
- PAGE_SIZE, 0,
- gfp_flags);
+ fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
+ PAGE_SIZE, 0, gfp_flags);
if (IS_ERR(fio->encrypted_page)) {
/* flush pending IOs and wait for a while in the ENOMEM case */
if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
@@ -2136,7 +2481,7 @@ got_it:
if (ipu_force ||
(__is_valid_data_blkaddr(fio->old_blkaddr) &&
need_inplace_update(fio))) {
- err = encrypt_one_page(fio);
+ err = f2fs_encrypt_one_page(fio);
if (err)
goto out_writepage;
@@ -2172,13 +2517,16 @@ got_it:
fio->version = ni.version;
- err = encrypt_one_page(fio);
+ err = f2fs_encrypt_one_page(fio);
if (err)
goto out_writepage;
set_page_writeback(page);
ClearPageError(page);
+ if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
+ f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);
+
/* LFS mode write path */
f2fs_outplace_write_data(&dn, fio);
trace_f2fs_do_write_data_page(page, OPU);
@@ -2193,16 +2541,17 @@ out:
return err;
}
-static int __write_data_page(struct page *page, bool *submitted,
+int f2fs_write_single_data_page(struct page *page, int *submitted,
struct bio **bio,
sector_t *last_block,
struct writeback_control *wbc,
- enum iostat_type io_type)
+ enum iostat_type io_type,
+ int compr_blocks)
{
struct inode *inode = page->mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
loff_t i_size = i_size_read(inode);
- const pgoff_t end_index = ((unsigned long long) i_size)
+ const pgoff_t end_index = ((unsigned long long)i_size)
>> PAGE_SHIFT;
loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
unsigned offset = 0;
@@ -2218,6 +2567,7 @@ static int __write_data_page(struct page *page, bool *submitted,
.page = page,
.encrypted_page = NULL,
.submitted = false,
+ .compr_blocks = compr_blocks,
.need_lock = LOCK_RETRY,
.io_type = io_type,
.io_wbc = wbc,
@@ -2242,7 +2592,9 @@ static int __write_data_page(struct page *page, bool *submitted,
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
- if (page->index < end_index || f2fs_verity_in_progress(inode))
+ if (page->index < end_index ||
+ f2fs_verity_in_progress(inode) ||
+ compr_blocks)
goto write;
/*
@@ -2318,7 +2670,6 @@ out:
f2fs_remove_dirty_inode(inode);
submitted = NULL;
}
-
unlock_page(page);
if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
!F2FS_I(inode)->cp_task)
@@ -2331,7 +2682,7 @@ out:
}
if (submitted)
- *submitted = fio.submitted;
+ *submitted = fio.submitted ? 1 : 0;
return 0;
@@ -2352,7 +2703,23 @@ redirty_out:
static int f2fs_write_data_page(struct page *page,
struct writeback_control *wbc)
{
- return __write_data_page(page, NULL, NULL, NULL, wbc, FS_DATA_IO);
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ struct inode *inode = page->mapping->host;
+
+ if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
+ goto out;
+
+ if (f2fs_compressed_file(inode)) {
+ if (f2fs_is_compressed_cluster(inode, page->index)) {
+ redirty_page_for_writepage(wbc, page);
+ return AOP_WRITEPAGE_ACTIVATE;
+ }
+ }
+out:
+#endif
+
+ return f2fs_write_single_data_page(page, NULL, NULL, NULL,
+ wbc, FS_DATA_IO, 0);
}
/*
@@ -2365,11 +2732,27 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
enum iostat_type io_type)
{
int ret = 0;
- int done = 0;
+ int done = 0, retry = 0;
struct pagevec pvec;
struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
struct bio *bio = NULL;
sector_t last_block;
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ struct inode *inode = mapping->host;
+ struct compress_ctx cc = {
+ .inode = inode,
+ .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
+ .cluster_size = F2FS_I(inode)->i_cluster_size,
+ .cluster_idx = NULL_CLUSTER,
+ .rpages = NULL,
+ .nr_rpages = 0,
+ .cpages = NULL,
+ .rbuf = NULL,
+ .cbuf = NULL,
+ .rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
+ .private = NULL,
+ };
+#endif
int nr_pages;
pgoff_t uninitialized_var(writeback_index);
pgoff_t index;
@@ -2379,6 +2762,8 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
int range_whole = 0;
xa_mark_t tag;
int nwritten = 0;
+ int submitted = 0;
+ int i;
pagevec_init(&pvec);
@@ -2408,12 +2793,11 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
else
tag = PAGECACHE_TAG_DIRTY;
retry:
+ retry = 0;
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag_pages_for_writeback(mapping, index, end);
done_index = index;
- while (!done && (index <= end)) {
- int i;
-
+ while (!done && !retry && (index <= end)) {
nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
tag);
if (nr_pages == 0)
@@ -2421,15 +2805,62 @@ retry:
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
- bool submitted = false;
+ bool need_readd;
+readd:
+ need_readd = false;
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (f2fs_compressed_file(inode)) {
+ ret = f2fs_init_compress_ctx(&cc);
+ if (ret) {
+ done = 1;
+ break;
+ }
+
+ if (!f2fs_cluster_can_merge_page(&cc,
+ page->index)) {
+ ret = f2fs_write_multi_pages(&cc,
+ &submitted, wbc, io_type);
+ if (!ret)
+ need_readd = true;
+ goto result;
+ }
+ if (unlikely(f2fs_cp_error(sbi)))
+ goto lock_page;
+
+ if (f2fs_cluster_is_empty(&cc)) {
+ void *fsdata = NULL;
+ struct page *pagep;
+ int ret2;
+
+ ret2 = f2fs_prepare_compress_overwrite(
+ inode, &pagep,
+ page->index, &fsdata);
+ if (ret2 < 0) {
+ ret = ret2;
+ done = 1;
+ break;
+ } else if (ret2 &&
+ !f2fs_compress_write_end(inode,
+ fsdata, page->index,
+ 1)) {
+ retry = 1;
+ break;
+ }
+ } else {
+ goto lock_page;
+ }
+ }
+#endif
/* give a priority to WB_SYNC threads */
if (atomic_read(&sbi->wb_sync_req[DATA]) &&
wbc->sync_mode == WB_SYNC_NONE) {
done = 1;
break;
}
-
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+lock_page:
+#endif
done_index = page->index;
retry_write:
lock_page(page);
@@ -2456,45 +2887,71 @@ continue_unlock:
if (!clear_page_dirty_for_io(page))
goto continue_unlock;
- ret = __write_data_page(page, &submitted, &bio,
- &last_block, wbc, io_type);
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (f2fs_compressed_file(inode)) {
+ get_page(page);
+ f2fs_compress_ctx_add_page(&cc, page);
+ continue;
+ }
+#endif
+ ret = f2fs_write_single_data_page(page, &submitted,
+ &bio, &last_block, wbc, io_type, 0);
+ if (ret == AOP_WRITEPAGE_ACTIVATE)
+ unlock_page(page);
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+result:
+#endif
+ nwritten += submitted;
+ wbc->nr_to_write -= submitted;
+
if (unlikely(ret)) {
/*
* keep nr_to_write, since vfs uses this to
* get # of written pages.
*/
if (ret == AOP_WRITEPAGE_ACTIVATE) {
- unlock_page(page);
ret = 0;
- continue;
+ goto next;
} else if (ret == -EAGAIN) {
ret = 0;
if (wbc->sync_mode == WB_SYNC_ALL) {
cond_resched();
congestion_wait(BLK_RW_ASYNC,
- HZ/50);
+ HZ/50);
goto retry_write;
}
- continue;
+ goto next;
}
done_index = page->index + 1;
done = 1;
break;
- } else if (submitted) {
- nwritten++;
}
- if (--wbc->nr_to_write <= 0 &&
+ if (wbc->nr_to_write <= 0 &&
wbc->sync_mode == WB_SYNC_NONE) {
done = 1;
break;
}
+next:
+ if (need_readd)
+ goto readd;
}
pagevec_release(&pvec);
cond_resched();
}
-
- if (!cycled && !done) {
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ /* flush remaining pages in the compress cluster */
+ if (f2fs_compressed_file(inode) && !f2fs_cluster_is_empty(&cc)) {
+ ret = f2fs_write_multi_pages(&cc, &submitted, wbc, io_type);
+ nwritten += submitted;
+ wbc->nr_to_write -= submitted;
+ if (ret) {
+ done = 1;
+ retry = 0;
+ }
+ }
+#endif
+ if ((!cycled && !done) || retry) {
cycled = 1;
index = 0;
end = writeback_index - 1;
@@ -2518,6 +2975,8 @@ static inline bool __should_serialize_io(struct inode *inode,
{
if (!S_ISREG(inode->i_mode))
return false;
+ if (f2fs_compressed_file(inode))
+ return true;
if (IS_NOQUOTA(inode))
return false;
/* to avoid deadlock in path of data flush */
@@ -2613,14 +3072,16 @@ static void f2fs_write_failed(struct address_space *mapping, loff_t to)
struct inode *inode = mapping->host;
loff_t i_size = i_size_read(inode);
+ if (IS_NOQUOTA(inode))
+ return;
+
/* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
if (to > i_size && !f2fs_verity_in_progress(inode)) {
down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
down_write(&F2FS_I(inode)->i_mmap_sem);
truncate_pagecache(inode, i_size);
- if (!IS_NOQUOTA(inode))
- f2fs_truncate_blocks(inode, i_size, true);
+ f2fs_truncate_blocks(inode, i_size, true);
up_write(&F2FS_I(inode)->i_mmap_sem);
up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
@@ -2660,6 +3121,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
__do_map_lock(sbi, flag, true);
locked = true;
}
+
restart:
/* check inline_data */
ipage = f2fs_get_node_page(sbi, inode->i_ino);
@@ -2750,6 +3212,24 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
if (err)
goto fail;
}
+
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (f2fs_compressed_file(inode)) {
+ int ret;
+
+ *fsdata = NULL;
+
+ ret = f2fs_prepare_compress_overwrite(inode, pagep,
+ index, fsdata);
+ if (ret < 0) {
+ err = ret;
+ goto fail;
+ } else if (ret) {
+ return 0;
+ }
+ }
+#endif
+
repeat:
/*
* Do not use grab_cache_page_write_begin() to avoid deadlock due to
@@ -2762,6 +3242,8 @@ repeat:
goto fail;
}
+ /* TODO: cluster can be compressed due to race with .writepage */
+
*pagep = page;
err = prepare_write_begin(sbi, page, pos, len,
@@ -2845,6 +3327,16 @@ static int f2fs_write_end(struct file *file,
else
SetPageUptodate(page);
}
+
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ /* overwrite compressed file */
+ if (f2fs_compressed_file(inode) && fsdata) {
+ f2fs_compress_write_end(inode, fsdata, page->index, copied);
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+ return copied;
+ }
+#endif
+
if (!copied)
goto unlock_out;
@@ -3145,7 +3637,8 @@ int f2fs_migrate_page(struct address_space *mapping,
#ifdef CONFIG_SWAP
/* Copied from generic_swapfile_activate() to check any holes */
-static int check_swap_activate(struct file *swap_file, unsigned int max)
+static int check_swap_activate(struct swap_info_struct *sis,
+ struct file *swap_file, sector_t *span)
{
struct address_space *mapping = swap_file->f_mapping;
struct inode *inode = mapping->host;
@@ -3156,6 +3649,8 @@ static int check_swap_activate(struct file *swap_file, unsigned int max)
sector_t last_block;
sector_t lowest_block = -1;
sector_t highest_block = 0;
+ int nr_extents = 0;
+ int ret;
blkbits = inode->i_blkbits;
blocks_per_page = PAGE_SIZE >> blkbits;
@@ -3167,7 +3662,8 @@ static int check_swap_activate(struct file *swap_file, unsigned int max)
probe_block = 0;
page_no = 0;
last_block = i_size_read(inode) >> blkbits;
- while ((probe_block + blocks_per_page) <= last_block && page_no < max) {
+ while ((probe_block + blocks_per_page) <= last_block &&
+ page_no < sis->max) {
unsigned block_in_page;
sector_t first_block;
@@ -3207,13 +3703,27 @@ static int check_swap_activate(struct file *swap_file, unsigned int max)
highest_block = first_block;
}
+ /*
+ * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
+ */
+ ret = add_swap_extent(sis, page_no, 1, first_block);
+ if (ret < 0)
+ goto out;
+ nr_extents += ret;
page_no++;
probe_block += blocks_per_page;
reprobe:
continue;
}
- return 0;
-
+ ret = nr_extents;
+ *span = 1 + highest_block - lowest_block;
+ if (page_no == 0)
+ page_no = 1; /* force Empty message */
+ sis->max = page_no;
+ sis->pages = page_no - 1;
+ sis->highest_bit = page_no - 1;
+out:
+ return ret;
bad_bmap:
pr_err("swapon: swapfile has holes\n");
return -EINVAL;
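check_swap_activate() now reports each physically contiguous, page-aligned run to the swap layer with add_swap_extent() and returns the number of extents, which is what fixes the "wrong swap extent reports" noted in the pull message. The extent-building idea, reduced to a scan over an example page-to-block mapping (illustrative):

/* Extent building reduced to an array scan: start a new extent
 * whenever the run of physical blocks breaks, as check_swap_activate()
 * does when it calls add_swap_extent(). */
#include <stdio.h>

int main(void)
{
        /* physical block backing each page of the file (example data) */
        unsigned long block[] = { 100, 101, 102, 200, 201, 500 };
        int nr_pages = 6, nr_extents = 0, i, start = 0;

        for (i = 1; i <= nr_pages; i++) {
                if (i == nr_pages || block[i] != block[i - 1] + 1) {
                        printf("extent: pages %d..%d -> blocks %lu..%lu\n",
                               start, i - 1, block[start], block[i - 1]);
                        nr_extents++;
                        start = i;
                }
        }
        printf("%d extent(s)\n", nr_extents);
        return 0;
}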
@@ -3235,14 +3745,17 @@ static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
if (ret)
return ret;
- ret = check_swap_activate(file, sis->max);
- if (ret)
+ if (f2fs_disable_compressed_file(inode))
+ return -EINVAL;
+
+ ret = check_swap_activate(sis, file, span);
+ if (ret < 0)
return ret;
set_inode_flag(inode, FI_PIN_FILE);
f2fs_precache_extents(inode);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
- return 0;
+ return ret;
}
static void f2fs_swap_deactivate(struct file *file)
@@ -3319,6 +3832,27 @@ void f2fs_destroy_post_read_processing(void)
kmem_cache_destroy(bio_post_read_ctx_cache);
}
+int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi)
+{
+ if (!f2fs_sb_has_encrypt(sbi) &&
+ !f2fs_sb_has_verity(sbi) &&
+ !f2fs_sb_has_compression(sbi))
+ return 0;
+
+ sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq",
+ WQ_UNBOUND | WQ_HIGHPRI,
+ num_online_cpus());
+ if (!sbi->post_read_wq)
+ return -ENOMEM;
+ return 0;
+}
+
+void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi)
+{
+ if (sbi->post_read_wq)
+ destroy_workqueue(sbi->post_read_wq);
+}
+
int __init f2fs_init_bio_entry_cache(void)
{
bio_entry_slab = f2fs_kmem_cache_create("bio_entry_slab",
@@ -3328,7 +3862,7 @@ int __init f2fs_init_bio_entry_cache(void)
return 0;
}
-void __exit f2fs_destroy_bio_entry_cache(void)
+void f2fs_destroy_bio_entry_cache(void)
{
kmem_cache_destroy(bio_entry_slab);
}