author | Kuanling Huang | 2017-09-15 16:47:45 +0800
committer | David Sterba | 2017-10-30 12:27:57 +0100
commit | eef16ba269ea1d55ca1b4eab8d91f02e185835c9 (patch)
tree | f7293754eb9652741d9516475d6282716efcc938 /fs
parent | 785884fc31883a11e88710d89c525bb7f76e33df (diff)
Btrfs: send, apply asynchronous page cache readahead to enhance page read
By analyzing perf data for btrfs send, we found that it spends a large
amount of CPU time in page_cache_sync_readahead. This cost can be
reduced by switching to asynchronous readahead. The overall performance
gain on HDD and SSD was 9 and 15 percent respectively when simply
sending a large file.
Signed-off-by: Kuanling Huang <peterh@synology.com>
Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Diffstat (limited to 'fs')
-rw-r--r-- | fs/btrfs/send.c | 21
1 file changed, 16 insertions, 5 deletions
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 07445be8c1cc..0746eda7231d 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -4720,16 +4720,27 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
 	/* initial readahead */
 	memset(&sctx->ra, 0, sizeof(struct file_ra_state));
 	file_ra_state_init(&sctx->ra, inode->i_mapping);
-	page_cache_sync_readahead(inode->i_mapping, &sctx->ra, NULL, index,
-		last_index - index + 1);
 
 	while (index <= last_index) {
 		unsigned cur_len = min_t(unsigned, len,
 					 PAGE_SIZE - pg_offset);
-		page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
+
+		page = find_lock_page(inode->i_mapping, index);
 		if (!page) {
-			ret = -ENOMEM;
-			break;
+			page_cache_sync_readahead(inode->i_mapping, &sctx->ra,
+				NULL, index, last_index + 1 - index);
+
+			page = find_or_create_page(inode->i_mapping, index,
+					GFP_KERNEL);
+			if (!page) {
+				ret = -ENOMEM;
+				break;
+			}
+		}
+
+		if (PageReadahead(page)) {
+			page_cache_async_readahead(inode->i_mapping, &sctx->ra,
+				NULL, page, index, last_index + 1 - index);
 		}
 
 		if (!PageUptodate(page)) {
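For context, the sketch below restates the pattern the patch adopts, outside the surrounding send code: take cached pages via find_lock_page(), pay for a page_cache_sync_readahead() call only on an actual cache miss, and let PG_readahead marker pages trigger page_cache_async_readahead() so disk I/O overlaps with copying. This is a condensed illustration against the 4.14-era page cache API, not the merged code: read_range_sketch() is a hypothetical helper, error handling is simplified, the NULL file argument follows what btrfs send itself passes, and it would only compile inside a kernel tree of that vintage.

```c
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Sketch: walk pages [index, last_index] of @mapping, letting
 * readahead run ahead of the consumer instead of issuing one big
 * synchronous readahead up front.
 */
static int read_range_sketch(struct address_space *mapping,
			     struct file_ra_state *ra,
			     pgoff_t index, pgoff_t last_index)
{
	struct page *page;

	while (index <= last_index) {
		/* Fast path: the page is usually already cached. */
		page = find_lock_page(mapping, index);
		if (!page) {
			/* Miss: read ahead synchronously, then retry. */
			page_cache_sync_readahead(mapping, ra, NULL, index,
						  last_index + 1 - index);
			page = find_or_create_page(mapping, index,
						   GFP_KERNEL);
			if (!page)
				return -ENOMEM;
		}

		/*
		 * The readahead code tags one page per window with
		 * PG_readahead; hitting it means the current window is
		 * being consumed, so start the next batch in the
		 * background.
		 */
		if (PageReadahead(page))
			page_cache_async_readahead(mapping, ra, NULL, page,
						   index,
						   last_index + 1 - index);

		if (!PageUptodate(page)) {
			/* ->readpage starts I/O and unlocks the page on
			 * completion; lock_page() then waits for it. */
			mapping->a_ops->readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				put_page(page);
				return -EIO;
			}
		}

		/* ... copy data out of the page here ... */
		unlock_page(page);
		put_page(page);
		index++;
	}
	return 0;
}
```

Note that the requested readahead length, last_index + 1 - index, shrinks as the loop advances, so later requests only cover the still-unread tail of the range, and the synchronous call is now paid only on a real cache miss rather than unconditionally, which matches the commit's observation that time spent in page_cache_sync_readahead dominated the profile.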