author     Fan Li        2016-02-29 14:29:51 +0800
committer  Jaegeuk Kim   2016-03-17 21:19:43 -0700
commit     999270de31138f1ae41743a856911f8e5e114264 (patch)
tree       bdaf82728ffc7db19e00bffe41353f53388f4c30 /fs
parent     8074bb515014d281a6f5f1218648aa3abd9c22ab (diff)
f2fs: modify the readahead method in ra_node_page()
ra_node_page() is used to read ahead one node page. Compared to a regular read, it is faster because it does not wait for IO completion. But if it is called twice for the same block, and the IO request issued by the first call has not completed before the second call arrives, the second call ends up waiting until the read is over.

Solve this by using the same approach as __do_page_cache_readahead(): do nothing when someone else has already put the page into the mapping, since the state of that page is the responsibility of whoever put it there. This implementation also avoids altering the page reference count.

Signed-off-by: Fan li <fanofcode.li@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
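To make the intent concrete, here is a minimal before/after sketch of the lookup path, using the identifiers from the diff below; the surrounding kernel context (struct f2fs_sb_info, NODE_MAPPING(), the f2fs helpers) is assumed and not reproduced here:

	/* Before: find_get_page() takes a reference on the page, so every
	 * exit path has to drop it again with f2fs_put_page(), and a page
	 * that is present but still under IO falls through to a path that
	 * blocks on that IO. */
	apage = find_get_page(NODE_MAPPING(sbi), nid);
	if (apage && PageUptodate(apage)) {
		f2fs_put_page(apage, 0);
		return;
	}
	f2fs_put_page(apage, 0);

	/* After: an RCU-protected radix-tree lookup only peeks into the page
	 * cache. If any page already sits at this index (up to date or still
	 * under IO), readahead has nothing left to do, and the page reference
	 * count is never touched. */
	rcu_read_lock();
	apage = radix_tree_lookup(&NODE_MAPPING(sbi)->page_tree, nid);
	rcu_read_unlock();
	if (apage)
		return;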
Diffstat (limited to 'fs')
-rw-r--r--  fs/f2fs/node.c | 9
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index d714b607bf46..7b805f7f6340 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1084,12 +1084,11 @@ void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
 		return;
 	f2fs_bug_on(sbi, check_nid_range(sbi, nid));
 
-	apage = find_get_page(NODE_MAPPING(sbi), nid);
-	if (apage && PageUptodate(apage)) {
-		f2fs_put_page(apage, 0);
+	rcu_read_lock();
+	apage = radix_tree_lookup(&NODE_MAPPING(sbi)->page_tree, nid);
+	rcu_read_unlock();
+	if (apage)
 		return;
-	}
-	f2fs_put_page(apage, 0);
 
 	apage = grab_cache_page(NODE_MAPPING(sbi), nid);
 	if (!apage)