path: root/fs/hugetlbfs/inode.c
author     zhong jiang                              2016-10-07 17:02:01 -0700
committer  Linus Torvalds                           2016-10-07 18:46:29 -0700
commit     72e2936c04f7d2a4bf87d7f72d3bf11cf91ebb47 (patch)
tree       e074463b15c0c725afca88f16a5683ae437e0459 /fs/hugetlbfs/inode.c
parent     63f53dea0c9866e93802d50a230c460a024c44e5 (diff)
mm: remove unnecessary condition in remove_inode_hugepages
When a huge page is added to the page cache (huge_add_to_page_cache), its page private flag is cleared. Since remove_inode_hugepages() is only called for pages already in the page cache, PagePrivate(page) will always be false there. This patch removes the dead code without any functional change.

Link: http://lkml.kernel.org/r/1475113323-29368-1-git-send-email-zhongjiang@huawei.com
Signed-off-by: zhong jiang <zhongjiang@huawei.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Tested-by: Mike Kravetz <mike.kravetz@oracle.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
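For context, the reason the flag can never be set here: huge_add_to_page_cache() clears it as soon as the page enters the page cache. Below is a simplified, illustrative sketch of that helper (mm/hugetlb.c) around this kernel version; inode block accounting and other details are omitted, so treat it as a reading aid rather than the exact upstream body.

int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			   pgoff_t idx)
{
	int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);

	if (err)
		return err;

	/*
	 * Once the page is in the page cache the private flag is cleared,
	 * so remove_inode_hugepages() can only ever see !PagePrivate(page).
	 */
	ClearPagePrivate(page);

	/* keep the page dirty so generic code paths do not drop it */
	set_page_dirty(page);

	return 0;
}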
Diffstat (limited to 'fs/hugetlbfs/inode.c')
-rw-r--r--  fs/hugetlbfs/inode.c  12
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 4ea71eba40a5..7337cac29e9e 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -416,7 +416,6 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 		for (i = 0; i < pagevec_count(&pvec); ++i) {
 			struct page *page = pvec.pages[i];
-			bool rsv_on_error;
 			u32 hash;
 			/*
@@ -458,18 +457,17 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 			 * cache (remove_huge_page) BEFORE removing the
 			 * region/reserve map (hugetlb_unreserve_pages). In
 			 * rare out of memory conditions, removal of the
-			 * region/reserve map could fail. Before free'ing
-			 * the page, note PagePrivate which is used in case
-			 * of error.
+			 * region/reserve map could fail. Correspondingly,
+			 * the subpool and global reserve usage count can need
+			 * to be adjusted.
 			 */
-			rsv_on_error = !PagePrivate(page);
+			VM_BUG_ON(PagePrivate(page));
 			remove_huge_page(page);
 			freed++;
 			if (!truncate_op) {
 				if (unlikely(hugetlb_unreserve_pages(inode,
 							next, next + 1, 1)))
-					hugetlb_fix_reserve_counts(inode,
-							rsv_on_error);
+					hugetlb_fix_reserve_counts(inode);
 			}
 			unlock_page(page);
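Note that this view is limited to fs/hugetlbfs/inode.c. Since the call site above now passes only the inode, the same commit presumably also trims the hugetlb_fix_reserve_counts() declaration and definition elsewhere (not shown here), roughly:

	void hugetlb_fix_reserve_counts(struct inode *inode);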