author     Hugh Dickins                               2005-11-21 21:32:14 -0800
committer  Linus Torvalds                             2005-11-22 09:13:42 -0800
commit     664beed0190fae687ac51295694004902ddeb18e
tree       89a7c8d9d541fb678c567834cb758fc88b375d47
parent     1cdca61bf8537043edde8ef784ce1a1351361dac
[PATCH] unpaged: unifdefed PageCompound
It looks like snd_xxx is not the only nopage to be using PageReserved as a way
of holding a high-order page together: which no longer works, but is masked by
our failure to free from VM_RESERVED areas. We cannot fix that bug without
first substituting another way to hold the high-order page together, while
farming out the 0-order pages from within it.
That's just what PageCompound is designed for, but it's been kept under
CONFIG_HUGETLB_PAGE. Remove the #ifdefs: which saves some space (out-of-line
put_page), doesn't slow down what most needs to be fast (already using
hugetlb), and unifies the way we handle high-order pages.
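
To make the idea concrete, here is a minimal, self-contained C model of what
"holding a high-order page together" with PageCompound means. This is
illustrative userspace code, not the kernel implementation: struct fake_page,
fake_prep_compound() and fake_put_page() are invented names; the real code
lives in mm/page_alloc.c and mm/swap.c in the diff below.

/* Illustrative model only, NOT the kernel API: every 0-order page in a
 * high-order block carries a flag and a back-pointer to the head page,
 * so any reference drop funnels to one place instead of relying on
 * PageReserved to keep the block from being split up and freed. */
#include <assert.h>
#include <stdio.h>

struct fake_page {
	int compound;			/* models PG_compound		*/
	struct fake_page *head;		/* tail -> head back-pointer	*/
	int refcount;			/* meaningful on the head only	*/
};

/* Mark all (1 << order) pages of a block as one compound unit. */
static void fake_prep_compound(struct fake_page *pages, unsigned long order)
{
	unsigned long i, nr = 1UL << order;

	for (i = 0; i < nr; i++) {
		pages[i].compound = 1;
		pages[i].head = &pages[0];
	}
	pages[0].refcount = 1;
}

/* Dropping a reference through any sub-page really drops the head's. */
static void fake_put_page(struct fake_page *page)
{
	if (page->compound)
		page = page->head;
	if (--page->refcount == 0)
		printf("whole high-order block freed via head %p\n",
		       (void *)page);
}

int main(void)
{
	struct fake_page block[4] = { { 0 } };	/* order-2 block: 4 pages */

	fake_prep_compound(block, 2);
	fake_put_page(&block[3]);		/* release via a tail page */
	assert(block[0].refcount == 0);
	return 0;
}

The only point of the model is that the tail pages remain individually
addressable 0-order pages while still identifying the head, which is exactly
the property a nopage handler needs when it farms out pieces of a high-order
allocation.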
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
 include/linux/mm.h         | 19 -------------------
 include/linux/page-flags.h |  4 ----
 mm/page_alloc.c            |  5 -----
 mm/swap.c                  |  3 ---
 4 files changed, 0 insertions(+), 31 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0986d19be0b7..9701210c6680 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -311,8 +311,6 @@ struct page {
 
 extern void FASTCALL(__page_cache_release(struct page *));
 
-#ifdef CONFIG_HUGETLB_PAGE
-
 static inline int page_count(struct page *page)
 {
 	if (PageCompound(page))
@@ -329,23 +327,6 @@ static inline void get_page(struct page *page)
 
 void put_page(struct page *page);
 
-#else		/* CONFIG_HUGETLB_PAGE */
-
-#define page_count(p)		(atomic_read(&(p)->_count) + 1)
-
-static inline void get_page(struct page *page)
-{
-	atomic_inc(&page->_count);
-}
-
-static inline void put_page(struct page *page)
-{
-	if (put_page_testzero(page))
-		__page_cache_release(page);
-}
-
-#endif		/* CONFIG_HUGETLB_PAGE */
-
 /*
  * Multiple processes may "see" the same page.  E.g. for untouched
  * mappings of /dev/null, all processes see the same page full of
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index f34767c5fc79..343083fec258 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -287,11 +287,7 @@ extern void __mod_page_state(unsigned long offset, unsigned long delta);
 #define ClearPageReclaim(page)	clear_bit(PG_reclaim, &(page)->flags)
 #define TestClearPageReclaim(page) test_and_clear_bit(PG_reclaim, &(page)->flags)
 
-#ifdef CONFIG_HUGETLB_PAGE
 #define PageCompound(page)	test_bit(PG_compound, &(page)->flags)
-#else
-#define PageCompound(page)	0
-#endif
 #define SetPageCompound(page)	set_bit(PG_compound, &(page)->flags)
 #define ClearPageCompound(page)	clear_bit(PG_compound, &(page)->flags)
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bd4de592dc23..23b84c4e1a57 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -148,10 +148,6 @@ static void bad_page(const char *function, struct page *page)
 	add_taint(TAINT_BAD_PAGE);
 }
 
-#ifndef CONFIG_HUGETLB_PAGE
-#define prep_compound_page(page, order) do { } while (0)
-#define destroy_compound_page(page, order) do { } while (0)
-#else
 /*
  * Higher-order pages are called "compound pages".  They are structured thusly:
  *
@@ -205,7 +201,6 @@ static void destroy_compound_page(struct page *page, unsigned long order)
 		ClearPageCompound(p);
 	}
 }
-#endif /* CONFIG_HUGETLB_PAGE */
 
 /*
  * function for dealing with page's order in buddy system.
diff --git a/mm/swap.c b/mm/swap.c
index d09cf7f03e76..73d351439ef6 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -34,8 +34,6 @@
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
-#ifdef CONFIG_HUGETLB_PAGE
-
 void put_page(struct page *page)
 {
 	if (unlikely(PageCompound(page))) {
@@ -52,7 +50,6 @@ void put_page(struct page *page)
 		__page_cache_release(page);
 }
 EXPORT_SYMBOL(put_page);
-#endif
 
 /*
  * Writeback is about to end against a page which has been marked for immediate
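
For completeness, the dispatch that the now always-built put_page() in
mm/swap.c performs can be modelled the same way: a compound page funnels into
a separate compound release path rather than __page_cache_release(). Again
this is an illustrative userspace sketch with invented names (model_page,
model_put_page, free_huge_block), not the kernel code shown in the hunk above.

/* Illustrative only: models how releasing a compound page dispatches to a
 * per-compound destructor instead of the ordinary single-page release. */
#include <stdio.h>

struct model_page {
	int compound;
	struct model_page *head;
	int refcount;
	void (*dtor)(struct model_page *);	/* set on the head at prep time */
};

static void free_huge_block(struct model_page *head)
{
	printf("compound destructor tears down the block at %p\n",
	       (void *)head);
}

static void release_single(struct model_page *page)
{
	printf("ordinary 0-order release of %p\n", (void *)page);
}

/* Models put_page(): compound pages take the compound release path. */
static void model_put_page(struct model_page *page)
{
	if (page->compound)
		page = page->head;
	if (--page->refcount != 0)
		return;
	if (page->compound)
		page->dtor(page);
	else
		release_single(page);
}

int main(void)
{
	struct model_page head = { .compound = 1, .refcount = 1,
				   .dtor = free_huge_block };
	struct model_page plain = { .refcount = 1 };

	head.head = &head;
	model_put_page(&head);	/* compound: goes through its destructor */
	model_put_page(&plain);	/* ordinary page: normal release path */
	return 0;
}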