From:	David Howells <>
Subject: [PATCH 2/2] High-order page management overhaul
Date:	Tue, 23 Nov 2004 18:08:13 +0000
The attached patch overhauls high-order page handling.
(1) A new page flag, PG_compound_slave, has been added. It marks the second and subsequent subpages of a compound page, letting get_page() and put_page() determine quickly whether special handling is needed.
This could instead be changed to do horrible things with the page count, or to abuse the page->lru member, rather than consuming another page flag.
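To illustrate the effect (this is a consequence of the patch, not a new API; assume "head" is the first page of some high-order block):

	/* Under CONFIG_ENHANCED_COMPOUND_PAGES, interior subpages carry
	 * PG_compound_slave and point back at the head, so a reference
	 * taken through any subpage manipulates the head's usage count */
	struct page *sub = head + 2;	/* an interior subpage */

	get_page(sub);			/* increments head->_count */
	...
	put_page(sub);			/* drops the reference on the head */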
(2) Compound page metadata is now always set on high-order pages at allocation time and always checked at free time. This metadata is mostly as it was before (a read-back sketch follows the notes below):
- PG_compound is set on all subpages
- PG_compound_slave is set on all but the first subpage <--- [1]
- page[1].index holds the compound page order
- page[1...N-1].private points to page[0]. <--- [2]
- page[1].mapping may hold a destructor function for put_page()
This is now done in prep_new_page().
[1] New metadata addition.
[2] page->private is no longer modified on page[0].
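As an illustration only (not part of the patch), reading this metadata back from any page in a block might look like the following, using page_head() and page_dtor_t from the mm.h changes below:

	/* Sketch: recover compound metadata from any page in the block */
	struct page *head = page_head(page);	/* page[0] of the block */

	if (PageCompound(head)) {
		unsigned order = head[1].index;	/* allocation order */
		page_dtor_t dtor = (page_dtor_t) head[1].mapping; /* may be NULL */
		/* ... */
	}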
(3) page_head() is now provided to find the first page of any page set (even single-page sets).
(4) A new config option, ENHANCED_COMPOUND_PAGES, is now available. It is enabled only on !MMU or HUGETLB_PAGE configurations, and causes page_head() to dereference page->private if PG_compound_slave is set.
(5) __GFP_COMP is no longer required, since compound metadata is now always set.
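For example (illustrative), an allocation site that used to pass the flag:

	/* before: page = alloc_pages(GFP_KERNEL | __GFP_COMP, 2); */
	/* after: compound metadata is set up by prep_new_page() regardless */
	page = alloc_pages(GFP_KERNEL, 2);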
(6) compound_page_order() is now available. This will indicate the order of any page, high-order or not.
Since it is now trivial to work out the order of any page, free_pages() and friends could all lose their order arguments, as sketched below.
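A minimal sketch of that follow-on change (not part of this patch; free_pages_by_addr() is a hypothetical name):

	/* Hypothetical: free a block without being told its order */
	void free_pages_by_addr(unsigned long addr)
	{
		if (addr != 0) {
			struct page *page = virt_to_page((void *) addr);

			__free_pages(page, compound_page_order(page));
		}
	}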
(7) bad_page() now prints more information, including the state of several pages in the block in the case of a compound page.
(8) prep_compound_page() and destroy_compound_page() have been absorbed into prep_new_page() and the page freeing checks respectively.
(9) A lot more unlikely() clauses have been inserted in the free page checking functions.
(10) The !MMU bits have all gone from page_alloc.c.
(11) There's now a page destructor prototype (page_dtor_t) and a function, set_page_dtor(), to set the destructor on compound pages.
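Usage would mirror the hugetlb conversion in the diff below; a minimal sketch, assuming my_reclaim_block() is some driver-private destructor:

	/* Hypothetical destructor: run by the enhanced put_page() when the
	 * block's usage count falls to zero */
	static void my_reclaim_block(struct page *page)
	{
		/* e.g. return the block to a private pool */
	}

	page = alloc_pages(GFP_KERNEL | __GFP_NOWARN, 3);
	if (page)
		set_page_dtor(page, my_reclaim_block);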
Signed-Off-By: David Howells <dhowells@redhat.com>
---
warthog>diffstat compound-2610rc2mm3-2.diff
 include/linux/gfp.h        |    3 
 include/linux/mm.h         |   65 ++++++---
 include/linux/page-flags.h |    6 
 init/Kconfig               |   13 +
 mm/hugetlb.c               |    6 
 mm/page_alloc.c            |  325 +++++++++++++++++++++++++++------------------
 6 files changed, 269 insertions(+), 149 deletions(-)
diff -uNrp linux-2.6.10-rc2-mm3-mmcleanup/include/linux/gfp.h linux-2.6.10-rc2-mm3-frv/include/linux/gfp.h
--- linux-2.6.10-rc2-mm3-mmcleanup/include/linux/gfp.h	2004-11-22 10:54:16.000000000 +0000
+++ linux-2.6.10-rc2-mm3-frv/include/linux/gfp.h	2004-11-22 11:45:09.000000000 +0000
@@ -36,7 +36,6 @@ struct vm_area_struct;
 #define __GFP_NOFAIL	0x800	/* Retry for ever.  Cannot fail */
 #define __GFP_NORETRY	0x1000	/* Do not retry.  Might fail */
 #define __GFP_NO_GROW	0x2000	/* Slab internal usage */
-#define __GFP_COMP	0x4000	/* Add compound page metadata */
 
 #define __GFP_BITS_SHIFT 16	/* Room for 16 __GFP_FOO bits */
 #define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)
@@ -44,7 +43,7 @@ struct vm_area_struct;
 /* if you forget to add the bitmask here kernel will crash, period */
 #define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
 			__GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
-			__GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP)
+			__GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW)
 
 #define GFP_ATOMIC	(__GFP_HIGH)
 #define GFP_NOIO	(__GFP_WAIT)
diff -uNrp linux-2.6.10-rc2-mm3-mmcleanup/include/linux/mm.h linux-2.6.10-rc2-mm3-frv/include/linux/mm.h
--- linux-2.6.10-rc2-mm3-mmcleanup/include/linux/mm.h	2004-11-22 10:54:16.000000000 +0000
+++ linux-2.6.10-rc2-mm3-frv/include/linux/mm.h	2004-11-23 12:24:38.000000000 +0000
@@ -227,6 +227,12 @@ typedef unsigned long page_flags_t;
  * it to keep track of whatever it is we are using the page for at the
  * moment. Note that we have no way to track which tasks are using
  * a page.
+ *
+ * Any high-order page allocation has all the pages marked PG_compound. The
+ * first page of such a block holds the block's usage count and control
+ * data. The second page holds the order in its index member and a destructor
+ * function pointer in its mapping member. In enhanced compound page mode, the
+ * second+ pages have their private pointers pointing at the first page.
  */
 struct page {
 	page_flags_t flags;		/* Atomic flags, some possibly
@@ -314,45 +320,72 @@ struct page {
  */
 #define get_page_testone(p)	atomic_inc_and_test(&(p)->_count)
 
-#define set_page_count(p,v)	atomic_set(&(p)->_count, v - 1)
+#define set_page_count(p,v)	atomic_set(&(p)->_count, (v) - 1)
 #define __put_page(p)		atomic_dec(&(p)->_count)
 
 extern void FASTCALL(__page_cache_release(struct page *));
 
-#ifdef CONFIG_HUGETLB_PAGE
-
-static inline int page_count(struct page *p)
+static inline struct page *page_head(struct page *page)
 {
-	if (PageCompound(p))
-		p = (struct page *)p->private;
-	return atomic_read(&(p)->_count) + 1;
+#ifdef CONFIG_ENHANCED_COMPOUND_PAGES
+	if (unlikely(PageCompoundSlave(page)))
+		page = (struct page *) page->private;
+#endif
+	return page;
 }
 
-static inline void get_page(struct page *page)
+static inline unsigned compound_page_order(struct page *page)
 {
-	if (unlikely(PageCompound(page)))
-		page = (struct page *)page->private;
-	atomic_inc(&page->_count);
+	unsigned order = 0;
+
+	if (unlikely(PageCompound(page))) {
+		page = page_head(page);
+		order = page[1].index;
+	}
+	return order;
 }
 
-void put_page(struct page *page);
+typedef void (*page_dtor_t)(struct page *);
+
+static inline page_dtor_t page_dtor(struct page *page)
+{
+	page_dtor_t dtor = NULL;
+
+	if (unlikely(PageCompound(page))) {
+		page = page_head(page);
+		dtor = (page_dtor_t) page[1].mapping;
+	}
+	return dtor;
+}
 
-#else		/* CONFIG_HUGETLB_PAGE */
+static inline void set_page_dtor(struct page *page, page_dtor_t dtor)
+{
+	BUG_ON(!PageCompound(page));
+	BUG_ON(PageCompoundSlave(page));
+	page[1].mapping = (void *) dtor;
+}
 
-#define page_count(p)		(atomic_read(&(p)->_count) + 1)
+static inline int page_count(struct page *page)
+{
+	page = page_head(page);
+	return atomic_read(&page->_count) + 1;
+}
 
 static inline void get_page(struct page *page)
 {
+	page = page_head(page);
 	atomic_inc(&page->_count);
 }
 
+#ifdef CONFIG_ENHANCED_COMPOUND_PAGES
+extern fastcall void put_page(struct page *page);
+#else
 static inline void put_page(struct page *page)
 {
 	if (!PageReserved(page) && put_page_testzero(page))
 		__page_cache_release(page);
 }
-
-#endif		/* CONFIG_HUGETLB_PAGE */
+#endif /* CONFIG_ENHANCED_COMPOUND_PAGES */
 
 /*
  * Multiple processes may "see" the same page. E.g. for untouched
diff -uNrp linux-2.6.10-rc2-mm3-mmcleanup/include/linux/page-flags.h linux-2.6.10-rc2-mm3-frv/include/linux/page-flags.h
--- linux-2.6.10-rc2-mm3-mmcleanup/include/linux/page-flags.h	2004-11-22 10:54:16.000000000 +0000
+++ linux-2.6.10-rc2-mm3-frv/include/linux/page-flags.h	2004-11-22 11:45:09.000000000 +0000
@@ -78,6 +78,7 @@
 
 #define PG_sharedpolicy		19	/* Page was allocated for a file
 					   mapping using a shared_policy */
+#define PG_compound_slave	20	/* second+ page of a compound page */
 
 /*
  * Global page accounting.  One instance per CPU.  Only unsigned longs are
@@ -294,6 +295,11 @@ extern unsigned long __read_page_state(u
 #define PageCompound(page)	test_bit(PG_compound, &(page)->flags)
 #define SetPageCompound(page)	set_bit(PG_compound, &(page)->flags)
 #define ClearPageCompound(page)	clear_bit(PG_compound, &(page)->flags)
+#define __ClearPageCompound(page) __clear_bit(PG_compound, &(page)->flags)
+
+#define PageCompoundSlave(page)	test_bit(PG_compound_slave, &(page)->flags)
+#define SetPageCompoundSlave(page) set_bit(PG_compound_slave, &(page)->flags)
+#define ClearPageCompoundSlave(page) clear_bit(PG_compound_slave, &(page)->flags)
 
 #define PageSharedPolicy(page)	test_bit(PG_sharedpolicy, &(page)->flags)
 #define SetPageSharedPolicy(page) set_bit(PG_sharedpolicy, &(page)->flags)
diff -uNrp linux-2.6.10-rc2-mm3-mmcleanup/init/Kconfig linux-2.6.10-rc2-mm3-frv/init/Kconfig
--- linux-2.6.10-rc2-mm3-mmcleanup/init/Kconfig	2004-11-22 10:54:17.000000000 +0000
+++ linux-2.6.10-rc2-mm3-frv/init/Kconfig	2004-11-22 11:45:10.000000000 +0000
@@ -380,6 +380,19 @@ config TINY_SHMEM
 	default !SHMEM
 	bool
 
+config ENHANCED_COMPOUND_PAGES
+	bool
+	default HUGETLB_PAGE || !MMU
+	help
+
+	  Enhance management of high-order pages by pointing the 2nd+ pages at
+	  the first. get_page() and put_page() then use the usage count on the
+	  first page to manage all the pages in the block.
+
+	  This is used when it might be necessary to access the intermediate
+	  pages of a block, such as ptrace() might under nommu or hugetlb
+	  conditions.
+
 menu "Loadable module support"
 
 config MODULES
diff -uNrp linux-2.6.10-rc2-mm3-mmcleanup/mm/hugetlb.c linux-2.6.10-rc2-mm3-frv/mm/hugetlb.c
--- linux-2.6.10-rc2-mm3-mmcleanup/mm/hugetlb.c	2004-11-22 10:54:18.000000000 +0000
+++ linux-2.6.10-rc2-mm3-frv/mm/hugetlb.c	2004-11-23 15:40:28.701814367 +0000
@@ -52,7 +52,7 @@ static struct page *alloc_fresh_huge_pag
 {
 	static int nid = 0;
 	struct page *page;
-	page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
+	page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_NOWARN,
 				HUGETLB_PAGE_ORDER);
 	nid = (nid + 1) % numnodes;
 	if (page) {
@@ -67,7 +67,7 @@ void free_huge_page(struct page *page)
 	BUG_ON(page_count(page));
 
 	INIT_LIST_HEAD(&page->lru);
-	page[1].mapping = NULL;
+	set_page_dtor(page, NULL);
 
 	spin_lock(&hugetlb_lock);
 	enqueue_huge_page(page);
@@ -87,7 +87,7 @@ struct page *alloc_huge_page(void)
 	}
 	spin_unlock(&hugetlb_lock);
 	set_page_count(page, 1);
-	page[1].mapping = (void *)free_huge_page;
+	set_page_dtor(page, free_huge_page);
 	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
 		clear_highpage(&page[i]);
 	return page;
diff -uNrp linux-2.6.10-rc2-mm3-mmcleanup/mm/page_alloc.c linux-2.6.10-rc2-mm3-frv/mm/page_alloc.c
--- linux-2.6.10-rc2-mm3-mmcleanup/mm/page_alloc.c	2004-11-23 16:13:04.184628888 +0000
+++ linux-2.6.10-rc2-mm3-frv/mm/page_alloc.c	2004-11-23 16:29:35.512593040 +0000
@@ -80,15 +80,61 @@ static int bad_range(struct zone *zone, 
 	return 0;
 }
 
-static void bad_page(const char *function, struct page *page)
+static inline void __bad_page(struct page *page)
 {
-	printk(KERN_EMERG "Bad page state at %s (in process '%s', page %p)\n",
-		function, current->comm, page);
-	printk(KERN_EMERG "flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
-		(int)(2*sizeof(page_flags_t)), (unsigned long)page->flags,
-		page->mapping, page_mapcount(page), page_count(page));
+	const char *fmt;
+
+	if (sizeof(void *) == 4)
+		fmt = KERN_EMERG "%08lx %p %08x %p %8x %8x %8lx %8lx\n";
+	else
+		fmt = KERN_EMERG "%016lx %p %08x %p %8x %8x %16lx %16lx\n";
+
+	printk(fmt,
+	       page_to_pfn(page),
+	       page,
+	       (unsigned) page->flags,
+	       page->mapping, page_mapcount(page), page_count(page),
+	       page->index, page->private);
+}
+
+static void bad_page(const char *function, struct page *page,
+		     struct page *page0, int order)
+{
+	printk(KERN_EMERG "\n");
+	printk(KERN_EMERG
+	       "Bad page state at %s (in process '%s', order %d)\n",
+	       function, current->comm, order);
+
+	if (sizeof(void *) == 4) {
+		printk(KERN_EMERG
+		       "PFN      PAGE*    FLAGS    MAPPING  MAPCOUNT COUNT    INDEX    PRIVATE\n");
+		printk(KERN_EMERG
+		       "======== ======== ======== ======== ======== ======== ======== ========\n");
+	}
+	else {
+		printk(KERN_EMERG
+		       "PFN              PAGE*            FLAGS    MAPPING          MAPCOUNT COUNT    INDEX            PRIVATE\n");
+		printk(KERN_EMERG
+		       "================ ================ ======== ================ ======== ======== ================ ================\n");
+	}
+
+	/* print extra details on a compound page */
+	if (PageCompound(page0)) {
+		__bad_page(page0);
+		__bad_page(page0 + 1);
+
+		if (page > page0 + 1) {
+			if (page > page0 + 2)
+				printk(KERN_EMERG "...\n");
+			__bad_page(page);
+		}
+	} else {
+		__bad_page(page);
+	}
+
 	printk(KERN_EMERG "Backtrace:\n");
 	dump_stack();
+
 	printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n");
 	page->flags &= ~(1 << PG_private	|
 			1 << PG_locked	|
@@ -103,82 +149,6 @@ static void bad_page(const char *functio
 	tainted |= TAINT_BAD_PAGE;
 }
 
-void set_page_refs(struct page *page, int order)
-{
-#ifdef CONFIG_MMU
-	set_page_count(page, 1);
-#else
-	int i;
-
-	/*
-	 * We need to reference all the pages for this order, otherwise if
-	 * anyone accesses one of the pages with (get/put) it will be freed.
-	 * - eg: access_process_vm()
-	 */
-	for (i = 0; i < (1 << order); i++)
-		set_page_count(page + i, 1);
-#endif /* CONFIG_MMU */
-}
-
-#ifndef CONFIG_HUGETLB_PAGE
-#define prep_compound_page(page, order) do { } while (0)
-#define destroy_compound_page(page, order) do { } while (0)
-#else
-/*
- * Higher-order pages are called "compound pages".  They are structured thusly:
- *
- * The first PAGE_SIZE page is called the "head page".
- *
- * The remaining PAGE_SIZE pages are called "tail pages".
- *
- * All pages have PG_compound set.  All pages have their ->private pointing at
- * the head page (even the head page has this).
- *
- * The first tail page's ->mapping, if non-zero, holds the address of the
- * compound page's put_page() function.
- *
- * The order of the allocation is stored in the first tail page's ->index
- * This is only for debug at present.  This usage means that zero-order pages
- * may not be compound.
- */
-static void prep_compound_page(struct page *page, unsigned long order)
-{
-	int i;
-	int nr_pages = 1 << order;
-
-	page[1].mapping = NULL;
-	page[1].index = order;
-	for (i = 0; i < nr_pages; i++) {
-		struct page *p = page + i;
-
-		SetPageCompound(p);
-		p->private = (unsigned long)page;
-	}
-}
-
-static void destroy_compound_page(struct page *page, unsigned long order)
-{
-	int i;
-	int nr_pages = 1 << order;
-
-	if (!PageCompound(page))
-		return;
-
-	if (page[1].index != order)
-		bad_page(__FUNCTION__, page);
-
-	for (i = 0; i < nr_pages; i++) {
-		struct page *p = page + i;
-
-		if (!PageCompound(p))
-			bad_page(__FUNCTION__, page);
-		if (p->private != (unsigned long)page)
-			bad_page(__FUNCTION__, page);
-		ClearPageCompound(p);
-	}
-}
-#endif		/* CONFIG_HUGETLB_PAGE */
-
 /*
  * function for dealing with page's order in buddy system.
  * zone->lock is already acquired when we use these.
@@ -201,6 +171,11 @@ static inline void rmv_page_order(struct
 	page->private = 0;
 }
 
+static inline void set_page_refs(struct page *page, int order)
+{
+	set_page_count(page, 1);
+}
+
 /*
  * This function checks whether a page is free && is the buddy
  * we can do coalesce a page and its buddy if
@@ -221,6 +196,93 @@ static inline int page_is_buddy(struct p
 }
 
 /*
+ * validate a page that's being handed back for recycling
+ */
+static
+void free_pages_check_compound(const char *function, struct page *page, int order)
+{
+	struct page *xpage;
+	int i;
+
+	xpage = page;
+
+	if (unlikely(order == 0 ||
+		     PageCompoundSlave(page)
+		     ))
+		goto badpage;
+
+	xpage++;
+	if (unlikely(xpage->index != order))
+		goto badpage;
+
+	for (i = (1 << order) - 1; i > 0; i--) {
+		if (unlikely(!PageCompound(xpage) ||
+			     !PageCompoundSlave(xpage) ||
+			     (xpage->flags & (
+				     1 << PG_lru	|
+				     1 << PG_private	|
+				     1 << PG_locked	|
+				     1 << PG_active	|
+				     1 << PG_reclaim	|
+				     1 << PG_slab	|
+				     1 << PG_swapcache	|
+				     1 << PG_writeback
+				     )) ||
+			     page_count(xpage) != 0 ||
+			     page_mapped(xpage) ||
+			     xpage->mapping != NULL ||
+			     xpage->private != (unsigned long) page
+			     ))
+			goto badpage;
+
+		if (PageDirty(xpage))
+			ClearPageDirty(xpage);
+		xpage++;
+	}
+
+	return;
+
+ badpage:
+	bad_page(function, xpage, page, order);
+	return;
+}
+
+static inline
+void free_pages_check(const char *function, struct page *page, int order)
+{
+	if (unlikely(
+		    page_mapped(page) ||
+		    page->mapping != NULL ||
+		    page_count(page) != 0 ||
+		    (page->flags & (
+			    1 << PG_lru	|
+			    1 << PG_private	|
+			    1 << PG_locked	|
+			    1 << PG_active	|
+			    1 << PG_reclaim	|
+			    1 << PG_slab	|
+			    1 << PG_swapcache	|
+			    1 << PG_writeback ))
+		    ))
+		goto badpage;
+
+	/* check that compound pages are correctly assembled */
+	if (unlikely(PageCompound(page)))
+		free_pages_check_compound(function, page, order);
+	else if (unlikely(order > 0))
+		goto badpage;
+
+	if (PageDirty(page))
+		ClearPageDirty(page);
+
+	return;
+
+ badpage:
+	bad_page(function, page, page, order);
+	return;
+}
+
+/*
  * Freeing function for a buddy system allocator.
  *
  * The concept of a buddy system is to maintain direct-mapped table
@@ -251,8 +313,14 @@ static inline void __free_pages_bulk (st
 	struct page *coalesced;
 	int order_size = 1 << order;
 
-	if (unlikely(order))
-		destroy_compound_page(page, order);
+	if (unlikely(PageCompound(page))) {
+		struct page *xpage = page;
+		int i;
+
+		for (i = (1 << order); i > 0; i--)
+			(xpage++)->flags &=
+				~(1 << PG_compound | 1 << PG_compound_slave);
+	}
 
 	page_idx = page - base;
 
@@ -285,25 +353,6 @@ static inline void __free_pages_bulk (st
 	zone->free_area[order].nr_free++;
 }
 
-static inline void free_pages_check(const char *function, struct page *page)
-{
-	if (	page_mapped(page) ||
-		page->mapping != NULL ||
-		page_count(page) != 0 ||
-		(page->flags & (
-			1 << PG_lru	|
-			1 << PG_private |
-			1 << PG_locked	|
-			1 << PG_active	|
-			1 << PG_reclaim	|
-			1 << PG_slab	|
-			1 << PG_swapcache |
-			1 << PG_writeback )))
-		bad_page(function, page);
-	if (PageDirty(page))
-		ClearPageDirty(page);
-}
-
 /*
  * Frees a list of pages. 
  * Assumes all pages on list are in same zone, and of same order.
@@ -341,20 +390,12 @@ free_pages_bulk(struct zone *zone, int c
 void __free_pages_ok(struct page *page, unsigned int order)
 {
 	LIST_HEAD(list);
-	int i;
 
 	arch_free_page(page, order);
 
 	mod_page_state(pgfree, 1 << order);
 
-#ifndef CONFIG_MMU
-	if (order > 0)
-		for (i = 1 ; i < (1 << order) ; ++i)
-			__put_page(page + i);
-#endif
-
-	for (i = 0 ; i < (1 << order) ; ++i)
-		free_pages_check(__FUNCTION__, page + i);
+	free_pages_check(__FUNCTION__, page, order);
 	list_add(&page->lru, &list);
 	kernel_map_pages(page, 1 << order, 0);
 	free_pages_bulk(page_zone(page), 1, &list, order);
@@ -421,8 +462,11 @@ expand(struct zone *zone, struct page *p
  */
 static void prep_new_page(struct page *page, int order)
 {
+	page_flags_t pgflags = page->flags;
+
+	/* check the struct page hasn't become corrupted */
 	if (page->mapping || page_mapped(page) ||
-	    (page->flags & (
+	    (pgflags & (
 			1 << PG_private	|
 			1 << PG_locked	|
 			1 << PG_lru	|
@@ -430,14 +474,40 @@ static void prep_new_page(struct page *p
 			1 << PG_dirty	|
 			1 << PG_reclaim	|
 			1 << PG_swapcache |
-			1 << PG_writeback )))
-		bad_page(__FUNCTION__, page);
+			1 << PG_writeback |
+			1 << PG_compound |
+			1 << PG_compound_slave)))
+		bad_page(__FUNCTION__, page, page, order);
+
+	pgflags &= ~(1 << PG_uptodate | 1 << PG_error |
+		     1 << PG_referenced | 1 << PG_arch_1 |
+		     1 << PG_checked | 1 << PG_mappedtodisk);
 
-	page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
-			1 << PG_referenced | 1 << PG_arch_1 |
-			1 << PG_checked | 1 << PG_mappedtodisk);
 	page->private = 0;
+
+	/* set the refcount on the page */
 	set_page_refs(page, order);
+
+	/* mark all pages as being compound and store high-order page metadata
+	 * on the second page */
+	if (order > 0) {
+		struct page *xpage;
+		int i;
+
+		pgflags |= 1 << PG_compound;
+
+		page[1].index = order;
+		page[1].mapping = NULL;	/* no destructor yet */
+
+		xpage = page + 1;
+		for (i = (1 << order) - 1; i > 0; i--) {
+			xpage->flags |= 1 << PG_compound | 1 << PG_compound_slave;
+			xpage->private = (unsigned long) page;
+			xpage++;
+		}
+	}
+
+	page->flags = pgflags;
 }
 
 /*
@@ -589,7 +659,7 @@ void fastcall free_hot_cold_page(struct 
 	inc_page_state(pgfree);
 	if (PageAnon(page))
 		page->mapping = NULL;
-	free_pages_check(__FUNCTION__, page);
+	free_pages_check(__FUNCTION__, page, 0);
 	pcp = &zone->pageset[get_cpu()].pcp[cold];
 	local_irq_save(flags);
 	if (pcp->count >= pcp->high)
@@ -741,8 +811,6 @@ buffered_rmqueue(struct zone *zone, int 
 		BUG_ON(bad_range(zone, page));
 		mod_page_state_zone(zone, pgalloc, 1 << order);
 		prep_new_page(page, order);
-		if (order && (gfp_flags & __GFP_COMP))
-			prep_compound_page(page, order);
 	}
 	return page;
 }
@@ -1003,23 +1071,24 @@ fastcall void free_pages(unsigned long a
 
 EXPORT_SYMBOL(free_pages);
 
-#ifdef CONFIG_HUGETLB_PAGE
-
-void put_page(struct page *page)
+#ifdef CONFIG_ENHANCED_COMPOUND_PAGES
+fastcall void put_page(struct page *page)
 {
 	if (unlikely(PageCompound(page))) {
-		page = (struct page *)page->private;
+		page = (struct page *) page->private;
 		if (put_page_testzero(page)) {
-			void (*dtor)(struct page *page);
+			page_dtor_t dtor;
 
-			dtor = (void (*)(struct page *))page[1].mapping;
+			dtor = (page_dtor_t) page[1].mapping;
 			(*dtor)(page);
 		}
 		return;
 	}
-	if (!PageReserved(page) && put_page_testzero(page))
+
+	if (likely(!PageReserved(page)) && put_page_testzero(page))
 		__page_cache_release(page);
 }
+
 EXPORT_SYMBOL(put_page);
 #endif