    Subject: Re: [PATCH v9 05/11] mm/hugetlb: Allocate the vmemmap pages associated with each HugeTLB page
    From: Mike Kravetz
    On 12/13/20 7:45 AM, Muchun Song wrote:
    > When we free a HugeTLB page to the buddy allocator, we should allocate the
    > vmemmap pages associated with it. We can do that in the __free_hugepage()
    > before freeing it to buddy.

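    For reference, a rough sketch of where that call would sit in the free
    path (the helper name alloc_huge_page_vmemmap() and the body below are
    my assumptions based on the description above, not the exact patch):

    static void __free_hugepage(struct hstate *h, struct page *page)
    {
            /*
             * Re-allocate and remap the vmemmap pages that were freed when
             * this HugeTLB page was allocated, before the page is handed
             * back to the buddy allocator.
             */
            alloc_huge_page_vmemmap(h, page);

            /* ... existing teardown of the HugeTLB page ... */

            __free_pages(page, huge_page_order(h));
    }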
    ...

    > diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
    > index 78c527617e8d..ffcf092c92ed 100644
    > --- a/mm/sparse-vmemmap.c
    > +++ b/mm/sparse-vmemmap.c
    > @@ -29,6 +29,7 @@
    > #include <linux/sched.h>
    > #include <linux/pgtable.h>
    > #include <linux/bootmem_info.h>
    > +#include <linux/delay.h>
    >
    > #include <asm/dma.h>
    > #include <asm/pgalloc.h>
    > @@ -39,7 +40,8 @@
    > *
    > * @rmap_pte: called for each non-empty PTE (lowest-level) entry.
    > * @reuse: the page which is reused for the tail vmemmap pages.
    > - * @vmemmap_pages: the list head of the vmemmap pages that can be freed.
    > + * @vmemmap_pages: the list head of the vmemmap pages that can be freed
    > + * or is mapped from.
    > */
    > struct vmemmap_rmap_walk {
    > void (*rmap_pte)(pte_t *pte, unsigned long addr,
    > @@ -54,6 +56,9 @@ struct vmemmap_rmap_walk {
    > */
    > #define VMEMMAP_TAIL_PAGE_REUSE -1
    >
    > +/* The gfp mask of allocating vmemmap page */
    > +#define GFP_VMEMMAP_PAGE (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
    > +
    > static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr,
    > unsigned long end, struct vmemmap_rmap_walk *walk)
    > {
    > @@ -200,6 +205,68 @@ void vmemmap_remap_reuse(unsigned long start, unsigned long size)
    > free_vmemmap_page_list(&vmemmap_pages);
    > }
    >
    > +static void vmemmap_remap_restore_pte(pte_t *pte, unsigned long addr,
    > + struct vmemmap_rmap_walk *walk)
    > +{
    > + pgprot_t pgprot = PAGE_KERNEL;
    > + struct page *page;
    > + void *to;
    > +
    > + BUG_ON(pte_page(*pte) != walk->reuse);
    > +
    > + page = list_first_entry(walk->vmemmap_pages, struct page, lru);
    > + list_del(&page->lru);
    > + to = page_to_virt(page);
    > + copy_page(to, page_to_virt(walk->reuse));
    > +
    > + set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot));
    > +}
    > +
    > +static void alloc_vmemmap_page_list(struct list_head *list,
    > + unsigned long nr_pages)
    > +{
    > + while (nr_pages--) {
    > + struct page *page;
    > +
    > +retry:
    > + page = alloc_page(GFP_VMEMMAP_PAGE);

    Should we try (or require) that the vmemmap pages be on the same node
    as the pages they describe? I imagine performance would be impacted if
    a struct page and the page it describes were on different NUMA nodes.

    > + if (unlikely(!page)) {
    > + msleep(100);
    > + /*
    > + * We should retry infinitely, because we cannot
    > + * handle allocation failures. Once we allocate
    > + * vmemmap pages successfully, then we can free
    > + * a HugeTLB page.
    > + */
    > + goto retry;
    > + }
    > + list_add_tail(&page->lru, list);
    > + }
    > +}
    > +

    --
    Mike Kravetz
