From: Muchun Song <songmuchun@bytedance.com>
Subject: [PATCH v2 12/19] mm/hugetlb: Support freeing vmemmap pages of gigantic page
Date: 2020-10-26
A gigantic page is allocated from bootmem at boot time, so if we want
to free its unused vmemmap pages we must also be able to allocate the
page tables needed for the vmemmap remapping. Allocate those page
tables from bootmem as well, and stash them in struct
huge_bootmem_page until the page is initialized. (The preallocation
size is worked through in the sketch after the diffstat below.)

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
include/linux/hugetlb.h | 3 +++
mm/hugetlb.c | 57 +++++++++++++++++++++++++++++++++++++++++
2 files changed, 60 insertions(+)
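
For scale, here is a minimal arithmetic sketch of the preallocation
size. It is not part of the patch, and it assumes x86-64 defaults
(4 KiB base pages, a 64-byte struct page, PMD-mapped vmemmap); the
real nr_pgtable() helper is introduced earlier in this series and may
compute this differently. The EX_/ex_ names below are illustrative
only:

/*
 * Illustrative only: a 1 GiB gigantic page covers 262144 base pages,
 * so its struct page array (the vmemmap) occupies 262144 * 64 bytes
 * = 16 MiB. PMD-mapped, that is 16 MiB / 2 MiB = 8 PMD entries, and
 * splitting each PMD down to PTE granularity costs one pte page
 * apiece: 8 pages (32 KiB) reserved from bootmem per gigantic page.
 */
#define EX_BASE_PAGE_SIZE	(4UL << 10)	/* 4 KiB base page */
#define EX_STRUCT_PAGE_SIZE	64UL		/* sizeof(struct page) */
#define EX_PMD_MAP_SIZE		(2UL << 20)	/* 2 MiB mapped per PMD */

static unsigned long ex_nr_pgtable(unsigned long hpage_size)
{
	/* Size of the struct page array backing one huge page. */
	unsigned long vmemmap_size = hpage_size / EX_BASE_PAGE_SIZE *
				     EX_STRUCT_PAGE_SIZE;

	/* One pte page per PMD that has to be split. */
	return vmemmap_size / EX_PMD_MAP_SIZE;
}

For a 1 GiB page this returns 8, so the pgtable_size computed in
gather_vmemmap_pgtable_prealloc() below would be 8 << PAGE_SHIFT =
32 KiB.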

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 919f47d77117..695d3041ae7d 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -506,6 +506,9 @@ struct hstate {
struct huge_bootmem_page {
struct list_head list;
struct hstate *hstate;
+#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+ pte_t *vmemmap_pgtable;
+#endif
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f75b93fb4c07..d98b55ad1a90 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1410,6 +1410,48 @@ static inline void vmemmap_pgtable_free(struct hstate *h, struct page *page)
pte_free_kernel(&init_mm, pte_p);
}

+static unsigned long __init gather_vmemmap_pgtable_prealloc(void)
+{
+ struct huge_bootmem_page *m, *tmp;
+ unsigned long nr_free = 0;
+
+ list_for_each_entry_safe(m, tmp, &huge_boot_pages, list) {
+ struct hstate *h = m->hstate;
+ unsigned int pgtable_size = nr_pgtable(h) << PAGE_SHIFT;
+
+ if (!pgtable_size)
+ continue;
+
+ m->vmemmap_pgtable = memblock_alloc_try_nid(pgtable_size,
+ PAGE_SIZE, 0, MEMBLOCK_ALLOC_ACCESSIBLE,
+ NUMA_NO_NODE);
+ if (!m->vmemmap_pgtable) {
+ nr_free++;
+ list_del(&m->list);
+ memblock_free_early(__pa(m), huge_page_size(h));
+ }
+ }
+
+ return nr_free;
+}
+
+static void __init gather_vmemmap_pgtable_init(struct huge_bootmem_page *m,
+ struct page *page)
+{
+ int i;
+ struct hstate *h = m->hstate;
+ unsigned long pgtable = (unsigned long)m->vmemmap_pgtable;
+ unsigned int nr = nr_pgtable(h);
+
+ if (!nr)
+ return;
+
+ vmemmap_pgtable_init(page);
+
+ for (i = 0; i < nr; i++, pgtable += PAGE_SIZE)
+ vmemmap_pgtable_deposit(page, (pte_t *)pgtable);
+}
+
static void __init hugetlb_vmemmap_init(struct hstate *h)
{
unsigned int order = huge_page_order(h);
@@ -1778,6 +1820,16 @@ static inline void vmemmap_pgtable_free(struct hstate *h, struct page *page)
{
}

+static inline unsigned long gather_vmemmap_pgtable_prealloc(void)
+{
+ return 0;
+}
+
+static inline void gather_vmemmap_pgtable_init(struct huge_bootmem_page *m,
+ struct page *page)
+{
+}
+
static inline void free_huge_page_vmemmap(struct hstate *h, struct page *head)
{
}
@@ -3039,6 +3091,7 @@ static void __init gather_bootmem_prealloc(void)
WARN_ON(page_count(page) != 1);
prep_compound_huge_page(page, h->order);
WARN_ON(PageReserved(page));
+ gather_vmemmap_pgtable_init(m, page);
prep_new_huge_page(h, page, page_to_nid(page));
put_page(page); /* free it into the hugepage allocator */

@@ -3091,6 +3144,10 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
break;
cond_resched();
}
+
+ if (hstate_is_gigantic(h))
+ i -= gather_vmemmap_pgtable_prealloc();
+
if (i < h->max_huge_pages) {
char buf[32];

--
2.20.1
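
Note on the error path above: if bootmem cannot supply the page
tables, gather_vmemmap_pgtable_prealloc() releases the gigantic page
itself back to memblock and returns how many pages were dropped;
hugetlb_hstate_alloc_pages() subtracts that count from i, so the
existing "allocated fewer pages than requested" warning also covers
pages lost this way.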