From: Mike Kravetz <mike.kravetz@oracle.com>
Subject: [PATCH 2/8] hugetlb: add HPageCma flag and code to free non-gigantic pages in CMA
Date: 21 Jul 2021
When huge page demotion is fully implemented, gigantic pages can be
demoted to a smaller huge page size. For example, on x86 a 1G page
can be demoted to 512 2M pages. However, gigantic pages can potentially
be allocated from CMA. If a gigantic page which was allocated from CMA
is demoted, the corresponding demoted pages need to be returned to CMA.
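
For reference, a minimal sketch of the arithmetic behind the example
above, assuming x86 with 4K base pages (the order values below are the
usual x86 constants, not taken from this patch):

	/* 1G gigantic page is order 18, 2M page is order 9 (PAGE_SHIFT == 12) */
	unsigned int gigantic_order = 18;
	unsigned int demote_order = 9;	/* HUGETLB_PAGE_ORDER on x86 */
	unsigned long nr_demoted = 1UL << (gigantic_order - demote_order);
	/* nr_demoted == 512, i.e. one 1G page becomes 512 2M pages */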

In order to track hugetlb pages that need to be returned to CMA, add the
hugetlb-specific flag HPageCma. The flag is set when a huge page is
allocated from CMA and is transferred to any pages created by demoting
that page. The non-gigantic huge page freeing code checks for the flag
and, if set, releases the page back to CMA instead of the page allocator.
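
The demotion path itself arrives later in this series; the fragment
below is only an illustrative sketch of the intended flag hand-off
(the demote loop and the gigantic_page/demoted/nr_demoted names are
hypothetical), using the HPageCma()/SetHPageCma() helpers added here:

	/*
	 * Hypothetical demote step: if the source gigantic page came from
	 * CMA, mark each resulting page so it is later freed with
	 * cma_release() rather than __free_pages().
	 */
	if (HPageCma(gigantic_page)) {
		for (i = 0; i < nr_demoted; i++)
			SetHPageCma(&demoted[i]);
	}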

This also requires a change to CMA reservations for gigantic pages.
Currently, the 'order_per_bit' is set to the gigantic page size.
However, if gigantic pages can be demoted, it needs to be set to the
order of the smallest huge page. At CMA reservation time we do not know
the smallest huge page size, so use HUGETLB_PAGE_ORDER. Also, prohibit
demotion to huge page sizes smaller than HUGETLB_PAGE_ORDER.
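
A minimal sketch of the resulting granularity, again assuming x86 with
4K base pages (illustrative numbers, not taken from this patch):

	/* reserve/track the CMA area at 2M rather than 1G granularity */
	unsigned long granule = PAGE_SIZE << HUGETLB_PAGE_ORDER;	/* 2M */
	unsigned long per_1g = (1UL << 30) / granule;			/* 512 */
	/*
	 * Each demoted 2M page then maps to its own granule and can be
	 * handed back to CMA individually; with 1G granularity only the
	 * original gigantic page could be released.
	 */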

Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
---
include/linux/hugetlb.h | 7 +++++++
mm/hugetlb.c | 46 ++++++++++++++++++++++++++++++-----------
2 files changed, 41 insertions(+), 12 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index d96e11ce986c..60aa7e9fe2b9 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -533,6 +533,11 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
  * HPG_freed - Set when page is on the free lists.
  *	Synchronization: hugetlb_lock held for examination and modification.
  * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
+ * HPG_cma - Set if huge page was directly allocated from CMA area via
+ *	cma_alloc. Initially set for gigantic page cma allocations, but can
+ *	be set in non-gigantic pages if gigantic pages are demoted.
+ *	Synchronization: Only accessed or modified when there is only one
+ *	reference to the page at allocation, free or demote time.
  */
 enum hugetlb_page_flags {
 	HPG_restore_reserve = 0,
@@ -540,6 +545,7 @@ enum hugetlb_page_flags {
 	HPG_temporary,
 	HPG_freed,
 	HPG_vmemmap_optimized,
+	HPG_cma,
 	__NR_HPAGEFLAGS,
 };
 
@@ -586,6 +592,7 @@ HPAGEFLAG(Migratable, migratable)
 HPAGEFLAG(Temporary, temporary)
 HPAGEFLAG(Freed, freed)
 HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
+HPAGEFLAG(Cma, cma)
 
 #ifdef CONFIG_HUGETLB_PAGE
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cebc6dc353f3..d034a223d5d7 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1262,6 +1262,7 @@ static void destroy_compound_gigantic_page(struct page *page,
 	atomic_set(compound_pincount_ptr(page), 0);
 
 	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
+		p->mapping = NULL;
 		clear_compound_head(p);
 		set_page_refcounted(p);
 	}
@@ -1273,16 +1274,12 @@
 
 static void free_gigantic_page(struct page *page, unsigned int order)
 {
-	/*
-	 * If the page isn't allocated using the cma allocator,
-	 * cma_release() returns false.
-	 */
 #ifdef CONFIG_CMA
-	if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
-		return;
+	if (HPageCma(page))
+		cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order);
+	else
 #endif
-
-	free_contig_range(page_to_pfn(page), 1 << order);
+		free_contig_range(page_to_pfn(page), 1 << order);
 }
 
 #ifdef CONFIG_CONTIG_ALLOC
@@ -1301,8 +1298,10 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 		if (hugetlb_cma[nid]) {
 			page = cma_alloc(hugetlb_cma[nid], nr_pages,
					huge_page_order(h), true);
-			if (page)
+			if (page) {
+				SetHPageCma(page);
 				return page;
+			}
 		}
 
 		if (!(gfp_mask & __GFP_THISNODE)) {
@@ -1312,8 +1311,10 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 
 				page = cma_alloc(hugetlb_cma[node], nr_pages,
						huge_page_order(h), true);
-				if (page)
+				if (page) {
+					SetHPageCma(page);
 					return page;
+				}
 			}
 		}
 	}
@@ -1470,6 +1471,20 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
 		destroy_compound_gigantic_page(page, huge_page_order(h));
 		free_gigantic_page(page, huge_page_order(h));
 	} else {
+#ifdef CONFIG_CMA
+		/*
+		 * Could be a page that was demoted from a gigantic page
+		 * which was allocated in a CMA area.
+		 */
+		if (HPageCma(page)) {
+			destroy_compound_gigantic_page(page,
+							huge_page_order(h));
+			if (!cma_release(hugetlb_cma[page_to_nid(page)], page,
+					 1 << huge_page_order(h)))
+				VM_BUG_ON_PAGE(1, page);
+			return;
+		}
+#endif
 		__free_pages(page, huge_page_order(h));
 	}
 }
@@ -2981,7 +2996,8 @@ static void __init hugetlb_init_hstates(void)
 		for_each_hstate(h2) {
 			if (h2 == h)
 				continue;
-			if (h2->order < h->order && h2->order > h->demote_order)
+			if (h2->order >= HUGETLB_PAGE_ORDER &&
+			    h2->order < h->order && h2->order > h->demote_order)
 				h->demote_order = h2->order;
 		}
 	}
@@ -6496,7 +6512,13 @@ void __init hugetlb_cma_reserve(int order)
 		size = round_up(size, PAGE_SIZE << order);
 
 		snprintf(name, sizeof(name), "hugetlb%d", nid);
-		res = cma_declare_contiguous_nid(0, size, 0, PAGE_SIZE << order,
+		/*
+		 * Note that 'order per bit' is based on smallest size that
+		 * may be returned to CMA allocator in the case of
+		 * huge page demotion.
+		 */
+		res = cma_declare_contiguous_nid(0, size, 0,
+						 PAGE_SIZE << HUGETLB_PAGE_ORDER,
 						 0, false, name,
 						 &hugetlb_cma[nid], nid);
 		if (res) {
--
2.31.1