Subject: [PATCH v1 03/12] mm: adapt deferred struct page init to new MAX_ORDER.
From: Zi Yan <ziy@nvidia.com>

deferred_init only initializes the first section of a zone and defers
the rest; the remainder of the zone is then initialized section by
section. When MAX_ORDER grows beyond a section size,
early_page_uninitialised() does not prevent pages beyond the first
section from being initialized, since it only checks the starting pfn
and assumes MAX_ORDER is smaller than a section size. In addition,
deferred_init_maxorder() uses MAX_ORDER_NR_PAGES as the initialization
unit, which can cause the initialized chunk of memory to overlap with
other initialization jobs.

For the first issue, make early_page_uninitialised() decrease the
order for non-deferred memory initialization when it would reach
beyond the first section. For the second issue, when adjusting the pfn
alignment in deferred_init_maxorder(), make sure the alignment is not
bigger than a section size.

Signed-off-by: Zi Yan <ziy@nvidia.com>
---
mm/internal.h | 2 +-
mm/memblock.c | 6 ++++--
mm/page_alloc.c | 28 ++++++++++++++++++++--------
3 files changed, 25 insertions(+), 11 deletions(-)
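
A note on the second fix: when MAX_ORDER_NR_PAGES exceeds
PAGES_PER_SECTION, aligning mo_pfn to MAX_ORDER_NR_PAGES can push the
chunk into the next job's section. The standalone sketch below
demonstrates the effect with made-up section and MAX_ORDER sizes;
ALIGN_UP() is a local stand-in for the kernel's ALIGN().

	#include <stdio.h>

	#define PAGES_PER_SECTION	(1UL << 15)	/* hypothetical section size */
	#define MAX_ORDER_NR_PAGES	(1UL << 16)	/* hypothetical MAX_ORDER > section */

	/* stand-in for the kernel's ALIGN(): round x up to power-of-two a */
	#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned long start_pfn = 1;	/* job covers section 0 only */

		/* old behaviour: align to MAX_ORDER_NR_PAGES, crossing into section 1 */
		unsigned long mo_old = ALIGN_UP(start_pfn + 1, MAX_ORDER_NR_PAGES);

		/* patched behaviour: cap the alignment at one section */
		unsigned long align = MAX_ORDER_NR_PAGES < PAGES_PER_SECTION ?
				      MAX_ORDER_NR_PAGES : PAGES_PER_SECTION;
		unsigned long mo_new = ALIGN_UP(start_pfn + 1, align);

		/* mo_old == 0x10000 overlaps the next job; mo_new == 0x8000 stays put */
		printf("old mo_pfn: %#lx, capped mo_pfn: %#lx\n", mo_old, mo_new);
		return 0;
	}

With these values the uncapped mo_pfn lands at the end of the next
section, overlapping the chunk that deferred_init_memmap_chunk() hands
to another job; capping the alignment at PAGES_PER_SECTION keeps each
job inside its own section.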

diff --git a/mm/internal.h b/mm/internal.h
index 22fb1e6e3541..d688c0320cda 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -361,7 +361,7 @@ extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
- unsigned int order);
+ unsigned int *order);
extern void __free_pages_core(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
diff --git a/mm/memblock.c b/mm/memblock.c
index acbc77367faf..b957c12a93e7 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1640,7 +1640,9 @@ void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
end = PFN_DOWN(base + size);

for (; cursor < end; cursor++) {
- memblock_free_pages(pfn_to_page(cursor), cursor, 0);
+ unsigned int order = 0;
+
+ memblock_free_pages(pfn_to_page(cursor), cursor, &order);
totalram_pages_inc();
}
}
@@ -2035,7 +2037,7 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
while (start + (1UL << order) > end)
order--;

- memblock_free_pages(pfn_to_page(start), start, order);
+ memblock_free_pages(pfn_to_page(start), start, &order);

start += (1UL << order);
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b3dd5248e63d..e3af87d89ebf 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -464,13 +464,19 @@ static inline bool deferred_pages_enabled(void)
}

/* Returns true if the struct page for the pfn is uninitialised */
-static inline bool __meminit early_page_uninitialised(unsigned long pfn)
+static inline bool __meminit early_page_uninitialised(unsigned long pfn, unsigned int *order)
{
int nid = early_pfn_to_nid(pfn);

if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
return true;

+ /* clamp the order so it does not exceed first_deferred_pfn */
+ if (order)
+ *order = min_t(unsigned int,
+ *order,
+ ilog2(NODE_DATA(nid)->first_deferred_pfn - pfn));
+
return false;
}

@@ -518,7 +524,7 @@ static inline bool deferred_pages_enabled(void)
return false;
}

-static inline bool early_page_uninitialised(unsigned long pfn)
+static inline bool early_page_uninitialised(unsigned long pfn, unsigned int *order)
{
return false;
}
@@ -1653,7 +1659,7 @@ static void __meminit init_reserved_page(unsigned long pfn)
pg_data_t *pgdat;
int nid, zid;

- if (!early_page_uninitialised(pfn))
+ if (!early_page_uninitialised(pfn, NULL))
return;

nid = early_pfn_to_nid(pfn);
@@ -1809,15 +1815,15 @@ int __meminit early_pfn_to_nid(unsigned long pfn)
#endif /* CONFIG_NUMA */

void __init memblock_free_pages(struct page *page, unsigned long pfn,
- unsigned int order)
+ unsigned int *order)
{
- if (early_page_uninitialised(pfn))
+ if (early_page_uninitialised(pfn, order))
return;
- if (!kmsan_memblock_free_pages(page, order)) {
+ if (!kmsan_memblock_free_pages(page, *order)) {
/* KMSAN will take care of these pages. */
return;
}
- __free_pages_core(page, order);
+ __free_pages_core(page, *order);
}

/*
@@ -2036,7 +2042,13 @@ static unsigned long __init
deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
unsigned long *end_pfn)
{
- unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
+ /*
+ * deferred_init_memmap_chunk() hands out jobs of at most
+ * PAGES_PER_SECTION pages. Do not align mo_pfn beyond that.
+ */
+ unsigned long align = min_t(unsigned long,
+ MAX_ORDER_NR_PAGES, PAGES_PER_SECTION);
+ unsigned long mo_pfn = ALIGN(*start_pfn + 1, align);
unsigned long spfn = *start_pfn, epfn = *end_pfn;
unsigned long nr_pages = 0;
u64 j = *i;
--
2.35.1