From: Doug Berger <opendmb@gmail.com>
Subject: [PATCH v2 5/9] mm/page_alloc: introduce init_reserved_pageblock()
Date: 28 Sep 2022
Most of the implementation of init_cma_reserved_pageblock() is
common to the initialization of any reserved pageblock for use
by the page allocator.

This commit breaks that functionality out into the new common
function init_reserved_pageblock() for use by code other than
CMA. The CMA-specific code is relocated from page_alloc to the
point where init_cma_reserved_pageblock() was invoked, and the
new function is used there instead. The error path is also
updated to use the new function so that it operates on whole
pageblocks rather than on individual pages.
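
For illustration only (this sketch is not part of the patch; the
helper name release_reserved_range() and the MIGRATE_MOVABLE choice
are made up, and it assumes <linux/page-isolation.h> is included as
is done for cma.c below), a non-CMA user would walk its reserved,
pageblock-aligned range one pageblock at a time, pick a migratetype,
and hand each block to the new function:

	/*
	 * Hypothetical non-CMA caller of init_reserved_pageblock():
	 * hand a pageblock-aligned, boot-time reserved range back to
	 * the buddy allocator.  The name and the MIGRATE_MOVABLE
	 * choice are illustrative, not part of this patch.
	 */
	static void __init release_reserved_range(unsigned long start_pfn,
						  unsigned long count)
	{
		unsigned long pfn;

		for (pfn = start_pfn; pfn < start_pfn + count;
		     pfn += pageblock_nr_pages) {
			struct page *page = pfn_to_page(pfn);

			/* caller picks the migratetype for the block */
			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
			/* clear PageReserved and free to the buddy */
			init_reserved_pageblock(page);
		}
	}

As in the CMA path below, the caller is now responsible for setting
the pageblock migratetype, since the common helper no longer assumes
MIGRATE_CMA.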

Signed-off-by: Doug Berger <opendmb@gmail.com>
---
 include/linux/gfp.h |  5 +----
 mm/cma.c            | 15 +++++++++++----
 mm/page_alloc.c     |  8 ++------
 3 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index f314be58fa77..71ed687be406 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -367,9 +367,6 @@ extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
 #endif
 void free_contig_range(unsigned long pfn, unsigned long nr_pages);
 
-#ifdef CONFIG_CMA
-/* CMA stuff */
-extern void init_cma_reserved_pageblock(struct page *page);
-#endif
+extern void init_reserved_pageblock(struct page *page);
 
 #endif /* __LINUX_GFP_H */
diff --git a/mm/cma.c b/mm/cma.c
index 4a978e09547a..6208a3e1cd9d 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -31,6 +31,7 @@
 #include <linux/highmem.h>
 #include <linux/io.h>
 #include <linux/kmemleak.h>
+#include <linux/page-isolation.h>
 #include <trace/events/cma.h>
 
 #include "cma.h"
@@ -116,8 +117,13 @@ static void __init cma_activate_area(struct cma *cma)
 	}
 
 	for (pfn = base_pfn; pfn < base_pfn + cma->count;
-	     pfn += pageblock_nr_pages)
-		init_cma_reserved_pageblock(pfn_to_page(pfn));
+	     pfn += pageblock_nr_pages) {
+		struct page *page = pfn_to_page(pfn);
+
+		set_pageblock_migratetype(page, MIGRATE_CMA);
+		init_reserved_pageblock(page);
+		page_zone(page)->cma_pages += pageblock_nr_pages;
+	}
 
 	spin_lock_init(&cma->lock);
 
@@ -133,8 +139,9 @@ static void __init cma_activate_area(struct cma *cma)
 out_error:
 	/* Expose all pages to the buddy, they are useless for CMA. */
 	if (!cma->reserve_pages_on_error) {
-		for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)
-			free_reserved_page(pfn_to_page(pfn));
+		for (pfn = base_pfn; pfn < base_pfn + cma->count;
+		     pfn += pageblock_nr_pages)
+			init_reserved_pageblock(pfn_to_page(pfn));
 	}
 	totalcma_pages -= cma->count;
 	cma->count = 0;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 81f97c5ed080..6d4470b0daba 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2302,9 +2302,8 @@ void __init page_alloc_init_late(void)
 		set_zone_contiguous(zone);
 }
 
-#ifdef CONFIG_CMA
-/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
-void __init init_cma_reserved_pageblock(struct page *page)
+/* Free whole pageblock */
+void __init init_reserved_pageblock(struct page *page)
 {
 	unsigned i = pageblock_nr_pages;
 	struct page *p = page;
@@ -2314,14 +2313,11 @@ void __init init_cma_reserved_pageblock(struct page *page)
 		set_page_count(p, 0);
 	} while (++p, --i);
 
-	set_pageblock_migratetype(page, MIGRATE_CMA);
 	set_page_refcounted(page);
 	__free_pages(page, pageblock_order);
 
 	adjust_managed_page_count(page, pageblock_nr_pages);
-	page_zone(page)->cma_pages += pageblock_nr_pages;
 }
-#endif
 
 /*
  * The order of subdivision here is critical for the IO subsystem.
--
2.25.1