From: xinhuanpeng <xinhuanpeng@xiaomi.com>
Subject: [PATCH] mm: add a new emergency page migratetype
Date: 5 Jun 2022

Add a new page migratetype, MIGRATE_EMERGENCY, which reserves a small
amount of memory per node to rescue non-costly (order <=
PAGE_ALLOC_COSTLY_ORDER), non-__GFP_NOWARN page allocations that would
otherwise fail.
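
For example (an illustrative sketch, not part of this patch):

    /* order-2 (non-costly), no __GFP_NOWARN: eligible for rescue */
    struct page *p = alloc_pages(GFP_KERNEL, 2);

    /* __GFP_NOWARN set: the caller tolerates failure, no rescue */
    struct page *q = alloc_pages(GFP_KERNEL | __GFP_NOWARN, 2);

Only after direct reclaim and the usual slowpath retries have all
failed does the first allocation dip into the MIGRATE_EMERGENCY
reserve; the second fails as before.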

Signed-off-by: xinhuanpeng <xinhuanpeng@xiaomi.com>
---
include/linux/gfp.h | 4 ++
include/linux/mmzone.h | 16 ++++++
init/main.c | 3 ++
mm/Kconfig | 11 +++++
mm/page_alloc.c | 107 +++++++++++++++++++++++++++++++++++++++++
5 files changed, 141 insertions(+)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 2d2ccae933c2..fcc758a34e11 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -716,4 +716,8 @@ void free_contig_range(unsigned long pfn, unsigned long nr_pages);
extern void init_cma_reserved_pageblock(struct page *page);
#endif

+#ifdef CONFIG_EMERGENCY_MEMORY
+extern void emergency_mm_init(void);
+#endif
+
#endif /* __LINUX_GFP_H */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index aab70355d64f..5d3962d7cdaf 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -60,6 +60,13 @@ enum migratetype {
#endif
+#ifdef CONFIG_EMERGENCY_MEMORY
+ /*
+ * MIGRATE_EMERGENCY is a small reserve used to rescue non-costly,
+ * non-__GFP_NOWARN page allocations that would otherwise fail.
+ */
+ MIGRATE_EMERGENCY,
+#endif
#ifdef CONFIG_MEMORY_ISOLATION
MIGRATE_ISOLATE, /* can't allocate from here */
#endif
MIGRATE_TYPES
};
@@ -413,6 +420,11 @@ struct per_cpu_nodestat {

#endif /* !__GENERATING_BOUNDS.H */

+#ifdef CONFIG_EMERGENCY_MEMORY
+/* Max MIGRATE_EMERGENCY pages per node (2048 is 8MiB with 4KiB pages) */
+#define MAX_MANAGED_EMERGENCY 2048
+#endif
+
enum zone_type {
/*
* ZONE_DMA and ZONE_DMA32 are used when there are peripherals not able
@@ -517,6 +529,10 @@ struct zone {

unsigned long nr_reserved_highatomic;

+#ifdef CONFIG_EMERGENCY_MEMORY
+ /* Number of pages currently reserved as MIGRATE_EMERGENCY in this zone */
+ unsigned long nr_reserved_emergency;
+#endif
/*
* We don't know if the memory that we're going to allocate will be
* freeable or/and it will be released eventually, so to avoid totally
diff --git a/init/main.c b/init/main.c
index 02eb533018f6..7ff2e7761198 100644
--- a/init/main.c
+++ b/init/main.c
@@ -852,6 +852,9 @@ static void __init mm_init(void)
init_espfix_bsp();
/* Should be run after espfix64 is set up. */
pti_init();
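+ /*
+ * Carve out the MIGRATE_EMERGENCY pageblocks right after the core
+ * mm is up, while large contiguous blocks are still plentiful.
+ */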
+#ifdef CONFIG_EMERGENCY_MEMORY
+ emergency_mm_init();
+#endif
}

#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
diff --git a/mm/Kconfig b/mm/Kconfig
index 169e64192e48..a13559cf13cf 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -1132,4 +1132,15 @@ config PTE_MARKER_UFFD_WP

source "mm/damon/Kconfig"

+config EMERGENCY_MEMORY
+ bool "Enable emergency memory"
+ default n
+ help
+ Enable the MIGRATE_EMERGENCY migratetype, which reserves a small
+ amount of memory per node (up to 2048 pages, 8MiB with 4KiB pages)
+ to rescue non-costly, non-__GFP_NOWARN page allocations that would
+ otherwise fail.
+
+ If unsure, say N.
+
endmenu
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e008a3df0485..a149fb7fc2f8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -324,6 +324,9 @@ const char * const migratetype_names[MIGRATE_TYPES] = {
#ifdef CONFIG_CMA
"CMA",
#endif
+#ifdef CONFIG_EMERGENCY_MEMORY
+ "Emergency",
+#endif
#ifdef CONFIG_MEMORY_ISOLATION
"Isolate",
#endif
@@ -2908,6 +2911,43 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
return false;
}

+#ifdef CONFIG_EMERGENCY_MEMORY
+/* Set up the MIGRATE_EMERGENCY reserve on each online node. */
+void __init emergency_mm_init(void)
+{
+ /*
+ * Only reserve whole MAX_ORDER blocks: if pageblock_order were less than
+ * MAX_ORDER - 1, the buddy allocator could merge a MIGRATE_EMERGENCY
+ * pageblock with a neighbour of another migratetype, e.g. MIGRATE_MOVABLE.
+ */
+ if (pageblock_order == MAX_ORDER - 1) {
+ int nid;
+
+ pr_info("Setting up MIGRATE_EMERGENCY reserved memory\n");
+ for_each_online_node(nid) {
+ pg_data_t *pgdat = NODE_DATA(nid);
+ struct zone *zone = &pgdat->node_zones[ZONE_NORMAL];
+
+ while (zone->nr_reserved_emergency < MAX_MANAGED_EMERGENCY) {
+ struct page *page = alloc_pages_node(nid, GFP_KERNEL | __GFP_MOVABLE, pageblock_order);
+
+ if (!page) {
+ pr_warn("node %d: MIGRATE_EMERGENCY reservation stopped at %lu pages\n",
+ nid, zone->nr_reserved_emergency);
+ break;
+ }
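+ /*
+ * Retag the whole pageblock as MIGRATE_EMERGENCY and free it
+ * back, so the buddy allocator keeps these pages on the
+ * MIGRATE_EMERGENCY free list where only the rescue path can
+ * find them.
+ */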
+ set_pageblock_migratetype(page, MIGRATE_EMERGENCY);
+ __free_pages(page, pageblock_order);
+ zone->nr_reserved_emergency += pageblock_nr_pages;
+ }
+ pr_info("node id %d MIGRATE_EMERGENCY reserved %d pages.",
+ nid, zone->nr_reserved_emergency);
+
+ }
+ }
+}
+#endif
+
/*
* Try finding a free buddy page on the fallback list and put it on the free
* list of requested migratetype, possibly along with other pages from the same
@@ -3861,6 +3901,14 @@ static inline long __zone_watermark_unusable_free(struct zone *z,
const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
long unusable_free = (1 << order) - 1;

+#ifdef CONFIG_EMERGENCY_MEMORY
+ /*
+ * The MIGRATE_EMERGENCY reserve is unusable by ordinary allocations,
+ * so hide it from watermark checks, like the highatomic reserve below.
+ */
+ unusable_free += z->nr_reserved_emergency;
+#endif
+
/*
* If the caller does not have rights to ALLOC_HARDER then subtract
* the high-atomic reserves. This will over-estimate the size of the
@@ -4073,6 +4121,50 @@ static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
return alloc_flags;
}

+#ifdef CONFIG_EMERGENCY_MEMORY
+/*
+ * Allocate from the MIGRATE_EMERGENCY reserve in the preferred node's
+ * ZONE_NORMAL, as a last resort before an allocation failure is reported.
+ */
+static struct page *get_emergency_page_from_freelist(gfp_t gfp_mask, unsigned int order,
+ int alloc_flags, const struct alloc_context *ac, int migratetype)
+{
+ struct page *page = NULL;
+
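+ /*
+ * The reserve lives only in each node's ZONE_NORMAL (see
+ * emergency_mm_init()), so requests that cannot be satisfied from
+ * ZONE_NORMAL are not eligible for rescue.
+ */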
+ if (ac->highest_zoneidx >= ZONE_NORMAL) {
+ struct zoneref *z = ac->preferred_zoneref;
+ struct pglist_data *pgdat = NODE_DATA(zonelist_node_idx(z));
+ struct zone *zone = &pgdat->node_zones[ZONE_NORMAL];
+ unsigned long flags;
+
+ if (cpusets_enabled() &&
+ (alloc_flags & ALLOC_CPUSET) &&
+ !__cpuset_zone_allowed(zone, gfp_mask))
+ return NULL;
+
+ spin_lock_irqsave(&zone->lock, flags);
+ do {
+ /* Retry until a page passes the allocation-time sanity checks */
+ page = __rmqueue_smallest(zone, order, migratetype);
+ } while (page && check_new_pages(page, order));
+
+ if (page)
+ __mod_zone_freepage_state(zone, -(1 << order),
+ get_pcppage_migratetype(page));
+ spin_unlock_irqrestore(&zone->lock, flags);
+
+ if (page) {
+ __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
+ zone_statistics(z->zone, zone, 1);
+ prep_new_page(page, order, gfp_mask, alloc_flags);
+ }
+ }
+
+ return page;
+}
+#endif
+
/*
* get_page_from_freelist goes through the zonelist trying to allocate
* a page.
@@ -5169,6 +5261,18 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
goto retry;
}
fail:
+#ifdef CONFIG_EMERGENCY_MEMORY
+ if (!(gfp_mask & __GFP_NOWARN) && !costly_order) {
+ /*
+ * A non-costly, non-NOWARN allocation is about to fail: as a last
+ * resort, dip into the MIGRATE_EMERGENCY reserve.
+ */
+ page = get_emergency_page_from_freelist(gfp_mask, order, alloc_flags, ac,
+ MIGRATE_EMERGENCY);
+ if (page)
+ goto got_pg;
+ }
+#endif
warn_alloc(gfp_mask, ac->nodemask,
"page allocation failure: order:%u", order);
got_pg:
@@ -5905,6 +6009,9 @@ static void show_migration_types(unsigned char type)
#ifdef CONFIG_CMA
[MIGRATE_CMA] = 'C',
#endif
+#ifdef CONFIG_EMERGENCY_MEMORY
+ [MIGRATE_EMERGENCY] = 'G',
+#endif
#ifdef CONFIG_MEMORY_ISOLATION
[MIGRATE_ISOLATE] = 'I',
#endif
--
2.36.1