Subject: [PATCH v2 2/2] mm: migrate: Allocate the node_demotion structure dynamically

In the worst case (MAX_NUMNODES=1024), the static node_demotion[] array
consumes 32KB: 1024 entries of 32 bytes each (a 2-byte counter plus 15
2-byte target node IDs), which is too large. Instead, allocate the
node_demotion structures dynamically at initialization time, and also
allocate each per-node target demotion array dynamically so that its
size can be chosen according to MAX_NUMNODES.
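
For readers unfamiliar with the pattern, below is a minimal standalone
sketch (plain userspace C, not kernel code) of the flexible-array-member
allocation this patch switches to; malloc() stands in for kmalloc(), the
explicit size arithmetic stands in for struct_size(), and the value 15 is
only the DEFAULT_DEMOTION_TARGET_NODES default, used here for illustration:

#include <stdio.h>
#include <stdlib.h>

#define NUMA_NO_NODE (-1)

struct demotion_nodes {
    unsigned short nr;
    short nodes[];    /* flexible array member, sized at allocation time */
};

int main(void)
{
    unsigned short target_nodes_max = 15;  /* e.g. min(15, MAX_NUMNODES - 1) */
    struct demotion_nodes *nd;

    /* userspace stand-in for kmalloc(struct_size(nd, nodes, target_nodes_max), ...) */
    nd = malloc(sizeof(*nd) + target_nodes_max * sizeof(nd->nodes[0]));
    if (!nd)
        return 1;

    /* initialize the way __disable_all_migrate_targets() does */
    nd->nr = 0;
    for (int i = 0; i < target_nodes_max; i++)
        nd->nodes[i] = NUMA_NO_NODE;

    printf("per-node allocation: %zu bytes for up to %d targets\n",
           sizeof(*nd) + target_nodes_max * sizeof(nd->nodes[0]),
           target_nodes_max);

    free(nd);
    return 0;
}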

Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
mm/migrate.c | 38 +++++++++++++++++++++++++++++---------
1 file changed, 29 insertions(+), 9 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index 126e9e6..0145b38 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1152,10 +1152,11 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
#define DEFAULT_DEMOTION_TARGET_NODES 15
struct demotion_nodes {
unsigned short nr;
- short nodes[DEFAULT_DEMOTION_TARGET_NODES];
+ short nodes[];
};

-static struct demotion_nodes node_demotion[MAX_NUMNODES] __read_mostly;
+static struct demotion_nodes *node_demotion[MAX_NUMNODES] __read_mostly;
+static unsigned short target_nodes_max;

/**
* next_demotion_node() - Get the next node in the demotion path
@@ -1168,10 +1169,13 @@ struct demotion_nodes {
*/
int next_demotion_node(int node)
{
- struct demotion_nodes *nd = &node_demotion[node];
+ struct demotion_nodes *nd = node_demotion[node];
unsigned short target_nr, index;
int target;

+ if (!nd)
+ return NUMA_NO_NODE;
+
/*
* node_demotion[] is updated without excluding this
* function from running. RCU doesn't provide any
@@ -3014,9 +3018,9 @@ static void __disable_all_migrate_targets(void)
int node, i;

for_each_online_node(node) {
- node_demotion[node].nr = 0;
- for (i = 0; i < DEFAULT_DEMOTION_TARGET_NODES; i++)
- node_demotion[node].nodes[i] = NUMA_NO_NODE;
+ node_demotion[node]->nr = 0;
+ for (i = 0; i < target_nodes_max; i++)
+ node_demotion[node]->nodes[i] = NUMA_NO_NODE;
}
}

@@ -3048,7 +3052,10 @@ static int establish_migrate_target(int node, nodemask_t *used,
int best_distance)
{
int migration_target, index, val;
- struct demotion_nodes *nd = &node_demotion[node];
+ struct demotion_nodes *nd = node_demotion[node];
+
+ if (WARN_ONCE(!nd, "Can not set up migration path for node:%d\n", node))
+ return NUMA_NO_NODE;

migration_target = find_next_best_node(node, used);
if (migration_target == NUMA_NO_NODE)
@@ -3067,7 +3074,7 @@ static int establish_migrate_target(int node, nodemask_t *used,
}

index = nd->nr;
- if (WARN_ONCE(index >= DEFAULT_DEMOTION_TARGET_NODES,
+ if (WARN_ONCE(index >= target_nodes_max,
"Exceeds maximum demotion target nodes\n"))
return NUMA_NO_NODE;

@@ -3256,7 +3263,20 @@ static int migration_offline_cpu(unsigned int cpu)

static int __init migrate_on_reclaim_init(void)
{
- int ret;
+ struct demotion_nodes *nd;
+ int ret, node;
+
+ /* Keep the maximum number of target demotion nodes below MAX_NUMNODES. */
+ target_nodes_max = min_t(unsigned short, DEFAULT_DEMOTION_TARGET_NODES,
+ MAX_NUMNODES - 1);
+ for_each_node(node) {
+ nd = kmalloc(struct_size(nd, nodes, target_nodes_max),
+ GFP_KERNEL);
+ if (!nd)
+ continue;
+
+ node_demotion[node] = nd;
+ }

ret = cpuhp_setup_state_nocalls(CPUHP_MM_DEMOTION_DEAD, "mm/demotion:offline",
NULL, migration_offline_cpu);
--
1.8.3.1