From: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Subject: [PATCH v7 08/12] mm/demotion: Add pg_data_t member to track node memory tier details
Date: 22 Jun 2022
Also update different helpers to use NODE_DATA()->memtier. Since the
memory tier of a NUMA node can change when the node is reassigned to a
different memory tier, accessing NODE_DATA()->memtier must happen under
an RCU read lock or with memory_tier_lock held.
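
For reference, a lockless reader is expected to follow the pattern below
(a minimal sketch; the caller shown here is hypothetical, but
node_get_memory_tier()/node_put_memory_tier() are the helpers added by
this patch):

	/*
	 * Hypothetical caller: look up the memory tier id of a node
	 * without holding memory_tier_lock. node_get_memory_tier()
	 * takes the RCU read lock internally and pins the tier with a
	 * device reference, so the pointer remains valid after return.
	 */
	static int example_node_tier_id(int node)
	{
		struct memory_tier *memtier;
		int tier_id = -1;

		memtier = node_get_memory_tier(node);
		if (memtier) {
			tier_id = memtier->dev.id;
			node_put_memory_tier(memtier);
		}
		return tier_id;
	}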

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
 include/linux/memory-tiers.h |  11 ++++
 include/linux/mmzone.h       |   3 +
 mm/memory-tiers.c            | 104 +++++++++++++++++++++++++----------
3 files changed, 89 insertions(+), 29 deletions(-)

diff --git a/include/linux/memory-tiers.h b/include/linux/memory-tiers.h
index 453f6e5d357c..705b63ee31d5 100644
--- a/include/linux/memory-tiers.h
+++ b/include/linux/memory-tiers.h
@@ -6,6 +6,9 @@

#ifdef CONFIG_NUMA

+#include <linux/device.h>
+#include <linux/nodemask.h>
+
#define MEMORY_TIER_HBM_GPU 300
#define MEMORY_TIER_DRAM 200
#define MEMORY_TIER_PMEM 100
@@ -13,6 +16,12 @@
#define DEFAULT_MEMORY_TIER MEMORY_TIER_DRAM
#define MAX_MEMORY_TIER_ID 400

+struct memory_tier {
+ struct list_head list;
+ struct device dev;
+ nodemask_t nodelist;
+};
+
extern bool numa_demotion_enabled;
int node_create_and_set_memory_tier(int node, int tier);
#ifdef CONFIG_MIGRATION
@@ -25,6 +34,8 @@ static inline int next_demotion_node(int node)
#endif
int node_get_memory_tier_id(int node);
int node_update_memory_tier(int node, int tier);
+struct memory_tier *node_get_memory_tier(int node);
+void node_put_memory_tier(struct memory_tier *memtier);

#else

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index aab70355d64f..1f846cb723fd 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -928,6 +928,9 @@ typedef struct pglist_data {
/* Per-node vmstats */
struct per_cpu_nodestat __percpu *per_cpu_nodestats;
atomic_long_t vm_stat[NR_VM_NODE_STAT_ITEMS];
+#ifdef CONFIG_NUMA
+ struct memory_tier *memtier;
+#endif
} pg_data_t;

#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
diff --git a/mm/memory-tiers.c b/mm/memory-tiers.c
index b7cb368cb9c0..6a2476faf13a 100644
--- a/mm/memory-tiers.c
+++ b/mm/memory-tiers.c
@@ -1,22 +1,15 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/nodemask.h>
#include <linux/slab.h>
#include <linux/lockdep.h>
#include <linux/moduleparam.h>
#include <linux/memory.h>
#include <linux/random.h>
+#include <linux/rcupdate.h>
#include <linux/memory-tiers.h>

#include "internal.h"

-struct memory_tier {
- struct list_head list;
- struct device dev;
- nodemask_t nodelist;
-};
-
struct demotion_nodes {
nodemask_t preferred;
};
@@ -120,7 +113,7 @@ static void memory_tier_device_release(struct device *dev)
{
struct memory_tier *tier = to_memory_tier(dev);

- kfree(tier);
+ kfree_rcu(tier);
}

static void insert_memory_tier(struct memory_tier *memtier)
@@ -176,13 +169,18 @@ static void unregister_memory_tier(struct memory_tier *memtier)

static struct memory_tier *__node_get_memory_tier(int node)
{
- struct memory_tier *memtier;
+ pg_data_t *pgdat;

- list_for_each_entry(memtier, &memory_tiers, list) {
- if (node_isset(node, memtier->nodelist))
- return memtier;
- }
- return NULL;
+ pgdat = NODE_DATA(node);
+ if (!pgdat)
+ return NULL;
+ /*
+ * Since we hold memory_tier_lock, we can avoid
+ * RCU read locks when accessing the details. No
+ * parallel updates are possible here.
+ */
+ return rcu_dereference_check(pgdat->memtier,
+ lockdep_is_held(&memory_tier_lock));
}

static struct memory_tier *__get_memory_tier_from_id(int id)
@@ -196,6 +194,33 @@ static struct memory_tier *__get_memory_tier_from_id(int id)
return NULL;
}

+/*
+ * Called with memory_tier_lock held. Hence the device references cannot
+ * be dropped during this function.
+ */
+static void memtier_node_set(int node, struct memory_tier *memtier)
+{
+ pg_data_t *pgdat;
+ struct memory_tier *current_memtier;
+
+ pgdat = NODE_DATA(node);
+ if (!pgdat)
+ return;
+ /*
+ * Make sure we mark the memtier NULL before we assign the new memory tier
+ * to the NUMA node. This makes sure that anybody looking at NODE_DATA
+ * finds a NULL memtier or the one which is still valid.
+ */
+ current_memtier = rcu_dereference_check(pgdat->memtier,
+ lockdep_is_held(&memory_tier_lock));
+ rcu_assign_pointer(pgdat->memtier, NULL);
+ if (current_memtier)
+ node_clear(node, current_memtier->nodelist);
+ synchronize_rcu();
+ node_set(node, memtier->nodelist);
+ rcu_assign_pointer(pgdat->memtier, memtier);
+}
+
static int __node_create_and_set_memory_tier(int node, int tier)
{
int ret = 0;
@@ -209,7 +234,7 @@ static int __node_create_and_set_memory_tier(int node, int tier)
goto out;
}
}
- node_set(node, memtier->nodelist);
+ memtier_node_set(node, memtier);
out:
return ret;
}
@@ -231,14 +256,7 @@ int node_create_and_set_memory_tier(int node, int tier)
if (current_tier->dev.id == tier)
goto out;

- node_clear(node, current_tier->nodelist);
-
ret = __node_create_and_set_memory_tier(node, tier);
- if (ret) {
- /* reset it back to older tier */
- node_set(node, current_tier->nodelist);
- goto out;
- }
if (nodes_empty(current_tier->nodelist))
unregister_memory_tier(current_tier);

@@ -260,7 +278,7 @@ static int __node_set_memory_tier(int node, int tier)
ret = -EINVAL;
goto out;
}
- node_set(node, memtier->nodelist);
+ memtier_node_set(node, memtier);
out:
return ret;
}
@@ -316,10 +334,7 @@ int node_update_memory_tier(int node, int tier)
if (!current_tier || current_tier->dev.id == tier)
goto out;

- node_clear(node, current_tier->nodelist);
-
ret = __node_create_and_set_memory_tier(node, tier);
-
if (nodes_empty(current_tier->nodelist))
unregister_memory_tier(current_tier);

@@ -330,6 +345,34 @@ int node_update_memory_tier(int node, int tier)
return ret;
}

+/*
+ * lockless access to memory tier of a NUMA node.
+ */
+struct memory_tier *node_get_memory_tier(int node)
+{
+ pg_data_t *pgdat;
+ struct memory_tier *memtier;
+
+ pgdat = NODE_DATA(node);
+ if (!pgdat)
+ return NULL;
+
+ rcu_read_lock();
+ memtier = rcu_dereference(pgdat->memtier);
+ if (!memtier)
+ goto out;
+
+ get_device(&memtier->dev);
+out:
+ rcu_read_unlock();
+ return memtier;
+}
+
+void node_put_memory_tier(struct memory_tier *memtier)
+{
+ put_device(&memtier->dev);
+}
+
#ifdef CONFIG_MIGRATION
/**
* next_demotion_node() - Get the next node in the demotion path
@@ -546,7 +589,7 @@ static const struct attribute_group *memory_tier_attr_groups[] = {

static int __init memory_tier_init(void)
{
- int ret;
+ int ret, node;
struct memory_tier *memtier;

ret = subsys_system_register(&memory_tier_subsys, memory_tier_attr_groups);
@@ -567,7 +610,10 @@ static int __init memory_tier_init(void)
__func__, PTR_ERR(memtier));

/* CPU only nodes are not part of memory tiers. */
- memtier->nodelist = node_states[N_MEMORY];
+ for_each_node_state(node, N_MEMORY) {
+ rcu_assign_pointer(NODE_DATA(node)->memtier, memtier);
+ node_set(node, memtier->nodelist);
+ }
mutex_unlock(&memory_tier_lock);

migrate_on_reclaim_init();
--
2.36.1