Subject: [PATCH v6 11/13] mm/demotion: Update node_is_toptier to work with memory tiers
With memory tier support we can have memory-only NUMA nodes in the
top tier, and promotion-tracking NUMA faults should be avoided on
pages from such top-tier nodes. Update node_is_toptier() to work
with memory tiers. By default, all NUMA nodes are top-tier nodes.
Once lower memory tiers are added, every memory tier at or above the
lowest memory tier that contains CPU NUMA nodes is considered a top
memory tier.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
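An illustrative note (not part of the patch): the userspace sketch
below models the top-tier rule described above for a hypothetical
layout of three memory tiers with ranks 300 > 200 > 100, where only
the rank-200 tier contains CPU nodes. The struct, tier names and
ranks are made up for the example; only the rank selection and the
"rank >= top_tier_rank" comparison mirror the kernel logic.

#include <stdbool.h>
#include <stdio.h>

struct tier {
	int rank;
	bool has_cpu;
};

int main(void)
{
	/* Listed from highest rank to lowest, like the memory_tiers list. */
	struct tier tiers[] = {
		{ .rank = 300, .has_cpu = false },	/* memory only, e.g. HBM */
		{ .rank = 200, .has_cpu = true },	/* DRAM nodes with CPUs */
		{ .rank = 100, .has_cpu = false },	/* memory only, e.g. PMEM */
	};
	int n = sizeof(tiers) / sizeof(tiers[0]);
	int top_tier_rank = 0;
	int i;

	/*
	 * Walk from the lowest tier upwards and stop at the first tier
	 * that contains CPUs, as the list_for_each_entry_reverse() loop
	 * in establish_migration_targets() does.
	 */
	for (i = n - 1; i >= 0; i--) {
		if (tiers[i].has_cpu) {
			top_tier_rank = tiers[i].rank;
			break;
		}
	}

	/* node_is_toptier(): a tier is "top" iff rank >= top_tier_rank. */
	for (i = 0; i < n; i++)
		printf("rank %d: %s\n", tiers[i].rank,
		       tiers[i].rank >= top_tier_rank ?
		       "toptier, promotion faults skipped" : "lower tier");
	return 0;
}

With this layout the rank-300 and rank-200 tiers are both treated as
top tiers, so promotion-tracking faults are skipped for their pages,
while pages on the rank-100 tier remain promotion candidates.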
 include/linux/memory-tiers.h |  6 +++++
 include/linux/node.h         |  5 ----
 mm/huge_memory.c             |  1 +
 mm/memory-tiers.c            | 44 ++++++++++++++++++++++++++++++++++--
 mm/migrate.c                 |  1 +
 mm/mprotect.c                |  1 +
 6 files changed, 51 insertions(+), 7 deletions(-)

diff --git a/include/linux/memory-tiers.h b/include/linux/memory-tiers.h
index 47841379553c..de4098f6d5d5 100644
--- a/include/linux/memory-tiers.h
+++ b/include/linux/memory-tiers.h
@@ -39,6 +39,7 @@ int node_reset_memory_tier(int node, int tier);
 struct memory_tier *node_get_memory_tier(int node);
 void node_put_memory_tier(struct memory_tier *memtier);
 void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets);
+bool node_is_toptier(int node);
 
 #else

@@ -52,6 +53,11 @@ static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *target
 {
 	*targets = NODE_MASK_NONE;
 }
+
+static inline bool node_is_toptier(int node)
+{
+	return true;
+}
 #endif /* CONFIG_TIERED_MEMORY */
 
 #endif
diff --git a/include/linux/node.h b/include/linux/node.h
index 40d641a8bfb0..9ec680dd607f 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -185,9 +185,4 @@ static inline void register_hugetlbfs_with_node(node_registration_func_t reg,

 #define to_node(device) container_of(device, struct node, dev)
 
-static inline bool node_is_toptier(int node)
-{
-	return node_state(node, N_CPU);
-}
-
 #endif /* _LINUX_NODE_H_ */
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a77c78a2b6b5..294873d4be2b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -35,6 +35,7 @@
 #include <linux/numa.h>
 #include <linux/page_owner.h>
 #include <linux/sched/sysctl.h>
+#include <linux/memory-tiers.h>
 
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
diff --git a/mm/memory-tiers.c b/mm/memory-tiers.c
index b2ed16dcfb03..0dae3114e22c 100644
--- a/mm/memory-tiers.c
+++ b/mm/memory-tiers.c
@@ -17,7 +17,7 @@ struct demotion_nodes {
 static void establish_migration_targets(void);
 static DEFINE_MUTEX(memory_tier_lock);
 static LIST_HEAD(memory_tiers);
-
+static int top_tier_rank;
 /*
  * node_demotion[] examples:
  *
@@ -126,7 +126,7 @@ static void memory_tier_device_release(struct device *dev)
 	if (tier->dev.id >= MAX_STATIC_MEMORY_TIERS)
 		ida_free(&memtier_dev_id, tier->dev.id);
 
-	kfree(tier);
+	kfree_rcu(tier);
 }
 
 /*
@@ -443,6 +443,31 @@ void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)
 	rcu_read_unlock();
 }
 
+bool node_is_toptier(int node)
+{
+	bool toptier;
+	pg_data_t *pgdat;
+	struct memory_tier *memtier;
+
+	pgdat = NODE_DATA(node);
+	if (!pgdat)
+		return false;
+
+	rcu_read_lock();
+	memtier = rcu_dereference(pgdat->memtier);
+	if (!memtier) {
+		toptier = true;
+		goto out;
+	}
+	if (memtier->rank >= top_tier_rank)
+		toptier = true;
+	else
+		toptier = false;
+out:
+	rcu_read_unlock();
+	return toptier;
+}
+
 /**
  * next_demotion_node() - Get the next node in the demotion path
  * @node: The starting node to lookup the next node
@@ -592,6 +617,21 @@ static void establish_migration_targets(void)
 		} while (1);
 	}
 build_lower_tier_mask:
+	/*
+	 * Promotion is allowed from a memory tier to a higher
+	 * memory tier only if the source memory tier doesn't
+	 * include compute. We want to skip promotion from a memory
+	 * tier if any node that is part of that memory tier has
+	 * CPUs. Once we detect such a memory tier, we consider it
+	 * the top tier, from which promotion is not allowed.
+	 */
+	list_for_each_entry_reverse(memtier, &memory_tiers, list) {
+		nodes_and(used, node_states[N_CPU], memtier->nodelist);
+		if (!nodes_empty(used)) {
+			top_tier_rank = memtier->rank;
+			break;
+		}
+	}
 	/*
 	 * Now build the lower_tier mask for each node collecting node mask from
 	 * all memory tier below it. This allows us to fallback demotion page
diff --git a/mm/migrate.c b/mm/migrate.c
index 0b554625a219..78615c48fc0f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -50,6 +50,7 @@
 #include <linux/memory.h>
 #include <linux/random.h>
 #include <linux/sched/sysctl.h>
+#include <linux/memory-tiers.h>
 
 #include <asm/tlbflush.h>

diff --git a/mm/mprotect.c b/mm/mprotect.c
index ba5592655ee3..92a2fc0fa88b 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -31,6 +31,7 @@
 #include <linux/pgtable.h>
 #include <linux/sched/sysctl.h>
 #include <linux/userfaultfd_k.h>
+#include <linux/memory-tiers.h>
 #include <asm/cacheflush.h>
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
--
2.36.1