From: Jarkko Sakkinen <jarkko@kernel.org>
Subject: [PATCH RFC] x86/sgx: Add trivial NUMA allocation
Create a pointer array for each NUMA node with references to the EPC
sections contained in that node. Use this in __sgx_alloc_epc_page() to
try the current NUMA node first, before falling back to the other
nodes.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
---
arch/x86/kernel/cpu/sgx/main.c | 66 +++++++++++++++++++++++++++++++---
1 file changed, 61 insertions(+), 5 deletions(-)
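
Not part of the patch: below is a minimal stand-alone C sketch
(hypothetical toy_* names, plain user space, no kernel APIs) of the
allocation order the patch introduces, i.e. EPC sections grouped per
NUMA node, the local node searched first, and the remaining nodes used
only as fallback.

/*
 * Stand-alone sketch (not kernel code): sections are grouped per node,
 * the caller's node is searched first, other nodes are the fallback.
 */
#include <stdio.h>

#define TOY_MAX_NODES		4
#define TOY_MAX_SECTIONS	8

struct toy_section {
	int id;
	int free_pages;
};

struct toy_node {
	struct toy_section *sections[TOY_MAX_SECTIONS];
	int nr_sections;
};

static struct toy_node toy_nodes[TOY_MAX_NODES];

/* Walk one node's sections; same shape as __sgx_alloc_epc_page_from_node(). */
static struct toy_section *toy_alloc_from_node(int nid)
{
	struct toy_node *node = &toy_nodes[nid];

	for (int i = 0; i < node->nr_sections; i++) {
		if (node->sections[i]->free_pages) {
			node->sections[i]->free_pages--;
			return node->sections[i];
		}
	}

	return NULL;
}

/* Local node first, then the others; same shape as __sgx_alloc_epc_page(). */
static struct toy_section *toy_alloc(int current_nid, int nr_nodes)
{
	struct toy_section *section = toy_alloc_from_node(current_nid);

	if (section)
		return section;

	for (int nid = 0; nid < nr_nodes; nid++) {
		if (nid == current_nid)
			continue;

		section = toy_alloc_from_node(nid);
		if (section)
			return section;
	}

	return NULL;
}

int main(void)
{
	static struct toy_section s0 = { .id = 0, .free_pages = 0 };
	static struct toy_section s1 = { .id = 1, .free_pages = 1 };

	/* Node 0 holds an exhausted section, node 1 a section with space. */
	toy_nodes[0].sections[toy_nodes[0].nr_sections++] = &s0;
	toy_nodes[1].sections[toy_nodes[1].nr_sections++] = &s1;

	/* Caller runs on node 0; the allocation falls back to node 1. */
	struct toy_section *section = toy_alloc(0, 2);

	printf("allocated from section %d\n", section ? section->id : -1);
	return 0;
}

Compiled on its own, the sketch prints "allocated from section 1":
node 0 only holds an exhausted section, so the search falls back to
node 1.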

diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index c519fc5f6948..0da510763c47 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -13,6 +13,13 @@
 #include "encl.h"
 #include "encls.h"
 
+struct sgx_numa_node {
+	struct sgx_epc_section *sections[SGX_MAX_EPC_SECTIONS];
+	int nr_sections;
+};
+
+static struct sgx_numa_node sgx_numa_nodes[MAX_NUMNODES];
+static int sgx_nr_numa_nodes;
 struct sgx_epc_section sgx_epc_sections[SGX_MAX_EPC_SECTIONS];
 static int sgx_nr_epc_sections;
 static struct task_struct *ksgxd_tsk;
@@ -473,6 +480,25 @@ static struct sgx_epc_page *__sgx_alloc_epc_page_from_section(struct sgx_epc_sec
 	return page;
 }
 
+static struct sgx_epc_page *__sgx_alloc_epc_page_from_node(int nid)
+{
+	struct sgx_numa_node *node = &sgx_numa_nodes[nid];
+	struct sgx_epc_section *section;
+	struct sgx_epc_page *page;
+	int i;
+
+	for (i = 0; i < node->nr_sections; i++) {
+		section = node->sections[i];
+
+		page = __sgx_alloc_epc_page_from_section(section);
+		if (page)
+			return page;
+	}
+
+	return NULL;
+}
+
+
 /**
  * __sgx_alloc_epc_page() - Allocate an EPC page
  *
@@ -485,14 +511,19 @@ static struct sgx_epc_page *__sgx_alloc_epc_page_from_section(struct sgx_epc_sec
  */
 struct sgx_epc_page *__sgx_alloc_epc_page(void)
 {
-	struct sgx_epc_section *section;
 	struct sgx_epc_page *page;
+	int nid = numa_node_id();
 	int i;
 
-	for (i = 0; i < sgx_nr_epc_sections; i++) {
-		section = &sgx_epc_sections[i];
+	page = __sgx_alloc_epc_page_from_node(nid);
+	if (page)
+		return page;
 
-		page = __sgx_alloc_epc_page_from_section(section);
+	for (i = 0; i < sgx_nr_numa_nodes; i++) {
+		if (i == nid)
+			continue;
+
+		page = __sgx_alloc_epc_page_from_node(i);
 		if (page)
 			return page;
 	}
@@ -661,11 +692,28 @@ static inline u64 __init sgx_calc_section_metric(u64 low, u64 high)
 	       ((high & GENMASK_ULL(19, 0)) << 32);
 }
 
+static int __init sgx_pfn_to_nid(unsigned long pfn)
+{
+	pg_data_t *pgdat;
+	int nid;
+
+	for (nid = 0; nid < nr_node_ids; nid++) {
+		pgdat = NODE_DATA(nid);
+
+		if (pfn >= pgdat->node_start_pfn &&
+		    pfn < (pgdat->node_start_pfn + pgdat->node_spanned_pages))
+			return nid;
+	}
+
+	return 0;
+}
+
 static bool __init sgx_page_cache_init(void)
 {
 	u32 eax, ebx, ecx, edx, type;
+	struct sgx_numa_node *node;
 	u64 pa, size;
-	int i;
+	int i, nid;
 
 	for (i = 0; i < ARRAY_SIZE(sgx_epc_sections); i++) {
 		cpuid_count(SGX_CPUID, i + SGX_CPUID_EPC, &eax, &ebx, &ecx, &edx);
@@ -690,6 +738,14 @@ static bool __init sgx_page_cache_init(void)
 		}
 
 		sgx_nr_epc_sections++;
+
+		nid = sgx_pfn_to_nid(PFN_DOWN(pa));
+		node = &sgx_numa_nodes[nid];
+
+		node->sections[node->nr_sections] = &sgx_epc_sections[i];
+		node->nr_sections++;
+
+		sgx_nr_numa_nodes = max(sgx_nr_numa_nodes, nid + 1);
 	}
 
 	if (!sgx_nr_epc_sections) {
--
2.27.0