Subject: [RFC v2 12/43] mm: PKRAM: reserve preserved memory at boot
Keep preserved pages from being recycled by adding them to the memblock
reserved list during early boot. If a reservation fails (e.g. because the
region has already been reserved by something else), all preserved pages
are dropped.
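
For illustration only (not part of this patch): a minimal sketch of how a
single preserved page range could be kept away from the early page allocator
using the generic memblock interfaces (<linux/memblock.h>, <linux/pfn.h>).
The helper name is hypothetical; the patch itself folds all preserved ranges
into memblock.reserved in one pass via pkram_merge_with_reserved() and
accounts the total in pkram_reserved_pages.

	/* Hypothetical helper, shown only to illustrate the reservation step. */
	static int __init example_reserve_preserved_range(unsigned long start_pfn,
							  unsigned long nr_pages)
	{
		phys_addr_t base = PFN_PHYS(start_pfn);
		phys_addr_t size = PFN_PHYS(nr_pages);

		/* Bail out if any part of the range was already claimed. */
		if (memblock_is_region_reserved(base, size))
			return -EBUSY;

		/* Keep the pages off the free lists later built from memblock. */
		return memblock_reserve(base, size);
	}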

Signed-off-by: Anthony Yznaga <anthony.yznaga@oracle.com>
---
arch/x86/kernel/setup.c | 3 ++
arch/x86/mm/init_64.c | 2 ++
include/linux/pkram.h | 8 ++++++
mm/pkram.c | 76 +++++++++++++++++++++++++++++++++++++++++++++++--
4 files changed, 87 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index d883176ef2ce..fbd85964719d 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -15,6 +15,7 @@
#include <linux/iscsi_ibft.h>
#include <linux/memblock.h>
#include <linux/pci.h>
+#include <linux/pkram.h>
#include <linux/root_dev.h>
#include <linux/hugetlb.h>
#include <linux/tboot.h>
@@ -1146,6 +1147,8 @@ void __init setup_arch(char **cmdline_p)
	initmem_init();
	dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);

+	pkram_reserve();
+
	if (boot_cpu_has(X86_FEATURE_GBPAGES))
		hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index b5a3fa4033d3..8efb2fb2a88b 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -33,6 +33,7 @@
#include <linux/nmi.h>
#include <linux/gfp.h>
#include <linux/kcore.h>
+#include <linux/pkram.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
@@ -1293,6 +1294,7 @@ void __init mem_init(void)
	after_bootmem = 1;
	x86_init.hyper.init_after_bootmem();

+	totalram_pages_add(pkram_reserved_pages);
	/*
	 * Must be done after boot memory is put on freelist, because here we
	 * might set fields in deferred struct pages that have not yet been
diff --git a/include/linux/pkram.h b/include/linux/pkram.h
index 4f95d4fb5339..8d3d780d9fe1 100644
--- a/include/linux/pkram.h
+++ b/include/linux/pkram.h
@@ -99,4 +99,12 @@ int pkram_prepare_save(struct pkram_stream *ps, const char *name,
ssize_t pkram_write(struct pkram_access *pa, const void *buf, size_t count);
size_t pkram_read(struct pkram_access *pa, void *buf, size_t count);

+#ifdef CONFIG_PKRAM
+extern unsigned long pkram_reserved_pages;
+void pkram_reserve(void);
+#else
+#define pkram_reserved_pages 0UL
+static inline void pkram_reserve(void) { }
+#endif
+
#endif /* _LINUX_PKRAM_H */
diff --git a/mm/pkram.c b/mm/pkram.c
index b4a14837946a..03731bb6af26 100644
--- a/mm/pkram.c
+++ b/mm/pkram.c
@@ -135,6 +135,8 @@ struct pkram_super_block {
static LIST_HEAD(pkram_nodes); /* linked through page::lru */
static DEFINE_MUTEX(pkram_mutex); /* serializes open/close */

+unsigned long __initdata pkram_reserved_pages;
+
/*
 * The PKRAM super block pfn, see above.
 */
@@ -144,6 +146,59 @@ static int __init parse_pkram_sb_pfn(char *arg)
}
early_param("pkram", parse_pkram_sb_pfn);

+static void * __init pkram_map_meta(unsigned long pfn)
+{
+	if (pfn >= max_low_pfn)
+		return ERR_PTR(-EINVAL);
+	return pfn_to_kaddr(pfn);
+}
+
+int pkram_merge_with_reserved(void);
+/*
+ * Reserve pages that belong to preserved memory.
+ *
+ * This function should be called at boot time as early as possible to prevent
+ * preserved memory from being recycled.
+ */
+void __init pkram_reserve(void)
+{
+	int err = 0;
+
+	if (!pkram_sb_pfn)
+		return;
+
+	pr_info("PKRAM: Examining preserved memory...\n");
+
+	/* Verify that nothing else has reserved the pkram_sb page */
+	if (memblock_is_region_reserved(PFN_PHYS(pkram_sb_pfn), PAGE_SIZE)) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	pkram_sb = pkram_map_meta(pkram_sb_pfn);
+	if (IS_ERR(pkram_sb)) {
+		err = PTR_ERR(pkram_sb);
+		goto out;
+	}
+	/* An empty pkram_sb is not an error */
+	if (!pkram_sb->node_pfn) {
+		pkram_sb = NULL;
+		goto done;
+	}
+
+	err = pkram_merge_with_reserved();
+out:
+	if (err) {
+		pr_err("PKRAM: Reservation failed: %d\n", err);
+		WARN_ON(pkram_reserved_pages > 0);
+		pkram_sb = NULL;
+		return;
+	}
+
+done:
+	pr_info("PKRAM: %lu pages reserved\n", pkram_reserved_pages);
+}
+
static inline struct page *pkram_alloc_page(gfp_t gfp_mask)
{
	struct page *page;
@@ -163,6 +218,11 @@ static inline struct page *pkram_alloc_page(gfp_t gfp_mask)

static inline void pkram_free_page(void *addr)
{
+	/*
+	 * The page may have the reserved bit set since preserved pages
+	 * are reserved early in boot.
+	 */
+	ClearPageReserved(virt_to_page(addr));
	pkram_remove_identity_map(virt_to_page(addr));
	free_page((unsigned long)addr);
}
@@ -201,6 +261,11 @@ static void pkram_truncate_link(struct pkram_link *link)
		if (!p)
			continue;
		page = pfn_to_page(PHYS_PFN(p));
+		/*
+		 * The page may have the reserved bit set since preserved pages
+		 * are reserved early in boot.
+		 */
+		ClearPageReserved(page);
		pkram_remove_identity_map(page);
		put_page(page);
	}
@@ -684,14 +749,20 @@ static int __pkram_bytes_save_page(struct pkram_access *pa, struct page *page)
static struct page *__pkram_prep_load_page(pkram_entry_t p)
{
	struct page *page;
-	int order;
+	int i, order;
	short flags;

	flags = (p >> PKRAM_ENTRY_FLAGS_SHIFT) & PKRAM_ENTRY_FLAGS_MASK;
+	order = p & PKRAM_ENTRY_ORDER_MASK;
	page = pfn_to_page(PHYS_PFN(p));

+	for (i = 0; i < (1 << order); i++) {
+		struct page *pg = page + i;
+
+		ClearPageReserved(pg);
+	}
+
	if (flags & PKRAM_PAGE_TRANS_HUGE) {
-		order = p & PKRAM_ENTRY_ORDER_MASK;
		prep_compound_page(page, order);
		prep_transhuge_page(page);
	}
@@ -1311,6 +1382,7 @@ int __init pkram_create_merged_reserved(struct memblock_type *new)
}

	WARN_ON(cnt_a + cnt_b != k);
+	pkram_reserved_pages = nr_preserved;
	new->cnt = cnt_a + cnt_b;
	new->total_size = total_size;

--
1.8.3.1