Subject: Re: [POC][RFC][PATCH 0/2] pstore/mm/x86: Add wildcard memmap to map pstore consistently
On Wed, 1 May 2024 18:30:40 +0300
Mike Rapoport <rppt@kernel.org> wrote:

> > So this will allocate the same physical location for every boot, if booting
> > the same kernel and having the same physical memory layout?
>
> Up to kaslr that might use that location for the kernel image.
> But it's the same as allocating from e820 after kaslr.
>
> And, TBH, I don't have good ideas how to ensure the same physical location
> with randomization of the physical address of the kernel image.
>

I tried this approach and it unfortunately picks a different physical
location every time :-(

So it is either adding to the e820 tables or creating a new way to
allocate memory early in boot.

Below is the patch I used.

-- Steve


diff --git a/include/linux/mm.h b/include/linux/mm.h
index b6bdaa18b9e9..74aaf0bcb363 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -4204,4 +4204,6 @@ static inline bool pfn_is_unaccepted_memory(unsigned long pfn)
 	return range_contains_unaccepted_memory(paddr, paddr + PAGE_SIZE);
 }
 
+int memmap_named(const char *name, unsigned long *start, unsigned long *size);
+
 #endif /* _LINUX_MM_H */
diff --git a/mm/memblock.c b/mm/memblock.c
index d09136e040d3..3c015395d262 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -2243,6 +2243,101 @@ void __init memblock_free_all(void)
 	pages = free_low_memory_core_early();
 	totalram_pages_add(pages);
 }
+/* For wildcard memory requests, have a table to find them later */
+#define MEMMAP_MAX_MAPS 8
+#define MEMMAP_NAME_SIZE 16
+struct memmap_map {
+	char name[MEMMAP_NAME_SIZE];
+	unsigned long start;
+	unsigned long size;
+};
+static struct memmap_map memmap_list[MEMMAP_MAX_MAPS] __initdata;
+static int memmap_size __initdata;
+
+/* Add wildcard region with a lookup name */
+static int __init memmap_add(u64 start, u64 size, const char *name)
+{
+	struct memmap_map *map;
+
+	if (!name || !name[0] || strlen(name) >= MEMMAP_NAME_SIZE)
+		return -EINVAL;
+
+	if (memmap_size >= MEMMAP_MAX_MAPS)
+		return -1;
+
+	map = &memmap_list[memmap_size++];
+	map->start = start;
+	map->size = size;
+	strcpy(map->name, name);
+	return 0;
+}
+
+/**
+ * memmap_named - Find a wildcard region with a given name
+ * @name: The name that is attached to a wildcard region
+ * @start: If found, holds the start address
+ * @size: If found, holds the size of the address.
+ *
+ * Returns: 1 if found or 0 if not found.
+ */
+int __init memmap_named(const char *name, unsigned long *start, unsigned long *size)
+{
+	struct memmap_map *map;
+	int i;
+
+	for (i = 0; i < memmap_size; i++) {
+		map = &memmap_list[i];
+		if (!map->size)
+			continue;
+		if (strcmp(name, map->name) == 0) {
+			*start = map->start;
+			*size = map->size;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Parse early_reserve_mem=nn:align:name
+ */
+static int __init early_reserve_mem(char *p)
+{
+	phys_addr_t start, size, align;
+	char *oldp;
+	int err;
+
+	if (!p)
+		return -EINVAL;
+
+	oldp = p;
+	size = memparse(p, &p);
+	if (p == oldp)
+		return -EINVAL;
+
+	if (*p != ':')
+		return -EINVAL;
+
+	align = memparse(p+1, &p);
+	if (*p != ':')
+		return -EINVAL;
+
+	start = memblock_phys_alloc(size, align);
+	if (!start)
+		return -ENOMEM;
+
+	p++;
+	err = memmap_add(start, size, p);
+	if (err) {
+		memblock_phys_free(start, size);
+		return err;
+	}
+
+	p += strlen(p);
+
+	return *p == '\0' ? 0 : -EINVAL;
+}
+__setup("early_reserve_mem=", early_reserve_mem);

 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)
 static const char * const flagname[] = {
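
For reference, here is a rough sketch (not part of the patch above) of how a
consumer could pick up the reserved range. It assumes booting with something
like "early_reserve_mem=1M:4096:ramoops" on the kernel command line, matching
the nn:align:name format parsed by early_reserve_mem() above; the "ramoops"
name and the example initcall are made up for illustration.

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/printk.h>

static int __init example_find_reserved(void)
{
	unsigned long start, size;

	/* Look up the region reserved via early_reserve_mem=...:ramoops */
	if (!memmap_named("ramoops", &start, &size)) {
		pr_info("no early reserved region named 'ramoops'\n");
		return 0;
	}

	pr_info("reserved region 'ramoops': start=0x%lx size=0x%lx\n",
		start, size);
	/* e.g. hand start/size to ramoops as mem_address/mem_size */
	return 0;
}
early_initcall(example_find_reserved);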