Date: Fri, 1 Jul 2022
Subject: [PATCH v4 09/45] x86: kmsan: pgtable: reduce vmalloc space
From: Alexander Potapenko <glider@google.com>
KMSAN is going to use 3/4 of the existing vmalloc space to hold its
metadata, so lower VMALLOC_END to make sure vmalloc() does not allocate
past the first 1/4.
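
To illustrate (an editor's sketch, not part of the patch): with the
quartered layout, the metadata for a vmalloc address lives at a fixed
offset from the address itself, so a lookup is plain pointer arithmetic
on the offsets defined below. The helper names here are hypothetical:

	/* Hypothetical helpers; illustrative only. KMSAN proper performs
	 * additional range checks before touching these addresses. */
	static inline void *vmalloc_shadow(void *addr)
	{
		return (void *)((unsigned long)addr + KMSAN_VMALLOC_SHADOW_OFFSET);
	}

	static inline void *vmalloc_origin(void *addr)
	{
		return (void *)((unsigned long)addr + KMSAN_VMALLOC_ORIGIN_OFFSET);
	}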

    Signed-off-by: Alexander Potapenko <glider@google.com>
    ---
    v2:
    -- added x86: to the title

    Link: https://linux-review.googlesource.com/id/I9d8b7f0a88a639f1263bc693cbd5c136626f7efd
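
As a back-of-the-envelope check (editor's sketch, not part of the patch),
assuming 4-level paging, where VMALLOC_SIZE_TB is 32 and VMALLOC_START is
__VMALLOC_BASE_L4 (0xffffc90000000000):

	/* Worked example of the quartered layout, 4-level paging assumed. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long long start = 0xffffc90000000000ULL; /* __VMALLOC_BASE_L4 */
		unsigned long long quarter = (32ULL << 40) >> 2;  /* 8 TB per quarter */

		printf("vmalloc:        %#llx - %#llx\n", start, start + quarter - 1);
		printf("vmalloc shadow: %#llx\n", start + quarter);     /* 2nd quarter */
		printf("vmalloc origin: %#llx\n", start + 2 * quarter); /* 3rd quarter */
		printf("module shadow:  %#llx\n", start + 3 * quarter); /* 4th quarter */
		return 0;
	}

So vmalloc() is limited to 8 TB instead of 32 TB, and each metadata region
is exactly as large as the region it mirrors.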
    ---
    arch/x86/include/asm/pgtable_64_types.h | 41 ++++++++++++++++++++++++-
arch/x86/mm/init_64.c                   |  2 +-
    2 files changed, 41 insertions(+), 2 deletions(-)

    diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
    index 70e360a2e5fb7..ad6ded5b1dedf 100644
    --- a/arch/x86/include/asm/pgtable_64_types.h
    +++ b/arch/x86/include/asm/pgtable_64_types.h
    @@ -139,7 +139,46 @@ extern unsigned int ptrs_per_p4d;
    # define VMEMMAP_START __VMEMMAP_BASE_L4
    #endif /* CONFIG_DYNAMIC_MEMORY_LAYOUT */

    -#define VMALLOC_END (VMALLOC_START + (VMALLOC_SIZE_TB << 40) - 1)
    +#define VMEMORY_END (VMALLOC_START + (VMALLOC_SIZE_TB << 40) - 1)
    +
    +#ifndef CONFIG_KMSAN
    +#define VMALLOC_END VMEMORY_END
    +#else
+/*
+ * In KMSAN builds the vmalloc area is four times smaller, and the remaining
+ * 3/4 are used to keep the metadata for virtual pages. The memory formerly
+ * belonging to the vmalloc area is now laid out as follows:
+ *
+ * 1st quarter: VMALLOC_START to VMALLOC_END - new vmalloc area
+ * 2nd quarter: KMSAN_VMALLOC_SHADOW_START to
+ *              VMALLOC_END+KMSAN_VMALLOC_SHADOW_OFFSET - vmalloc area shadow
+ * 3rd quarter: KMSAN_VMALLOC_ORIGIN_START to
+ *              VMALLOC_END+KMSAN_VMALLOC_ORIGIN_OFFSET - vmalloc area origins
+ * 4th quarter: KMSAN_MODULES_SHADOW_START to KMSAN_MODULES_ORIGIN_START
+ *              - shadow for modules,
+ *              KMSAN_MODULES_ORIGIN_START to
+ *              KMSAN_MODULES_ORIGIN_START + MODULES_LEN - origins for modules.
+ */
    +#define VMALLOC_QUARTER_SIZE ((VMALLOC_SIZE_TB << 40) >> 2)
    +#define VMALLOC_END (VMALLOC_START + VMALLOC_QUARTER_SIZE - 1)
    +
+/*
+ * vmalloc metadata addresses are calculated by adding shadow/origin offsets
+ * to the vmalloc address.
+ */
    +#define KMSAN_VMALLOC_SHADOW_OFFSET VMALLOC_QUARTER_SIZE
    +#define KMSAN_VMALLOC_ORIGIN_OFFSET (VMALLOC_QUARTER_SIZE << 1)
    +
    +#define KMSAN_VMALLOC_SHADOW_START (VMALLOC_START + KMSAN_VMALLOC_SHADOW_OFFSET)
    +#define KMSAN_VMALLOC_ORIGIN_START (VMALLOC_START + KMSAN_VMALLOC_ORIGIN_OFFSET)
    +
+/*
+ * The shadow and origins for modules are placed back to back in the last
+ * 1/4 of vmalloc space.
+ */
    +#define KMSAN_MODULES_SHADOW_START (VMALLOC_END + KMSAN_VMALLOC_ORIGIN_OFFSET + 1)
    +#define KMSAN_MODULES_ORIGIN_START (KMSAN_MODULES_SHADOW_START + MODULES_LEN)
    +#endif /* CONFIG_KMSAN */

    #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
    /* The module sections ends with the start of the fixmap */
    diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
    index 39c5246964a91..5806331172361 100644
    --- a/arch/x86/mm/init_64.c
    +++ b/arch/x86/mm/init_64.c
    @@ -1287,7 +1287,7 @@ static void __init preallocate_vmalloc_pages(void)
    unsigned long addr;
    const char *lvl;

    - for (addr = VMALLOC_START; addr <= VMALLOC_END; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
    + for (addr = VMALLOC_START; addr <= VMEMORY_END; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
    pgd_t *pgd = pgd_offset_k(addr);
    p4d_t *p4d;
    pud_t *pud;
    --
    2.37.0.rc0.161.g10f37bed90-goog