From: Joerg Roedel <jroedel@suse.de>
Subject: [PATCH v5 15/75] x86/boot/compressed/64: Always switch to own page-table

When booted through startup_64(), the kernel keeps running on the EFI
page-table until the KASLR code sets up its own page-table. Without
KASLR, the pre-decompression boot code never switches away from the
EFI page-table. Change that by unconditionally switching to a
kernel-controlled page-table after relocation.

This makes sure the mapping can be changed when necessary, for
example to map pages unencrypted in SEV and SEV-ES guests.
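
As an illustration only (not in this patch): "unencrypted" means the
SEV C-bit is cleared in the page-table entry. A minimal C sketch, with
a hypothetical bit position standing in for the real CPUID-derived
sme_me_mask:

	#include <stdint.h>

	/* Hypothetical C-bit position (often bit 47); the real mask
	 * is derived from CPUID 0x8000001f and kept in sme_me_mask. */
	static const uint64_t sme_me_mask = 1ULL << 47;

	/* Clear the encryption bit so the page is shared
	 * (unencrypted) with the hypervisor. */
	static uint64_t pte_clear_enc(uint64_t pte)
	{
		return pte & ~sme_me_mask;
	}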

    Also remove the debug_putstr() calls in initialize_identity_maps()
    because the function now runs before console_init() is called.

    Signed-off-by: Joerg Roedel <jroedel@suse.de>
    Reviewed-by: Kees Cook <keescook@chromium.org>
    ---
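For illustration (not part of the patch): the 2M rounding done by
add_identity_map() in the ident_map_64.c hunk below can be checked
with a small stand-alone program; round_down()/round_up() are
re-implemented here only for the sketch:

	#include <stdio.h>

	#define PMD_SIZE (1UL << 21)	/* 2M */

	/* Stand-ins for the kernel's round_down()/round_up(). */
	#define round_down(x, y) ((x) & ~((y) - 1))
	#define round_up(x, y)   (((x) + (y) - 1) & ~((y) - 1))

	int main(void)
	{
		/* One 4K page at 0x1234000 ... */
		unsigned long start = 0x1234000UL, end = start + 0x1000UL;

		start = round_down(start, PMD_SIZE);	/* 0x1200000 */
		end   = round_up(end, PMD_SIZE);	/* 0x1400000 */

		/* ... is widened to a full 2M-aligned range. */
		printf("map [%#lx, %#lx)\n", start, end);
		return 0;
	}
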
 arch/x86/boot/compressed/head_64.S      |  3 +-
 arch/x86/boot/compressed/ident_map_64.c | 51 +++++++++++++++----------
 arch/x86/boot/compressed/kaslr.c        |  3 --
 3 files changed, 32 insertions(+), 25 deletions(-)

    diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
    index 4174d2f97b29..36f18d5592f4 100644
    --- a/arch/x86/boot/compressed/head_64.S
    +++ b/arch/x86/boot/compressed/head_64.S
@@ -543,10 +543,11 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
 	rep	stosq
 
 /*
- * Load stage2 IDT
+ * Load stage2 IDT and switch to our own page-table
  */
 	pushq	%rsi
 	call	load_stage2_idt
+	call	initialize_identity_maps
 	popq	%rsi
 
 /*
    diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c
    index e3d980ae9c2b..ecf9353b064d 100644
    --- a/arch/x86/boot/compressed/ident_map_64.c
    +++ b/arch/x86/boot/compressed/ident_map_64.c
@@ -86,9 +86,31 @@ phys_addr_t physical_mask = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
  */
 static struct x86_mapping_info mapping_info;
 
+/*
+ * Adds the specified range to what will become the new identity mappings.
+ * Once all ranges have been added, the new mapping is activated by calling
+ * finalize_identity_maps() below.
+ */
+void add_identity_map(unsigned long start, unsigned long size)
+{
+	unsigned long end = start + size;
+
+	/* Align boundary to 2M. */
+	start = round_down(start, PMD_SIZE);
+	end = round_up(end, PMD_SIZE);
+	if (start >= end)
+		return;
+
+	/* Build the mapping. */
+	kernel_ident_mapping_init(&mapping_info, (pgd_t *)top_level_pgt,
+				  start, end);
+}
+
 /* Locates and clears a region for a new top level page table. */
 void initialize_identity_maps(void)
 {
+	unsigned long start, size;
+
 	/* If running as an SEV guest, the encryption mask is required. */
 	set_sev_encryption_mask();
 
@@ -121,37 +143,24 @@ void initialize_identity_maps(void)
 	 */
 	top_level_pgt = read_cr3_pa();
 	if (p4d_offset((pgd_t *)top_level_pgt, 0) == (p4d_t *)_pgtable) {
-		debug_putstr("booted via startup_32()\n");
 		pgt_data.pgt_buf = _pgtable + BOOT_INIT_PGT_SIZE;
 		pgt_data.pgt_buf_size = BOOT_PGT_SIZE - BOOT_INIT_PGT_SIZE;
 		memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
 	} else {
-		debug_putstr("booted via startup_64()\n");
 		pgt_data.pgt_buf = _pgtable;
 		pgt_data.pgt_buf_size = BOOT_PGT_SIZE;
 		memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
 		top_level_pgt = (unsigned long)alloc_pgt_page(&pgt_data);
 	}
-}
 
-/*
- * Adds the specified range to what will become the new identity mappings.
- * Once all ranges have been added, the new mapping is activated by calling
- * finalize_identity_maps() below.
- */
-void add_identity_map(unsigned long start, unsigned long size)
-{
-	unsigned long end = start + size;
-
-	/* Align boundary to 2M. */
-	start = round_down(start, PMD_SIZE);
-	end = round_up(end, PMD_SIZE);
-	if (start >= end)
-		return;
-
-	/* Build the mapping. */
-	kernel_ident_mapping_init(&mapping_info, (pgd_t *)top_level_pgt,
-				  start, end);
+	/*
+	 * New page-table is set up - map the kernel image and load it
+	 * into cr3.
+	 */
+	start = (unsigned long)_head;
+	size = _end - _head;
+	add_identity_map(start, size);
+	write_cr3(top_level_pgt);
 }
 
 /*
    diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
    index 7c61a8c5b9cf..856dc1c9bb0d 100644
    --- a/arch/x86/boot/compressed/kaslr.c
    +++ b/arch/x86/boot/compressed/kaslr.c
@@ -903,9 +903,6 @@ void choose_random_location(unsigned long input,
 
 	boot_params->hdr.loadflags |= KASLR_FLAG;
 
-	/* Prepare to add new identity pagetables on demand. */
-	initialize_identity_maps();
-
 	/* Record the various known unsafe memory ranges. */
 	mem_avoid_init(input, input_size, *output);
 
    --
    2.27.0
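
For readers following along: the write_cr3() call added above boils
down to a single MOV to CR3, which installs the new top-level
page-table and flushes non-global TLB entries. A hedged sketch of such
a helper (the decompression code ships its own definition):

	/* Sketch only; not the kernel's actual helper. */
	static inline void write_cr3(unsigned long val)
	{
		asm volatile("mov %0, %%cr3" : : "r" (val) : "memory");
	}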