Subject: [PATCH v27 18/31] mm: Add guard pages around a shadow stack.
INCSSP(Q/D) increments the shadow stack pointer and 'pops and discards' the
first and the last elements in the range, effectively touching those memory
areas.

The maximum distance INCSSPQ can move the shadow stack pointer is
255 * 8 = 2040 bytes, and 255 * 4 = 1020 bytes for INCSSPD. Both are well
within PAGE_SIZE. Thus, putting a guard page at each end of a shadow stack
prevents INCSSP, CALL, and RET from going beyond it.
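
For concreteness, a minimal user-space sketch of the arithmetic above (not
part of this patch; PAGE_SIZE is hard-coded to 4096 here), checking that the
largest single INCSSP move fits inside one guard page:

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
        /* INCSSPQ pops up to 255 8-byte slots; INCSSPD up to 255 4-byte slots. */
        unsigned long max_incsspq = 255UL * 8;  /* 2040 bytes */
        unsigned long max_incsspd = 255UL * 4;  /* 1020 bytes */

        /* Both maxima are smaller than one page, so a single guard page at
         * either end of the shadow stack catches any INCSSP (and the one-slot
         * moves of CALL/RET) that tries to step past the stack's boundary. */
        assert(max_incsspq < PAGE_SIZE);
        assert(max_incsspd < PAGE_SIZE);

        printf("INCSSPQ max move: %lu bytes\n", max_incsspq);
        printf("INCSSPD max move: %lu bytes\n", max_incsspd);
        return 0;
}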

    Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
    Reviewed-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
    Cc: Kees Cook <keescook@chromium.org>
    ---
    v25:
    - Move SHADOW_STACK_GUARD_GAP to arch/x86/mm/mmap.c.

    v24:
- Instead of changing vm_*_gap(), create x86-specific versions.
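
To illustrate what "x86-specific versions" means in practice, here is a
standalone sketch (hypothetical names, not kernel code) of the macro-guard
override idiom this patch uses: the arch header defines a macro with the same
name as the function it overrides, and the generic header compiles its inline
fallback only when that macro is absent.

#include <stdio.h>

/* "Arch" side: define a same-named macro so the generic fallback below
 * is compiled out, then supply the override. */
#define get_guard_gap get_guard_gap
static unsigned long get_guard_gap(void)
{
        return 4096;            /* arch-specific gap */
}

/* "Generic" side: used only when no arch override is present. */
#ifndef get_guard_gap
static unsigned long get_guard_gap(void)
{
        return 0;               /* no gap by default */
}
#endif

int main(void)
{
        printf("gap = %lu\n", get_guard_gap());  /* prints 4096 */
        return 0;
}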

arch/x86/include/asm/page_types.h |  7 +++++
arch/x86/mm/mmap.c                | 46 +++++++++++++++++++++++++++++++
include/linux/mm.h                |  4 +++
3 files changed, 57 insertions(+)

    diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
    index a506a411474d..e1533fdc08b4 100644
    --- a/arch/x86/include/asm/page_types.h
    +++ b/arch/x86/include/asm/page_types.h
@@ -73,6 +73,13 @@ bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn);
 
 extern void initmem_init(void);
 
+#define vm_start_gap vm_start_gap
+struct vm_area_struct;
+extern unsigned long vm_start_gap(struct vm_area_struct *vma);
+
+#define vm_end_gap vm_end_gap
+extern unsigned long vm_end_gap(struct vm_area_struct *vma);
+
 #endif  /* !__ASSEMBLY__ */
 
 #endif  /* _ASM_X86_PAGE_DEFS_H */
    diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
    index f3f52c5e2fd6..81f9325084d3 100644
    --- a/arch/x86/mm/mmap.c
    +++ b/arch/x86/mm/mmap.c
@@ -250,3 +250,49 @@ bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
                 return false;
         return true;
 }
+
+/*
+ * Shadow stack pointer is moved by CALL, RET, and INCSSP(Q/D). INCSSPQ
+ * moves shadow stack pointer up to 255 * 8 = ~2 KB (~1KB for INCSSPD) and
+ * touches the first and the last element in the range, which triggers a
+ * page fault if the range is not in a shadow stack. Because of this,
+ * creating 4-KB guard pages around a shadow stack prevents these
+ * instructions from going beyond.
+ */
+#define SHADOW_STACK_GUARD_GAP PAGE_SIZE
+
+unsigned long vm_start_gap(struct vm_area_struct *vma)
+{
+        unsigned long vm_start = vma->vm_start;
+        unsigned long gap = 0;
+
+        if (vma->vm_flags & VM_GROWSDOWN)
+                gap = stack_guard_gap;
+        else if (vma->vm_flags & VM_SHADOW_STACK)
+                gap = SHADOW_STACK_GUARD_GAP;
+
+        if (gap != 0) {
+                vm_start -= gap;
+                if (vm_start > vma->vm_start)
+                        vm_start = 0;
+        }
+        return vm_start;
+}
+
+unsigned long vm_end_gap(struct vm_area_struct *vma)
+{
+        unsigned long vm_end = vma->vm_end;
+        unsigned long gap = 0;
+
+        if (vma->vm_flags & VM_GROWSUP)
+                gap = stack_guard_gap;
+        else if (vma->vm_flags & VM_SHADOW_STACK)
+                gap = SHADOW_STACK_GUARD_GAP;
+
+        if (gap != 0) {
+                vm_end += gap;
+                if (vm_end < vma->vm_end)
+                        vm_end = -PAGE_SIZE;
+        }
+        return vm_end;
+}
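
A note on the "vm_start > vma->vm_start" and "vm_end < vma->vm_end" checks
above: they detect wrap-around when the gap is subtracted from an address
near 0 or added to an address near the top of the address space, and clamp
the result instead of returning a wrapped value. A minimal user-space mock
(hypothetical struct and flag values, not the kernel types) of the
vm_start_gap() half:

#include <stdio.h>

#define PAGE_SIZE               4096UL
#define SHADOW_STACK_GUARD_GAP  PAGE_SIZE
#define VM_SHADOW_STACK         0x1UL   /* stand-in flag bit */

/* Minimal stand-in for the fields of struct vm_area_struct used above. */
struct mock_vma {
        unsigned long vm_start;
        unsigned long vm_end;
        unsigned long vm_flags;
};

static unsigned long mock_vm_start_gap(struct mock_vma *vma)
{
        unsigned long vm_start = vma->vm_start;

        if (vma->vm_flags & VM_SHADOW_STACK) {
                vm_start -= SHADOW_STACK_GUARD_GAP;
                if (vm_start > vma->vm_start)   /* subtraction wrapped below 0 */
                        vm_start = 0;
        }
        return vm_start;
}

int main(void)
{
        struct mock_vma ss  = { 0x7f0000001000UL, 0x7f0000002000UL, VM_SHADOW_STACK };
        struct mock_vma low = { 0x0UL, 0x1000UL, VM_SHADOW_STACK };

        /* Normal case: reported start is one guard page below vm_start. */
        printf("%#lx -> %#lx\n", ss.vm_start, mock_vm_start_gap(&ss));
        /* Wrap case: clamped to 0 rather than wrapping to a huge address. */
        printf("%#lx -> %#lx\n", low.vm_start, mock_vm_start_gap(&low));
        return 0;
}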
    diff --git a/include/linux/mm.h b/include/linux/mm.h
    index 1e57c2b823ed..fd43bbcd91e2 100644
    --- a/include/linux/mm.h
    +++ b/include/linux/mm.h
@@ -2699,6 +2699,7 @@ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * m
         return vma;
 }
 
+#ifndef vm_start_gap
 static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
 {
         unsigned long vm_start = vma->vm_start;
@@ -2710,7 +2711,9 @@ static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
         }
         return vm_start;
 }
+#endif
 
+#ifndef vm_end_gap
 static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
 {
         unsigned long vm_end = vma->vm_end;
@@ -2722,6 +2725,7 @@ static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
         }
         return vm_end;
 }
+#endif
 
 static inline unsigned long vma_pages(struct vm_area_struct *vma)
 {
    --
    2.21.0