From: Mark Rutland <mark.rutland@arm.com>
Subject: [PATCH 3/8] stackleak: rework stack low bound handling
Date: Mon, 25 Apr 2022
    In stackleak_task_init(), stackleak_track_stack(), and
    __stackleak_erase(), we open-code skipping the STACK_END_MAGIC at the
    bottom of the stack. Each case is implemented slightly differently, and
    only the __stackleak_erase() case is commented.

    In stackleak_task_init() and stackleak_track_stack() we unconditionally
    add sizeof(unsigned long) to the lowest stack address. In
    stackleak_task_init() we use end_of_stack() for this, and in
    stackleak_track_stack() we use task_stack_page(). In __stackleak_erase()
    we handle this by checking whether `kstack_ptr` has hit the stack end
    boundary and, if so, moving it above the magic.
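
    For comparison, the three pre-patch forms (quoted from the lines removed
    in the diff below, trimmed to the relevant statements) are:

      /* stackleak_task_init() */
      t->lowest_stack = (unsigned long)end_of_stack(t) + sizeof(unsigned long);

      /* stackleak_track_stack() */
      if (sp < current->lowest_stack &&
          sp >= (unsigned long)task_stack_page(current) + sizeof(unsigned long)) {
              current->lowest_stack = sp;
      }

      /* __stackleak_erase() */
      if (kstack_ptr == boundary)
              kstack_ptr += sizeof(unsigned long);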

    This patch adds a new stackleak_task_low_bound() helper, used in all
    three cases, which unconditionally adds sizeof(unsigned long) to the
    lowest address on the task stack, with commentary as to why. The helper
    uses end_of_stack(), as stackleak_task_init() did prior to this patch;
    this is consistent with the code in kernel/fork.c which initializes the
    STACK_END_MAGIC value.
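
    For reference, kernel/fork.c places the magic via
    set_task_stack_end_magic(), which (quoted from memory of recent kernels,
    not part of this patch) looks roughly like:

      void set_task_stack_end_magic(struct task_struct *tsk)
      {
              unsigned long *stackend;

              stackend = end_of_stack(tsk);
              *stackend = STACK_END_MAGIC;    /* for overflow detection */
      }

    i.e. STACK_END_MAGIC lives in the single unsigned long at end_of_stack(),
    which is exactly what the new helper steps over.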

    In __stackleak_erase() we no longer need to check whether we've spilled
    into the STACK_END_MAGIC value, as stackleak_track_stack() ensures that
    `current->lowest_stack` stops immediately above the magic, and the
    poison scan will likewise stop immediately above it.

    For stackleak_task_init() and stackleak_track_stack() this results in no
    change to code generation. For __stackleak_erase() the generated
    assembly is slightly simpler and shorter.

    Signed-off-by: Mark Rutland <mark.rutland@arm.com>
    Cc: Alexander Popov <alex.popov@linux.com>
    Cc: Andrew Morton <akpm@linux-foundation.org>
    Cc: Andy Lutomirski <luto@kernel.org>
    Cc: Kees Cook <keescook@chromium.org>
    ---
     include/linux/stackleak.h | 15 ++++++++++++++-
     kernel/stackleak.c        | 14 ++++----------
     2 files changed, 18 insertions(+), 11 deletions(-)

    diff --git a/include/linux/stackleak.h b/include/linux/stackleak.h
    index ccaab2043fcd5..67430faa5c518 100644
    --- a/include/linux/stackleak.h
    +++ b/include/linux/stackleak.h
    @@ -15,9 +15,22 @@
     #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
     #include <asm/stacktrace.h>
     
    +/*
    + * The lowest address on tsk's stack which we can plausibly erase.
    + */
    +static __always_inline unsigned long
    +stackleak_task_low_bound(const struct task_struct *tsk)
    +{
    +	/*
    +	 * The lowest unsigned long on the task stack contains STACK_END_MAGIC,
    +	 * which we must not corrupt.
    +	 */
    +	return (unsigned long)end_of_stack(tsk) + sizeof(unsigned long);
    +}
    +
     static inline void stackleak_task_init(struct task_struct *t)
     {
    -	t->lowest_stack = (unsigned long)end_of_stack(t) + sizeof(unsigned long);
    +	t->lowest_stack = stackleak_task_low_bound(t);
     # ifdef CONFIG_STACKLEAK_METRICS
     	t->prev_lowest_stack = t->lowest_stack;
     # endif
    diff --git a/kernel/stackleak.c b/kernel/stackleak.c
    index 753eab797a04d..0472956d9a2ce 100644
    --- a/kernel/stackleak.c
    +++ b/kernel/stackleak.c
    @@ -72,9 +72,11 @@ late_initcall(stackleak_sysctls_init);
     
     static __always_inline void __stackleak_erase(void)
     {
    +	const unsigned long task_stack_low = stackleak_task_low_bound(current);
    +
     	/* It would be nice not to have 'kstack_ptr' and 'boundary' on stack */
     	unsigned long kstack_ptr = current->lowest_stack;
    -	unsigned long boundary = (unsigned long)end_of_stack(current);
    +	unsigned long boundary = task_stack_low;
     	unsigned int poison_count = 0;
     	const unsigned int depth = STACKLEAK_SEARCH_DEPTH / sizeof(unsigned long);
     
    @@ -92,13 +94,6 @@ static __always_inline void __stackleak_erase(void)
     		kstack_ptr -= sizeof(unsigned long);
     	}
     
    -	/*
    -	 * One 'long int' at the bottom of the thread stack is reserved and
    -	 * should not be poisoned (see CONFIG_SCHED_STACK_END_CHECK=y).
    -	 */
    -	if (kstack_ptr == boundary)
    -		kstack_ptr += sizeof(unsigned long);
    -
     #ifdef CONFIG_STACKLEAK_METRICS
     	current->prev_lowest_stack = kstack_ptr;
     #endif
    @@ -144,8 +139,7 @@ void __used __no_caller_saved_registers noinstr stackleak_track_stack(void)
     	/* 'lowest_stack' should be aligned on the register width boundary */
     	sp = ALIGN(sp, sizeof(unsigned long));
     	if (sp < current->lowest_stack &&
    -	    sp >= (unsigned long)task_stack_page(current) +
    -					sizeof(unsigned long)) {
    +	    sp >= stackleak_task_low_bound(current)) {
     		current->lowest_stack = sp;
     	}
     }
    --
    2.30.2