From: Oleg Nesterov <oleg@redhat.com>
Date: 6 Jul 2015
Subject: [PATCH v2 11/11] uprobes/x86: Make arch_uretprobe_is_alive(RP_CHECK_CALL) more clever
The previous change documents that cleanup_return_instances() can't
always detect the dead frames, since the stack can grow. But there is
one special case which is imho worth fixing: arch_uretprobe_is_alive()
can return true when the stack didn't actually grow, but the next
"call" insn uses the already invalidated frame.

Test-case:

	#include <stdio.h>
	#include <setjmp.h>

	jmp_buf jmp;
	int nr = 1024;

	void func_2(void)
	{
		if (--nr == 0)
			return;
		longjmp(jmp, 1);
	}

	void func_1(void)
	{
		setjmp(jmp);
		func_2();
	}

	int main(void)
	{
		func_1();
		return 0;
	}

If you ret-probe func_1() and func_2(), prepare_uretprobe() hits the
MAX_URETPROBE_DEPTH limit and the "return" from func_2() is not
reported.
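
To spell out the cycle (a sketch with hypothetical stack addresses,
not taken from a real trace; MAX_URETPROBE_DEPTH is 64):

	/*
	 * call func_2        - "call" pushes the ret-addr at 0x1000;
	 *                      prepare_uretprobe() hijacks it and saves
	 *                      ret->stack = 0x1000, utask->depth++
	 * longjmp(jmp, 1)    - unwinds func_2() without going through
	 *                      the trampoline; the instance is now dead
	 * call func_2 again  - the new ret-addr lands in the same slot,
	 *                      so regs->sp == ret->stack and the old
	 *                      "sp <= stack" check keeps the dead
	 *                      instance; utask->depth++ once more
	 */

After 64 iterations prepare_uretprobe() refuses to hijack further
return addresses, so the remaining returns go unreported.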

When we know that the new call is not chained, we can do a stricter
check. In this case "sp" points to the new ret-addr, so every frame
which uses the same "sp" must be dead. The only complication is that
arch_uretprobe_is_alive() needs to know whether it was chained or not,
so we add the new RP_CHECK_CHAIN_CALL enum value and change
prepare_uretprobe() to pass RP_CHECK_CALL only if !chained. (A chained
instance legitimately shares its ret-addr slot with the new one, so
the strict check would wrongly declare it dead.)
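
A minimal sketch of the resulting predicate (hypothetical helper name,
just the decision table in isolation; the real change is in the
arch_uretprobe_is_alive() hunk below):

	#include <stdbool.h>

	/* Not kernel API: illustrates the two contexts. */
	static bool frame_is_alive(unsigned long sp, unsigned long saved_stack,
				   bool chained)
	{
		if (!chained)	/* "call" just pushed a new ret-addr at sp */
			return sp < saved_stack;  /* equal: slot reused, dead */
		/* chained call: the live instance owns this very slot */
		return sp <= saved_stack;
	}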

Note: arch_uretprobe_is_alive() could also re-read *sp and check
whether this word is still trampoline_vaddr. This could obviously
improve the logic, but I would like to avoid another copy_from_user(),
especially in the cases when we can't avoid the false "alive == T"
positives anyway.
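
For reference, that rejected variant would look roughly like this
(hypothetical kernel-context helper, not part of this patch):

	/*
	 * Costs a copy_from_user() per instance and still can't kill
	 * every false positive, hence not done.
	 */
	static bool ret_addr_still_hijacked(struct return_instance *ret,
					    unsigned long trampoline_vaddr)
	{
		unsigned long word;

		/* re-read the hijacked ret-addr slot on the user stack */
		if (copy_from_user(&word, (void __user *)ret->stack,
				   sizeof(word)))
			return false;
		return word == trampoline_vaddr;
	}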

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
---
 arch/x86/kernel/uprobes.c |    5 ++++-
 include/linux/uprobes.h   |    1 +
 kernel/events/uprobes.c   |   14 +++++++-------
 3 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 67eb168..a5c59f2 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -997,5 +997,8 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
 bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
 				struct pt_regs *regs)
 {
-	return regs->sp <= ret->stack;
+	if (ctx == RP_CHECK_CALL) /* sp was just decremented by "call" insn */
+		return regs->sp < ret->stack;
+	else
+		return regs->sp <= ret->stack;
 }
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index c0a5402..0bdc72f 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -104,6 +104,7 @@ struct return_instance {
 
 enum rp_check {
 	RP_CHECK_CALL,
+	RP_CHECK_CHAIN_CALL,
 	RP_CHECK_RET,
 };
 
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index df5661a..0f370ef 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1511,10 +1511,11 @@ static unsigned long get_trampoline_vaddr(void)
 	return trampoline_vaddr;
 }
 
-static void cleanup_return_instances(struct uprobe_task *utask, struct pt_regs *regs)
+static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
+				     struct pt_regs *regs)
 {
 	struct return_instance *ri = utask->return_instances;
-	enum rp_check ctx = RP_CHECK_CALL;
+	enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;
 
 	while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
 		ri = free_ret_instance(ri);
@@ -1528,7 +1529,7 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
 	struct return_instance *ri;
 	struct uprobe_task *utask;
 	unsigned long orig_ret_vaddr, trampoline_vaddr;
-	bool chained = false;
+	bool chained;
 
 	if (!get_xol_area())
 		return;
@@ -1554,14 +1555,15 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
 		goto fail;
 
 	/* drop the entries invalidated by longjmp() */
-	cleanup_return_instances(utask, regs);
+	chained = (orig_ret_vaddr == trampoline_vaddr);
+	cleanup_return_instances(utask, chained, regs);
 
 	/*
 	 * We don't want to keep trampoline address in stack, rather keep the
 	 * original return address of first caller thru all the consequent
 	 * instances. This also makes breakpoint unwrapping easier.
 	 */
-	if (orig_ret_vaddr == trampoline_vaddr) {
+	if (chained) {
 		if (!utask->return_instances) {
 			/*
 			 * This situation is not possible. Likely we have an
@@ -1570,8 +1572,6 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
 			uprobe_warn(current, "handle tail call");
 			goto fail;
 		}
-
-		chained = true;
 		orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
 	}
 
--
1.5.5.1

