Subject: [PATCH 4.19 125/133] arm64: ptrace: Consistently use pseudo-singlestep exceptions
    From: Will Deacon <will@kernel.org>

    commit ac2081cdc4d99c57f219c1a6171526e0fa0a6fff upstream.

    Although the arm64 single-step state machine can be fast-forwarded in
    cases where we wish to generate a SIGTRAP without actually executing an
    instruction, this has two major limitations outside of simply skipping
    an instruction due to emulation.

1. Stepping out of a ptrace signal stop into a signal handler where
SIGTRAP is blocked. Fast-forwarding the stepping state machine in
this case will result in a forced SIGTRAP, with the handler reset to
SIG_DFL (a sketch of this scenario follows the list).

    2. The hardware implicitly fast-forwards the state machine when executing
    an SVC instruction for issuing a system call. This can interact badly
    with subsequent ptrace stops signalled during the execution of the
    system call (e.g. SYSCALL_EXIT or seccomp traps), as they may corrupt
    the stepping state by updating the PSTATE for the tracee.
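
As an illustration of scenario (1), the sketch below is a hypothetical
tracee whose handler blocks SIGTRAP via sa_mask; single-stepping it
out of the signal stop and into handler() hits the forced-SIGTRAP case
described above. This is our own illustrative program, not code from
the kernel tree:

	#include <signal.h>
	#include <string.h>
	#include <unistd.h>

	static void handler(int sig)
	{
		(void)sig;	/* nothing to do; only the blocked mask matters */
	}

	int main(void)
	{
		struct sigaction sa;

		memset(&sa, 0, sizeof(sa));
		sa.sa_handler = handler;
		sigemptyset(&sa.sa_mask);
		sigaddset(&sa.sa_mask, SIGTRAP);  /* SIGTRAP blocked in the handler */
		sigaction(SIGUSR1, &sa, NULL);

		raise(SIGUSR1);	/* a tracer steps from this stop into handler() */
		return 0;
	}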

    Resolve both of these issues by injecting a pseudo-singlestep exception
    on entry to a signal handler and also on return to userspace following a
    system call.
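
To see the effect of the fix for scenario (2), a tracer can single-step
its tracee across a system call: with the pseudo-singlestep injection,
the step over the SVC instruction is reported as a SIGTRAP at syscall
exit instead of being swallowed by the hardware's implicit
fast-forward. The following is a minimal sketch under our own
assumptions (the choice of getppid() as the traced system call is
arbitrary):

	#include <signal.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		pid_t pid = fork();

		if (pid == 0) {
			/* Tracee: stop for the tracer, then issue an SVC. */
			ptrace(PTRACE_TRACEME, 0, NULL, NULL);
			raise(SIGSTOP);
			getppid();	/* the system call stepped over below */
			_exit(0);
		}

		waitpid(pid, NULL, 0);	/* wait for the initial SIGSTOP */

		/* Step one instruction at a time until the tracee exits. */
		for (;;) {
			int status;

			if (ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL) < 0)
				break;
			waitpid(pid, &status, 0);
			if (WIFEXITED(status))
				break;
			/*
			 * Each stop is a SIGTRAP, including the pseudo-step
			 * reported after the SVC completes.
			 */
		}
		return 0;
	}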

    Cc: <stable@vger.kernel.org>
    Cc: Mark Rutland <mark.rutland@arm.com>
    Tested-by: Luis Machado <luis.machado@linaro.org>
    Reported-by: Keno Fischer <keno@juliacomputing.com>
    Signed-off-by: Will Deacon <will@kernel.org>
    Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/arm64/include/asm/thread_info.h |  1 +
 arch/arm64/kernel/ptrace.c           | 27 ++++++++++++++++++++-------
 arch/arm64/kernel/signal.c           | 11 ++---------
 arch/arm64/kernel/syscall.c          |  2 +-
 4 files changed, 24 insertions(+), 17 deletions(-)

--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -101,6 +101,7 @@ void arch_release_task_struct(struct tas
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
 #define _TIF_UPROBE		(1 << TIF_UPROBE)
 #define _TIF_FSCHECK		(1 << TIF_FSCHECK)
+#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
 #define _TIF_32BIT		(1 << TIF_32BIT)
 #define _TIF_SVE		(1 << TIF_SVE)

--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1647,12 +1647,23 @@ static void tracehook_report_syscall(str
 	saved_reg = regs->regs[regno];
 	regs->regs[regno] = dir;
 
-	if (dir == PTRACE_SYSCALL_EXIT)
+	if (dir == PTRACE_SYSCALL_ENTER) {
+		if (tracehook_report_syscall_entry(regs))
+			forget_syscall(regs);
+		regs->regs[regno] = saved_reg;
+	} else if (!test_thread_flag(TIF_SINGLESTEP)) {
 		tracehook_report_syscall_exit(regs, 0);
-	else if (tracehook_report_syscall_entry(regs))
-		forget_syscall(regs);
-
-	regs->regs[regno] = saved_reg;
+		regs->regs[regno] = saved_reg;
+	} else {
+		regs->regs[regno] = saved_reg;
+
+		/*
+		 * Signal a pseudo-step exception since we are stepping but
+		 * tracer modifications to the registers may have rewound the
+		 * state machine.
+		 */
+		tracehook_report_syscall_exit(regs, 1);
+	}
 }
 
 int syscall_trace_enter(struct pt_regs *regs)
@@ -1675,12 +1686,14 @@ int syscall_trace_enter(struct pt_regs *
 
 void syscall_trace_exit(struct pt_regs *regs)
 {
+	unsigned long flags = READ_ONCE(current_thread_info()->flags);
+
 	audit_syscall_exit(regs);
 
-	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+	if (flags & _TIF_SYSCALL_TRACEPOINT)
 		trace_sys_exit(regs, regs_return_value(regs));
 
-	if (test_thread_flag(TIF_SYSCALL_TRACE))
+	if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
 		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
 
 	rseq_syscall(regs);
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -798,7 +798,6 @@ static void setup_restart_syscall(struct
  */
 static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 {
-	struct task_struct *tsk = current;
 	sigset_t *oldset = sigmask_to_save();
 	int usig = ksig->sig;
 	int ret;
@@ -822,14 +821,8 @@ static void handle_signal(struct ksignal
 	 */
 	ret |= !valid_user_regs(&regs->user_regs, current);
 
-	/*
-	 * Fast forward the stepping logic so we step into the signal
-	 * handler.
-	 */
-	if (!ret)
-		user_fastforward_single_step(tsk);
-
-	signal_setup_done(ret, ksig, 0);
+	/* Step into the signal handler if we are stepping */
+	signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
 }
 
 /*
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -121,7 +121,7 @@ static void el0_svc_common(struct pt_reg
 	if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
 		local_daif_mask();
 		flags = current_thread_info()->flags;
-		if (!has_syscall_work(flags)) {
+		if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP)) {
 			/*
 			 * We're off to userspace, where interrupts are
 			 * always enabled after we restore the flags from
