Subject: Re: [PATCH v5 04/17] x86,ftrace: Fix ftrace_regs_caller() unwind

On 4/16/20 1:47 PM, Peter Zijlstra wrote:
> The ftrace_regs_caller() trampoline does something 'funny' when there
> is a direct-caller present. In that case it stuffs the 'direct-caller'
> address on the return stack and then exits the function. This then
> results in 'returning' to the direct-caller with the exact registers
> we came in with -- an indirect tail-call without using a register.
>
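[ Aside for readers: a two-instruction sketch of the trick being described
  here -- not the actual trampoline code; 'target' is a made-up location
  holding the direct-caller address: ]

	pushq	target(%rip)	/* stuff the direct-caller address on the return stack */
	retq			/* pops it: an indirect jump that clobbers no register */
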
> This however (rightfully) confuses objtool, because the function shares
> a few instructions in order to have a single exit path while the stack
> layout differs depending on which path was taken to get there.
>
> This is currently kludged by forcing the stack state to the non-direct
> case, but this generates actively wrong (ORC) unwind information for
> the direct case, leading to potentially broken unwinds.
>
> Fix this issue by fully separating the exit paths. This results in
> having to poke a second RET into the trampoline copy, see
> ftrace_regs_caller_ret.
>
> This brings us to a second objtool problem, in order for it to
> perceive the 'jmp ftrace_epilogue' as a function exit, it needs to be
> recognised as a tail call. In order to make that happen,
> ftrace_epilogue needs to be the start of an STT_FUNC, so re-arrange
> code to make this so.
>
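[ For reference, my rough reading of the SYM_* macros in
  include/linux/linkage.h, simplified and possibly off in detail: ]

	SYM_FUNC_START(ftrace_epilogue)	/* ~ ALIGN; .globl ftrace_epilogue; ftrace_epilogue: */
	...
	SYM_FUNC_END(ftrace_epilogue)	/* ~ .type ftrace_epilogue, @function;
					     .size ftrace_epilogue, .-ftrace_epilogue */

  i.e. the START/END pair is what gives ftrace_epilogue the STT_FUNC type
  that objtool's tail-call detection keys off.
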
> Finally, a third issue is that objtool requires functions to exit with
> the same stack layout they started with, which the direct case obviously
> violates; employ the new HINT_RET_OFFSET hint to tell objtool this is an
> expected exception.
>
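[ For anyone reading along: assuming this matches patch 03/17 of this
  series, the hint is a thin wrapper recording how far the stack may
  deviate from the entry state at a function exit, roughly: ]

	.macro UNWIND_HINT_RET_OFFSET sp_offset=8
		UNWIND_HINT type=UNWIND_HINT_TYPE_RET_OFFSET sp_offset=\sp_offset
	.endm
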
> Together, this results in generating correct ORC unwind information
> for the ftrace_regs_caller() function and its trampoline copies.
>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> ---
> arch/x86/kernel/ftrace.c | 12 ++++++++++--
> arch/x86/kernel/ftrace_64.S | 32 +++++++++++++++-----------------
> 2 files changed, 25 insertions(+), 19 deletions(-)
>
> --- a/arch/x86/kernel/ftrace.c
> +++ b/arch/x86/kernel/ftrace.c
> @@ -282,7 +282,8 @@ static inline void tramp_free(void *tram
>
> /* Defined as markers to the end of the ftrace default trampolines */
> extern void ftrace_regs_caller_end(void);
> -extern void ftrace_epilogue(void);
> +extern void ftrace_regs_caller_ret(void);
> +extern void ftrace_caller_end(void);
> extern void ftrace_caller_op_ptr(void);
> extern void ftrace_regs_caller_op_ptr(void);
>
> @@ -334,7 +335,7 @@ create_trampoline(struct ftrace_ops *ops
> call_offset = (unsigned long)ftrace_regs_call;
> } else {
> start_offset = (unsigned long)ftrace_caller;
> - end_offset = (unsigned long)ftrace_epilogue;
> + end_offset = (unsigned long)ftrace_caller_end;
> op_offset = (unsigned long)ftrace_caller_op_ptr;
> call_offset = (unsigned long)ftrace_call;
> }
> @@ -366,6 +367,13 @@ create_trampoline(struct ftrace_ops *ops
> if (WARN_ON(ret < 0))
> goto fail;
>
> + if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
> + ip = trampoline + (ftrace_regs_caller_ret - ftrace_regs_caller);

It might be safer to use start_offset instead of ftrace_regs_caller here, in
case start_offset is ever changed to something other than ftrace_regs_caller
in the future:

	ip = trampoline + ((unsigned long)ftrace_regs_caller_ret - start_offset);

(note the cast: start_offset is an unsigned long, unlike ftrace_regs_caller
in the original expression, so the subtraction needs it to compile.)

alex.

> + ret = probe_kernel_read(ip, (void *)retq, RET_SIZE);
> + if (WARN_ON(ret < 0))
> + goto fail;
> + }
> +
> /*
> * The address of the ftrace_ops that is used for this trampoline
> * is stored at the end of the trampoline. This will be used to
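
[ For readers of just this hunk: 'retq' above is not the instruction but an
  unsigned long that create_trampoline() already sets a few lines earlier,
  i.e. (existing code):

	/* The trampoline ends with ret(q) */
	retq = (unsigned long)ftrace_stub;

  so the probe_kernel_read() copies the RET_SIZE bytes of that final return
  instruction over the 'jmp ftrace_epilogue' at ftrace_regs_caller_ret in
  the trampoline copy. ]
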
> --- a/arch/x86/kernel/ftrace_64.S
> +++ b/arch/x86/kernel/ftrace_64.S
> @@ -157,8 +157,12 @@ SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBA
> * think twice before adding any new code or changing the
> * layout here.
> */
> -SYM_INNER_LABEL(ftrace_epilogue, SYM_L_GLOBAL)
> +SYM_INNER_LABEL(ftrace_caller_end, SYM_L_GLOBAL)
>
> + jmp ftrace_epilogue
> +SYM_FUNC_END(ftrace_caller);
> +
> +SYM_FUNC_START(ftrace_epilogue)
> #ifdef CONFIG_FUNCTION_GRAPH_TRACER
> SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL)
> jmp ftrace_stub
> @@ -170,14 +174,12 @@ SYM_INNER_LABEL(ftrace_graph_call, SYM_L
> */
> SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK)
> retq
> -SYM_FUNC_END(ftrace_caller)
> +SYM_FUNC_END(ftrace_epilogue)
>
> SYM_FUNC_START(ftrace_regs_caller)
> /* Save the current flags before any operations that can change them */
> pushfq
>
> - UNWIND_HINT_SAVE
> -
> /* added 8 bytes to save flags */
> save_mcount_regs 8
> /* save_mcount_regs fills in first two parameters */
> @@ -233,7 +235,10 @@ SYM_INNER_LABEL(ftrace_regs_call, SYM_L_
> movq ORIG_RAX(%rsp), %rax
> movq %rax, MCOUNT_REG_SIZE-8(%rsp)
>
> - /* If ORIG_RAX is anything but zero, make this a call to that */
> + /*
> + * If ORIG_RAX is anything but zero, make this a call to that.
> + * See arch_ftrace_set_direct_caller().
> + */
> movq ORIG_RAX(%rsp), %rax
> cmpq $0, %rax
> je 1f
> @@ -244,20 +249,14 @@ SYM_INNER_LABEL(ftrace_regs_call, SYM_L_
> movq %rax, MCOUNT_REG_SIZE(%rsp)
>
> restore_mcount_regs 8
> + /* Restore flags */
> + popfq
>
> - jmp 2f
> +SYM_INNER_LABEL(ftrace_regs_caller_ret, SYM_L_GLOBAL);
> + UNWIND_HINT_RET_OFFSET
> + jmp ftrace_epilogue
>
> 1: restore_mcount_regs
> -
> -
> -2:
> - /*
> - * The stack layout is nondetermistic here, depending on which path was
> - * taken. This confuses objtool and ORC, rightfully so. For now,
> - * pretend the stack always looks like the non-direct case.
> - */
> - UNWIND_HINT_RESTORE
> -
> /* Restore flags */
> popfq
>
> @@ -268,7 +267,6 @@ SYM_INNER_LABEL(ftrace_regs_call, SYM_L_
> * to the return.
> */
> SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
> -
> jmp ftrace_epilogue
>
> SYM_FUNC_END(ftrace_regs_caller)
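
[ Summarizing the two exit paths, if I follow the resulting layout
  correctly: the direct case swapped the saved flags with ORIG_RAX earlier,
  so after restore_mcount_regs 8 + popfq the stack differs at the jump to
  ftrace_epilogue:

	1: (normal exit)		ftrace_regs_caller_ret (direct exit)

	[ return address ] <- rsp	[ return address     ]
					[ direct-caller addr ] <- rsp

  The final retq then either returns normally or 'tail-calls' into the
  direct caller, which will later return to the original return address;
  that extra slot is exactly the deviation UNWIND_HINT_RET_OFFSET tells
  objtool to accept. ]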
>
>
