Subject: Re: [PATCH 3/3] LoongArch: Add stacktrace support


On 07/28/2022 10:05 PM, Qing Zhang wrote:
> Use the common arch_stack_walk() infrastructure to avoid duplicating code
> and to avoid taking care of stack storage and filtering ourselves.
> Add sched_ra (the __schedule() return address) and sched_cfa (the
> __schedule() call frame address) to struct thread_struct and store them
> in switch_to().
>
> Now we can print a process's stack via cat /proc/*/stack, and this also
> lets us better support ftrace.
>
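
The stacktrace.c hunk with arch_stack_walk() itself is not quoted here. For a
sleeping task it presumably seeds the unwinder from the saved sched_ra/sched_cfa
values, roughly along the lines of the sketch below; the unwind_*() helpers and
the exact way the state is seeded are assumptions modelled on other ports, not
necessarily what this patch does:

void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	struct pt_regs dummy;
	struct unwind_state state;
	unsigned long addr;

	if (!regs) {
		regs = &dummy;
		if (task == current) {
			/* Current task: start unwinding from this very frame */
			regs->regs[3] = (unsigned long)__builtin_frame_address(0);
			regs->csr_era = (unsigned long)__builtin_return_address(0);
		} else {
			/* Sleeping task: it last stopped in __switch_to() */
			regs->regs[3] = thread_saved_fp(task);	/* $r3 is $sp */
			regs->csr_era = thread_saved_ra(task);
		}
	}

	for (unwind_start(&state, task, regs);
	     !unwind_done(&state); unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr || !consume_entry(cookie, addr))
			break;
	}
}
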
> Signed-off-by: Qing Zhang <zhangqing@loongson.cn>
>
> diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
> index b57daee98b89..1bd656285674 100644
> --- a/arch/loongarch/Kconfig
> +++ b/arch/loongarch/Kconfig
> @@ -38,6 +38,7 @@ config LOONGARCH
> select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION
> select ARCH_MIGHT_HAVE_PC_PARPORT
> select ARCH_MIGHT_HAVE_PC_SERIO
> + select ARCH_STACKWALK
> select ARCH_SPARSEMEM_ENABLE
> select ARCH_SUPPORTS_ACPI
> select ARCH_SUPPORTS_ATOMIC_RMW
> @@ -141,6 +142,10 @@ config LOCKDEP_SUPPORT
> bool
> default y
>
> +config STACKTRACE_SUPPORT
> + bool
> + default y
> +
> # MACH_LOONGSON32 and MACH_LOONGSON64 are deliberately carried over from the
> # MIPS Loongson code, to preserve Loongson-specific code paths in drivers that
> # are shared between architectures, and specifically expecting the symbols.
> diff --git a/arch/loongarch/include/asm/processor.h b/arch/loongarch/include/asm/processor.h
> index 57ec45aa078e..1c4b4308378d 100644
> --- a/arch/loongarch/include/asm/processor.h
> +++ b/arch/loongarch/include/asm/processor.h
> @@ -101,6 +101,10 @@ struct thread_struct {
> unsigned long reg23, reg24, reg25, reg26; /* s0-s3 */
> unsigned long reg27, reg28, reg29, reg30, reg31; /* s4-s8 */
>
> + /* __schedule() return address / call frame address */
> + unsigned long sched_ra;
> + unsigned long sched_cfa;
> +
> /* CSR registers */
> unsigned long csr_prmd;
> unsigned long csr_crmd;
> @@ -129,6 +133,9 @@ struct thread_struct {
> struct loongarch_fpu fpu FPU_ALIGN;
> };
>
> +#define thread_saved_ra(tsk) (tsk->thread.sched_ra)
> +#define thread_saved_fp(tsk) (tsk->thread.sched_cfa)
> +
> #define INIT_THREAD { \
> /* \
> * Main processor registers \
> @@ -145,6 +152,8 @@ struct thread_struct {
> .reg29 = 0, \
> .reg30 = 0, \
> .reg31 = 0, \
> + .sched_ra = 0, \
> + .sched_cfa = 0, \
> .csr_crmd = 0, \
> .csr_prmd = 0, \
> .csr_euen = 0, \
> diff --git a/arch/loongarch/include/asm/switch_to.h b/arch/loongarch/include/asm/switch_to.h
> index 2a8d04375574..836cfcc19498 100644
> --- a/arch/loongarch/include/asm/switch_to.h
> +++ b/arch/loongarch/include/asm/switch_to.h
> @@ -15,12 +15,15 @@ struct task_struct;
> * @prev: The task previously executed.
> * @next: The task to begin executing.
> * @next_ti: task_thread_info(next).
> + * @sched_ra: __schedule return address.
> + * @sched_cfa: __schedule call frame address.
> *
> * This function is used whilst scheduling to save the context of prev & load
> * the context of next. Returns prev.
> */
> extern asmlinkage struct task_struct *__switch_to(struct task_struct *prev,
> - struct task_struct *next, struct thread_info *next_ti);
> + struct task_struct *next, struct thread_info *next_ti,
> + void *sched_ra, void *sched_cfa);
>
> /*
> * For newly created kernel threads switch_to() will return to
> @@ -31,7 +34,8 @@ extern asmlinkage struct task_struct *__switch_to(struct task_struct *prev,
> #define switch_to(prev, next, last) \
> do { \
> lose_fpu_inatomic(1, prev); \
> - (last) = __switch_to(prev, next, task_thread_info(next)); \
> + (last) = __switch_to(prev, next, task_thread_info(next), \
> + __builtin_return_address(0), __builtin_frame_address(0)); \
The trailing '\' line continuations on the new lines should be kept aligned
with the rest of the macro.
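
Something like this, with the continuation backslashes in the same column as
the surrounding lines (alignment only approximate here, since the archive
collapses tabs):

	(last) = __switch_to(prev, next, task_thread_info(next),	\
			     __builtin_return_address(0),		\
			     __builtin_frame_address(0));		\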

Thanks,
Youling
