Subject: [PATCH v4 10/18] KVM: arm64: Stub implementation of pKVM HYP stack unwinder
Add stub implementations of the protected nVHE stack unwinder, for
building. These are implemented later in this series.

Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
---
 arch/arm64/include/asm/stacktrace/nvhe.h | 57 ++++++++++++++++++++++++
 arch/arm64/kvm/hyp/nvhe/stacktrace.c     |  3 +-
 2 files changed, 58 insertions(+), 2 deletions(-)
 create mode 100644 arch/arm64/include/asm/stacktrace/nvhe.h

diff --git a/arch/arm64/include/asm/stacktrace/nvhe.h b/arch/arm64/include/asm/stacktrace/nvhe.h
new file mode 100644
index 000000000000..1eac4e57f2ae
--- /dev/null
+++ b/arch/arm64/include/asm/stacktrace/nvhe.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * KVM nVHE hypervisor stack tracing support.
+ *
+ * The unwinder implementation depends on the nVHE mode:
+ *
+ *   1) pKVM (protected nVHE) mode - the host cannot directly access
+ *      the HYP memory. The stack is unwound in EL2 and dumped to a shared
+ *      buffer where the host can read and print the stacktrace.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+#ifndef __ASM_STACKTRACE_NVHE_H
+#define __ASM_STACKTRACE_NVHE_H
+
+#include <asm/stacktrace/common.h>
+
+static inline bool on_accessible_stack(const struct task_struct *tsk,
+                                       unsigned long sp, unsigned long size,
+                                       struct stack_info *info)
+{
+        return false;
+}
+
+/*
+ * Protected nVHE HYP stack unwinder
+ */
+#ifdef __KVM_NVHE_HYPERVISOR__
+
+#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
+static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
+                                     struct stack_info *info)
+{
+        return false;
+}
+
+static int notrace unwind_next(struct unwind_state *state)
+{
+        return 0;
+}
+NOKPROBE_SYMBOL(unwind_next);
+#else /* !CONFIG_PROTECTED_NVHE_STACKTRACE */
+static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
+                                     struct stack_info *info)
+{
+        return false;
+}
+
+static int notrace unwind_next(struct unwind_state *state)
+{
+        return 0;
+}
+NOKPROBE_SYMBOL(unwind_next);
+#endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */
+
+#endif /* __KVM_NVHE_HYPERVISOR__ */
+#endif /* __ASM_STACKTRACE_NVHE_H */
diff --git a/arch/arm64/kvm/hyp/nvhe/stacktrace.c b/arch/arm64/kvm/hyp/nvhe/stacktrace.c
index 69e65b457f1c..96c8b93320eb 100644
--- a/arch/arm64/kvm/hyp/nvhe/stacktrace.c
+++ b/arch/arm64/kvm/hyp/nvhe/stacktrace.c
@@ -4,8 +4,7 @@
  *
  * Copyright (C) 2022 Google LLC
  */
-#include <asm/memory.h>
-#include <asm/percpu.h>
+#include <asm/stacktrace/nvhe.h>
 
 DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
         __aligned(16);
--
2.37.0.170.g444d1eabd0-goog