Subject: Re: [patch part-II V2 01/13] context_tracking: Ensure that the critical path cannot be instrumented
From: Alexandre Chartre <alexandre.chartre@oracle.com>
Date: 2020-03-10

On 3/8/20 11:24 PM, Thomas Gleixner wrote:
> Context tracking lacks a few protection mechanisms against instrumentation:
>
> - While the core functions are marked NOKPROBE, they lack protection
> against function tracing, which is required as the function entry/exit
> points can be utilized by BPF.
>
> - Static functions invoked from the protected functions need to be marked
> as well, as they can be instrumented otherwise.
>
> - Using plain inline allows the compiler to emit traceable and probe-able
> functions.
>
> Fix this by adding the missing notrace/NOKPROBE annotations and converting
> the plain inlines to __always_inline.
>
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> ---
> include/linux/context_tracking.h | 14 +++++++-------
> include/linux/context_tracking_state.h | 6 +++---
> kernel/context_tracking.c | 9 +++++----
> 3 files changed, 15 insertions(+), 14 deletions(-)

Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>

alex.
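
For reference, a minimal sketch of the annotation pattern the changelog
describes; the helpers below (example_*) are hypothetical and only
illustrate how the pieces fit together, they are not part of the patch:

#include <linux/compiler.h>
#include <linux/kprobes.h>

/*
 * __always_inline forces the helper to be inlined, so no out-of-line
 * symbol exists for ftrace or kprobes to attach to.
 */
static __always_inline bool example_in_critical_path(void)
{
        return true;
}

/*
 * For a helper that stays out of line, notrace keeps it away from the
 * function tracer and NOKPROBE_SYMBOL() keeps kprobes (and BPF programs
 * attached through them) off its entry point.
 */
static notrace void example_critical_helper(void)
{
        if (example_in_critical_path())
                ;       /* non-instrumentable work goes here */
}
NOKPROBE_SYMBOL(example_critical_helper);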

> --- a/include/linux/context_tracking.h
> +++ b/include/linux/context_tracking.h
> @@ -20,32 +20,32 @@ extern void context_tracking_exit(enum c
> extern void context_tracking_user_enter(void);
> extern void context_tracking_user_exit(void);
>
> -static inline void user_enter(void)
> +static __always_inline void user_enter(void)
> {
> if (context_tracking_enabled())
> context_tracking_enter(CONTEXT_USER);
>
> }
> -static inline void user_exit(void)
> +static __always_inline void user_exit(void)
> {
> if (context_tracking_enabled())
> context_tracking_exit(CONTEXT_USER);
> }
>
> /* Called with interrupts disabled. */
> -static inline void user_enter_irqoff(void)
> +static __always_inline void user_enter_irqoff(void)
> {
> if (context_tracking_enabled())
> __context_tracking_enter(CONTEXT_USER);
>
> }
> -static inline void user_exit_irqoff(void)
> +static __always_inline void user_exit_irqoff(void)
> {
> if (context_tracking_enabled())
> __context_tracking_exit(CONTEXT_USER);
> }
>
> -static inline enum ctx_state exception_enter(void)
> +static __always_inline enum ctx_state exception_enter(void)
> {
> enum ctx_state prev_ctx;
>
> @@ -59,7 +59,7 @@ static inline enum ctx_state exception_e
> return prev_ctx;
> }
>
> -static inline void exception_exit(enum ctx_state prev_ctx)
> +static __always_inline void exception_exit(enum ctx_state prev_ctx)
> {
> if (context_tracking_enabled()) {
> if (prev_ctx != CONTEXT_KERNEL)
> @@ -75,7 +75,7 @@ static inline void exception_exit(enum c
> * is enabled. If context tracking is disabled, returns
> * CONTEXT_DISABLED. This should be used primarily for debugging.
> */
> -static inline enum ctx_state ct_state(void)
> +static __always_inline enum ctx_state ct_state(void)
> {
> return context_tracking_enabled() ?
> this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;
> --- a/include/linux/context_tracking_state.h
> +++ b/include/linux/context_tracking_state.h
> @@ -26,12 +26,12 @@ struct context_tracking {
> extern struct static_key_false context_tracking_key;
> DECLARE_PER_CPU(struct context_tracking, context_tracking);
>
> -static inline bool context_tracking_enabled(void)
> +static __always_inline bool context_tracking_enabled(void)
> {
> return static_branch_unlikely(&context_tracking_key);
> }
>
> -static inline bool context_tracking_enabled_cpu(int cpu)
> +static __always_inline bool context_tracking_enabled_cpu(int cpu)
> {
> return context_tracking_enabled() && per_cpu(context_tracking.active, cpu);
> }
> @@ -41,7 +41,7 @@ static inline bool context_tracking_enab
> return context_tracking_enabled() && __this_cpu_read(context_tracking.active);
> }
>
> -static inline bool context_tracking_in_user(void)
> +static __always_inline bool context_tracking_in_user(void)
> {
> return __this_cpu_read(context_tracking.state) == CONTEXT_USER;
> }
> --- a/kernel/context_tracking.c
> +++ b/kernel/context_tracking.c
> @@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(context_tracking_key);
> DEFINE_PER_CPU(struct context_tracking, context_tracking);
> EXPORT_SYMBOL_GPL(context_tracking);
>
> -static bool context_tracking_recursion_enter(void)
> +static notrace bool context_tracking_recursion_enter(void)
> {
> int recursion;
>
> @@ -44,8 +44,9 @@ static bool context_tracking_recursion_e
>
> return false;
> }
> +NOKPROBE_SYMBOL(context_tracking_recursion_enter);
>
> -static void context_tracking_recursion_exit(void)
> +static __always_inline void context_tracking_recursion_exit(void)
> {
> __this_cpu_dec(context_tracking.recursion);
> }
> @@ -59,7 +60,7 @@ static void context_tracking_recursion_e
> * instructions to execute won't use any RCU read side critical section
> * because this function sets RCU in extended quiescent state.
> */
> -void __context_tracking_enter(enum ctx_state state)
> +void notrace __context_tracking_enter(enum ctx_state state)
> {
> /* Kernel threads aren't supposed to go to userspace */
> WARN_ON_ONCE(!current->mm);
> @@ -142,7 +143,7 @@ NOKPROBE_SYMBOL(context_tracking_user_en
> * This call supports re-entrancy. This way it can be called from any exception
> * handler without needing to know if we came from userspace or not.
> */
> -void __context_tracking_exit(enum ctx_state state)
> +void notrace __context_tracking_exit(enum ctx_state state)
> {
> if (!context_tracking_recursion_enter())
> return;
>
