From: Peter Zijlstra (Intel) <peterz@infradead.org>
Date: Fri, 10 Jul 2020
Subject: [PATCH v6 08/17] static_call: Avoid kprobes on inline static_call()s
    Similar to how we disallow kprobes on any other dynamic text
    (ftrace/jump_label), also disallow kprobes on inline static_call()s.

    Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
    ---
    arch/x86/kernel/kprobes/opt.c |  4 +-
    include/linux/static_call.h   | 11 +++++++
    kernel/kprobes.c              |  2 +
    kernel/static_call.c          | 64 ++++++++++++++++++++++++++++++++++++++++++
    4 files changed, 80 insertions(+), 1 deletion(-)
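
    For context (not part of the patch): with HAVE_STATIC_CALL_INLINE, every
    static_call() invocation compiles to a direct call instruction that is
    patched in place, and its address is recorded in the .static_call_sites
    section (the __start/__stop_static_call_sites range used below). Those
    call instructions are the text that must now be off limits to kprobes.
    A minimal sketch of such a user, with made-up names (fire/my_handler),
    using the macros this series provides:

    #include <linux/static_call.h>

    static int my_handler(int x)
    {
            return x + 1;
    }

    DEFINE_STATIC_CALL(fire, my_handler);

    int do_fire(int x)
    {
            /* Inline call site; its CALL_INSN_SIZE bytes are now reserved text. */
            return static_call(fire)(x);
    }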

    --- a/arch/x86/kernel/kprobes/opt.c
    +++ b/arch/x86/kernel/kprobes/opt.c
    @@ -17,6 +17,7 @@
    #include <linux/ftrace.h>
    #include <linux/frame.h>
    #include <linux/pgtable.h>
    +#include <linux/static_call.h>

    #include <asm/text-patching.h>
    #include <asm/cacheflush.h>
    @@ -209,7 +210,8 @@ static int copy_optimized_instructions(u
    /* Check whether the address range is reserved */
    if (ftrace_text_reserved(src, src + len - 1) ||
    alternatives_text_reserved(src, src + len - 1) ||
    - jump_label_text_reserved(src, src + len - 1))
    + jump_label_text_reserved(src, src + len - 1) ||
    + static_call_text_reserved(src, src + len - 1))
    return -EBUSY;

    return len;
    --- a/include/linux/static_call.h
    +++ b/include/linux/static_call.h
    @@ -110,6 +110,7 @@ struct static_call_key {

    extern void __static_call_update(struct static_call_key *key, void *tramp, void *func);
    extern int static_call_mod_init(struct module *mod);
    +extern int static_call_text_reserved(void *start, void *end);

    #define DEFINE_STATIC_CALL(name, _func) \
    DECLARE_STATIC_CALL(name, _func); \
    @@ -153,6 +154,11 @@ void __static_call_update(struct static_
    cpus_read_unlock();
    }

    +static inline int static_call_text_reserved(void *start, void *end)
    +{
    + return 0;
    +}
    +
    #define EXPORT_STATIC_CALL(name) \
    EXPORT_SYMBOL(STATIC_CALL_KEY(name)); \
    EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
    @@ -182,6 +188,11 @@ void __static_call_update(struct static_
    WRITE_ONCE(key->func, func);
    }

    +static inline int static_call_text_reserved(void *start, void *end)
    +{
    + return 0;
    +}
    +
    #define EXPORT_STATIC_CALL(name) EXPORT_SYMBOL(STATIC_CALL_KEY(name))
    #define EXPORT_STATIC_CALL_GPL(name) EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name))

    --- a/kernel/kprobes.c
    +++ b/kernel/kprobes.c
    @@ -35,6 +35,7 @@
    #include <linux/ftrace.h>
    #include <linux/cpu.h>
    #include <linux/jump_label.h>
    +#include <linux/static_call.h>

    #include <asm/sections.h>
    #include <asm/cacheflush.h>
    @@ -1591,6 +1592,7 @@ static int check_kprobe_address_safe(str
    if (!kernel_text_address((unsigned long) p->addr) ||
    within_kprobe_blacklist((unsigned long) p->addr) ||
    jump_label_text_reserved(p->addr, p->addr) ||
    + static_call_text_reserved(p->addr, p->addr) ||
    find_bug((unsigned long)p->addr)) {
    ret = -EINVAL;
    goto out;
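
    Illustration only (not part of the patch): with the hunk above, a kprobe
    whose resolved address lands inside an inline static_call site is rejected
    with -EINVAL by check_kprobe_address_safe(), just like a jump_label site.
    A hypothetical module sketch; the symbol name and offset are made up and
    assumed to resolve to the first byte of a static_call call instruction:

    #include <linux/module.h>
    #include <linux/kprobes.h>

    static struct kprobe kp = {
            /* hypothetical: assume do_fire+0x10 is the static_call instruction */
            .symbol_name    = "do_fire",
            .offset         = 0x10,
    };

    static int __init probe_init(void)
    {
            int ret = register_kprobe(&kp);

            /* expected: -EINVAL, because the address is now reserved text */
            pr_info("register_kprobe() = %d\n", ret);
            return ret;
    }

    static void __exit probe_exit(void)
    {
            unregister_kprobe(&kp);
    }

    module_init(probe_init);
    module_exit(probe_exit);
    MODULE_LICENSE("GPL");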
    --- a/kernel/static_call.c
    +++ b/kernel/static_call.c
    @@ -204,8 +204,58 @@ static int __static_call_init(struct mod
    return 0;
    }

    +static int addr_conflict(struct static_call_site *site, void *start, void *end)
    +{
    + unsigned long addr = (unsigned long)static_call_addr(site);
    +
    + if (addr <= (unsigned long)end &&
    + addr + CALL_INSN_SIZE > (unsigned long)start)
    + return 1;
    +
    + return 0;
    +}
    +
    +static int __static_call_text_reserved(struct static_call_site *iter_start,
    + struct static_call_site *iter_stop,
    + void *start, void *end)
    +{
    + struct static_call_site *iter = iter_start;
    +
    + while (iter < iter_stop) {
    + if (addr_conflict(iter, start, end))
    + return 1;
    + iter++;
    + }
    +
    + return 0;
    +}
    +
    #ifdef CONFIG_MODULES

    +static int __static_call_mod_text_reserved(void *start, void *end)
    +{
    + struct module *mod;
    + int ret;
    +
    + preempt_disable();
    + mod = __module_text_address((unsigned long)start);
    + WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
    + if (!try_module_get(mod))
    + mod = NULL;
    + preempt_enable();
    +
    + if (!mod)
    + return 0;
    +
    + ret = __static_call_text_reserved(mod->static_call_sites,
    + mod->static_call_sites + mod->num_static_call_sites,
    + start, end);
    +
    + module_put(mod);
    +
    + return ret;
    +}
    +
    static int static_call_add_module(struct module *mod)
    {
    return __static_call_init(mod, mod->static_call_sites,
    @@ -275,6 +325,20 @@ static struct notifier_block static_call

    #endif /* CONFIG_MODULES */

    +int static_call_text_reserved(void *start, void *end)
    +{
    + int ret = __static_call_text_reserved(__start_static_call_sites,
    + __stop_static_call_sites, start, end);
    +
    + if (ret)
    + return ret;
    +
    +#ifdef CONFIG_MODULES
    + ret = __static_call_mod_text_reserved(start, end);
    +#endif
    + return ret;
    +}
    +
    static void __init static_call_init(void)
    {
    int ret;
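
    A note on the reservation test (illustration, not part of the patch):
    addr_conflict() treats each site as the half-open byte range
    [addr, addr + CALL_INSN_SIZE) and the query as the closed range
    [start, end], so they conflict iff addr <= end && addr + CALL_INSN_SIZE > start.
    For kprobes the query is a single byte (start == end == p->addr); for
    optprobes it is src .. src + len - 1. A small user-space sketch of the
    same predicate, with CALL_INSN_SIZE assumed to be 5 (x86 near call):

    #include <stdio.h>

    #define CALL_INSN_SIZE  5UL

    /* Same overlap test as addr_conflict(): the site occupies
     * [site, site + CALL_INSN_SIZE), the query is the closed range [start, end]. */
    static int conflicts(unsigned long site, unsigned long start, unsigned long end)
    {
            return site <= end && site + CALL_INSN_SIZE > start;
    }

    int main(void)
    {
            /* assume a call instruction occupying bytes 0x1000..0x1004 */
            printf("%d\n", conflicts(0x1000, 0x0ff0, 0x0fff)); /* 0: range ends before the call */
            printf("%d\n", conflicts(0x1000, 0x1004, 0x1004)); /* 1: probes the call's last byte */
            printf("%d\n", conflicts(0x1000, 0x1005, 0x1010)); /* 0: range starts after the call */
            return 0;
    }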
