From: Suren Baghdasaryan <surenb@google.com>
Date: Tue, 30 Aug 2022
Subject: [RFC PATCH 15/30] lib: introduce slab allocation tagging
Introduce CONFIG_SLAB_ALLOC_TAGGING, which provides helper functions to
easily instrument slab allocators, and add a codetag_ref field to
slabobj_ext to store a pointer to the allocation tag associated with
the code that allocated the slab object.

Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Co-developed-by: Kent Overstreet <kent.overstreet@linux.dev>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
---
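Usage note (not part of the commit message): with the helpers below, an
allocation/free pair can be instrumented roughly as follows. The
*_tagged() names are hypothetical, for illustration only:

static inline void *kmalloc_tagged(size_t size, gfp_t flags)
{
	void *ret = kmalloc(size, flags);

	/* no prior object here, so _old is NULL */
	slab_tag_add(NULL, ret);
	return ret;
}

static inline void kfree_tagged(const void *ptr)
{
	slab_tag_dec(ptr);	/* drop the object's tag accounting */
	kfree(ptr);
}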
 include/linux/memcontrol.h |  5 +++++
 include/linux/slab.h       | 25 +++++++++++++++++++++++++
 include/linux/slab_def.h   |  2 +-
 include/linux/slub_def.h   |  4 ++--
 lib/Kconfig.debug          | 11 +++++++++++
 mm/slab_common.c           | 33 +++++++++++++++++++++++++++++++++
 6 files changed, 77 insertions(+), 3 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 315399f77173..97c0153f0247 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -232,7 +232,12 @@ struct obj_cgroup {
  * if MEMCG_DATA_OBJEXTS is set.
  */
 struct slabobj_ext {
+#ifdef CONFIG_MEMCG_KMEM
 	struct obj_cgroup *objcg;
+#endif
+#ifdef CONFIG_SLAB_ALLOC_TAGGING
+	union codetag_ref ref;
+#endif
 } __aligned(8);
 
 /*
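For orientation (reviewer sketch, not part of the patch): the ext
vector attached to a slab is parallel to its objects, so each object's
codetag_ref lives at the object's index. Distilled, the lookup this
patch adds in slab_common.c is:

/*
 * Sketch only (not in this patch): the per-object lookup that
 * get_slab_tag_ref() below performs once the NULL checks are
 * stripped away. One slabobj_ext entry per object in the slab.
 */
static inline union codetag_ref *obj_tag_ref(struct slab *slab, const void *objp)
{
	struct slabobj_ext *vec = slab_obj_exts(slab);
	unsigned int idx = obj_to_index(slab->slab_cache, slab, objp);

	return &vec[idx].ref;
}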
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 55ae3ea864a4..5a198aa02a08 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -438,6 +438,31 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
 #define kmalloc_index(s) __kmalloc_index(s, true)
 #endif /* !CONFIG_SLOB */
 
+#ifdef CONFIG_SLAB_ALLOC_TAGGING
+
+#include <linux/alloc_tag.h>
+
+union codetag_ref *get_slab_tag_ref(const void *objp);
+
+#define slab_tag_add(_old, _new)					\
+do {									\
+	if (!ZERO_OR_NULL_PTR(_new) && _old != _new)			\
+		alloc_tag_add(get_slab_tag_ref(_new), __ksize(_new));	\
+} while (0)
+
+static inline void slab_tag_dec(const void *ptr)
+{
+	if (!ZERO_OR_NULL_PTR(ptr))
+		alloc_tag_sub(get_slab_tag_ref(ptr), __ksize(ptr));
+}
+
+#else
+
+#define slab_tag_add(_old, _new) do {} while (0)
+static inline void slab_tag_dec(const void *ptr) {}
+
+#endif
+
 void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_slab_alignment __malloc;
 void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
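A note on the _old != _new guard in slab_tag_add() (illustrative
sketch, hypothetical wrapper name): in a krealloc()-style path the old
and new pointers compare equal when the object was resized in place,
and the existing tag must not be counted twice:

/*
 * Hypothetical sketch, not part of this patch. When krealloc()
 * resizes in place, ret == p and slab_tag_add() is a no-op, so the
 * object is not double-counted; on a moved or fresh allocation the
 * new object is tagged here (the old object's tag would be dropped
 * via slab_tag_dec() on its free path).
 */
static inline void *krealloc_tagged(const void *p, size_t new_size, gfp_t flags)
{
	void *ret = krealloc(p, new_size, flags);

	slab_tag_add(p, ret);
	return ret;
}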
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index e24c9aff6fed..25feb5f7dc32 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -106,7 +106,7 @@ static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *sla
  * reciprocal_divide(offset, cache->reciprocal_buffer_size)
  */
 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
-					const struct slab *slab, void *obj)
+					const struct slab *slab, const void *obj)
 {
 	u32 offset = (obj - slab->s_mem);
 	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index f9c68a9dac04..940c146768d4 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -170,14 +170,14 @@ static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *sla
 
 /* Determine object index from a given position */
 static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
-					  void *addr, void *obj)
+					  void *addr, const void *obj)
 {
 	return reciprocal_divide(kasan_reset_tag(obj) - addr,
 				 cache->reciprocal_size);
 }
 
 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
-					const struct slab *slab, void *obj)
+					const struct slab *slab, const void *obj)
 {
 	if (is_kfence_address(obj))
 		return 0;
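For intuition, a worked example (mine, not from the patch):
obj_to_index() is a plain division done with a precomputed
multiplicative inverse. With 64-byte objects, an object 192 bytes into
the slab is object number 192 / 64 = 3:

#include <linux/reciprocal_div.h>

/*
 * Worked example (not in this patch): reciprocal_divide() returns the
 * same quotient as a hardware divide, here 192 / 64 == 3, using the
 * inverse precomputed by reciprocal_value() -- the same scheme as
 * cache->reciprocal_size in obj_to_index() above.
 */
static unsigned int example_obj_index(void)
{
	struct reciprocal_value r = reciprocal_value(64);

	return reciprocal_divide(192, r);	/* == 3 */
}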
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6686648843b3..08c97a978906 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -989,6 +989,17 @@ config PAGE_ALLOC_TAGGING
 	  initiated at that code location. The mechanism can be used to track
 	  memory leaks with a low performance impact.
 
+config SLAB_ALLOC_TAGGING
+	bool "Enable slab allocation tagging"
+	default n
+	select ALLOC_TAGGING
+	select SLAB_OBJ_EXT
+	help
+	  Instrument slab allocators to track allocation source code and
+	  collect statistics on the number of allocations and their total size
+	  initiated at that code location. The mechanism can be used to track
+	  memory leaks with a low performance impact.
+
 source "lib/Kconfig.kasan"
 source "lib/Kconfig.kfence"
 
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 17996649cfe3..272eda62ecaa 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -202,6 +202,39 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
 	return NULL;
 }
 
+#ifdef CONFIG_SLAB_ALLOC_TAGGING
+
+union codetag_ref *get_slab_tag_ref(const void *objp)
+{
+	struct slabobj_ext *obj_exts;
+	union codetag_ref *res = NULL;
+	struct slab *slab;
+	unsigned int off;
+
+	slab = virt_to_slab(objp);
+	/*
+	 * We could be given a kmalloc_large() object, skip those. They use
+	 * alloc_pages and can be tracked by page allocation tracking.
+	 */
+	if (!slab)
+		goto out;
+
+	obj_exts = slab_obj_exts(slab);
+	if (!obj_exts)
+		goto out;
+
+	if (!slab->slab_cache)
+		goto out;
+
+	off = obj_to_index(slab->slab_cache, slab, objp);
+	res = &obj_exts[off].ref;
+out:
+	return res;
+}
+EXPORT_SYMBOL(get_slab_tag_ref);
+
+#endif /* CONFIG_SLAB_ALLOC_TAGGING */
+
 static struct kmem_cache *create_cache(const char *name,
 				       unsigned int object_size, unsigned int align,
 				       slab_flags_t flags, unsigned int useroffset,
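And a consumer sketch (hypothetical helper, not in this series; the
filename/lineno fields are assumed from the codetag struct introduced
by the earlier code tagging framework patch): any code holding a slab
object pointer can resolve its tag, with the NULL result covering
kmalloc_large() objects and slabs without an ext vector:

/*
 * Hypothetical debugging helper, not part of this series.
 */
static void print_obj_tag(const void *objp)
{
	union codetag_ref *ref = get_slab_tag_ref(objp);

	/* NULL for kmalloc_large() objects or slabs without obj_exts */
	if (ref && ref->ct)
		pr_info("%p allocated at %s:%u\n",
			objp, ref->ct->filename, ref->ct->lineno);
}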
--
2.37.2.672.g94769d06f0-goog