Subject: [PATCH v4 13/19] mm: memcg/slab: simplify memcg cache creation
Because the number of non-root kmem_caches no longer depends on the
number of memory cgroups and is generally not very big, there is no
longer a need for a dedicated workqueue.

Also, since memcg_create_kmem_cache() no longer needs any argument
besides the root kmem_cache, the work structure can be embedded into
the kmem_cache itself, avoiding a dynamic allocation of a work
structure on every creation request.
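
As a minimal sketch of the embedded-work pattern (the struct and
function names here are illustrative, not the patch code; see the
diff below for the real thing):

/* The work item lives inside the object it operates on. */
struct my_cache {
        struct work_struct work;
        /* ... other fields ... */
};

static void my_create_func(struct work_struct *work)
{
        /* Recover the container; nothing to kfree() afterwards. */
        struct my_cache *s = container_of(work, struct my_cache, work);

        /* ... do the actual creation for s ... */
}

static void my_cache_init(struct my_cache *s)
{
        /* Initialized once, together with the containing object. */
        INIT_WORK(&s->work, my_create_func);
}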

This also simplifies the synchronization: each root kmem_cache owns
exactly one work item, so there can be no concurrent attempts to
create a non-root kmem_cache for a root kmem_cache: the second and
all following attempts to queue the same work item will simply fail.
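
This serialization comes from queue_work() itself, which returns
false and does nothing when the given work item is already pending.
A sketch of the resulting allocation-path behavior (mirroring the
memcg_kmem_get_cache() change in the diff below):

struct kmem_cache *memcg_cachep;

memcg_cachep = READ_ONCE(cachep->memcg_params.memcg_cache);
if (unlikely(!memcg_cachep)) {
        /*
         * Only the first caller actually queues the work; concurrent
         * callers find it already pending, queue_work() returns
         * false, and everybody falls back to the root cache until
         * the memcg cache has been created.
         */
        queue_work(system_wq, &cachep->memcg_params.work);
        return cachep;
}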

On the kmem_cache destruction path there is no longer a need to call
the expensive flush_workqueue() and wait for all pending work items
to finish. Instead, cancel_work_sync() can be used to cancel/wait for
just the one work item.
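
For comparison, the two shutdown strategies side by side (both taken
from the diff below; flush_workqueue() drains every pending item on
the queue, while cancel_work_sync() deals with one specific item):

/* Before: wait for everything queued on the dedicated workqueue. */
if (likely(memcg_kmem_cache_wq))
        flush_workqueue(memcg_kmem_cache_wq);

/* After: cancel, or wait out, this cache's single work item. */
cancel_work_sync(&s->memcg_params.work);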

Signed-off-by: Roman Gushchin <guro@fb.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
---
 include/linux/memcontrol.h |  1 -
 mm/memcontrol.c            | 48 +-------------------------------------
 mm/slab.h                  |  2 ++
 mm/slab_common.c           | 22 +++++++++--------
 4 files changed, 15 insertions(+), 58 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 4f5cf4d621f6..18ef6c85de5b 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1420,7 +1420,6 @@ int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
 
 extern struct static_key_false memcg_kmem_enabled_key;
-extern struct workqueue_struct *memcg_kmem_cache_wq;
 
 extern int memcg_nr_cache_ids;
 void memcg_get_cache_ids(void);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7674c8a6c84d..883969cdfb5f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -399,8 +399,6 @@ void memcg_put_cache_ids(void)
  */
 DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
 EXPORT_SYMBOL(memcg_kmem_enabled_key);
-
-struct workqueue_struct *memcg_kmem_cache_wq;
 #endif
 
 static int memcg_shrinker_map_size;
@@ -2852,39 +2850,6 @@ static void memcg_free_cache_id(int id)
         ida_simple_remove(&memcg_cache_ida, id);
 }
 
-struct memcg_kmem_cache_create_work {
-        struct kmem_cache *cachep;
-        struct work_struct work;
-};
-
-static void memcg_kmem_cache_create_func(struct work_struct *w)
-{
-        struct memcg_kmem_cache_create_work *cw =
-                container_of(w, struct memcg_kmem_cache_create_work, work);
-        struct kmem_cache *cachep = cw->cachep;
-
-        memcg_create_kmem_cache(cachep);
-
-        kfree(cw);
-}
-
-/*
- * Enqueue the creation of a per-memcg kmem_cache.
- */
-static void memcg_schedule_kmem_cache_create(struct kmem_cache *cachep)
-{
-        struct memcg_kmem_cache_create_work *cw;
-
-        cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
-        if (!cw)
-                return;
-
-        cw->cachep = cachep;
-        INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
-
-        queue_work(memcg_kmem_cache_wq, &cw->work);
-}
-
 /**
  * memcg_kmem_get_cache: select memcg or root cache for allocation
  * @cachep: the original global kmem cache
@@ -2901,7 +2866,7 @@ struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
 
         memcg_cachep = READ_ONCE(cachep->memcg_params.memcg_cache);
         if (unlikely(!memcg_cachep)) {
-                memcg_schedule_kmem_cache_create(cachep);
+                queue_work(system_wq, &cachep->memcg_params.work);
                 return cachep;
         }
 
@@ -7000,17 +6965,6 @@ static int __init mem_cgroup_init(void)
 {
         int cpu, node;
 
-#ifdef CONFIG_MEMCG_KMEM
-        /*
-         * Kmem cache creation is mostly done with the slab_mutex held,
-         * so use a workqueue with limited concurrency to avoid stalling
-         * all worker threads in case lots of cgroups are created and
-         * destroyed simultaneously.
-         */
-        memcg_kmem_cache_wq = alloc_workqueue("memcg_kmem_cache", 0, 1);
-        BUG_ON(!memcg_kmem_cache_wq);
-#endif
-
         cpuhp_setup_state_nocalls(CPUHP_MM_MEMCG_DEAD, "mm/memctrl:dead", NULL,
                                   memcg_hotplug_cpu_dead);
 
diff --git a/mm/slab.h b/mm/slab.h
index 31a54e22b3fb..a730cbe1db66 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -45,12 +45,14 @@ struct kmem_cache {
  * @memcg_cache: pointer to memcg kmem cache, used by all non-root memory
  *               cgroups.
  * @root_caches_node: list node for slab_root_caches list.
+ * @work: work struct used to create the non-root cache.
  */
 struct memcg_cache_params {
         struct kmem_cache *root_cache;
 
         struct kmem_cache *memcg_cache;
         struct list_head __root_caches_node;
+        struct work_struct work;
 };
 #endif /* CONFIG_SLOB */
diff --git a/mm/slab_common.c b/mm/slab_common.c
index e9deaafddbb6..10aa2acb84ca 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -132,10 +132,18 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
 
 LIST_HEAD(slab_root_caches);
 
+static void memcg_kmem_cache_create_func(struct work_struct *work)
+{
+        struct kmem_cache *cachep = container_of(work, struct kmem_cache,
+                                                 memcg_params.work);
+        memcg_create_kmem_cache(cachep);
+}
+
 void slab_init_memcg_params(struct kmem_cache *s)
 {
         s->memcg_params.root_cache = NULL;
         s->memcg_params.memcg_cache = NULL;
+        INIT_WORK(&s->memcg_params.work, memcg_kmem_cache_create_func);
 }
 
 static void init_memcg_params(struct kmem_cache *s,
@@ -584,15 +592,9 @@ static int shutdown_memcg_caches(struct kmem_cache *s)
         return 0;
 }
 
-static void flush_memcg_workqueue(struct kmem_cache *s)
+static void cancel_memcg_cache_creation(struct kmem_cache *s)
 {
-        /*
-         * SLAB and SLUB create memcg kmem_caches through workqueue and SLUB
-         * deactivates the memcg kmem_caches through workqueue. Make sure all
-         * previous workitems on workqueue are processed.
-         */
-        if (likely(memcg_kmem_cache_wq))
-                flush_workqueue(memcg_kmem_cache_wq);
+        cancel_work_sync(&s->memcg_params.work);
 }
 #else
 static inline int shutdown_memcg_caches(struct kmem_cache *s)
@@ -600,7 +602,7 @@ static inline int shutdown_memcg_caches(struct kmem_cache *s)
         return 0;
 }
 
-static inline void flush_memcg_workqueue(struct kmem_cache *s)
+static inline void cancel_memcg_cache_creation(struct kmem_cache *s)
 {
 }
 #endif /* CONFIG_MEMCG_KMEM */
@@ -619,7 +621,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
         if (unlikely(!s))
                 return;
 
-        flush_memcg_workqueue(s);
+        cancel_memcg_cache_creation(s);
 
         get_online_cpus();
         get_online_mems();
-- 
2.25.4