 
Subject: [PATCH v3 29/35] mm: slub: Move flush_cpu_slab() invocations __free_slab() invocations out of IRQ context
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

flush_all() flushes a specific SLAB cache on each CPU (where the cache
is present). The deactivate_slab()/__free_slab() invocation happens
within an IPI handler and is problematic for PREEMPT_RT, where such
work cannot safely run with interrupts disabled.

The flush operation is neither frequent nor a hot path, so the per-CPU
flush can be moved into a workqueue and run in preemptible context
instead.
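As a sketch of the general pattern only (the names below are
hypothetical and not part of this patch or of mm/slub.c): instead of
invoking the flush callback on every CPU from an IPI, one work item per
CPU is queued and then waited for with flush_work(), so the callback
runs in process context where it may sleep:

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/workqueue.h>

/* Illustrative only; see the real implementation in the diff below. */
struct my_flush_work {
	struct work_struct work;
	void *data;			/* e.g. the cache being flushed */
};

static DEFINE_PER_CPU(struct my_flush_work, my_flush);

/* Runs on the target CPU in process context; sleeping is allowed. */
static void my_flush_fn(struct work_struct *w)
{
	struct my_flush_work *fw = container_of(w, struct my_flush_work, work);

	(void)fw->data;			/* flush the per-CPU state here */
}

static void my_flush_all(void *data)
{
	unsigned int cpu;

	cpus_read_lock();		/* keep the set of online CPUs stable */

	for_each_online_cpu(cpu) {
		struct my_flush_work *fw = &per_cpu(my_flush, cpu);

		INIT_WORK(&fw->work, my_flush_fn);
		fw->data = data;
		schedule_work_on(cpu, &fw->work);
	}

	/* Second pass: wait for every queued work item to finish. */
	for_each_online_cpu(cpu)
		flush_work(&per_cpu(my_flush, cpu).work);

	cpus_read_unlock();
}

The patch below additionally serializes callers with a mutex, because
the per-CPU work items are shared between concurrent flush_all() calls,
and it skips CPUs that have nothing to flush.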

[vbabka@suse.cz: adapt to new SLUB changes]
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
 mm/slub.c | 56 +++++++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 48 insertions(+), 8 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 9fbc5396f3e1..dbb74dbe1c1e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2513,33 +2513,73 @@ static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
 	unfreeze_partials_cpu(s, c);
 }
 
+struct slub_flush_work {
+	struct work_struct work;
+	struct kmem_cache *s;
+	bool skip;
+};
+
 /*
  * Flush cpu slab.
  *
- * Called from IPI handler with interrupts disabled.
+ * Called from CPU work handler with migration disabled.
  */
-static void flush_cpu_slab(void *d)
+static void flush_cpu_slab(struct work_struct *w)
 {
-	struct kmem_cache *s = d;
-	struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
+	struct kmem_cache *s;
+	struct kmem_cache_cpu *c;
+	struct slub_flush_work *sfw;
+
+	sfw = container_of(w, struct slub_flush_work, work);
+
+	s = sfw->s;
+	c = this_cpu_ptr(s->cpu_slab);
 
 	if (c->page)
-		flush_slab(s, c, false);
+		flush_slab(s, c, true);
 
 	unfreeze_partials(s);
 }
 
-static bool has_cpu_slab(int cpu, void *info)
+static bool has_cpu_slab(int cpu, struct kmem_cache *s)
 {
-	struct kmem_cache *s = info;
 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
 
 	return c->page || slub_percpu_partial(c);
 }
 
+static DEFINE_MUTEX(flush_lock);
+static DEFINE_PER_CPU(struct slub_flush_work, slub_flush);
+
 static void flush_all(struct kmem_cache *s)
 {
-	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1);
+	struct slub_flush_work *sfw;
+	unsigned int cpu;
+
+	mutex_lock(&flush_lock);
+	cpus_read_lock();
+
+	for_each_online_cpu(cpu) {
+		sfw = &per_cpu(slub_flush, cpu);
+		if (!has_cpu_slab(cpu, s)) {
+			sfw->skip = true;
+			continue;
+		}
+		INIT_WORK(&sfw->work, flush_cpu_slab);
+		sfw->skip = false;
+		sfw->s = s;
+		schedule_work_on(cpu, &sfw->work);
+	}
+
+	for_each_online_cpu(cpu) {
+		sfw = &per_cpu(slub_flush, cpu);
+		if (sfw->skip)
+			continue;
+		flush_work(&sfw->work);
+	}
+
+	cpus_read_unlock();
+	mutex_unlock(&flush_lock);
 }
 
 /*
-- 
2.32.0