From: Vlastimil Babka <vbabka@suse.cz>
Subject: [PATCH v3 13/35] mm, slub: do initial checks in ___slab_alloc() with irqs enabled
Date: 2021-07-29
As another step of shortening irq disabled sections in ___slab_alloc(), delay
disabling irqs until we pass the initial checks of whether there is a cached
percpu slab and whether it's suitable for our allocation.

Now we have to recheck c->page after actually disabling irqs, as an allocation
in an irq handler might have replaced it.
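In other words, the slow path now reads c->page optimistically, does the cheap
checks with irqs still enabled, and only then disables irqs and verifies that
c->page has not changed underneath it. A rough standalone userspace sketch of
that retry pattern (the fake_* helpers and struct fake_cpu_slab below are made
up for illustration; this is not the mm/slub.c code):

/*
 * Sketch of the "optimistic read, then recheck with irqs disabled"
 * pattern.  Interrupt disabling is only simulated here.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_cpu_slab {
	void *page;			/* stands in for kmem_cache_cpu::page */
};

static int the_page;			/* stands in for a real slab page */
static struct fake_cpu_slab cpu_slab = { .page = &the_page };

static void fake_irq_disable(void) { /* local_irq_save() in the kernel */ }
static void fake_irq_enable(void)  { /* local_irq_restore() in the kernel */ }

/* cheap suitability check, done while "irqs" are still enabled */
static bool page_is_suitable(const void *page)
{
	return page != NULL;
}

static void *alloc_slowpath(void)
{
	void *page;

reread_page:
	page = cpu_slab.page;		/* optimistic read, irqs enabled */

	if (!page_is_suitable(page))
		return NULL;		/* would allocate a new slab here */

	fake_irq_disable();
	if (page != cpu_slab.page) {
		/*
		 * In the kernel, an allocation from an irq handler may
		 * have replaced the cached page since the read above,
		 * so re-enable irqs and start over.
		 */
		fake_irq_enable();
		goto reread_page;
	}

	/* 'page' is now stable and can be used with irqs disabled */
	fake_irq_enable();
	return page;
}

int main(void)
{
	printf("got page %p\n", alloc_slowpath());
	return 0;
}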

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
---
 mm/slub.c | 41 ++++++++++++++++++++++++++++++++---------
 1 file changed, 32 insertions(+), 9 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 71a5617b839a..dd01af81dd77 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2665,8 +2665,9 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 
 	stat(s, ALLOC_SLOWPATH);
 
-	local_irq_save(flags);
-	page = c->page;
+reread_page:
+
+	page = READ_ONCE(c->page);
 	if (!page) {
 		/*
 		 * if the node is not online or has no normal memory, just
@@ -2675,6 +2676,11 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		if (unlikely(node != NUMA_NO_NODE &&
			     !node_isset(node, slab_nodes)))
 			node = NUMA_NO_NODE;
+		local_irq_save(flags);
+		if (unlikely(c->page)) {
+			local_irq_restore(flags);
+			goto reread_page;
+		}
 		goto new_slab;
 	}
 redo:
@@ -2689,8 +2695,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 			goto redo;
 		} else {
 			stat(s, ALLOC_NODE_MISMATCH);
-			deactivate_slab(s, page, c->freelist, c);
-			goto new_slab;
+			goto deactivate_slab;
 		}
 	}
 
@@ -2699,12 +2704,15 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	 * PFMEMALLOC but right now, we are losing the pfmemalloc
 	 * information when the page leaves the per-cpu allocator
 	 */
-	if (unlikely(!pfmemalloc_match(page, gfpflags))) {
-		deactivate_slab(s, page, c->freelist, c);
-		goto new_slab;
-	}
+	if (unlikely(!pfmemalloc_match(page, gfpflags)))
+		goto deactivate_slab;
 
-	/* must check again c->freelist in case of cpu migration or IRQ */
+	/* must check again c->page in case IRQ handler changed it */
+	local_irq_save(flags);
+	if (unlikely(page != c->page)) {
+		local_irq_restore(flags);
+		goto reread_page;
+	}
 	freelist = c->freelist;
 	if (freelist)
 		goto load_freelist;
@@ -2720,6 +2728,9 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	stat(s, ALLOC_REFILL);
 
 load_freelist:
+
+	lockdep_assert_irqs_disabled();
+
 	/*
 	 * freelist is pointing to the list of objects to be used.
 	 * page is pointing to the page from which the objects are obtained.
@@ -2731,11 +2742,23 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	local_irq_restore(flags);
 	return freelist;
 
+deactivate_slab:
+
+	local_irq_save(flags);
+	if (page != c->page) {
+		local_irq_restore(flags);
+		goto reread_page;
+	}
+	deactivate_slab(s, page, c->freelist, c);
+
 new_slab:
 
+	lockdep_assert_irqs_disabled();
+
 	if (slub_percpu_partial(c)) {
 		page = c->page = slub_percpu_partial(c);
 		slub_set_percpu_partial(c, page);
+		local_irq_restore(flags);
 		stat(s, CPU_PARTIAL_ALLOC);
 		goto redo;
 	}
-- 
2.32.0