    Subject: [PATCH v4 01/17] mm/slab: move NUMA-related code to __do_cache_alloc()
    To make slab_alloc_node() independent of the NUMA configuration,
    move the NUMA fallback/alternate allocation code into __do_cache_alloc().

    One functional change is that node availability is no longer checked
    when allocating from the local node.
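
    For context (not part of the patch), here is a minimal userspace model of the
    node-dispatch order that __do_cache_alloc() implements after this change. The
    helper name first_path() and the enum values are illustrative placeholders,
    not kernel APIs; see the diff below for the actual code.

    #include <stdio.h>

    #define NUMA_NO_NODE	(-1)

    enum alloc_path {
    	PATH_ALTERNATE_THEN_LOCAL,	/* mempolicy/cpuset spread, then local cache */
    	PATH_LOCAL_CACHE,		/* use the locally cached objects */
    	PATH_FALLBACK_ALLOC,		/* requested node not bootstrapped yet */
    	PATH_CACHE_ALLOC_NODE,		/* allocate on the requested remote node */
    };

    /*
     * Mirrors the branch order of the new __do_cache_alloc():
     *  - NUMA_NO_NODE: optionally try an alternate node (mempolicy/cpuset),
     *    then the local cache, then ____cache_alloc_node() on the local node.
     *  - nodeid == local node: go straight to the local cache; the get_node()
     *    availability check is intentionally skipped here (the functional
     *    change noted above).
     *  - node not bootstrapped: fallback_alloc().
     *  - otherwise: ____cache_alloc_node() on the requested node.
     */
    static enum alloc_path first_path(int nodeid, int slab_node,
    				  int node_bootstrapped, int want_spread)
    {
    	if (nodeid == NUMA_NO_NODE)
    		return want_spread ? PATH_ALTERNATE_THEN_LOCAL : PATH_LOCAL_CACHE;
    	if (nodeid == slab_node)
    		return PATH_LOCAL_CACHE;
    	if (!node_bootstrapped)
    		return PATH_FALLBACK_ALLOC;
    	return PATH_CACHE_ALLOC_NODE;
    }

    int main(void)
    {
    	/* local node 0, request for an online remote node 1 */
    	printf("%d\n", first_path(1, 0, 1, 0));		/* PATH_CACHE_ALLOC_NODE */
    	/* no node requested, no mempolicy/cpuset spreading */
    	printf("%d\n", first_path(NUMA_NO_NODE, 0, 1, 0));	/* PATH_LOCAL_CACHE */
    	return 0;
    }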

    Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
    Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
    ---
    mm/slab.c | 68 +++++++++++++++++++++++++------------------------------
    1 file changed, 31 insertions(+), 37 deletions(-)

    diff --git a/mm/slab.c b/mm/slab.c
    index 10e96137b44f..1656393f55cb 100644
    --- a/mm/slab.c
    +++ b/mm/slab.c
    @@ -3180,13 +3180,14 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
    return obj ? obj : fallback_alloc(cachep, flags);
    }

    +static void *__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid);
    +
    static __always_inline void *
    slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_size,
    unsigned long caller)
    {
    unsigned long save_flags;
    void *ptr;
    - int slab_node = numa_mem_id();
    struct obj_cgroup *objcg = NULL;
    bool init = false;

    @@ -3200,30 +3201,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_
    goto out_hooks;

    local_irq_save(save_flags);
    -
    - if (nodeid == NUMA_NO_NODE)
    - nodeid = slab_node;
    -
    - if (unlikely(!get_node(cachep, nodeid))) {
    - /* Node not bootstrapped yet */
    - ptr = fallback_alloc(cachep, flags);
    - goto out;
    - }
    -
    - if (nodeid == slab_node) {
    - /*
    - * Use the locally cached objects if possible.
    - * However ____cache_alloc does not allow fallback
    - * to other nodes. It may fail while we still have
    - * objects on other nodes available.
    - */
    - ptr = ____cache_alloc(cachep, flags);
    - if (ptr)
    - goto out;
    - }
    - /* ___cache_alloc_node can fall back to other nodes */
    - ptr = ____cache_alloc_node(cachep, flags, nodeid);
    -out:
    + ptr = __do_cache_alloc(cachep, flags, nodeid);
    local_irq_restore(save_flags);
    ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
    init = slab_want_init_on_alloc(flags, cachep);
    @@ -3234,31 +3212,46 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_
    }

    static __always_inline void *
    -__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
    +__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid)
    {
    - void *objp;
    + void *objp = NULL;
    + int slab_node = numa_mem_id();

    - if (current->mempolicy || cpuset_do_slab_mem_spread()) {
    - objp = alternate_node_alloc(cache, flags);
    - if (objp)
    - goto out;
    + if (nodeid == NUMA_NO_NODE) {
    + if (current->mempolicy || cpuset_do_slab_mem_spread()) {
    + objp = alternate_node_alloc(cachep, flags);
    + if (objp)
    + goto out;
    + }
    + /*
    + * Use the locally cached objects if possible.
    + * However ____cache_alloc does not allow fallback
    + * to other nodes. It may fail while we still have
    + * objects on other nodes available.
    + */
    + objp = ____cache_alloc(cachep, flags);
    + nodeid = slab_node;
    + } else if (nodeid == slab_node) {
    + objp = ____cache_alloc(cachep, flags);
    + } else if (!get_node(cachep, nodeid)) {
    + /* Node not bootstrapped yet */
    + objp = fallback_alloc(cachep, flags);
    + goto out;
    }
    - objp = ____cache_alloc(cache, flags);

    /*
    * We may just have run out of memory on the local node.
    * ____cache_alloc_node() knows how to locate memory on other nodes
    */
    if (!objp)
    - objp = ____cache_alloc_node(cache, flags, numa_mem_id());
    -
    + objp = ____cache_alloc_node(cachep, flags, nodeid);
    out:
    return objp;
    }
    #else

    static __always_inline void *
    -__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
    +__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid __maybe_unused)
    {
    return ____cache_alloc(cachep, flags);
    }
    @@ -3284,7 +3277,7 @@ slab_alloc(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
    goto out;

    local_irq_save(save_flags);
    - objp = __do_cache_alloc(cachep, flags);
    + objp = __do_cache_alloc(cachep, flags, NUMA_NO_NODE);
    local_irq_restore(save_flags);
    objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
    prefetchw(objp);
    @@ -3521,7 +3514,8 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,

    local_irq_disable();
    for (i = 0; i < size; i++) {
    - void *objp = kfence_alloc(s, s->object_size, flags) ?: __do_cache_alloc(s, flags);
    + void *objp = kfence_alloc(s, s->object_size, flags) ?:
    + __do_cache_alloc(s, flags, NUMA_NO_NODE);

    if (unlikely(!objp))
    goto error;
    --
    2.32.0