    Subject: Re: [PATCH v2 18/21] mm/slab: move kmalloc() functions from slab_common.c to slub.c
    On Mon, Nov 20, 2023 at 07:34:29PM +0100, Vlastimil Babka wrote:
    > This will eliminate a call between compilation units through
    > __kmem_cache_alloc_node() and allow better inlining of the allocation
    > fast path.
    >
    > Reviewed-by: Kees Cook <keescook@chromium.org>
    > Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
    > ---
    > mm/slab.h | 3 --
    > mm/slab_common.c | 119 ----------------------------------------------------
    > mm/slub.c | 126 +++++++++++++++++++++++++++++++++++++++++++++++++++----
    > 3 files changed, 118 insertions(+), 130 deletions(-)
    >
    > diff --git a/mm/slab.h b/mm/slab.h
    > index 7d7cc7af614e..54deeb0428c6 100644
    > --- a/mm/slab.h
    > +++ b/mm/slab.h
    > @@ -416,9 +416,6 @@ kmalloc_slab(size_t size, gfp_t flags, unsigned long caller)
    > 	return kmalloc_caches[kmalloc_type(flags, caller)][index];
    > }
    >
    > -void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
    > -			      int node, size_t orig_size,
    > -			      unsigned long caller);
    > gfp_t kmalloc_fix_flags(gfp_t flags);
    >
    > /* Functions provided by the slab allocators */
    > diff --git a/mm/slab_common.c b/mm/slab_common.c
    > index 31ade17a7ad9..238293b1dbe1 100644
    > --- a/mm/slab_common.c
    > +++ b/mm/slab_common.c
    > @@ -936,50 +936,6 @@ void __init create_kmalloc_caches(slab_flags_t flags)
    > 	slab_state = UP;
    > }
    >
    > -static void *__kmalloc_large_node(size_t size, gfp_t flags, int node);
    > -static __always_inline
    > -void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
    > -{
    > -	struct kmem_cache *s;
    > -	void *ret;
    > -
    > -	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
    > -		ret = __kmalloc_large_node(size, flags, node);
    > -		trace_kmalloc(caller, ret, size,
    > -			      PAGE_SIZE << get_order(size), flags, node);
    > -		return ret;
    > -	}
    > -
    > -	if (unlikely(!size))
    > -		return ZERO_SIZE_PTR;
    > -
    > -	s = kmalloc_slab(size, flags, caller);
    > -
    > -	ret = __kmem_cache_alloc_node(s, flags, node, size, caller);
    > -	ret = kasan_kmalloc(s, ret, size, flags);
    > -	trace_kmalloc(caller, ret, size, s->size, flags, node);
    > -	return ret;
    > -}
    > -
    > -void *__kmalloc_node(size_t size, gfp_t flags, int node)
    > -{
    > -	return __do_kmalloc_node(size, flags, node, _RET_IP_);
    > -}
    > -EXPORT_SYMBOL(__kmalloc_node);
    > -
    > -void *__kmalloc(size_t size, gfp_t flags)
    > -{
    > -	return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
    > -}
    > -EXPORT_SYMBOL(__kmalloc);
    > -
    > -void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
    > -				  int node, unsigned long caller)
    > -{
    > -	return __do_kmalloc_node(size, flags, node, caller);
    > -}
    > -EXPORT_SYMBOL(__kmalloc_node_track_caller);
    > -
    > /**
    > * __ksize -- Report full size of underlying allocation
    > * @object: pointer to the object
    > @@ -1016,30 +972,6 @@ size_t __ksize(const void *object)
    > 	return slab_ksize(folio_slab(folio)->slab_cache);
    > }
    >
    > -void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
    > -{
    > -	void *ret = __kmem_cache_alloc_node(s, gfpflags, NUMA_NO_NODE,
    > -					    size, _RET_IP_);
    > -
    > -	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE);
    > -
    > -	ret = kasan_kmalloc(s, ret, size, gfpflags);
    > -	return ret;
    > -}
    > -EXPORT_SYMBOL(kmalloc_trace);
    > -
    > -void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
    > -			 int node, size_t size)
    > -{
    > -	void *ret = __kmem_cache_alloc_node(s, gfpflags, node, size, _RET_IP_);
    > -
    > -	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node);
    > -
    > -	ret = kasan_kmalloc(s, ret, size, gfpflags);
    > -	return ret;
    > -}
    > -EXPORT_SYMBOL(kmalloc_node_trace);
    > -
    > gfp_t kmalloc_fix_flags(gfp_t flags)
    > {
    > 	gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
    > @@ -1052,57 +984,6 @@ gfp_t kmalloc_fix_flags(gfp_t flags)
    > 	return flags;
    > }
    >
    > -/*
    > - * To avoid unnecessary overhead, we pass through large allocation requests
    > - * directly to the page allocator. We use __GFP_COMP, because we will need to
    > - * know the allocation order to free the pages properly in kfree.
    > - */
    > -
    > -static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
    > -{
    > -	struct page *page;
    > -	void *ptr = NULL;
    > -	unsigned int order = get_order(size);
    > -
    > -	if (unlikely(flags & GFP_SLAB_BUG_MASK))
    > -		flags = kmalloc_fix_flags(flags);
    > -
    > -	flags |= __GFP_COMP;
    > -	page = alloc_pages_node(node, flags, order);
    > -	if (page) {
    > -		ptr = page_address(page);
    > -		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
    > -				      PAGE_SIZE << order);
    > -	}
    > -
    > -	ptr = kasan_kmalloc_large(ptr, size, flags);
    > -	/* As ptr might get tagged, call kmemleak hook after KASAN. */
    > -	kmemleak_alloc(ptr, size, 1, flags);
    > -	kmsan_kmalloc_large(ptr, size, flags);
    > -
    > -	return ptr;
    > -}
    > -
    > -void *kmalloc_large(size_t size, gfp_t flags)
    > -{
    > -	void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE);
    > -
    > -	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
    > -		      flags, NUMA_NO_NODE);
    > -	return ret;
    > -}
    > -EXPORT_SYMBOL(kmalloc_large);
    > -
    > -void *kmalloc_large_node(size_t size, gfp_t flags, int node)
    > -{
    > -	void *ret = __kmalloc_large_node(size, flags, node);
    > -
    > -	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
    > -		      flags, node);
    > -	return ret;
    > -}
    > -EXPORT_SYMBOL(kmalloc_large_node);
    > -
    > #ifdef CONFIG_SLAB_FREELIST_RANDOM
    > /* Randomize a generic freelist */
    > static void freelist_randomize(unsigned int *list,
    > diff --git a/mm/slub.c b/mm/slub.c
    > index 2baa9e94d9df..d6bc15929d22 100644
    > --- a/mm/slub.c
    > +++ b/mm/slub.c
    > @@ -3851,14 +3851,6 @@ void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
    > }
    > EXPORT_SYMBOL(kmem_cache_alloc_lru);
    >
    > -void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
    > -			      int node, size_t orig_size,
    > -			      unsigned long caller)
    > -{
    > -	return slab_alloc_node(s, NULL, gfpflags, node,
    > -			       caller, orig_size);
    > -}
    > -
    > /**
    > * kmem_cache_alloc_node - Allocate an object on the specified node
    > * @s: The cache to allocate from.
    > @@ -3882,6 +3874,124 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
    > }
    > EXPORT_SYMBOL(kmem_cache_alloc_node);
    >
    > +/*
    > + * To avoid unnecessary overhead, we pass through large allocation requests
    > + * directly to the page allocator. We use __GFP_COMP, because we will need to
    > + * know the allocation order to free the pages properly in kfree.
    > + */
    > +static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
    > +{
    > +	struct page *page;
    > +	void *ptr = NULL;
    > +	unsigned int order = get_order(size);
    > +
    > +	if (unlikely(flags & GFP_SLAB_BUG_MASK))
    > +		flags = kmalloc_fix_flags(flags);
    > +
    > +	flags |= __GFP_COMP;
    > +	page = alloc_pages_node(node, flags, order);
    > +	if (page) {
    > +		ptr = page_address(page);
    > +		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
    > +				      PAGE_SIZE << order);
    > +	}
    > +
    > +	ptr = kasan_kmalloc_large(ptr, size, flags);
    > +	/* As ptr might get tagged, call kmemleak hook after KASAN. */
    > +	kmemleak_alloc(ptr, size, 1, flags);
    > +	kmsan_kmalloc_large(ptr, size, flags);
    > +
    > +	return ptr;
    > +}
    > +
    > +void *kmalloc_large(size_t size, gfp_t flags)
    > +{
    > +	void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE);
    > +
    > +	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
    > +		      flags, NUMA_NO_NODE);
    > +	return ret;
    > +}
    > +EXPORT_SYMBOL(kmalloc_large);
    > +
    > +void *kmalloc_large_node(size_t size, gfp_t flags, int node)
    > +{
    > +	void *ret = __kmalloc_large_node(size, flags, node);
    > +
    > +	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
    > +		      flags, node);
    > +	return ret;
    > +}
    > +EXPORT_SYMBOL(kmalloc_large_node);
    > +
    > +static __always_inline
    > +void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
    > +			unsigned long caller)
    > +{
    > +	struct kmem_cache *s;
    > +	void *ret;
    > +
    > +	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
    > +		ret = __kmalloc_large_node(size, flags, node);
    > +		trace_kmalloc(caller, ret, size,
    > +			      PAGE_SIZE << get_order(size), flags, node);
    > +		return ret;
    > +	}
    > +
    > +	if (unlikely(!size))
    > +		return ZERO_SIZE_PTR;
    > +
    > +	s = kmalloc_slab(size, flags, caller);
    > +
    > +	ret = slab_alloc_node(s, NULL, flags, node, caller, size);
    > +	ret = kasan_kmalloc(s, ret, size, flags);
    > +	trace_kmalloc(caller, ret, size, s->size, flags, node);
    > +	return ret;
    > +}
    > +
    > +void *__kmalloc_node(size_t size, gfp_t flags, int node)
    > +{
    > +	return __do_kmalloc_node(size, flags, node, _RET_IP_);
    > +}
    > +EXPORT_SYMBOL(__kmalloc_node);
    > +
    > +void *__kmalloc(size_t size, gfp_t flags)
    > +{
    > +	return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
    > +}
    > +EXPORT_SYMBOL(__kmalloc);
    > +
    > +void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
    > +				  int node, unsigned long caller)
    > +{
    > +	return __do_kmalloc_node(size, flags, node, caller);
    > +}
    > +EXPORT_SYMBOL(__kmalloc_node_track_caller);
    > +
    > +void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
    > +{
    > +	void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE,
    > +				    _RET_IP_, size);
    > +
    > +	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE);
    > +
    > +	ret = kasan_kmalloc(s, ret, size, gfpflags);
    > +	return ret;
    > +}
    > +EXPORT_SYMBOL(kmalloc_trace);
    > +
    > +void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
    > +			 int node, size_t size)
    > +{
    > +	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
    > +
    > +	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node);
    > +
    > +	ret = kasan_kmalloc(s, ret, size, gfpflags);
    > +	return ret;
    > +}
    > +EXPORT_SYMBOL(kmalloc_node_trace);
    > +
    > static noinline void free_to_partial_list(
    > 	struct kmem_cache *s, struct slab *slab,
    > 	void *head, void *tail, int bulk_cnt,
    >
    > --

    Looks good to me,
    Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
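
    To make the motivation concrete for anyone skimming the archive: with
    __do_kmalloc_node() now sitting in the same compilation unit as
    slab_alloc_node(), the compiler can expand the allocation fast path into
    the kmalloc entry points instead of emitting a call through the old
    out-of-line __kmem_cache_alloc_node() wrapper. A rough single-file toy
    sketch of that difference (made-up names, not the kernel code; noinline
    stands in here for the old cross-unit call barrier):

    /* Toy illustration only -- every name below is made up. */
    #include <stddef.h>
    #include <stdlib.h>

    /*
     * "Before": the helper effectively lives in another .c file, so
     * (without LTO) each caller pays for a real call and the size check
     * cannot be folded into the call site.  noinline mimics that barrier
     * within a single file.
     */
    static __attribute__((noinline)) void *helper_out_of_line(size_t size)
    {
    	if (!size)
    		return NULL;
    	return malloc(size);		/* stand-in for the slab fast path */
    }

    void *toy_kmalloc_before(size_t size)
    {
    	return helper_out_of_line(size);	/* call instruction emitted */
    }

    /*
     * "After": the helper is in the same file and always inlined, so each
     * caller gets the fast path expanded in place and a constant size can
     * be checked at compile time.
     */
    static inline __attribute__((always_inline))
    void *helper_inlined(size_t size)
    {
    	if (!size)
    		return NULL;
    	return malloc(size);		/* stand-in for the slab fast path */
    }

    void *toy_kmalloc_after(size_t size)
    {
    	return helper_inlined(size);	/* no call emitted, body inlined */
    }

    With this patch the real fast path is in the "after" situation for all
    the kmalloc entry points now defined in mm/slub.c.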

    > 2.42.1
    >
    >
