    Subject: [PATCH v8 04/70] radix tree test suite: add support for slab bulk APIs
    From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>

    Add support for kmem_cache_free_bulk() and kmem_cache_alloc_bulk() to the
    radix tree test suite.

    Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
    ---
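    A minimal usage sketch (not part of the patch): the two entry points mirror
    the kernel's slab bulk API, so a test-suite caller would use them roughly as
    below; "cache" stands in for any kmem_cache created with kmem_cache_create().

        void *objs[8];

        /* Returns the number of objects allocated: size on success, 0 on failure. */
        if (kmem_cache_alloc_bulk(cache, __GFP_DIRECT_RECLAIM, 8, objs) == 8) {
                /* ... use objs[0..7] ... */
                kmem_cache_free_bulk(cache, 8, objs);
        }

    In this test-suite implementation the allocation is all-or-nothing: either
    the whole batch is handed back or the call reports failure.
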
    tools/include/linux/slab.h | 4 ++
    tools/testing/radix-tree/linux.c | 118 ++++++++++++++++++++++++++++++-
    2 files changed, 120 insertions(+), 2 deletions(-)

    diff --git a/tools/include/linux/slab.h b/tools/include/linux/slab.h
    index f41d8a0eb1a4..232218412573 100644
    --- a/tools/include/linux/slab.h
    +++ b/tools/include/linux/slab.h
    @@ -35,4 +35,8 @@ struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
    unsigned int align, unsigned int flags,
    void (*ctor)(void *));

    +void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list);
    +int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
    + void **list);
    +
    #endif /* _TOOLS_SLAB_H */
    diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c
    index e64741ef89ef..75c416ef8618 100644
    --- a/tools/testing/radix-tree/linux.c
    +++ b/tools/testing/radix-tree/linux.c
    @@ -92,14 +92,13 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, int gfp)
    return p;
    }

    -void kmem_cache_free(struct kmem_cache *cachep, void *objp)
    +void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
    {
    assert(objp);
    uatomic_dec(&nr_allocated);
    uatomic_dec(&cachep->nr_allocated);
    if (kmalloc_verbose)
    printf("Freeing %p to slab\n", objp);
    - pthread_mutex_lock(&cachep->lock);
    if (cachep->nr_objs > 10 || cachep->align) {
    memset(objp, POISON_FREE, cachep->size);
    free(objp);
    @@ -109,9 +108,80 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
    node->parent = cachep->objs;
    cachep->objs = node;
    }
    +}
    +
    +void kmem_cache_free(struct kmem_cache *cachep, void *objp)
    +{
    + pthread_mutex_lock(&cachep->lock);
    + kmem_cache_free_locked(cachep, objp);
    pthread_mutex_unlock(&cachep->lock);
    }

    +void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list)
    +{
    + if (kmalloc_verbose)
    + pr_debug("Bulk free %p[0-%lu]\n", list, size - 1);
    +
    + pthread_mutex_lock(&cachep->lock);
    + for (int i = 0; i < size; i++)
    + kmem_cache_free_locked(cachep, list[i]);
    + pthread_mutex_unlock(&cachep->lock);
    +}
    +
    +int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
    + void **p)
    +{
    + size_t i;
    +
    + if (kmalloc_verbose)
    + pr_debug("Bulk alloc %lu\n", size);
    +
    + if (!(gfp & __GFP_DIRECT_RECLAIM)) {
    + if (cachep->non_kernel < size)
    + return 0;
    +
    + cachep->non_kernel -= size;
    + }
    +
    + pthread_mutex_lock(&cachep->lock);
    + if (cachep->nr_objs >= size) {
    + struct radix_tree_node *node;
    +
    + for (i = 0; i < size; i++) {
    + node = cachep->objs;
    + cachep->nr_objs--;
    + cachep->objs = node->parent;
    + p[i] = node;
    + node->parent = NULL;
    + }
    + pthread_mutex_unlock(&cachep->lock);
    + } else {
    + pthread_mutex_unlock(&cachep->lock);
    + for (i = 0; i < size; i++) {
    + if (cachep->align) {
    + posix_memalign(&p[i], cachep->align,
    + cachep->size * size);
    + } else {
    + p[i] = malloc(cachep->size * size);
    + }
    + if (cachep->ctor)
    + cachep->ctor(p[i]);
    + else if (gfp & __GFP_ZERO)
    + memset(p[i], 0, cachep->size);
    + }
    + }
    +
    + for (i = 0; i < size; i++) {
    + uatomic_inc(&nr_allocated);
    + uatomic_inc(&cachep->nr_allocated);
    + uatomic_inc(&cachep->nr_tallocated);
    + if (kmalloc_verbose)
    + printf("Allocating %p from slab\n", p[i]);
    + }
    +
    + return size;
    +}
    +
    struct kmem_cache *
    kmem_cache_create(const char *name, unsigned int size, unsigned int align,
    unsigned int flags, void (*ctor)(void *))
    @@ -129,3 +199,47 @@ kmem_cache_create(const char *name, unsigned int size, unsigned int align,
    ret->non_kernel = 0;
    return ret;
    }
    +
    +/*
    + * Test the test infrastructure for kmem_cache_alloc/free and bulk counterparts.
    + */
    +void test_kmem_cache_bulk(void)
    +{
    + int i;
    + void *list[12];
    + static struct kmem_cache *test_cache, *test_cache2;
    +
    + /*
    + * Testing the bulk allocators without aligned kmem_cache to force the
    + * bulk alloc/free to reuse
    + */
    + test_cache = kmem_cache_create("test_cache", 256, 0, SLAB_PANIC, NULL);
    +
    + for (i = 0; i < 5; i++)
    + list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);
    +
    + for (i = 0; i < 5; i++)
    + kmem_cache_free(test_cache, list[i]);
    + assert(test_cache->nr_objs == 5);
    +
    + kmem_cache_alloc_bulk(test_cache, __GFP_DIRECT_RECLAIM, 5, list);
    + kmem_cache_free_bulk(test_cache, 5, list);
    +
    + for (i = 0; i < 12 ; i++)
    + list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);
    +
    + for (i = 0; i < 12; i++)
    + kmem_cache_free(test_cache, list[i]);
    +
    + /* The last free will not be kept around */
    + assert(test_cache->nr_objs == 11);
    +
    + /* Aligned caches will immediately free */
    + test_cache2 = kmem_cache_create("test_cache2", 128, 128, SLAB_PANIC, NULL);
    +
    + kmem_cache_alloc_bulk(test_cache2, __GFP_DIRECT_RECLAIM, 10, list);
    + kmem_cache_free_bulk(test_cache2, 10, list);
    + assert(!test_cache2->nr_objs);
    +
    +
    +}
    --
    2.35.1