Subject: [patch 8/9] slab: extract virt_to_{cache|slab}
From: Pekka Enberg <penberg@cs.helsinki.fi>
Date: 2006-01-03

This patch introduces the virt_to_cache() and virt_to_slab() functions
to reduce duplicated code and to provide a proper abstraction in case
we ever want to support other kinds of address-to-cache and
address-to-slab mappings (e.g. for vmalloc() or I/O memory).
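
For context, the mapping these helpers encapsulate: this era of
mm/slab.c records each page's owning cache and slab in the page's
otherwise-unused lru pointers, as the page_get_slab() context in the
first hunk below shows. A minimal sketch of that convention follows;
the two setters are assumptions inferred from the getters and are shown
only for illustration:

static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
{
	/* Assumed setter: cache pointer stashed in lru.next. */
	page->lru.next = (struct list_head *)cache;
}

static inline struct kmem_cache *page_get_cache(struct page *page)
{
	return (struct kmem_cache *)page->lru.next;
}

static inline void page_set_slab(struct page *page, struct slab *slab)
{
	/* Assumed setter: slab pointer stashed in lru.prev. */
	page->lru.prev = (struct list_head *)slab;
}

static inline struct slab *page_get_slab(struct page *page)
{
	return (struct slab *)page->lru.prev;	/* matches the hunk below */
}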

Acked-by: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
---

 mm/slab.c |   22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)

Index: 2.6/mm/slab.c
===================================================================
--- 2.6.orig/mm/slab.c
+++ 2.6/mm/slab.c
@@ -594,6 +594,18 @@ static inline struct slab *page_get_slab
 	return (struct slab *)page->lru.prev;
 }
 
+static inline struct kmem_cache *virt_to_cache(const void *obj)
+{
+	struct page *page = virt_to_page(obj);
+	return page_get_cache(page);
+}
+
+static inline struct slab *virt_to_slab(const void *obj)
+{
+	struct page *page = virt_to_page(obj);
+	return page_get_slab(page);
+}
+
 /* These are the default caches for kmalloc. Custom caches can have other sizes. */
 struct cache_sizes malloc_sizes[] = {
 #define CACHE(x) { .cs_size = (x) },
@@ -1423,7 +1435,7 @@ static void check_poison_obj(kmem_cache_
 	/* Print some data about the neighboring objects, if they
 	 * exist:
 	 */
-	struct slab *slabp = page_get_slab(virt_to_page(objp));
+	struct slab *slabp = virt_to_slab(objp);
 	int objnr;
 
 	objnr = (objp-slabp->s_mem)/cachep->buffer_size;
@@ -2712,7 +2724,7 @@ static void free_block(kmem_cache_t *cac
 		void *objp = objpp[i];
 		struct slab *slabp;
 
-		slabp = page_get_slab(virt_to_page(objp));
+		slabp = virt_to_slab(objp);
 		l3 = cachep->nodelists[node];
 		list_del(&slabp->list);
 		check_spinlock_acquired_node(cachep, node);
@@ -2814,7 +2826,7 @@ static inline void __cache_free(kmem_cac
 #ifdef CONFIG_NUMA
 	{
 		struct slab *slabp;
-		slabp = page_get_slab(virt_to_page(objp));
+		slabp = virt_to_slab(objp);
 		if (unlikely(slabp->nodeid != numa_node_id())) {
 			struct array_cache *alien = NULL;
 			int nodeid = slabp->nodeid;
@@ -3089,7 +3101,7 @@ void kfree(const void *objp)
 		return;
 	local_irq_save(flags);
 	kfree_debugcheck(objp);
-	c = page_get_cache(virt_to_page(objp));
+	c = virt_to_cache(objp);
 	__cache_free(c, (void*)objp);
 	local_irq_restore(flags);
 }
@@ -3659,7 +3671,7 @@ unsigned int ksize(const void *objp)
 	if (unlikely(objp == NULL))
 		return 0;
 
-	return obj_size(page_get_cache(virt_to_page(objp)));
+	return obj_size(virt_to_cache(objp));
 }
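
As for the changelog's point about other mappings (vmalloc(), I/O
memory): with every lookup funneled through virt_to_cache() and
virt_to_slab(), only these two helpers would need to learn about new
address ranges. A hypothetical sketch of such an extension, not part of
this patch; is_vmalloc_addr() is assumed here for brevity (a helper by
that name only appeared in later kernels), while vmalloc_to_page()
already existed:

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page;

	/* Hypothetical: route vmalloc() addresses through their own
	 * page-table based translation instead of virt_to_page(). */
	if (is_vmalloc_addr(obj))
		page = vmalloc_to_page(obj);
	else
		page = virt_to_page(obj);
	return page_get_cache(page);
}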


    --


