From: Yunsheng Lin <linyunsheng@huawei.com>
Subject: [PATCH RFC 09/10] mm: page_frag: introduce prepare/commit API for page_frag
Date: Thu, 28 Mar 2024
There are many use cases that need a minimum amount of memory in
order to make forward progress, but which can do better if more
memory is available.

Currently the skb_page_frag_refill() API is used to handle the
above use cases. As mentioned in [1], its implementation is
similar to the one in the mm subsystem.

To unify those two page_frag implementations, introduce a prepare
API that ensures the minimum memory requirement is satisfied and
reports how much memory is actually available to the caller.

The caller can then decide how much memory to use by calling the
commit API, or skip the commit API entirely if it decides not to
use any memory.

1. https://lore.kernel.org/all/20240228093013.8263-1-linyunsheng@huawei.com/
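
For reference, below is a minimal sketch of the intended prepare/commit
calling pattern. The example_use() helper, fill_frag() and min_size are
hypothetical names invented purely for illustration; they are not part
of this patch:

	/* Hypothetical caller of the prepare/commit API, for
	 * illustration only.
	 */
	static int example_use(struct page_frag_cache *nc,
			       unsigned int min_size)
	{
		unsigned int offset, size = min_size;
		unsigned int used;
		void *va;

		/* Ensure at least 'min_size' bytes are available; on
		 * success 'size' is updated to the full room available
		 * at va + offset.
		 */
		va = page_frag_alloc_prepare(nc, &offset, &size, GFP_KERNEL);
		if (unlikely(!va))
			return -ENOMEM;

		used = fill_frag(va + offset, size);
		if (!used)
			return 0;	/* nothing consumed, no commit needed */

		/* Commit only what was actually used; this also consumes
		 * one of the page references pre-charged in pagecnt_bias.
		 */
		page_frag_alloc_commit(nc, offset, used);
		return 0;
	}

page_frag_alloc_commit_noref() is the variant for callers that do not
take a new reference on the page: it advances the offset without
decrementing pagecnt_bias.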

Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
---
 include/linux/page_frag_cache.h | 72 ++++++++++++++++++++++++++++++++-
 mm/page_frag_alloc.c            | 13 +++---
 2 files changed, 75 insertions(+), 10 deletions(-)

diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h
index a97a1ac017d6..3f17c0eba7fa 100644
--- a/include/linux/page_frag_cache.h
+++ b/include/linux/page_frag_cache.h
@@ -43,8 +43,76 @@ static inline bool page_frag_cache_is_pfmemalloc(struct page_frag_cache *nc)
 
 void page_frag_cache_drain(struct page_frag_cache *nc);
 void __page_frag_cache_drain(struct page *page, unsigned int count);
-void *page_frag_alloc_va(struct page_frag_cache *nc, unsigned int fragsz,
-                         gfp_t gfp_mask);
+void *__page_frag_alloc_prepare(struct page_frag_cache *nc, unsigned int fragsz,
+                                gfp_t gfp_mask);
+
+static inline void *page_frag_alloc_va(struct page_frag_cache *nc,
+                                       unsigned int fragsz, gfp_t gfp_mask)
+{
+        void *va;
+
+        va = __page_frag_alloc_prepare(nc, fragsz, gfp_mask);
+        if (unlikely(!va))
+                return NULL;
+
+        va += nc->offset;
+        nc->pagecnt_bias--;
+        nc->offset = nc->offset + fragsz;
+
+        return va;
+}
+
+static inline void *page_frag_alloc_prepare(struct page_frag_cache *nc,
+                                            unsigned int *offset,
+                                            unsigned int *size,
+                                            gfp_t gfp_mask)
+{
+        void *va;
+
+        va = __page_frag_alloc_prepare(nc, *size, gfp_mask);
+        if (unlikely(!va))
+                return NULL;
+
+        *offset = nc->offset;
+
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+        *size = nc->size_mask - *offset + 1;
+#else
+        *size = PAGE_SIZE - *offset;
+#endif
+
+        return va;
+}
+
+static inline void *page_frag_alloc_prepare_align(struct page_frag_cache *nc,
+                                                  unsigned int *offset,
+                                                  unsigned int *size,
+                                                  unsigned int align,
+                                                  gfp_t gfp_mask)
+{
+        unsigned int old_offset = nc->offset;
+
+        WARN_ON_ONCE(!is_power_of_2(align) || align >= PAGE_SIZE ||
+                     *size < sizeof(unsigned int));
+
+        nc->offset = ALIGN(old_offset, align);
+        return page_frag_alloc_prepare(nc, offset, size, gfp_mask);
+}
+
+static inline void page_frag_alloc_commit(struct page_frag_cache *nc,
+                                          unsigned int offset,
+                                          unsigned int size)
+{
+        nc->pagecnt_bias--;
+        nc->offset = offset + size;
+}
+
+static inline void page_frag_alloc_commit_noref(struct page_frag_cache *nc,
+                                                unsigned int offset,
+                                                unsigned int size)
+{
+        nc->offset = offset + size;
+}
 
 static inline void *__page_frag_alloc_va_align(struct page_frag_cache *nc,
                                                unsigned int fragsz,
diff --git a/mm/page_frag_alloc.c b/mm/page_frag_alloc.c
index ae1393d0619a..c4d4fc10a850 100644
--- a/mm/page_frag_alloc.c
+++ b/mm/page_frag_alloc.c
@@ -81,8 +81,8 @@ void __page_frag_cache_drain(struct page *page, unsigned int count)
 }
 EXPORT_SYMBOL(__page_frag_cache_drain);
 
-void *page_frag_alloc_va(struct page_frag_cache *nc, unsigned int fragsz,
-                         gfp_t gfp_mask)
+void *__page_frag_alloc_prepare(struct page_frag_cache *nc, unsigned int fragsz,
+                                gfp_t gfp_mask)
 {
         unsigned long size_mask;
         unsigned int offset;
@@ -120,7 +120,7 @@ void *page_frag_alloc_va(struct page_frag_cache *nc, unsigned int fragsz,
         set_page_count(page, size_mask);
         nc->pagecnt_bias |= size_mask;
 
-        offset = 0;
+        nc->offset = 0;
         if (unlikely(fragsz > (size_mask + 1))) {
                 /*
                  * The caller is trying to allocate a fragment
@@ -135,12 +135,9 @@ void *page_frag_alloc_va(struct page_frag_cache *nc, unsigned int fragsz,
                 }
         }
 
-        nc->pagecnt_bias--;
-        nc->offset = offset + fragsz;
-
-        return va + offset;
+        return va;
 }
-EXPORT_SYMBOL(page_frag_alloc_va);
+EXPORT_SYMBOL(__page_frag_alloc_prepare);
 
 /*
  * Frees a page fragment allocated out of either a compound or order 0 page.
-- 
2.33.0
