From: Zi Yan <ziy@nvidia.com>
Subject: [PATCH v4 5/7] mm: cma: use pageblock_order as the single alignment
Date: 19 Jan 2022

Now that alloc_contig_range() works at pageblock granularity, change CMA
allocation, which uses alloc_contig_range(), to use pageblock_order as
its alignment.
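
As a quick sanity check of the sizes involved, here is a stand-alone
sketch of the before/after minimum-alignment arithmetic (user-space C,
not kernel code; the PAGE_SIZE, MAX_ORDER and pageblock_order values
below are assumptions matching a typical x86_64 configuration):

#include <stdio.h>

/* Assumed values for a typical x86_64 config (4K pages, 2M pageblocks). */
#define PAGE_SIZE	4096UL
#define MAX_ORDER	11
#define pageblock_order	9

#define max(a, b)	((a) > (b) ? (a) : (b))

int main(void)
{
	/* Old minimum CMA alignment: larger of MAX_ORDER - 1 and pageblock_order. */
	unsigned long old_align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
	/* New minimum CMA alignment: a single pageblock. */
	unsigned long new_align = PAGE_SIZE << pageblock_order;

	printf("old: %lu KiB, new: %lu KiB\n", old_align >> 10, new_align >> 10);
	/* prints: old: 4096 KiB, new: 2048 KiB */
	return 0;
}

With these assumed values, the minimum CMA region alignment drops from
4 MiB (MAX_ORDER - 1 granularity) to 2 MiB (one pageblock).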

    Signed-off-by: Zi Yan <ziy@nvidia.com>
    ---
 include/linux/mmzone.h  | 5 +----
 kernel/dma/contiguous.c | 2 +-
 mm/cma.c                | 6 ++----
 mm/page_alloc.c         | 6 +++---
 4 files changed, 7 insertions(+), 12 deletions(-)

    diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
    index 71b77aab748d..7bd3694b24b4 100644
    --- a/include/linux/mmzone.h
    +++ b/include/linux/mmzone.h
@@ -54,10 +54,7 @@ enum migratetype {
 	 *
 	 * The way to use it is to change migratetype of a range of
 	 * pageblocks to MIGRATE_CMA which can be done by
-	 * __free_pageblock_cma() function. What is important though
-	 * is that a range of pageblocks must be aligned to
-	 * MAX_ORDER_NR_PAGES should biggest page be bigger than
-	 * a single pageblock.
+	 * __free_pageblock_cma() function.
 	 */
 	MIGRATE_CMA,
 #endif
    diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
    index 3d63d91cba5c..ac35b14b0786 100644
    --- a/kernel/dma/contiguous.c
    +++ b/kernel/dma/contiguous.c
@@ -399,7 +399,7 @@ static const struct reserved_mem_ops rmem_cma_ops = {
 
 static int __init rmem_cma_setup(struct reserved_mem *rmem)
 {
-	phys_addr_t align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
+	phys_addr_t align = PAGE_SIZE << pageblock_order;
 	phys_addr_t mask = align - 1;
 	unsigned long node = rmem->fdt_node;
 	bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
    diff --git a/mm/cma.c b/mm/cma.c
    index bc9ca8f3c487..d171158bd418 100644
    --- a/mm/cma.c
    +++ b/mm/cma.c
@@ -180,8 +180,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 		return -EINVAL;
 
 	/* ensure minimal alignment required by mm core */
-	alignment = PAGE_SIZE <<
-			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
+	alignment = PAGE_SIZE << pageblock_order;
 
 	/* alignment should be aligned with order_per_bit */
 	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
@@ -268,8 +267,7 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
 	 * migratetype page by page allocator's buddy algorithm. In the case,
 	 * you couldn't get a contiguous memory, which is not what we want.
 	 */
-	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
-			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
+	alignment = max(alignment, (phys_addr_t)PAGE_SIZE << pageblock_order);
 	if (fixed && base & (alignment - 1)) {
 		ret = -EINVAL;
 		pr_err("Region at %pa must be aligned to %pa bytes\n",
    diff --git a/mm/page_alloc.c b/mm/page_alloc.c
    index 6ed506234efa..a8ced1a00ce8 100644
    --- a/mm/page_alloc.c
    +++ b/mm/page_alloc.c
@@ -9008,8 +9008,8 @@ static inline void split_free_page_into_pageblocks(struct page *free_page,
  * be either of the two.
  * @gfp_mask: GFP mask to use during compaction
  *
- * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
- * aligned. The PFN range must belong to a single zone.
+ * The PFN range does not have to be pageblock aligned. The PFN range must
+ * belong to a single zone.
  *
  * The first thing this routine does is attempt to MIGRATE_ISOLATE all
  * pageblocks in the range. Once isolated, the pageblocks should not
@@ -9125,7 +9125,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 	ret = 0;
 
 	/*
-	 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
+	 * Pages from [start, end) are within a pageblock_nr_pages
 	 * aligned blocks that are marked as MIGRATE_ISOLATE. What's
 	 * more, all pages in [start, end) are free in page allocator.
 	 * What we are going to do is to allocate all pages from
    --
    2.34.1
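
For context, a hypothetical kernel-side sketch (not part of this patch)
of a caller that allocates and then frees one pageblock through
alloc_contig_range(), the interface whose documentation the two
mm/page_alloc.c hunks update. The demo_grab_pageblock() name is invented
for illustration, and the code assumes CONFIG_CONTIG_ALLOC is enabled:

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mmzone.h>

static int demo_grab_pageblock(unsigned long pfn)
{
	/* Round down to the pageblock that contains @pfn. */
	unsigned long start = ALIGN_DOWN(pfn, pageblock_nr_pages);
	unsigned long end = start + pageblock_nr_pages;
	int ret;

	/* Isolate, migrate away and allocate every page in [start, end). */
	ret = alloc_contig_range(start, end, MIGRATE_MOVABLE, GFP_KERNEL);
	if (ret)
		return ret;

	/* ... use the physically contiguous range ... */

	free_contig_range(start, pageblock_nr_pages);
	return 0;
}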