Subject: [RFC PATCH v2 2/5] hugetlb: split hugetlb_hstate_alloc_pages
1G and 2M huge pages have different allocation and initialization logic,
which leads to subtle differences in how they can be parallelized. It is
therefore appropriate to split hugetlb_hstate_alloc_pages into separate
gigantic and non-gigantic paths.

    This patch has no functional changes.
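For reference, after the split hugetlb_hstate_alloc_pages() reduces to a
dispatch plus the shared reporting step. Roughly (a condensed sketch of the
resulting structure, taken from the diff below, with the unchanged
hugetlb_cma and boot-time checks elided):

        static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
        {
                unsigned long allocated;

                /* ... existing hugetlb_cma and boot-time checks unchanged ... */

                if (!hstate_is_gigantic(h))
                        allocated = hugetlb_hstate_alloc_pages_non_gigantic(h);
                else
                        allocated = hugetlb_hstate_alloc_pages_gigantic(h);

                hugetlb_hstate_alloc_pages_report(allocated, h);
        }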

    Signed-off-by: Gang Li <gang.li@linux.dev>
    ---
    mm/hugetlb.c | 86 +++++++++++++++++++++++++++-------------------------
    1 file changed, 45 insertions(+), 41 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 252d6866a0af8..8de1653fc4c4f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3502,6 +3502,47 @@ static void __init hugetlb_hstate_alloc_pages_report(unsigned long allocated, st
         }
 }
 
+static unsigned long __init hugetlb_hstate_alloc_pages_gigantic(struct hstate *h)
+{
+        unsigned long i;
+
+        for (i = 0; i < h->max_huge_pages; ++i) {
+                /*
+                 * gigantic pages not added to list as they are not
+                 * added to pools now.
+                 */
+                if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
+                        break;
+                cond_resched();
+        }
+
+        return i;
+}
+
+static unsigned long __init hugetlb_hstate_alloc_pages_non_gigantic(struct hstate *h)
+{
+        unsigned long i;
+        struct folio *folio;
+        LIST_HEAD(folio_list);
+        nodemask_t node_alloc_noretry;
+
+        /* Bit mask controlling how hard we retry per-node allocations.*/
+        nodes_clear(node_alloc_noretry);
+
+        for (i = 0; i < h->max_huge_pages; ++i) {
+                folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
+                                              &node_alloc_noretry);
+                if (!folio)
+                        break;
+                list_add(&folio->lru, &folio_list);
+                cond_resched();
+        }
+
+        prep_and_add_allocated_folios(h, &folio_list);
+
+        return i;
+}
+
 /*
  * NOTE: this routine is called in different contexts for gigantic and
  * non-gigantic pages.
@@ -3515,10 +3556,7 @@ static void __init hugetlb_hstate_alloc_pages_report(unsigned long allocated, st
  */
 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
 {
-        unsigned long i;
-        struct folio *folio;
-        LIST_HEAD(folio_list);
-        nodemask_t *node_alloc_noretry;
+        unsigned long allocated;
 
         /* skip gigantic hugepages allocation if hugetlb_cma enabled */
         if (hstate_is_gigantic(h) && hugetlb_cma_size) {
@@ -3532,46 +3570,12 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
 
         /* below will do all node balanced alloc */
         if (!hstate_is_gigantic(h)) {
-                /*
-                 * Bit mask controlling how hard we retry per-node allocations.
-                 * Ignore errors as lower level routines can deal with
-                 * node_alloc_noretry == NULL. If this kmalloc fails at boot
-                 * time, we are likely in bigger trouble.
-                 */
-                node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry),
-                                             GFP_KERNEL);
+                allocated = hugetlb_hstate_alloc_pages_non_gigantic(h);
         } else {
-                /* allocations done at boot time */
-                node_alloc_noretry = NULL;
-        }
-
-        /* bit mask controlling how hard we retry per-node allocations */
-        if (node_alloc_noretry)
-                nodes_clear(*node_alloc_noretry);
-
-        for (i = 0; i < h->max_huge_pages; ++i) {
-                if (hstate_is_gigantic(h)) {
-                        /*
-                         * gigantic pages not added to list as they are not
-                         * added to pools now.
-                         */
-                        if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
-                                break;
-                } else {
-                        folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
-                                                      node_alloc_noretry);
-                        if (!folio)
-                                break;
-                        list_add(&folio->lru, &folio_list);
-                }
-                cond_resched();
+                allocated = hugetlb_hstate_alloc_pages_gigantic(h);
         }
 
-        /* list will be empty if hstate_is_gigantic */
-        prep_and_add_allocated_folios(h, &folio_list);
-
-        hugetlb_hstate_alloc_pages_report(i, h);
-        kfree(node_alloc_noretry);
+        hugetlb_hstate_alloc_pages_report(allocated, h);
 }
 
 static void __init hugetlb_init_hstates(void)
    --
    2.30.2