Subject: [PATCH v2 04/11] hugetlb: Convert remove_pool_huge_page() to remove_pool_hugetlb_folio()
    From: "Matthew Wilcox (Oracle)" <willy@infradead.org>

    Convert the callers to expect a folio and remove the unnecessary conversion
    back to a struct page.

    Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
    Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
    Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
    ---
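    [Editorial note, not part of the patch: the sketch below is a minimal
    userspace illustration of the pattern being removed. The struct
    definitions are simplified assumptions that only model the page/folio
    relationship, not the kernel's real types; the point is that returning
    &folio->page forces callers that want the folio to convert back, while
    returning the folio directly avoids the round trip.]

    #include <stdio.h>
    #include <stddef.h>

    /* Toy stand-ins for the kernel types; the real definitions are far richer. */
    struct page { unsigned long flags; };
    struct folio { struct page page; };	/* first page embedded in the folio */

    /* Old shape: find a folio internally, but hand back a page pointer. */
    static struct page *remove_old(struct folio *folio)
    {
    	return &folio->page;
    }

    /* New shape: hand back the folio itself, so callers need no conversion. */
    static struct folio *remove_new(struct folio *folio)
    {
    	return folio;
    }

    int main(void)
    {
    	struct folio f = { { 0 } };

    	/* Old caller: recovers the folio by undoing the &folio->page step. */
    	struct page *page = remove_old(&f);
    	struct folio *back =
    		(struct folio *)((char *)page - offsetof(struct folio, page));

    	/* New caller: uses the returned folio directly. */
    	struct folio *direct = remove_new(&f);

    	printf("round-trip ok: %d, direct ok: %d\n", back == &f, direct == &f);
    	return 0;
    }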
    mm/hugetlb.c | 29 +++++++++++++++--------------
    1 file changed, 15 insertions(+), 14 deletions(-)

    diff --git a/mm/hugetlb.c b/mm/hugetlb.c
    index f768fe9aebad..278c8ae6a36c 100644
    --- a/mm/hugetlb.c
    +++ b/mm/hugetlb.c
    @@ -1446,7 +1446,7 @@ static int hstate_next_node_to_alloc(struct hstate *h,
    }

    /*
    - * helper for remove_pool_huge_page() - return the previously saved
    + * helper for remove_pool_hugetlb_folio() - return the previously saved
    * node ["this node"] from which to free a huge page. Advance the
    * next node id whether or not we find a free huge page to free so
    * that the next attempt to free addresses the next node.
    @@ -2222,9 +2222,8 @@ static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
    * an additional call to free the page to low level allocators.
    * Called with hugetlb_lock locked.
    */
    -static struct page *remove_pool_huge_page(struct hstate *h,
    - nodemask_t *nodes_allowed,
    - bool acct_surplus)
    +static struct folio *remove_pool_hugetlb_folio(struct hstate *h,
    + nodemask_t *nodes_allowed, bool acct_surplus)
    {
    int nr_nodes, node;
    struct folio *folio = NULL;
    @@ -2244,7 +2243,7 @@ static struct page *remove_pool_huge_page(struct hstate *h,
    }
    }

    - return &folio->page;
    + return folio;
    }

    /*
    @@ -2598,7 +2597,6 @@ static void return_unused_surplus_pages(struct hstate *h,
    unsigned long unused_resv_pages)
    {
    unsigned long nr_pages;
    - struct page *page;
    LIST_HEAD(page_list);

    lockdep_assert_held(&hugetlb_lock);
    @@ -2619,15 +2617,17 @@ static void return_unused_surplus_pages(struct hstate *h,
    * evenly across all nodes with memory. Iterate across these nodes
    * until we can no longer free unreserved surplus pages. This occurs
    * when the nodes with surplus pages have no free pages.
    - * remove_pool_huge_page() will balance the freed pages across the
    + * remove_pool_hugetlb_folio() will balance the freed pages across the
    * on-line nodes with memory and will handle the hstate accounting.
    */
    while (nr_pages--) {
    - page = remove_pool_huge_page(h, &node_states[N_MEMORY], 1);
    - if (!page)
    + struct folio *folio;
    +
    + folio = remove_pool_hugetlb_folio(h, &node_states[N_MEMORY], 1);
    + if (!folio)
    goto out;

    - list_add(&page->lru, &page_list);
    + list_add(&folio->lru, &page_list);
    }

    out:
    @@ -3422,7 +3422,6 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
    nodemask_t *nodes_allowed)
    {
    unsigned long min_count, ret;
    - struct page *page;
    LIST_HEAD(page_list);
    NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);

    @@ -3542,11 +3541,13 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
    * Collect pages to be removed on list without dropping lock
    */
    while (min_count < persistent_huge_pages(h)) {
    - page = remove_pool_huge_page(h, nodes_allowed, 0);
    - if (!page)
    + struct folio *folio;
    +
    + folio = remove_pool_hugetlb_folio(h, nodes_allowed, 0);
    + if (!folio)
    break;

    - list_add(&page->lru, &page_list);
    + list_add(&folio->lru, &page_list);
    }
    /* free the pages after dropping lock */
    spin_unlock_irq(&hugetlb_lock);
    --
    2.41.0