From: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Subject: [PATCH mm-unstable 07/10] mm/hugetlb: convert enqueue_huge_page() to folios
Date: Tue, 15 Nov 2022
Convert callers of enqueue_huge_page() to pass in a folio; the function
is renamed to enqueue_hugetlb_folio().
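
Callers that already hold a folio pass it in directly; callers that
still have only a struct page convert at the call site with
page_folio(). As a minimal sketch of that conversion (mirroring the
gather_surplus_pages() hunk below):

	-	enqueue_huge_page(h, page);
	+	enqueue_hugetlb_folio(h, page_folio(page));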

Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
---
mm/hugetlb.c | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7382c162dbcd..ebb98c1af2fb 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1119,17 +1119,17 @@ static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
return false;
}

-static void enqueue_huge_page(struct hstate *h, struct page *page)
+static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
{
- int nid = page_to_nid(page);
+ int nid = folio_nid(folio);

lockdep_assert_held(&hugetlb_lock);
- VM_BUG_ON_PAGE(page_count(page), page);
+ VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);

- list_move(&page->lru, &h->hugepage_freelists[nid]);
+ list_move(&folio->lru, &h->hugepage_freelists[nid]);
h->free_huge_pages++;
h->free_huge_pages_node[nid]++;
- SetHPageFreed(page);
+ folio_set_hugetlb_freed(folio);
}

static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
@@ -1540,7 +1540,7 @@ static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
return;

arch_clear_hugepage_flags(&folio->page);
- enqueue_huge_page(h, &folio->page);
+ enqueue_hugetlb_folio(h, folio);
}

static void __update_and_free_page(struct hstate *h, struct page *page)
@@ -1752,7 +1752,7 @@ void free_huge_page(struct page *page)
update_and_free_hugetlb_folio(h, folio, true);
} else {
arch_clear_hugepage_flags(page);
- enqueue_huge_page(h, page);
+ enqueue_hugetlb_folio(h, folio);
spin_unlock_irqrestore(&hugetlb_lock, flags);
}
}
@@ -2427,7 +2427,7 @@ static int gather_surplus_pages(struct hstate *h, long delta)
if ((--needed) < 0)
break;
/* Add the page to the hugetlb allocator */
- enqueue_huge_page(h, page);
+ enqueue_hugetlb_folio(h, page_folio(page));
}
free:
spin_unlock_irq(&hugetlb_lock);
@@ -2793,8 +2793,8 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
* Ok, old_page is still a genuine free hugepage. Remove it from
* the freelist and decrease the counters. These will be
* incremented again when calling __prep_account_new_huge_page()
- * and enqueue_huge_page() for new_page. The counters will remain
- * stable since this happens under the lock.
+ * and enqueue_hugetlb_folio() for new_folio. The counters will
+ * remain stable since this happens under the lock.
*/
remove_hugetlb_folio(h, old_folio, false);

@@ -2803,7 +2803,7 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
* earlier. It can be directly added to the pool free list.
*/
__prep_account_new_huge_page(h, nid);
- enqueue_huge_page(h, new_page);
+ enqueue_hugetlb_folio(h, new_folio);

/*
* Pages have been replaced, we can safely free the old one.
--
2.38.1