Date: 2022-11-01
Subject: Re: [PATCH 3/9] mm/hugetlb_cgroup: convert set_hugetlb_cgroup*() to folios
From: Sidhartha Kumar <sidhartha.kumar@oracle.com>


On 10/31/22 9:38 AM, Mike Kravetz wrote:
> On 10/13/22 20:12, Sidhartha Kumar wrote:
>> Allows __prep_new_huge_page() to operate on a folio by converting
>> set_hugetlb_cgroup*() to take in a folio.
>>
>> Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
>> --- a/mm/hugetlb.c
>> +++ b/mm/hugetlb.c
>> @@ -1758,19 +1758,21 @@ static void __prep_account_new_huge_page(struct hstate *h, int nid)
>> h->nr_huge_pages_node[nid]++;
>> }
>>
>> -static void __prep_new_huge_page(struct hstate *h, struct page *page)
>> +static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
>> {
>> - hugetlb_vmemmap_optimize(h, page);
>> - INIT_LIST_HEAD(&page->lru);
>> - set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
>> - hugetlb_set_page_subpool(page, NULL);
>> - set_hugetlb_cgroup(page, NULL);
>> - set_hugetlb_cgroup_rsvd(page, NULL);
>> + hugetlb_vmemmap_optimize(h, &folio->page);
>> + INIT_LIST_HEAD(&folio->lru);
>> + folio->_folio_dtor = HUGETLB_PAGE_DTOR;
> Seems like we should have a routine 'set_folio_dtor' that has the same
> functionality as set_compound_page_dtor. Here, we lose the check for a
> valid DTOR value (although not terribly valuable).

I agree with the need for a 'set_folio_dtor' routine; I'll send out a
patch for that as well.
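
Roughly something like the following, mirroring set_compound_page_dtor()
(the name folio_set_compound_dtor and its final home are still tentative):

static inline void folio_set_compound_dtor(struct folio *folio,
		enum compound_dtor_id compound_dtor)
{
	/* keep the sanity check set_compound_page_dtor() does today */
	VM_BUG_ON_FOLIO(compound_dtor >= NR_COMPOUND_DTORS, folio);
	folio->_folio_dtor = compound_dtor;
}

so that __prep_new_hugetlb_folio() can call
folio_set_compound_dtor(folio, HUGETLB_PAGE_DTOR) instead of writing
_folio_dtor directly.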

> Not required for this patch, but something to note.
>
>> + hugetlb_set_folio_subpool(folio, NULL);
>> + set_hugetlb_cgroup(folio, NULL);
>> + set_hugetlb_cgroup_rsvd(folio, NULL);
>> }
>>
>> static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
>> {
>> - __prep_new_huge_page(h, page);
>> + struct folio *folio = page_folio(page);
>> +
>> + __prep_new_hugetlb_folio(h, folio);
>> spin_lock_irq(&hugetlb_lock);
>> __prep_account_new_huge_page(h, nid);
>> spin_unlock_irq(&hugetlb_lock);
>> @@ -2731,8 +2733,10 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
>> struct list_head *list)
>> {
>> gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
>> - int nid = page_to_nid(old_page);
>> + struct folio *old_folio = page_folio(old_page);
>> + int nid = folio_nid(old_folio);
>> struct page *new_page;
>> + struct folio *new_folio;
>> int ret = 0;
>>
>> /*
>> @@ -2745,16 +2749,17 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
>> new_page = alloc_buddy_huge_page(h, gfp_mask, nid, NULL, NULL);
>> if (!new_page)
>> return -ENOMEM;
>> - __prep_new_huge_page(h, new_page);
>> + new_folio = page_folio(new_page);
>> + __prep_new_hugetlb_folio(h, new_folio);
>>
>> retry:
>> spin_lock_irq(&hugetlb_lock);
>> - if (!PageHuge(old_page)) {
>> + if (!folio_test_hugetlb(old_folio)) {
>> /*
>> * Freed from under us. Drop new_page too.
>> */
>> goto free_new;
>> - } else if (page_count(old_page)) {
>> + } else if (folio_ref_count(old_folio)) {
>> /*
>> * Someone has grabbed the page, try to isolate it here.
>> * Fail with -EBUSY if not possible.
>> @@ -2763,7 +2768,7 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
>> ret = isolate_hugetlb(old_page, list);
>> spin_lock_irq(&hugetlb_lock);
>> goto free_new;
>> - } else if (!HPageFreed(old_page)) {
>> + } else if (!folio_test_hugetlb(old_folio)) {
> Should that be?
> } else if (!folio_test_hugetlb_freed(old_folio)) {

Yes, good catch. I will fix it in a v2.
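
The hunk in v2 will read something like:

-	} else if (!HPageFreed(old_page)) {
+	} else if (!folio_test_hugetlb_freed(old_folio)) {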

Thanks,
Sidhartha Kumar

>
