Subject: Re: [PATCH v11 0/6] Use pageblock_order for cma and alloc_contig_range alignment.
On 20 May 2022, at 19:41, Qian Cai wrote:

> On Fri, May 20, 2022 at 05:56:52PM -0400, Zi Yan wrote:
>> Do you have the page information like refcount, map count, mapping, index, and
>> page flags? That would be more helpful. Thanks.
>
> page:fffffc200c7f8000 refcount:393 mapcount:1 mapping:0000000000000000 index:0xffffbb800 pfn:0x8039fe00
> head:fffffc200c7f8000 order:9 compound_mapcount:0 compound_pincount:0
> memcg:ffff40026005a000
> anon flags: 0xbfffc000009001c(uptodate|dirty|lru|head|swapbacked|node=0|zone=2|lastcpupid=0xffff)
> raw: 0bfffc000009001c fffffc2007b74048 fffffc2009c087c8 ffff08038dab9189
> raw: 0000000ffffbb800 0000000000000000 0000018900000000 ffff40026005a000
>

This is a PTE-mapped THP (order 9, so 512 subpages, and compound_mapcount is 0). Unless fewer than
393 subpages are mapped, meaning an extra refcount is present, the page should be migratable. Even if
it is not migratable because of the extra pin, __alloc_contig_migrate_range() will return non-zero and
the code bails out. I have no idea why it caused the infinite loop.
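
For what it's worth, here is a back-of-the-envelope sketch of that reasoning (user-space C,
illustrative numbers only, not the kernel's exact refcount accounting): each PTE-mapped subpage of an
anonymous THP is expected to hold one reference, so the surplus of refcount over mapped subpages
approximates the extra pins.

#include <stdio.h>

int main(void)
{
	unsigned int refcount = 393;		/* refcount from the dump above */
	unsigned int subpages = 1u << 9;	/* order-9 THP: 512 subpages */
	unsigned int mapped_subpages = 393;	/* hypothetical best case */
	unsigned int extra_pins = refcount - mapped_subpages;

	/*
	 * extra_pins == 0: all references come from PTE mappings and the THP
	 * should be migratable.  extra_pins > 0: migration should fail and
	 * __alloc_contig_migrate_range() should return non-zero, not loop.
	 */
	printf("subpages=%u mapped=%u extra_pins=%u\n",
	       subpages, mapped_subpages, extra_pins);
	return 0;
}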

>> I cannot reproduce it locally after hundreds of iterations of flip_mem.py on my
>> x86_64 VM and bare metal.
>>
>> What ARM machine are you using? I wonder if I am able to get one locally.
>
> Ampere Altra.

Sorry, I have no access to such a machine right now and cannot afford to buy one.

Can you try the patch below on top of linux-next to see if it fixes the infinite loop issue?
Thanks.

1. The split_free_page() change is not strictly required for the fix, but it makes the code more
robust (see the sketch after this list).
2. Using set_migratetype_isolate() in isolate_single_pageblock() properly marks the pageblock
MIGRATE_ISOLATE.
3. Setting the to-be-migrated page's pageblock to MIGRATE_ISOLATE avoids a possible race in which
another thread takes the free page after migration.
4. An off-by-one fix, and no retry when the free page is not found after migration, like I added before.
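
For illustration only (not part of the patch), here is a small user-space sketch, with made-up pfn
values, of how the new free_page_order selection in split_free_page() walks a free page: the chosen
order never exceeds the buddy alignment of pfn and never overshoots the split point. __ffs()/__fls()
are emulated with GCC builtins (assuming 64-bit long), and the split_pfn_offset bookkeeping after the
hunk below is modeled on the linux-next version of the function.

#include <stdio.h>

#define my_ffs(x)	((unsigned long)__builtin_ctzl(x))	/* like __ffs() */
#define my_fls(x)	(63UL - __builtin_clzl(x))		/* like __fls() */
#define min(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned long free_page_pfn = 0x8000, order = 9;	/* hypothetical order-9 free page */
	unsigned long split_pfn_offset = 0x180;			/* split it 384 pages in */
	unsigned long pfn;

	for (pfn = free_page_pfn; pfn < free_page_pfn + (1UL << order);) {
		/* stay buddy-aligned at pfn and do not step past the split point */
		unsigned long free_page_order =
			min(pfn ? my_ffs(pfn) : order, my_fls(split_pfn_offset));

		printf("free order-%lu page at pfn 0x%lx\n", free_page_order, pfn);
		pfn += 1UL << free_page_order;
		split_pfn_offset -= 1UL << free_page_order;
		/* first part done, switch to the second part */
		if (split_pfn_offset == 0)
			split_pfn_offset = (1UL << order) - (pfn - free_page_pfn);
	}
	return 0;
}

With these numbers it frees an order-8 page at 0x8000, an order-7 page at 0x8100, and an order-7 page
at 0x8180, i.e. the order-9 free page is split exactly at the requested offset.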

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4dcfa0ceca45..ad8f73b00466 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1122,13 +1122,16 @@ void split_free_page(struct page *free_page,
unsigned long flags;
int free_page_order;

+ if (split_pfn_offset == 0)
+ return;
+
spin_lock_irqsave(&zone->lock, flags);
del_page_from_free_list(free_page, zone, order);
for (pfn = free_page_pfn;
pfn < free_page_pfn + (1UL << order);) {
int mt = get_pfnblock_migratetype(pfn_to_page(pfn), pfn);

- free_page_order = ffs(split_pfn_offset) - 1;
+ free_page_order = min(pfn ? __ffs(pfn) : order, __fls(split_pfn_offset));
__free_one_page(pfn_to_page(pfn), pfn, zone, free_page_order,
mt, FPI_NONE);
pfn += 1UL << free_page_order;
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index b3f074d1682e..706915c9a380 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -283,6 +283,7 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* isolate_single_pageblock() -- tries to isolate a pageblock that might be
* within a free or in-use page.
* @boundary_pfn: pageblock-aligned pfn that a page might cross
+ * @flags: isolation flags
* @gfp_flags: GFP flags used for migrating pages
* @isolate_before: isolate the pageblock before the boundary_pfn
*
@@ -298,14 +299,15 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* either. The function handles this by splitting the free page or migrating
* the in-use page then splitting the free page.
*/
-static int isolate_single_pageblock(unsigned long boundary_pfn, gfp_t gfp_flags,
- bool isolate_before)
+static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
+ gfp_t gfp_flags, bool isolate_before)
{
unsigned char saved_mt;
unsigned long start_pfn;
unsigned long isolate_pageblock;
unsigned long pfn;
struct zone *zone;
+ int ret;

VM_BUG_ON(!IS_ALIGNED(boundary_pfn, pageblock_nr_pages));

@@ -325,7 +327,11 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, gfp_t gfp_flags,
zone->zone_start_pfn);

saved_mt = get_pageblock_migratetype(pfn_to_page(isolate_pageblock));
- set_pageblock_migratetype(pfn_to_page(isolate_pageblock), MIGRATE_ISOLATE);
+ ret = set_migratetype_isolate(pfn_to_page(isolate_pageblock), saved_mt, flags,
+ isolate_pageblock, isolate_pageblock + pageblock_nr_pages);
+
+ if (ret)
+ return ret;

/*
* Bail out early when the to-be-isolated pageblock does not form
@@ -374,7 +380,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, gfp_t gfp_flags,
struct page *head = compound_head(page);
unsigned long head_pfn = page_to_pfn(head);

- if (head_pfn + nr_pages < boundary_pfn) {
+ if (head_pfn + nr_pages <= boundary_pfn) {
pfn = head_pfn + nr_pages;
continue;
}
@@ -386,7 +392,8 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, gfp_t gfp_flags,
if (PageHuge(page) || PageLRU(page) || __PageMovable(page)) {
int order;
unsigned long outer_pfn;
- int ret;
+ int page_mt = get_pageblock_migratetype(page);
+ bool isolate_page = !is_migrate_isolate_page(page);
struct compact_control cc = {
.nr_migratepages = 0,
.order = -1,
@@ -399,9 +406,31 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, gfp_t gfp_flags,
};
INIT_LIST_HEAD(&cc.migratepages);

+ /*
+ * XXX: mark the page as MIGRATE_ISOLATE so that
+ * no one else can grab the freed page after migration.
+ * Ideally, the page should be freed as two separate
+ * pages to be added into separate migratetype free
+ * lists.
+ */
+ if (isolate_page) {
+ ret = set_migratetype_isolate(page, page_mt,
+ flags, head_pfn, boundary_pfn - 1);
+ if (ret)
+ goto failed;
+ }
+
ret = __alloc_contig_migrate_range(&cc, head_pfn,
head_pfn + nr_pages);

+ /*
+ * restore the page's migratetype so that it can
+ * be split into separate migratetype free lists
+ * later.
+ */
+ if (isolate_page)
+ unset_migratetype_isolate(page, page_mt);
+
if (ret)
goto failed;
/*
@@ -417,10 +446,9 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, gfp_t gfp_flags,
order = 0;
outer_pfn = pfn;
while (!PageBuddy(pfn_to_page(outer_pfn))) {
- if (++order >= MAX_ORDER) {
- outer_pfn = pfn;
- break;
- }
+ /* stop if we cannot find the free page */
+ if (++order >= MAX_ORDER)
+ goto failed;
outer_pfn &= ~0UL << order;
}
pfn = outer_pfn;
@@ -435,7 +463,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, gfp_t gfp_flags,
return 0;
failed:
/* restore the original migratetype */
- set_pageblock_migratetype(pfn_to_page(isolate_pageblock), saved_mt);
+ unset_migratetype_isolate(pfn_to_page(isolate_pageblock), saved_mt);
return -EBUSY;
}

@@ -496,12 +524,12 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
int ret;

/* isolate [isolate_start, isolate_start + pageblock_nr_pages) pageblock */
- ret = isolate_single_pageblock(isolate_start, gfp_flags, false);
+ ret = isolate_single_pageblock(isolate_start, flags, gfp_flags, false);
if (ret)
return ret;

/* isolate [isolate_end - pageblock_nr_pages, isolate_end) pageblock */
- ret = isolate_single_pageblock(isolate_end, gfp_flags, true);
+ ret = isolate_single_pageblock(isolate_end, flags, gfp_flags, true);
if (ret) {
unset_migratetype_isolate(pfn_to_page(isolate_start), migratetype);
return ret;

--
Best Regards,
Yan, Zi