Subject: [PATCH v2 2/4] mm: migrate: move the numamigrate_isolate_page() into do_numa_page()
Move numamigrate_isolate_page() into do_numa_page() (and its THP
counterpart do_huge_pmd_numa_page()) to simplify migrate_misplaced_page(),
which now focuses only on page migration. This also serves as preparation
for supporting batch migration in migrate_misplaced_page().

While we are at it, change numamigrate_isolate_page() to return a boolean
so the meaning of its return value is clearer.
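
For reference, the caller-side flow after this change looks roughly like
the following simplified sketch of do_numa_page() (error handling and the
surrounding fault logic are omitted):

	pg_data_t *pgdat = NODE_DATA(target_nid);

	/* Isolation now happens in the caller, before migration. */
	if (!numamigrate_isolate_page(pgdat, page)) {
		put_page(page);
		goto migrate_fail;
	}

	/* migrate_misplaced_page() now only performs the migration itself. */
	if (migrate_misplaced_page(page, vma, target_nid)) {
		page_nid = target_nid;
		flags |= TNF_MIGRATED;
	}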

Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
 include/linux/migrate.h |  6 ++++++
 mm/huge_memory.c        |  7 +++++++
 mm/memory.c             |  7 +++++++
 mm/migrate.c            | 22 +++++++---------------
 4 files changed, 27 insertions(+), 15 deletions(-)

diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 711dd9412561..ddcd62ec2c12 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -144,12 +144,18 @@ const struct movable_operations *page_movable_ops(struct page *page)
 #ifdef CONFIG_NUMA_BALANCING
 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
 			   int node);
+bool numamigrate_isolate_page(pg_data_t *pgdat, struct page *page);
 #else
 static inline int migrate_misplaced_page(struct page *page,
 					 struct vm_area_struct *vma, int node)
 {
 	return -EAGAIN; /* can't migrate now */
 }
+
+static inline bool numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
+{
+	return false;
+}
 #endif /* CONFIG_NUMA_BALANCING */
 
 #ifdef CONFIG_MIGRATION
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4a9b34a89854..07149ead11e4 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1496,6 +1496,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 	int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
 	bool migrated = false, writable = false;
 	int flags = 0;
+	pg_data_t *pgdat;
 
 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
 	if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
@@ -1545,6 +1546,12 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 		goto migrate_fail;
 	}
 
+	pgdat = NODE_DATA(target_nid);
+	if (!numamigrate_isolate_page(pgdat, page)) {
+		put_page(page);
+		goto migrate_fail;
+	}
+
 	migrated = migrate_misplaced_page(page, vma, target_nid);
 	if (migrated) {
 		flags |= TNF_MIGRATED;
diff --git a/mm/memory.c b/mm/memory.c
index fc6f6b7a70e1..4e451b041488 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4769,6 +4769,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
 	int target_nid;
 	pte_t pte, old_pte;
 	int flags = 0;
+	pg_data_t *pgdat;
 
 	/*
 	 * The "pte" at this point cannot be used safely without
@@ -4844,6 +4845,12 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
 		goto migrate_fail;
 	}
 
+	pgdat = NODE_DATA(target_nid);
+	if (!numamigrate_isolate_page(pgdat, page)) {
+		put_page(page);
+		goto migrate_fail;
+	}
+
 	/* Migrate to the requested node */
 	if (migrate_misplaced_page(page, vma, target_nid)) {
 		page_nid = target_nid;
diff --git a/mm/migrate.c b/mm/migrate.c
index 9cc98fb1d6ec..0b2b69a2a7ab 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2478,7 +2478,7 @@ static struct folio *alloc_misplaced_dst_folio(struct folio *src,
 	return __folio_alloc_node(gfp, order, nid);
 }
 
-static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
+bool numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 {
 	int nr_pages = thp_nr_pages(page);
 	int order = compound_order(page);
@@ -2496,11 +2496,11 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 				break;
 		}
 		wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
-		return 0;
+		return false;
 	}
 
 	if (!isolate_lru_page(page))
-		return 0;
+		return false;
 
 	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
 			    nr_pages);
@@ -2511,7 +2511,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 	 * disappearing underneath us during migration.
 	 */
 	put_page(page);
-	return 1;
+	return true;
 }
 
 /*
@@ -2523,16 +2523,12 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
 			   int node)
 {
 	pg_data_t *pgdat = NODE_DATA(node);
-	int isolated;
+	int migrated = 1;
 	int nr_remaining;
 	unsigned int nr_succeeded;
 	LIST_HEAD(migratepages);
 	int nr_pages = thp_nr_pages(page);
 
-	isolated = numamigrate_isolate_page(pgdat, page);
-	if (!isolated)
-		goto out;
-
 	list_add(&page->lru, &migratepages);
 	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
 				     NULL, node, MIGRATE_ASYNC,
@@ -2544,7 +2540,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
 					page_is_file_lru(page), -nr_pages);
 			putback_lru_page(page);
 		}
-		isolated = 0;
+		migrated = 0;
 	}
 	if (nr_succeeded) {
@@ -2553,11 +2549,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
 					      nr_succeeded);
 	}
 	BUG_ON(!list_empty(&migratepages));
-	return isolated;
-
-out:
-	put_page(page);
-	return 0;
+	return migrated;
 }
 #endif /* CONFIG_NUMA_BALANCING */
 #endif /* CONFIG_NUMA */
--
2.39.3