From: Yang Shi <shy828301@gmail.com>
Date: Tue, 24 Oct 2023
Subject: Re: [PATCH v3 2/5] mm/khugepaged: Convert hpage_collapse_scan_pmd() to use folios
On Fri, Oct 20, 2023 at 11:34 AM Vishal Moola (Oracle)
<vishal.moola@gmail.com> wrote:
>
> Replaces 5 calls to compound_head(), and removes 1385 bytes of kernel
> text.
>
> Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>

Reviewed-by: Yang Shi <shy828301@gmail.com>

> ---
>  mm/khugepaged.c | 20 ++++++++++----------
>  1 file changed, 10 insertions(+), 10 deletions(-)
>
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index 500756604488..6c4b5af43371 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -1248,6 +1248,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
>  	int result = SCAN_FAIL, referenced = 0;
>  	int none_or_zero = 0, shared = 0;
>  	struct page *page = NULL;
> +	struct folio *folio = NULL;
>  	unsigned long _address;
>  	spinlock_t *ptl;
>  	int node = NUMA_NO_NODE, unmapped = 0;
> @@ -1334,29 +1335,28 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
>  			}
>  		}
>
> -		page = compound_head(page);
> -
> +		folio = page_folio(page);
>  		/*
>  		 * Record which node the original page is from and save this
>  		 * information to cc->node_load[].
>  		 * Khugepaged will allocate hugepage from the node has the max
>  		 * hit record.
>  		 */
> -		node = page_to_nid(page);
> +		node = folio_nid(folio);
>  		if (hpage_collapse_scan_abort(node, cc)) {
>  			result = SCAN_SCAN_ABORT;
>  			goto out_unmap;
>  		}
>  		cc->node_load[node]++;
> -		if (!PageLRU(page)) {
> +		if (!folio_test_lru(folio)) {
>  			result = SCAN_PAGE_LRU;
>  			goto out_unmap;
>  		}
> -		if (PageLocked(page)) {
> +		if (folio_test_locked(folio)) {
>  			result = SCAN_PAGE_LOCK;
>  			goto out_unmap;
>  		}
> -		if (!PageAnon(page)) {
> +		if (!folio_test_anon(folio)) {
>  			result = SCAN_PAGE_ANON;
>  			goto out_unmap;
>  		}
> @@ -1371,7 +1371,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
>  		 * has excessive GUP pins (i.e. 512). Anyway the same check
>  		 * will be done again later the risk seems low.
>  		 */
> -		if (!is_refcount_suitable(page)) {
> +		if (!is_refcount_suitable(&folio->page)) {
>  			result = SCAN_PAGE_COUNT;
>  			goto out_unmap;
>  		}
> @@ -1381,8 +1381,8 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
>  		 * enough young pte to justify collapsing the page
>  		 */
>  		if (cc->is_khugepaged &&
> -		    (pte_young(pteval) || page_is_young(page) ||
> -		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
> +		    (pte_young(pteval) || folio_test_young(folio) ||
> +		     folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
>  								     address)))
>  			referenced++;
>  	}
> @@ -1404,7 +1404,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
>  		*mmap_locked = false;
>  	}
>  out:
> -	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
> +	trace_mm_khugepaged_scan_pmd(mm, &folio->page, writable, referenced,
>  				     none_or_zero, result, unmapped);
>  	return result;
>  }
> --
> 2.40.1
>
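Background sketch (not part of the patch, and not real kernel source): the conversion saves work because a folio always refers to a head page, so one page_folio() lookup up front replaces the repeated compound_head() resolution hidden inside helpers like PageLRU()/PageLocked()/PageAnon(). The self-contained C model below uses simplified stand-in types with kernel-like names purely to illustrate that relationship.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel types; the real ones live in mm_types.h. */
struct page {
	struct page *head;	/* head page of the compound page (itself, if already a head) */
	bool lru, locked, anon;	/* stand-ins for the real page flag bits */
};

struct folio {
	struct page page;	/* a folio is a head page viewed as one unit */
};

/* Old model: every flag helper re-derives the head page. */
static struct page *compound_head(struct page *page)
{
	return page->head;
}

/* New model: resolve the head once, then keep working with the folio. */
static struct folio *page_folio(struct page *page)
{
	return (struct folio *)compound_head(page);
}

static bool folio_test_lru(const struct folio *folio)    { return folio->page.lru; }
static bool folio_test_locked(const struct folio *folio) { return folio->page.locked; }
static bool folio_test_anon(const struct folio *folio)   { return folio->page.anon; }

int main(void)
{
	struct folio head = { .page = { .lru = true, .anon = true } };
	struct page tail = { .head = &head.page };

	head.page.head = &head.page;	/* a head page points at itself */

	/* One page_folio() call replaces the repeated compound_head() calls
	 * that PageLRU()/PageLocked()/PageAnon() performed in the old code. */
	struct folio *folio = page_folio(&tail);

	printf("lru=%d locked=%d anon=%d\n",
	       folio_test_lru(folio), folio_test_locked(folio),
	       folio_test_anon(folio));
	return 0;
}

In the kernel the real folio_test_*() helpers read folio->flags and page_folio() handles compound/tail-page details; the sketch only models the "resolve the head once" structure that lets the patch drop its five compound_head() calls.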
