From: Pasha Tatashin <pasha.tatashin@soleen.com>
Subject: [PATCH v3 3/4] mm/khugepaged: unify collapse pmd clear, flush and free
Date: 2022-01-26
Unify the code that flushes, clears the pmd entry, and frees the PTE table
level into a new function, collapse_and_free_pmd().

This clean-up is useful because the next patch will extend this function
with a call that iterates through the PTE entries prior to freeing the
level, for the page table check.
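
For context, the follow-up is expected to extend the helper roughly as
sketched below (illustrative only, not part of this patch;
page_table_check_pte_clear_range() is the hook added by the next patch in
this series):

/*
 * Illustrative sketch only: the next patch is expected to record the
 * cleared PTE range for the page table check before the level is freed.
 */
static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
				  unsigned long addr, pmd_t *pmdp)
{
	spinlock_t *ptl = pmd_lock(vma->vm_mm, pmdp);
	pmd_t pmd = pmdp_collapse_flush(vma, addr, pmdp);

	spin_unlock(ptl);
	mm_dec_nr_ptes(mm);
	page_table_check_pte_clear_range(mm, addr, pmd);	/* added by the next patch */
	pte_free(mm, pmd_pgtable(pmd));
}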

Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
---
 mm/khugepaged.c | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 35f14d0a00a6..440112355ffe 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1416,6 +1416,17 @@ static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
 	return 0;
 }
 
+static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
+				  unsigned long addr, pmd_t *pmdp)
+{
+	spinlock_t *ptl = pmd_lock(vma->vm_mm, pmdp);
+	pmd_t pmd = pmdp_collapse_flush(vma, addr, pmdp);
+
+	spin_unlock(ptl);
+	mm_dec_nr_ptes(mm);
+	pte_free(mm, pmd_pgtable(pmd));
+}
+
 /**
  * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
  * address haddr.
@@ -1433,7 +1444,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
 	struct vm_area_struct *vma = find_vma(mm, haddr);
 	struct page *hpage;
 	pte_t *start_pte, *pte;
-	pmd_t *pmd, _pmd;
+	pmd_t *pmd;
 	spinlock_t *ptl;
 	int count = 0;
 	int i;
@@ -1509,12 +1520,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
 	}
 
 	/* step 4: collapse pmd */
-	ptl = pmd_lock(vma->vm_mm, pmd);
-	_pmd = pmdp_collapse_flush(vma, haddr, pmd);
-	spin_unlock(ptl);
-	mm_dec_nr_ptes(mm);
-	pte_free(mm, pmd_pgtable(_pmd));
-
+	collapse_and_free_pmd(mm, vma, haddr, pmd);
 drop_hpage:
 	unlock_page(hpage);
 	put_page(hpage);
@@ -1552,7 +1558,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
 	struct vm_area_struct *vma;
 	struct mm_struct *mm;
 	unsigned long addr;
-	pmd_t *pmd, _pmd;
+	pmd_t *pmd;
 
 	i_mmap_lock_write(mapping);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
@@ -1591,14 +1597,8 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
 		 * reverse order. Trylock is a way to avoid deadlock.
 		 */
 		if (mmap_write_trylock(mm)) {
-			if (!khugepaged_test_exit(mm)) {
-				spinlock_t *ptl = pmd_lock(mm, pmd);
-				/* assume page table is clear */
-				_pmd = pmdp_collapse_flush(vma, addr, pmd);
-				spin_unlock(ptl);
-				mm_dec_nr_ptes(mm);
-				pte_free(mm, pmd_pgtable(_pmd));
-			}
+			if (!khugepaged_test_exit(mm))
+				collapse_and_free_pmd(mm, vma, addr, pmd);
 			mmap_write_unlock(mm);
 		} else {
 			/* Try again later */
-- 
2.35.0.rc0.227.g00780c9af4-goog