    Subject: [PATCH v6 02/18] mm: thp: Batch-collapse PMD with set_ptes()
    Refactor __split_huge_pmd_locked() so that a present PMD can be
    collapsed to PTEs in a single batch using set_ptes().

    This should improve performance a little bit, but the real motivation is
    to remove the need for the arm64 backend to fold the contpte entries.
    Instead, since the ptes are set as a batch, the contpte blocks can be
    initially set up pre-folded (once the arm64 contpte support is added in
    the next few patches). This leads to a noticeable performance improvement
    during the split.
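
    To make the shape of the change easier to see, here is a stripped-down,
    illustrative sketch of the non-migration path before and after (write,
    young, dirty, soft-dirty and uffd-wp handling omitted; the real code is
    in the diff below):

        /*
         * Before: each PTE is built and installed individually, so the
         * architecture sees HPAGE_PMD_NR independent set_pte_at() calls
         * and must detect and fold any contiguous run after the fact.
         */
        for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
                pte_t entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));

                set_pte_at(mm, addr, pte + i, entry);
        }

        /*
         * After: one template entry for the head page is handed to
         * set_ptes(), which installs all HPAGE_PMD_NR entries, advancing
         * the pfn for each successive one. An arch backend (e.g. arm64
         * contpte) can therefore write the whole range pre-folded.
         */
        pte_t entry = mk_pte(page, READ_ONCE(vma->vm_page_prot));

        set_ptes(mm, haddr, pte, entry, HPAGE_PMD_NR);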

    Acked-by: David Hildenbrand <david@redhat.com>
    Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
    ---
    mm/huge_memory.c | 58 +++++++++++++++++++++++++++---------------------
    1 file changed, 33 insertions(+), 25 deletions(-)

    diff --git a/mm/huge_memory.c b/mm/huge_memory.c
    index 016e20bd813e..14888b15121e 100644
    --- a/mm/huge_memory.c
    +++ b/mm/huge_memory.c
    @@ -2579,15 +2579,16 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,

    pte = pte_offset_map(&_pmd, haddr);
    VM_BUG_ON(!pte);
    - for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
    - pte_t entry;
    - /*
    - * Note that NUMA hinting access restrictions are not
    - * transferred to avoid any possibility of altering
    - * permissions across VMAs.
    - */
    - if (freeze || pmd_migration) {
    +
    + /*
    + * Note that NUMA hinting access restrictions are not transferred to
    + * avoid any possibility of altering permissions across VMAs.
    + */
    + if (freeze || pmd_migration) {
    + for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
    + pte_t entry;
    swp_entry_t swp_entry;
    +
    if (write)
    swp_entry = make_writable_migration_entry(
    page_to_pfn(page + i));
    @@ -2606,25 +2607,32 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
    entry = pte_swp_mksoft_dirty(entry);
    if (uffd_wp)
    entry = pte_swp_mkuffd_wp(entry);
    - } else {
    - entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
    - if (write)
    - entry = pte_mkwrite(entry, vma);
    - if (!young)
    - entry = pte_mkold(entry);
    - /* NOTE: this may set soft-dirty too on some archs */
    - if (dirty)
    - entry = pte_mkdirty(entry);
    - if (soft_dirty)
    - entry = pte_mksoft_dirty(entry);
    - if (uffd_wp)
    - entry = pte_mkuffd_wp(entry);
    +
    + VM_WARN_ON(!pte_none(ptep_get(pte + i)));
    + set_pte_at(mm, addr, pte + i, entry);
    }
    - VM_BUG_ON(!pte_none(ptep_get(pte)));
    - set_pte_at(mm, addr, pte, entry);
    - pte++;
    + } else {
    + pte_t entry;
    +
    + entry = mk_pte(page, READ_ONCE(vma->vm_page_prot));
    + if (write)
    + entry = pte_mkwrite(entry, vma);
    + if (!young)
    + entry = pte_mkold(entry);
    + /* NOTE: this may set soft-dirty too on some archs */
    + if (dirty)
    + entry = pte_mkdirty(entry);
    + if (soft_dirty)
    + entry = pte_mksoft_dirty(entry);
    + if (uffd_wp)
    + entry = pte_mkuffd_wp(entry);
    +
    + for (i = 0; i < HPAGE_PMD_NR; i++)
    + VM_WARN_ON(!pte_none(ptep_get(pte + i)));
    +
    + set_ptes(mm, haddr, pte, entry, HPAGE_PMD_NR);
    }
    - pte_unmap(pte - 1);
    + pte_unmap(pte);

    if (!pmd_migration)
    folio_remove_rmap_pmd(folio, page, vma);
    --
    2.25.1
