    From: Yin Fengwei <fengwei.yin@intel.com>
    Subject: [PATCH v3 31/34] filemap: Add filemap_map_folio_range()

    filemap_map_folio_range() maps a partial or full folio. Compared to the
    original filemap_map_pages(), it updates the folio refcount once per
    folio instead of once per page, which gives a minor performance
    improvement for large folios.
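
    As an aside, the batching can be sketched with a tiny standalone C
    program (plain C11 atomics, not kernel code; toy_folio, toy_can_map(),
    map_per_page() and map_folio_range() are made up for illustration):

        #include <stdatomic.h>
        #include <stdbool.h>
        #include <stdio.h>

        /* Toy model: a "folio" is just an atomic refcount plus a page count. */
        struct toy_folio {
                atomic_int refcount;
                int nr_pages;
        };

        /* Stand-in for pages that must be skipped (HWPoison, PTE not none). */
        static bool toy_can_map(int idx)
        {
                return idx != 3;
        }

        /* Old scheme: one atomic increment per page that gets mapped. */
        static void map_per_page(struct toy_folio *folio)
        {
                for (int i = 0; i < folio->nr_pages; i++)
                        if (toy_can_map(i))
                                atomic_fetch_add(&folio->refcount, 1);
        }

        /* New scheme: count mapped pages locally, one atomic add per folio. */
        static void map_folio_range(struct toy_folio *folio)
        {
                int ref_count = 0;

                for (int i = 0; i < folio->nr_pages; i++)
                        if (toy_can_map(i))
                                ref_count++;
                atomic_fetch_add(&folio->refcount, ref_count);
        }

        int main(void)
        {
                struct toy_folio a = { .refcount = 1, .nr_pages = 16 };
                struct toy_folio b = { .refcount = 1, .nr_pages = 16 };

                map_per_page(&a);
                map_folio_range(&b);
                /* Same final refcount (16 each), but b took one atomic op. */
                printf("per-page: %d  batched: %d\n",
                       atomic_load(&a.refcount), atomic_load(&b.refcount));
                return 0;
        }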

    With a will-it-scale.page_fault3-like app (the file write fault test
    changed to a read fault test; an attempt to upstream it to will-it-scale
    is at [1]), this gives a 2% performance gain on a 48C/96T Cascade Lake
    test box with 96 processes running against xfs.

    [1]: https://github.com/antonblanchard/will-it-scale/pull/37

    Signed-off-by: Yin Fengwei <fengwei.yin@intel.com>
    Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
    ---
    mm/filemap.c | 98 +++++++++++++++++++++++++++++-----------------------
    1 file changed, 54 insertions(+), 44 deletions(-)

    diff --git a/mm/filemap.c b/mm/filemap.c
    index 2723104cc06a..db86e459dde6 100644
    --- a/mm/filemap.c
    +++ b/mm/filemap.c
    @@ -2202,16 +2202,6 @@ unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
    }
    EXPORT_SYMBOL(filemap_get_folios);

    -static inline
    -bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max)
    -{
    - if (!folio_test_large(folio) || folio_test_hugetlb(folio))
    - return false;
    - if (index >= max)
    - return false;
    - return index < folio->index + folio_nr_pages(folio) - 1;
    -}
    -
    /**
    * filemap_get_folios_contig - Get a batch of contiguous folios
    * @mapping: The address_space to search
    @@ -3483,6 +3473,53 @@ static inline struct folio *next_map_page(struct address_space *mapping,
    mapping, xas, end_pgoff);
    }

    +/*
    + * Map page range [start_page, start_page + nr_pages) of folio.
    + * start_page is gotten from start by folio_page(folio, start)
    + */
    +static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
    + struct folio *folio, unsigned long start,
    + unsigned long addr, unsigned int nr_pages)
    +{
    + vm_fault_t ret = 0;
    + struct vm_area_struct *vma = vmf->vma;
    + struct file *file = vma->vm_file;
    + struct page *page = folio_page(folio, start);
    + unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
    + unsigned int ref_count = 0, count = 0;
    +
    + do {
    + if (PageHWPoison(page))
    + continue;
    +
    + if (mmap_miss > 0)
    + mmap_miss--;
    +
    + /*
    + * NOTE: If there're PTE markers, we'll leave them to be
    + * handled in the specific fault path, and it'll prohibit the
    + * fault-around logic.
    + */
    + if (!pte_none(*vmf->pte))
    + continue;
    +
    + if (vmf->address == addr)
    + ret = VM_FAULT_NOPAGE;
    +
    + ref_count++;
    + do_set_pte(vmf, page, addr);
    + update_mmu_cache(vma, addr, vmf->pte);
    + } while (vmf->pte++, page++, addr += PAGE_SIZE, ++count < nr_pages);
    +
    + /* Restore the vmf->pte */
    + vmf->pte -= nr_pages;
    +
    + folio_ref_add(folio, ref_count);
    + WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
    +
    + return ret;
    +}
    +
    vm_fault_t filemap_map_pages(struct vm_fault *vmf,
    pgoff_t start_pgoff, pgoff_t end_pgoff)
    {
    @@ -3493,9 +3530,9 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
    unsigned long addr;
    XA_STATE(xas, &mapping->i_pages, start_pgoff);
    struct folio *folio;
    - struct page *page;
    unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
    vm_fault_t ret = 0;
    + int nr_pages = 0;

    rcu_read_lock();
    folio = first_map_page(mapping, &xas, end_pgoff);
    @@ -3510,45 +3547,18 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
    addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
    vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
    do {
    -again:
    - page = folio_file_page(folio, xas.xa_index);
    - if (PageHWPoison(page))
    - goto unlock;
    -
    - if (mmap_miss > 0)
    - mmap_miss--;
    + unsigned long end;

    addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
    vmf->pte += xas.xa_index - last_pgoff;
    last_pgoff = xas.xa_index;
    + end = folio->index + folio_nr_pages(folio) - 1;
    + nr_pages = min(end, end_pgoff) - xas.xa_index + 1;

    - /*
    - * NOTE: If there're PTE markers, we'll leave them to be
    - * handled in the specific fault path, and it'll prohibit the
    - * fault-around logic.
    - */
    - if (!pte_none(*vmf->pte))
    - goto unlock;
    + ret |= filemap_map_folio_range(vmf, folio,
    + xas.xa_index - folio->index, addr, nr_pages);
    + xas.xa_index += nr_pages;

    - /* We're about to handle the fault */
    - if (vmf->address == addr)
    - ret = VM_FAULT_NOPAGE;
    -
    - do_set_pte(vmf, page, addr);
    - /* no need to invalidate: a not-present page won't be cached */
    - update_mmu_cache(vma, addr, vmf->pte);
    - if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
    - xas.xa_index++;
    - folio_ref_inc(folio);
    - goto again;
    - }
    - folio_unlock(folio);
    - continue;
    -unlock:
    - if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
    - xas.xa_index++;
    - goto again;
    - }
    folio_unlock(folio);
    folio_put(folio);
    } while ((folio = next_map_page(mapping, &xas, end_pgoff)) != NULL);
    --
    2.39.1
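
    For illustration only (not kernel code), the per-folio range arithmetic
    from the new filemap_map_pages() loop above can be checked with a small
    standalone program; the index values below are invented:

        /* Standalone check of the nr_pages arithmetic in the new loop. */
        #include <stdio.h>

        static unsigned long min_ul(unsigned long a, unsigned long b)
        {
                return a < b ? a : b;
        }

        int main(void)
        {
                unsigned long folio_index = 64;   /* folio->index */
                unsigned long folio_pages = 16;   /* folio_nr_pages(folio) */
                unsigned long xa_index = 66;      /* xas.xa_index */
                unsigned long end_pgoff = 70;     /* fault-around limit */

                unsigned long end = folio_index + folio_pages - 1;  /* 79 */
                unsigned long nr_pages = min_ul(end, end_pgoff) - xa_index + 1;

                /* Pages 66..70 of the folio are mapped in one call: 5 pages. */
                printf("nr_pages = %lu\n", nr_pages);
                return 0;
        }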