Subject: [PATCH v2 01/32] mm: use pmdp_get_lockless() without surplus barrier()

Use pmdp_get_lockless() in preference to READ_ONCE(*pmdp), to get a more
reliable result with PAE (or READ_ONCE as before without PAE); and remove
the unnecessary extra barrier()s which got left behind in its callers.
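
For background, a simplified sketch of what the PAE flavour of these
helpers does (paraphrased from the CONFIG_GUP_GET_PXX_LOW_HIGH variant
in linux/pgtable.h, not a verbatim copy): the 64-bit entry is wider
than the machine word, so it is read as two 32-bit halves with a retry
loop to detect tearing, a guarantee READ_ONCE(*pmdp) cannot give:

	static inline pte_t ptep_get_lockless(pte_t *ptep)
	{
		pte_t pte;

		do {
			pte.pte_low = ptep->pte_low;
			smp_rmb();	/* read low half before high */
			pte.pte_high = ptep->pte_high;
			smp_rmb();	/* read high half before re-check */
		} while (unlikely(pte.pte_low != ptep->pte_low));

		return pte;
	}

pmdp_get_lockless() does the same at the pmd level; without PAE both
collapse to a plain READ_ONCE(), as noted above.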

HOWEVER: Note the small print in linux/pgtable.h, where it was designed
specifically for fast GUP, and depends on interrupts being disabled for
its full guarantee: most callers which have been added (here and before)
do NOT have interrupts disabled, so there is still some need for caution.
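
For illustration of that full guarantee, here is roughly how the fast
GUP path uses these helpers (a simplified sketch of the pattern in
mm/gup.c's lockless walk, not part of this patch):

	unsigned long flags;

	/*
	 * With interrupts disabled, a page table cannot be freed and
	 * reused under us: the TLB flush (or its software equivalent)
	 * which must complete first cannot make progress.  Under that
	 * protection the p?dp_get_lockless() results are trustworthy.
	 */
	local_irq_save(flags);
	gup_pgd_range(start, end, gup_flags, pages, &nr_pinned);
	local_irq_restore(flags);

The callers converted below do not run under such protection, which is
why the caution above still applies to them.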

Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Yu Zhao <yuzhao@google.com>
Acked-by: Peter Xu <peterx@redhat.com>
---
 fs/userfaultfd.c        | 10 +---------
 include/linux/pgtable.h | 17 -----------------
 mm/gup.c                |  6 +-----
 mm/hmm.c                |  2 +-
 mm/khugepaged.c         |  5 -----
 mm/ksm.c                |  3 +--
 mm/memory.c             | 14 ++------------
 mm/mprotect.c           |  5 -----
 mm/page_vma_mapped.c    |  2 +-
 9 files changed, 7 insertions(+), 57 deletions(-)

diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 0fd96d6e39ce..f7a0817b1ec0 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -349,15 +349,7 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
 	if (!pud_present(*pud))
 		goto out;
 	pmd = pmd_offset(pud, address);
-	/*
-	 * READ_ONCE must function as a barrier with narrower scope
-	 * and it must be equivalent to:
-	 *	_pmd = *pmd; barrier();
-	 *
-	 * This is to deal with the instability (as in
-	 * pmd_trans_unstable) of the pmd.
-	 */
-	_pmd = READ_ONCE(*pmd);
+	_pmd = pmdp_get_lockless(pmd);
 	if (pmd_none(_pmd))
 		goto out;
 
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index c5a51481bbb9..8ec27fe69dc8 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1344,23 +1344,6 @@ static inline int pud_trans_unstable(pud_t *pud)
 static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
 {
 	pmd_t pmdval = pmdp_get_lockless(pmd);
-	/*
-	 * The barrier will stabilize the pmdval in a register or on
-	 * the stack so that it will stop changing under the code.
-	 *
-	 * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
-	 * pmdp_get_lockless is allowed to return a not atomic pmdval
-	 * (for example pointing to an hugepage that has never been
-	 * mapped in the pmd). The below checks will only care about
-	 * the low part of the pmd with 32bit PAE x86 anyway, with the
-	 * exception of pmd_none(). So the important thing is that if
-	 * the low part of the pmd is found null, the high part will
-	 * be also null or the pmd_none() check below would be
-	 * confused.
-	 */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	barrier();
-#endif
 	/*
 	 * !pmd_present() checks for pmd migration entries
 	 *
diff --git a/mm/gup.c b/mm/gup.c
index bbe416236593..3bd5d3854c51 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -653,11 +653,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 	struct mm_struct *mm = vma->vm_mm;
 
 	pmd = pmd_offset(pudp, address);
-	/*
-	 * The READ_ONCE() will stabilize the pmdval in a register or
-	 * on the stack so that it will stop changing under the code.
-	 */
-	pmdval = READ_ONCE(*pmd);
+	pmdval = pmdp_get_lockless(pmd);
 	if (pmd_none(pmdval))
 		return no_page_table(vma, flags);
 	if (!pmd_present(pmdval))
diff --git a/mm/hmm.c b/mm/hmm.c
index 6a151c09de5e..e23043345615 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -332,7 +332,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 	pmd_t pmd;
 
 again:
-	pmd = READ_ONCE(*pmdp);
+	pmd = pmdp_get_lockless(pmdp);
 	if (pmd_none(pmd))
 		return hmm_vma_walk_hole(start, end, -1, walk);
 
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 6b9d39d65b73..732f9ac393fc 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -961,11 +961,6 @@ static int find_pmd_or_thp_or_none(struct mm_struct *mm,
 		return SCAN_PMD_NULL;
 
 	pmde = pmdp_get_lockless(*pmd);
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	/* See comments in pmd_none_or_trans_huge_or_clear_bad() */
-	barrier();
-#endif
 	if (pmd_none(pmde))
 		return SCAN_PMD_NONE;
 	if (!pmd_present(pmde))
diff --git a/mm/ksm.c b/mm/ksm.c
index 0156bded3a66..df2aa281d49d 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1194,8 +1194,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
 	 * without holding anon_vma lock for write. So when looking for a
 	 * genuine pmde (in which to find pte), test present and !THP together.
 	 */
-	pmde = *pmd;
-	barrier();
+	pmde = pmdp_get_lockless(pmd);
 	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
 		goto out;
 
diff --git a/mm/memory.c b/mm/memory.c
index f69fbc251198..2eb54c0d5d3c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4925,18 +4925,9 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
 		 * So now it's safe to run pte_offset_map().
 		 */
 		vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
-		vmf->orig_pte = *vmf->pte;
+		vmf->orig_pte = ptep_get_lockless(vmf->pte);
 		vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID;
 
-		/*
-		 * some architectures can have larger ptes than wordsize,
-		 * e.g.ppc44x-defconfig has CONFIG_PTE_64BIT=y and
-		 * CONFIG_32BIT=y, so READ_ONCE cannot guarantee atomic
-		 * accesses. The code below just needs a consistent view
-		 * for the ifs and we later double check anyway with the
-		 * ptl lock held. So here a barrier will do.
-		 */
-		barrier();
 		if (pte_none(vmf->orig_pte)) {
 			pte_unmap(vmf->pte);
 			vmf->pte = NULL;
@@ -5060,9 +5051,8 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 		if (!(ret & VM_FAULT_FALLBACK))
 			return ret;
 	} else {
-		vmf.orig_pmd = *vmf.pmd;
+		vmf.orig_pmd = pmdp_get_lockless(vmf.pmd);
 
-		barrier();
 		if (unlikely(is_swap_pmd(vmf.orig_pmd))) {
 			VM_BUG_ON(thp_migration_supported() &&
 				  !is_pmd_migration_entry(vmf.orig_pmd));
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 92d3d3ca390a..c5a13c0f1017 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -309,11 +309,6 @@ static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
 {
 	pmd_t pmdval = pmdp_get_lockless(pmd);
 
-	/* See pmd_none_or_trans_huge_or_clear_bad for info on barrier */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	barrier();
-#endif
-
 	if (pmd_none(pmdval))
 		return 1;
 	if (pmd_trans_huge(pmdval))
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 4e448cfbc6ef..64aff6718bdb 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -210,7 +210,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 	 * compiler and used as a stale value after we've observed a
 	 * subsequent update.
 	 */
-	pmde = READ_ONCE(*pvmw->pmd);
+	pmde = pmdp_get_lockless(pvmw->pmd);
 
 	if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde) ||
 	    (pmd_present(pmde) && pmd_devmap(pmde))) {
-- 
2.35.3