From: Jérôme Glisse <jglisse@redhat.com>
Subject: [PATCH 08/14] mm/hmm: cleanup special vma handling (VM_SPECIAL)

Special vma (one with any of the VM_SPECIAL flags) cannot be accessed by
devices because there is no consistent model across device drivers for
those vmas and their backing memory.

This patch passes the hmm_range struct directly to hmm_pfns_special(),
since that helper always affects the whole vma and thus the whole range.

It also makes the behavior consistent: after this patch both hmm_vma_fault()
and hmm_vma_get_pfns() return -EINVAL when facing such a vma. Previously
hmm_vma_fault() returned 0 while hmm_vma_get_pfns() returned -EINVAL, but
both filled the HMM pfn array with the special entry.
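
For illustration only (not part of the patch, and the mirror_snapshot_range()
helper below is hypothetical): with the now-consistent -EINVAL return, a
driver-side caller can reject special or hugetlb vmas up front instead of
scanning range->pfns for HMM_PFN_SPECIAL entries after a successful return.

/*
 * Hypothetical driver-side sketch. Only hmm_vma_fault(), hmm_vma_get_pfns()
 * and the -EINVAL behavior come from this patch; the helper name and the
 * surrounding logic are invented for the example.
 */
static int mirror_snapshot_range(struct hmm_range *range, bool write)
{
        int ret;

        ret = write ? hmm_vma_fault(range, true, false) :
                      hmm_vma_get_pfns(range);
        if (ret == -EINVAL)
                /* Special or hugetlb vma: not mirrorable, skip it. */
                return ret;
        if (ret)
                return ret;

        /* All entries in range->pfns are regular snapshot entries here. */
        return 0;
}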

Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Cc: Evgeny Baskakov <ebaskakov@nvidia.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Mark Hairgrove <mhairgrove@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
---
 mm/hmm.c | 40 ++++++++++++++++++++--------------------
 1 file changed, 20 insertions(+), 20 deletions(-)

diff --git a/mm/hmm.c b/mm/hmm.c
index f674b73e7f4a..04595a994542 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -281,14 +281,6 @@ static int hmm_vma_do_fault(struct mm_walk *walk,
         return -EAGAIN;
 }
 
-static void hmm_pfns_special(uint64_t *pfns,
-                             unsigned long addr,
-                             unsigned long end)
-{
-        for (; addr < end; addr += PAGE_SIZE, pfns++)
-                *pfns = HMM_PFN_SPECIAL;
-}
-
 static int hmm_pfns_bad(unsigned long addr,
                         unsigned long end,
                         struct mm_walk *walk)
@@ -486,6 +478,14 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
         return 0;
 }
 
+static void hmm_pfns_special(struct hmm_range *range)
+{
+        unsigned long addr = range->start, i = 0;
+
+        for (; addr < range->end; addr += PAGE_SIZE, i++)
+                range->pfns[i] = HMM_PFN_SPECIAL;
+}
+
 /*
  * hmm_vma_get_pfns() - snapshot CPU page table for a range of virtual addresses
  * @range: range being snapshoted and all needed informations
@@ -509,12 +509,6 @@ int hmm_vma_get_pfns(struct hmm_range *range)
         struct mm_walk mm_walk;
         struct hmm *hmm;
 
-        /* FIXME support hugetlb fs */
-        if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) {
-                hmm_pfns_special(range->pfns, range->start, range->end);
-                return -EINVAL;
-        }
-
         /* Sanity check, this really should not happen ! */
         if (range->start < vma->vm_start || range->start >= vma->vm_end)
                 return -EINVAL;
@@ -528,6 +522,12 @@ int hmm_vma_get_pfns(struct hmm_range *range)
         if (!hmm->mmu_notifier.ops)
                 return -EINVAL;
 
+        /* FIXME support hugetlb fs */
+        if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) {
+                hmm_pfns_special(range);
+                return -EINVAL;
+        }
+
         /* Initialize range to track CPU page table update */
         spin_lock(&hmm->lock);
         range->valid = true;
@@ -693,6 +693,12 @@ int hmm_vma_fault(struct hmm_range *range, bool write, bool block)
         if (!hmm->mmu_notifier.ops)
                 return -EINVAL;
 
+        /* FIXME support hugetlb fs */
+        if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) {
+                hmm_pfns_special(range);
+                return -EINVAL;
+        }
+
         /* Initialize range to track CPU page table update */
         spin_lock(&hmm->lock);
         range->valid = true;
@@ -710,12 +716,6 @@ int hmm_vma_fault(struct hmm_range *range, bool write, bool block)
                 return 0;
         }
 
-        /* FIXME support hugetlb fs */
-        if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) {
-                hmm_pfns_special(range->pfns, range->start, range->end);
-                return 0;
-        }
-
         hmm_vma_walk.fault = true;
         hmm_vma_walk.write = write;
         hmm_vma_walk.block = block;
--
2.14.3