Subject: Re: [PATCH v9 1/8] mm: introduce FOLL_PCI_P2PDMA to gate getting PCI P2PDMA pages
From: John Hubbard <jhubbard@nvidia.com>
On 8/25/22 08:24, Logan Gunthorpe wrote:
> GUP callers that expect PCI P2PDMA pages can now set FOLL_PCI_P2PDMA to
> allow obtaining P2PDMA pages. If GUP is called without the flag and a
> P2PDMA page is found, it will return an error.
>
> FOLL_PCI_P2PDMA cannot be set if FOLL_LONGTERM is set.
>
> Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
> Reviewed-by: Christoph Hellwig <hch@lst.de>
> ---
> include/linux/mm.h | 1 +
> mm/gup.c | 22 +++++++++++++++++++++-
> 2 files changed, 22 insertions(+), 1 deletion(-)
>

Looks good. And I see that Dan Williams' upcoming "Fix the DAX-gup
mistake" series will remove the need for most (all?) of the
undo_dev_pagemap() calls that you have to make here, so the end result
will be even simpler.
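
For anyone following the series from the sidelines, here is a minimal
sketch of how an opted-in caller might look. The function name and the
flow are made up for illustration only; they are not from this series:

#include <linux/mm.h>

static int demo_pin_p2pdma_capable(unsigned long uaddr, int nr_pages,
				   struct page **pages)
{
	/*
	 * Opt in explicitly: without FOLL_PCI_P2PDMA, hitting a P2PDMA
	 * page makes GUP fail with -EREMOTEIO.
	 */
	unsigned int gup_flags = FOLL_WRITE | FOLL_PCI_P2PDMA;
	int pinned;

	/* Note: FOLL_LONGTERM | FOLL_PCI_P2PDMA is rejected (-EOPNOTSUPP). */
	pinned = pin_user_pages_fast(uaddr, nr_pages, gup_flags, pages);
	if (pinned < 0)
		return pinned;

	/* ... map for DMA and do the transfer ... */

	unpin_user_pages(pages, pinned);
	return 0;
}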


Reviewed-by: John Hubbard <jhubbard@nvidia.com>

thanks,

--
John Hubbard
NVIDIA

> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 3bedc449c14d..37a3e91e6e77 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -2891,6 +2891,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
> #define FOLL_SPLIT_PMD 0x20000 /* split huge pmd before returning */
> #define FOLL_PIN 0x40000 /* pages must be released via unpin_user_page */
> #define FOLL_FAST_ONLY 0x80000 /* gup_fast: prevent fall-back to slow gup */
> +#define FOLL_PCI_P2PDMA 0x100000 /* allow returning PCI P2PDMA pages */
>
> /*
> * FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each
> diff --git a/mm/gup.c b/mm/gup.c
> index 732825157430..79aea452619e 100644
> --- a/mm/gup.c
> +++ b/mm/gup.c
> @@ -566,6 +566,12 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
> goto out;
> }
>
> + if (unlikely(!(flags & FOLL_PCI_P2PDMA) &&
> + is_pci_p2pdma_page(page))) {
> + page = ERR_PTR(-EREMOTEIO);
> + goto out;
> + }
> +
> VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
> !PageAnonExclusive(page), page);
>
> @@ -1015,6 +1021,9 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
> if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
> return -EOPNOTSUPP;
>
> + if ((gup_flags & FOLL_LONGTERM) && (gup_flags & FOLL_PCI_P2PDMA))
> + return -EOPNOTSUPP;
> +
> if (vma_is_secretmem(vma))
> return -EFAULT;
>
> @@ -2359,6 +2368,10 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
> VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
> page = pte_page(pte);
>
> + if (unlikely(!(flags & FOLL_PCI_P2PDMA) &&
> + is_pci_p2pdma_page(page)))
> + goto pte_unmap;
> +
> folio = try_grab_folio(page, 1, flags);
> if (!folio)
> goto pte_unmap;
> @@ -2438,6 +2451,12 @@ static int __gup_device_huge(unsigned long pfn, unsigned long addr,
> undo_dev_pagemap(nr, nr_start, flags, pages);
> break;
> }
> +
> + if (!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)) {
> + undo_dev_pagemap(nr, nr_start, flags, pages);
> + break;
> + }
> +
> SetPageReferenced(page);
> pages[*nr] = page;
> if (unlikely(!try_grab_page(page, flags))) {
> @@ -2926,7 +2945,8 @@ static int internal_get_user_pages_fast(unsigned long start,
>
> if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
> FOLL_FORCE | FOLL_PIN | FOLL_GET |
> - FOLL_FAST_ONLY | FOLL_NOFAULT)))
> + FOLL_FAST_ONLY | FOLL_NOFAULT |
> + FOLL_PCI_P2PDMA)))
> return -EINVAL;
>
> if (gup_flags & FOLL_PIN)
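
And, sketched the other way around: a caller that leaves FOLL_PCI_P2PDMA
clear. Again, the helper name is invented for illustration; the error
handling follows from the hunks quoted above:

#include <linux/mm.h>

static int demo_pin_regular_only(unsigned long uaddr, int nr_pages,
				 struct page **pages)
{
	/* FOLL_PCI_P2PDMA deliberately not set. */
	int pinned = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);

	/*
	 * With the hunks above applied, hitting a P2PDMA page means either
	 * -EREMOTEIO (slow path, nothing pinned yet) or a short pin count
	 * (fast path pinned some regular pages first). The P2PDMA pages
	 * themselves are never handed back.
	 */
	if (pinned < 0)
		return pinned;

	/* ... use the (regular) pages, then release the pin ... */
	unpin_user_pages(pages, pinned);
	return 0;
}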

