Date: Mon, 10 Jan 2022 23:41:31 -0800
Subject: Re: [PATCH v2 25/28] gup: Convert compound_next() to gup_folio_next()
From: John Hubbard <>
On 1/9/22 20:24, Matthew Wilcox (Oracle) wrote:
> Convert both callers to work on folios instead of pages.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>  mm/gup.c | 41 ++++++++++++++++++++++-------------------
>  1 file changed, 22 insertions(+), 19 deletions(-)
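The batching here is pleasingly simple: coalesce consecutive pages[]
entries that belong to the same folio, then drop that folio's pin count
once for the whole run. For anyone following along, here is a minimal
userspace sketch of that idea. The names page_folio_id() and
next_folio(), and the int-id model of a folio, are illustrative
stand-ins, not kernel API:

#include <stdio.h>

/* Hypothetical stand-in for page_folio(): map a page index to a folio id. */
static int page_folio_id(const int *folio_ids, unsigned long i)
{
	return folio_ids[i];
}

/* Mirrors the loop structure of gup_folio_next() in the patch below. */
static int next_folio(const int *folio_ids, unsigned long i,
		      unsigned long npages, unsigned int *ntails)
{
	int folio = page_folio_id(folio_ids, i);
	unsigned long nr;

	/* Count how many consecutive entries share this folio. */
	for (nr = i + 1; nr < npages; nr++) {
		if (page_folio_id(folio_ids, nr) != folio)
			break;
	}

	*ntails = nr - i;
	return folio;
}

int main(void)
{
	/* Pages 0-2 share folio 7; pages 3-4 share folio 9. */
	int folio_ids[] = { 7, 7, 7, 9, 9 };
	unsigned long npages = 5, i;
	unsigned int nr;

	for (i = 0; i < npages; i += nr) {
		int folio = next_folio(folio_ids, i, npages, &nr);
		printf("folio %d covers %u page(s)\n", folio, nr);
	}
	return 0;
}

That prints "folio 7 covers 3 page(s)" and then "folio 9 covers 2
page(s)", which is exactly the stride the unpin loops take.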
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
thanks,
--
John Hubbard
NVIDIA
>
> diff --git a/mm/gup.c b/mm/gup.c
> index b5786e83c418..0cf2d5fd8d2d 100644
> --- a/mm/gup.c
> +++ b/mm/gup.c
> @@ -220,20 +220,20 @@ static inline struct page *compound_range_next(unsigned long i,
>  	return page;
>  }
>  
> -static inline struct page *compound_next(unsigned long i,
> +static inline struct folio *gup_folio_next(unsigned long i,
>  		unsigned long npages, struct page **list, unsigned int *ntails)
>  {
> -	struct page *page;
> +	struct folio *folio;
>  	unsigned int nr;
>  
> -	page = compound_head(list[i]);
> +	folio = page_folio(list[i]);
>  	for (nr = i + 1; nr < npages; nr++) {
> -		if (compound_head(list[nr]) != page)
> +		if (page_folio(list[nr]) != folio)
>  			break;
>  	}
>  
>  	*ntails = nr - i;
> -	return page;
> +	return folio;
>  }
>  
>  /**
> @@ -261,17 +261,17 @@ static inline struct page *compound_next(unsigned long i,
>  void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
>  				 bool make_dirty)
>  {
> -	unsigned long index;
> -	struct page *head;
> -	unsigned int ntails;
> +	unsigned long i;
> +	struct folio *folio;
> +	unsigned int nr;
>  
>  	if (!make_dirty) {
>  		unpin_user_pages(pages, npages);
>  		return;
>  	}
>  
> -	for (index = 0; index < npages; index += ntails) {
> -		head = compound_next(index, npages, pages, &ntails);
> +	for (i = 0; i < npages; i += nr) {
> +		folio = gup_folio_next(i, npages, pages, &nr);
>  		/*
>  		 * Checking PageDirty at this point may race with
>  		 * clear_page_dirty_for_io(), but that's OK. Two key
> @@ -292,9 +292,12 @@ void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
>  		 * written back, so it gets written back again in the
>  		 * next writeback cycle. This is harmless.
>  		 */
> -		if (!PageDirty(head))
> -			set_page_dirty_lock(head);
> -		put_compound_head(head, ntails, FOLL_PIN);
> +		if (!folio_test_dirty(folio)) {
> +			folio_lock(folio);
> +			folio_mark_dirty(folio);
> +			folio_unlock(folio);
> +		}
> +		gup_put_folio(folio, nr, FOLL_PIN);
>  	}
>  }
>  EXPORT_SYMBOL(unpin_user_pages_dirty_lock);
> @@ -347,9 +350,9 @@ EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);
>   */
>  void unpin_user_pages(struct page **pages, unsigned long npages)
>  {
> -	unsigned long index;
> -	struct page *head;
> -	unsigned int ntails;
> +	unsigned long i;
> +	struct folio *folio;
> +	unsigned int nr;
>  
>  	/*
>  	 * If this WARN_ON() fires, then the system *might* be leaking pages (by
> @@ -359,9 +362,9 @@
>  	if (WARN_ON(IS_ERR_VALUE(npages)))
>  		return;
>  
> -	for (index = 0; index < npages; index += ntails) {
> -		head = compound_next(index, npages, pages, &ntails);
> -		put_compound_head(head, ntails, FOLL_PIN);
> +	for (i = 0; i < npages; i += nr) {
> +		folio = gup_folio_next(i, npages, pages, &nr);
> +		gup_put_folio(folio, nr, FOLL_PIN);
>  	}
>  }
>  EXPORT_SYMBOL(unpin_user_pages);
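One note on the dirty path above, for readers skimming the hunk:
set_page_dirty_lock() bundled "lock the page, mark it dirty, unlock"
into a single call, and the folio side now spells those three steps
out. A small userspace mock of that shape, with a pthread mutex
standing in for the folio lock; struct mock_folio and these stub
helpers are illustrative, not the kernel implementation:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct mock_folio {
	pthread_mutex_t lock;
	bool dirty;
};

/* Stubs shaped like folio_test_dirty()/folio_lock()/folio_mark_dirty(). */
static bool mock_test_dirty(struct mock_folio *f) { return f->dirty; }
static void mock_lock(struct mock_folio *f) { pthread_mutex_lock(&f->lock); }
static void mock_unlock(struct mock_folio *f) { pthread_mutex_unlock(&f->lock); }
static void mock_mark_dirty(struct mock_folio *f) { f->dirty = true; }

int main(void)
{
	struct mock_folio folio = { PTHREAD_MUTEX_INITIALIZER, false };

	/*
	 * The unlocked dirty check can race, just like the PageDirty()
	 * check in unpin_user_pages_dirty_lock(); as the comment in the
	 * patch explains, the worst case is a harmless extra writeback.
	 */
	if (!mock_test_dirty(&folio)) {
		mock_lock(&folio);
		mock_mark_dirty(&folio);
		mock_unlock(&folio);
	}
	printf("dirty = %d\n", folio.dirty);
	return 0;
}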