    Subject: [PATCH v14 018/138] mm/filemap: Add folio_lock()
    This is like lock_page() but for use by callers who know they have a folio.
    Convert __lock_page() to be __folio_lock(). This saves one call to
    compound_head() per contended call to lock_page().

    Saves 455 bytes of text, mostly from improved register allocation and
    inlining decisions. __folio_lock is 59 bytes while __lock_page was 79.

    Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
    Reviewed-by: Christoph Hellwig <hch@lst.de>
    Acked-by: Jeff Layton <jlayton@kernel.org>
    Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
    Acked-by: Vlastimil Babka <vbabka@suse.cz>
    Reviewed-by: William Kucharski <william.kucharski@oracle.com>
    Reviewed-by: David Howells <dhowells@redhat.com>
    ---
    include/linux/pagemap.h | 24 +++++++++++++++++++-----
    mm/filemap.c | 29 +++++++++++++++--------------
    2 files changed, 34 insertions(+), 19 deletions(-)

    diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
    index a13edc7a2916..c3673c55125b 100644
    --- a/include/linux/pagemap.h
    +++ b/include/linux/pagemap.h
    @@ -653,7 +653,7 @@ static inline bool wake_page_match(struct wait_page_queue *wait_page,
    return true;
    }

    -extern void __lock_page(struct page *page);
    +void __folio_lock(struct folio *folio);
    extern int __lock_page_killable(struct page *page);
    extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
    extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
    @@ -661,13 +661,24 @@ extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
    void unlock_page(struct page *page);
    void folio_unlock(struct folio *folio);

    +static inline bool folio_trylock(struct folio *folio)
    +{
    + return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
    +}
    +
    /*
    * Return true if the page was successfully locked
    */
    static inline int trylock_page(struct page *page)
    {
    - page = compound_head(page);
    - return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
    + return folio_trylock(page_folio(page));
    +}
    +
    +static inline void folio_lock(struct folio *folio)
    +{
    + might_sleep();
    + if (!folio_trylock(folio))
    + __folio_lock(folio);
    }

    /*
    @@ -675,9 +686,12 @@ static inline int trylock_page(struct page *page)
    */
    static inline void lock_page(struct page *page)
    {
    + struct folio *folio;
    might_sleep();
    - if (!trylock_page(page))
    - __lock_page(page);
    +
    + folio = page_folio(page);
    + if (!folio_trylock(folio))
    + __folio_lock(folio);
    }

    /*
    diff --git a/mm/filemap.c b/mm/filemap.c
    index 1af67ef94e4c..95f89656f126 100644
    --- a/mm/filemap.c
    +++ b/mm/filemap.c
    @@ -1187,7 +1187,7 @@ static void wake_up_page(struct page *page, int bit)
    */
    enum behavior {
    EXCLUSIVE, /* Hold ref to page and take the bit when woken, like
    - * __lock_page() waiting on then setting PG_locked.
    + * __folio_lock() waiting on then setting PG_locked.
    */
    SHARED, /* Hold ref to page and check the bit when woken, like
    * wait_on_page_writeback() waiting on PG_writeback.
    @@ -1578,17 +1578,16 @@ void page_endio(struct page *page, bool is_write, int err)
    EXPORT_SYMBOL_GPL(page_endio);

    /**
    - * __lock_page - get a lock on the page, assuming we need to sleep to get it
    - * @__page: the page to lock
    + * __folio_lock - Get a lock on the folio, assuming we need to sleep to get it.
    + * @folio: The folio to lock
    */
    -void __lock_page(struct page *__page)
    +void __folio_lock(struct folio *folio)
    {
    - struct page *page = compound_head(__page);
    - wait_queue_head_t *q = page_waitqueue(page);
    - wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE,
    + wait_queue_head_t *q = page_waitqueue(&folio->page);
    + wait_on_page_bit_common(q, &folio->page, PG_locked, TASK_UNINTERRUPTIBLE,
    EXCLUSIVE);
    }
    -EXPORT_SYMBOL(__lock_page);
    +EXPORT_SYMBOL(__folio_lock);

    int __lock_page_killable(struct page *__page)
    {
    @@ -1663,10 +1662,10 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
    return 0;
    }
    } else {
    - __lock_page(page);
    + __folio_lock(page_folio(page));
    }
    - return 1;

    + return 1;
    }

    /**
    @@ -2837,7 +2836,9 @@ loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
    static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
    struct file **fpin)
    {
    - if (trylock_page(page))
    + struct folio *folio = page_folio(page);
    +
    + if (folio_trylock(folio))
    return 1;

    /*
    @@ -2850,7 +2851,7 @@ static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,

    *fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
    if (vmf->flags & FAULT_FLAG_KILLABLE) {
    - if (__lock_page_killable(page)) {
    + if (__lock_page_killable(&folio->page)) {
    /*
    * We didn't have the right flags to drop the mmap_lock,
    * but all fault_handlers only check for fatal signals
    @@ -2862,11 +2863,11 @@ static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
    return 0;
    }
    } else
    - __lock_page(page);
    + __folio_lock(folio);
    +
    return 1;
    }

    -
    /*
    * Synchronous readahead happens when we don't even find a page in the page
    * cache at all. We don't want to perform IO under the mmap sem, so if we have
    --
    2.30.2