Subject: [PATCH v4 13/25] mm/filemap: Add lock_folio
This is like lock_page() but for use by callers who know they have a folio.
Convert __lock_page() to be __lock_folio().  This saves one call to
compound_head() per contended call to lock_page().

Saves 362 bytes of text; mostly from improved register allocation and
inlining decisions.  __lock_folio is 59 bytes while __lock_page was 79.
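
As a minimal usage sketch (illustrative only, not part of the patch): a caller
that already has a folio in hand, e.g. from a page cache lookup, can take and
release the lock without any compound_head() call.  lock_folio() and
unlock_folio() are the interfaces added by this series; the helper name
example_write_folio() below is hypothetical.

	/*
	 * Hypothetical caller: the folio is already known, so the lock is
	 * taken directly.  lock_folio() tries the uncontended fast path
	 * inline and only calls __lock_folio() (and sleeps) on contention.
	 */
	static void example_write_folio(struct folio *folio)
	{
		lock_folio(folio);
		/* ... operate on the locked folio ... */
		unlock_folio(folio);
	}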

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/pagemap.h | 24 +++++++++++++++++++-----
 mm/filemap.c            | 29 +++++++++++++++--------------
 2 files changed, 34 insertions(+), 19 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index a34cf531c100..034e41256340 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -635,7 +635,7 @@ static inline bool wake_page_match(struct wait_page_queue *wait_page,
 	return true;
 }
 
-extern void __lock_page(struct page *page);
+void __lock_folio(struct folio *folio);
 extern int __lock_page_killable(struct page *page);
 extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
 extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
@@ -643,13 +643,24 @@ extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 void unlock_page(struct page *page);
 void unlock_folio(struct folio *folio);
 
+static inline bool trylock_folio(struct folio *folio)
+{
+	return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio)));
+}
+
 /*
  * Return true if the page was successfully locked
  */
 static inline int trylock_page(struct page *page)
 {
-	page = compound_head(page);
-	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
+	return trylock_folio(page_folio(page));
+}
+
+static inline void lock_folio(struct folio *folio)
+{
+	might_sleep();
+	if (!trylock_folio(folio))
+		__lock_folio(folio);
 }
 
 /*
@@ -657,9 +668,12 @@ static inline int trylock_page(struct page *page)
  */
 static inline void lock_page(struct page *page)
 {
+	struct folio *folio;
 	might_sleep();
-	if (!trylock_page(page))
-		__lock_page(page);
+
+	folio = page_folio(page);
+	if (!trylock_folio(folio))
+		__lock_folio(folio);
 }
 
 /*
diff --git a/mm/filemap.c b/mm/filemap.c
index 9960ef1b2758..3e3e3c666b94 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1160,7 +1160,7 @@ static void wake_up_page(struct page *page, int bit)
  */
 enum behavior {
 	EXCLUSIVE,	/* Hold ref to page and take the bit when woken, like
-			 * __lock_page() waiting on then setting PG_locked.
+			 * __lock_folio() waiting on then setting PG_locked.
 			 */
 	SHARED,		/* Hold ref to page and check the bit when woken, like
 			 * wait_on_page_writeback() waiting on PG_writeback.
@@ -1488,17 +1488,16 @@ void page_endio(struct page *page, bool is_write, int err)
 EXPORT_SYMBOL_GPL(page_endio);
 
 /**
- * __lock_page - get a lock on the page, assuming we need to sleep to get it
- * @__page: the page to lock
+ * __lock_folio - Get a lock on the folio, assuming we need to sleep to get it.
+ * @folio: The folio to lock
  */
-void __lock_page(struct page *__page)
+void __lock_folio(struct folio *folio)
 {
-	struct page *page = compound_head(__page);
-	wait_queue_head_t *q = page_waitqueue(page);
-	wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE,
+	wait_queue_head_t *q = page_waitqueue(&folio->page);
+	wait_on_page_bit_common(q, &folio->page, PG_locked, TASK_UNINTERRUPTIBLE,
 				EXCLUSIVE);
 }
-EXPORT_SYMBOL(__lock_page);
+EXPORT_SYMBOL(__lock_folio);
 
 int __lock_page_killable(struct page *__page)
 {
@@ -1573,10 +1572,10 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 			return 0;
 		}
 	} else {
-		__lock_page(page);
+		__lock_folio(page_folio(page));
 	}
-	return 1;
 
+	return 1;
 }
 
 /**
@@ -2720,7 +2719,9 @@ loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
 static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
 				     struct file **fpin)
 {
-	if (trylock_page(page))
+	struct folio *folio = page_folio(page);
+
+	if (trylock_folio(folio))
 		return 1;
 
 	/*
@@ -2733,7 +2734,7 @@ static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
 
 	*fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
 	if (vmf->flags & FAULT_FLAG_KILLABLE) {
-		if (__lock_page_killable(page)) {
+		if (__lock_page_killable(&folio->page)) {
 			/*
 			 * We didn't have the right flags to drop the mmap_lock,
 			 * but all fault_handlers only check for fatal signals
@@ -2745,11 +2746,11 @@ static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
 			return 0;
 		}
 	} else
-		__lock_page(page);
+		__lock_folio(folio);
+
 	return 1;
 }
 
-
 /*
  * Synchronous readahead happens when we don't even find a page in the page
  * cache at all. We don't want to perform IO under the mmap sem, so if we have
-- 
2.30.0