    Subject: [PATCH v2 07/28] fs/buffer: Convert __block_write_begin_int() to take a folio

    There are no plans to convert the buffer_head infrastructure to use
    multi-page folios, but __block_write_begin_int() is called from iomap,
    and it is more convenient and less error-prone if iomap passes in a
    folio. It also saves almost 200 bytes of code by removing repeated
    calls to compound_head().

    Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
    ---
    fs/buffer.c | 22 +++++++++++-----------
    fs/internal.h | 2 +-
    fs/iomap/buffered-io.c | 7 +++++--
    3 files changed, 17 insertions(+), 14 deletions(-)
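
    [Not part of the patch; a minimal sketch for reviewers of the calling
    convention this change moves to. page_folio(), folio_test_locked() and
    friends are the existing folio API; example_write_begin() is a made-up
    name, and the sketch assumes fs/internal.h is in scope for the
    __block_write_begin_int() declaration.]

    #include <linux/mm.h>
    #include <linux/pagemap.h>
    #include <linux/buffer_head.h>
    #include "internal.h"

    static int example_write_begin(struct page *page, loff_t pos,
            unsigned len, get_block_t *get_block)
    {
            /* Convert once at the boundary, as the updated
             * __block_write_begin() wrapper in this patch does.  A folio is
             * never a tail page, so folio_test_locked()/folio_test_uptodate()
             * need no compound_head() lookup; that is where the ~200 byte
             * text saving mentioned above comes from. */
            struct folio *folio = page_folio(page);

            BUG_ON(!folio_test_locked(folio));      /* was: PageLocked(page) */

            return __block_write_begin_int(folio, pos, len, get_block, NULL);
    }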

    diff --git a/fs/buffer.c b/fs/buffer.c
    index 46bc589b7a03..b1d722b26fe9 100644
    --- a/fs/buffer.c
    +++ b/fs/buffer.c
    @@ -1969,34 +1969,34 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
    }
    }

    -int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
    +int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
    get_block_t *get_block, const struct iomap *iomap)
    {
    unsigned from = pos & (PAGE_SIZE - 1);
    unsigned to = from + len;
    - struct inode *inode = page->mapping->host;
    + struct inode *inode = folio->mapping->host;
    unsigned block_start, block_end;
    sector_t block;
    int err = 0;
    unsigned blocksize, bbits;
    struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;

    - BUG_ON(!PageLocked(page));
    + BUG_ON(!folio_test_locked(folio));
    BUG_ON(from > PAGE_SIZE);
    BUG_ON(to > PAGE_SIZE);
    BUG_ON(from > to);

    - head = create_page_buffers(page, inode, 0);
    + head = create_page_buffers(&folio->page, inode, 0);
    blocksize = head->b_size;
    bbits = block_size_bits(blocksize);

    - block = (sector_t)page->index << (PAGE_SHIFT - bbits);
    + block = (sector_t)folio->index << (PAGE_SHIFT - bbits);

    for(bh = head, block_start = 0; bh != head || !block_start;
    block++, block_start=block_end, bh = bh->b_this_page) {
    block_end = block_start + blocksize;
    if (block_end <= from || block_start >= to) {
    - if (PageUptodate(page)) {
    + if (folio_test_uptodate(folio)) {
    if (!buffer_uptodate(bh))
    set_buffer_uptodate(bh);
    }
    @@ -2016,20 +2016,20 @@ int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,

    if (buffer_new(bh)) {
    clean_bdev_bh_alias(bh);
    - if (PageUptodate(page)) {
    + if (folio_test_uptodate(folio)) {
    clear_buffer_new(bh);
    set_buffer_uptodate(bh);
    mark_buffer_dirty(bh);
    continue;
    }
    if (block_end > to || block_start < from)
    - zero_user_segments(page,
    + folio_zero_segments(folio,
    to, block_end,
    block_start, from);
    continue;
    }
    }
    - if (PageUptodate(page)) {
    + if (folio_test_uptodate(folio)) {
    if (!buffer_uptodate(bh))
    set_buffer_uptodate(bh);
    continue;
    @@ -2050,14 +2050,14 @@ int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
    err = -EIO;
    }
    if (unlikely(err))
    - page_zero_new_buffers(page, from, to);
    + page_zero_new_buffers(&folio->page, from, to);
    return err;
    }

    int __block_write_begin(struct page *page, loff_t pos, unsigned len,
    get_block_t *get_block)
    {
    - return __block_write_begin_int(page, pos, len, get_block, NULL);
    + return __block_write_begin_int(page_folio(page), pos, len, get_block, NULL);
    }
    EXPORT_SYMBOL(__block_write_begin);

    diff --git a/fs/internal.h b/fs/internal.h
    index cdd83d4899bb..afc13443392b 100644
    --- a/fs/internal.h
    +++ b/fs/internal.h
    @@ -37,7 +37,7 @@ static inline int emergency_thaw_bdev(struct super_block *sb)
    /*
    * buffer.c
    */
    -int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
    +int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
    get_block_t *get_block, const struct iomap *iomap);

    /*
    diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
    index 1753c26c8e76..4e09ea823148 100644
    --- a/fs/iomap/buffered-io.c
    +++ b/fs/iomap/buffered-io.c
    @@ -597,6 +597,7 @@ static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
    const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
    const struct iomap *srcmap = iomap_iter_srcmap(iter);
    struct page *page;
    + struct folio *folio;
    int status = 0;

    BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
    @@ -618,11 +619,12 @@ static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
    status = -ENOMEM;
    goto out_no_page;
    }
    + folio = page_folio(page);

    if (srcmap->type == IOMAP_INLINE)
    status = iomap_write_begin_inline(iter, page);
    else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
    - status = __block_write_begin_int(page, pos, len, NULL, srcmap);
    + status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
    else
    status = __iomap_write_begin(iter, pos, len, page);

    @@ -954,11 +956,12 @@ EXPORT_SYMBOL_GPL(iomap_truncate_page);
    static loff_t iomap_page_mkwrite_iter(struct iomap_iter *iter,
    struct page *page)
    {
    + struct folio *folio = page_folio(page);
    loff_t length = iomap_length(iter);
    int ret;

    if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
    - ret = __block_write_begin_int(page, iter->pos, length, NULL,
    + ret = __block_write_begin_int(folio, iter->pos, length, NULL,
    &iter->iomap);
    if (ret)
    return ret;
    --
    2.33.0