Subject: [PATCH v4 15/36] iomap: Support large pages in read paths
Date: 15 May 2020
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>

Use thp_size() instead of PAGE_SIZE, offset_in_thp() instead of
offset_in_page(), and bio_for_each_thp_segment_all() instead of
bio_for_each_segment_all().
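
For reference, the THP helpers this patch relies on are introduced
earlier in the series (and partly exist in mainline); roughly, they
behave like the sketch below.  These are approximate definitions for
illustration, assuming the usual thp_order()/compound_order() helpers,
not the exact ones from the series:

/* Full size in bytes of the (possibly compound) page. */
static inline unsigned long thp_size(struct page *page)
{
	return PAGE_SIZE << thp_order(page);
}

/* log2 of thp_size(): PAGE_SHIFT plus the compound page order. */
static inline unsigned int page_shift(struct page *page)
{
	return PAGE_SHIFT + compound_order(page);
}

/* Byte offset of position @p within the (possibly huge) page. */
#define offset_in_thp(page, p)	((unsigned long)(p) & (thp_size(page) - 1))

bio_for_each_thp_segment_all() is the series' variant of
bio_for_each_segment_all() that walks the bio one huge page at a time
rather than in PAGE_SIZE chunks.  With these helpers, the new
iomap_nr_vecs() below rounds the read length up to whole pages of the
current size: e.g. for a 2MB THP (page_shift() == 21) and a 3MB range,
(3MB + 2MB - 1) >> 21 == 2.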

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/iomap/buffered-io.c | 21 ++++++++++++++++-----
 1 file changed, 16 insertions(+), 5 deletions(-)

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 423ffc9d4a97..75f42c0d4cd9 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -198,7 +198,7 @@ iomap_read_end_io(struct bio *bio)
 	struct bio_vec *bvec;
 	struct bvec_iter_all iter_all;
 
-	bio_for_each_segment_all(bvec, bio, iter_all)
+	bio_for_each_thp_segment_all(bvec, bio, iter_all)
 		iomap_read_page_end_io(bvec, error);
 	bio_put(bio);
 }
@@ -238,6 +238,16 @@ static inline bool iomap_block_needs_zeroing(struct inode *inode,
 		pos >= i_size_read(inode);
 }
 
+/*
+ * Estimate the number of vectors we need based on the current page size;
+ * if we're wrong we'll end up doing an overly large allocation or needing
+ * to do a second allocation, neither of which is a big deal.
+ */
+static unsigned int iomap_nr_vecs(struct page *page, loff_t length)
+{
+	return (length + thp_size(page) - 1) >> page_shift(page);
+}
+
 static loff_t
 iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 		struct iomap *iomap, struct iomap *srcmap)
@@ -294,7 +304,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 	if (!ctx->bio || !is_contig || bio_full(ctx->bio, plen)) {
 		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
 		gfp_t orig_gfp = gfp;
-		int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		int nr_vecs = iomap_nr_vecs(page, length);
 
 		if (ctx->bio)
 			submit_bio(ctx->bio);
@@ -338,9 +348,9 @@ iomap_readpage(struct page *page, const struct iomap_ops *ops)
 
 	trace_iomap_readpage(page->mapping->host, 1);
 
-	for (poff = 0; poff < PAGE_SIZE; poff += ret) {
+	for (poff = 0; poff < thp_size(page); poff += ret) {
 		ret = iomap_apply(inode, page_offset(page) + poff,
-				PAGE_SIZE - poff, 0, ops, &ctx,
+				thp_size(page) - poff, 0, ops, &ctx,
 				iomap_readpage_actor);
 		if (ret <= 0) {
 			WARN_ON_ONCE(ret == 0);
@@ -374,7 +384,8 @@ iomap_readahead_actor(struct inode *inode, loff_t pos, loff_t length,
 	loff_t done, ret;
 
 	for (done = 0; done < length; done += ret) {
-		if (ctx->cur_page && offset_in_page(pos + done) == 0) {
+		if (ctx->cur_page &&
+		    offset_in_thp(ctx->cur_page, pos + done) == 0) {
 			if (!ctx->cur_page_in_bio)
 				unlock_page(ctx->cur_page);
 			put_page(ctx->cur_page);
--
2.26.2