Subject: Re: [PATCH v2 6/9] iomap: Convert read_count to read_bytes_pending
On Fri, Sep 11, 2020 at 12:47:04AM +0100, Matthew Wilcox (Oracle) wrote:
> Instead of counting bio segments, count the number of bytes submitted.
> This insulates us from the block layer's definition of what a 'same page'
> is, which is not necessarily clear once THPs are involved.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>  fs/iomap/buffered-io.c | 41 ++++++++++++-----------------------------
>  1 file changed, 12 insertions(+), 29 deletions(-)
>
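Trying to make sure I follow the new scheme before digging into the
hunks: the submission side adds each range's byte count to
read_bytes_pending before that range can possibly complete, and the
completion side subtracts bv_len, unlocking the page when the counter
drains to zero.  A quick userspace model of that invariant (C11
atomics standing in for the kernel's atomic_t; the names here are mine,
not from the patch):

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

/* stands in for iomap_page->read_bytes_pending */
static atomic_int pending;

/* submission side: account the bytes before issuing any I/O */
static void model_submit(int plen)
{
        atomic_fetch_add(&pending, plen);
}

/*
 * completion side: subtract bv_len; the caller unlocks the page when
 * this returns true, mirroring atomic_sub_and_test()
 */
static bool model_complete(int bv_len)
{
        return atomic_fetch_sub(&pending, bv_len) == bv_len;
}

int main(void)
{
        bool unlocked;

        model_submit(1024);             /* first sub-page range */
        model_submit(3072);             /* second sub-page range */

        unlocked = model_complete(1024);
        assert(!unlocked);              /* 3072 bytes still pending */

        unlocked = model_complete(3072);
        assert(unlocked);               /* counter drained: unlock */
        return 0;
}

Every byte subtracted at completion was added at submission, so the
zero crossing is exact no matter how the block layer carved up or
merged the segments.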
> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> index 9670c096b83e..1cf976a8e55c 100644
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
> @@ -26,7 +26,7 @@
>   * to track sub-page uptodate status and I/O completions.
>   */
>  struct iomap_page {
> -        atomic_t                read_count;
> +        atomic_t                read_bytes_pending;
>          atomic_t                write_count;
>          spinlock_t              uptodate_lock;
>          unsigned long           uptodate[];
> @@ -72,7 +72,7 @@ iomap_page_release(struct page *page)
>  
>          if (!iop)
>                  return;
> -        WARN_ON_ONCE(atomic_read(&iop->read_count));
> +        WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
>          WARN_ON_ONCE(atomic_read(&iop->write_count));
>          WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
>                          PageUptodate(page));
> @@ -167,13 +167,6 @@ iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
>                  SetPageUptodate(page);
>  }
>  
> -static void
> -iomap_read_finish(struct iomap_page *iop, struct page *page)
> -{
> -        if (!iop || atomic_dec_and_test(&iop->read_count))
> -                unlock_page(page);
> -}
> -
>  static void
>  iomap_read_page_end_io(struct bio_vec *bvec, int error)
>  {
> @@ -187,7 +180,8 @@ iomap_read_page_end_io(struct bio_vec *bvec, int error)
>                  iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
>          }
>  
> -        iomap_read_finish(iop, page);
> +        if (!iop || atomic_sub_and_test(bvec->bv_len, &iop->read_bytes_pending))
> +                unlock_page(page);
>  }
>  
>  static void
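This open-coded replacement for iomap_read_finish() is where the
insulation from the block layer's 'same page' logic actually shows up:
a single completed bvec may cover several of the ranges accounted at
submission, and subtracting bv_len still balances the books, whereas
the old per-segment counter needed the same_page tracking to avoid
over-counting.  A small model of the merged case (again my own names,
plain C11 atomics):

#include <assert.h>
#include <stdatomic.h>

static atomic_int pending;

int main(void)
{
        int before;

        /* two sub-page ranges accounted separately at submission */
        atomic_fetch_add(&pending, 1024);
        atomic_fetch_add(&pending, 1024);

        /*
         * The block layer merged them into one bvec, so a single
         * completion arrives with bv_len == 2048.  Byte accounting
         * still drains to zero; a naive per-segment count would have
         * been left at 1 here.
         */
        before = atomic_fetch_sub(&pending, 2048);
        assert(before == 2048);         /* zero -> unlock_page() */
        return 0;
}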
> @@ -267,30 +261,19 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
>          }
>  
>          ctx->cur_page_in_bio = true;
> +        if (iop)
> +                atomic_add(plen, &iop->read_bytes_pending);
>  
> -        /*
> -         * Try to merge into a previous segment if we can.
> -         */
> +        /* Try to merge into a previous segment if we can */
>          sector = iomap_sector(iomap, pos);
> -        if (ctx->bio && bio_end_sector(ctx->bio) == sector)
> +        if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
> +                if (__bio_try_merge_page(ctx->bio, page, plen, poff,
> +                                &same_page))
> +                        goto done;
>                  is_contig = true;
> -
> -        if (is_contig &&
> -            __bio_try_merge_page(ctx->bio, page, plen, poff, &same_page)) {
> -                if (!same_page && iop)
> -                        atomic_inc(&iop->read_count);
> -                goto done;
>          }
>  
> -        /*
> -         * If we start a new segment we need to increase the read count, and we
> -         * need to do so before submitting any previous full bio to make sure
> -         * that we don't prematurely unlock the page.
> -         */
> -        if (iop)
> -                atomic_inc(&iop->read_count);
> -
> -        if (!ctx->bio || !is_contig || bio_full(ctx->bio, plen)) {
> +        if (!is_contig || bio_full(ctx->bio, plen)) {
>                  gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
>                  gfp_t orig_gfp = gfp;
>                  int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
> --
> 2.28.0
>
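One property worth calling out, since the comment documenting it is
deleted above: the atomic_add() now sits at the top of
iomap_readpage_actor(), before a full previous bio can be submitted,
so the page still can't be unlocked while a range remains unaccounted.
The restructured merge path also lets the '!ctx->bio' test go away,
since is_contig can only become true when ctx->bio is non-NULL.  A
sequential sketch of why the add-before-submit order matters (my own
names again):

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int pending;
static bool page_locked = true;

/* completion of a previously submitted range of 'len' bytes */
static void completion(int len)
{
        if (atomic_fetch_sub(&pending, len) == len)
                page_locked = false;    /* unlock_page() */
}

int main(void)
{
        atomic_fetch_add(&pending, 2048);       /* range 1 accounted */

        /*
         * Range 2 is accounted *before* the bio carrying range 1 is
         * submitted; if range 1 completes right away, the counter is
         * still non-zero and the page stays locked.
         */
        atomic_fetch_add(&pending, 2048);       /* range 2 accounted */
        completion(2048);                       /* range 1 completes */
        assert(page_locked);                    /* no premature unlock */

        completion(2048);                       /* range 2 completes */
        assert(!page_locked);
        return 0;
}

With the order inverted, the first completion could have drained the
counter and unlocked the page with range 2 still in flight.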
