From: Pavel Begunkov <asml.silence@gmail.com>
Subject: [PATCH v1 6/6] block/iomap: don't copy bvec for direct IO
The block layer spends quite a while in blkdev_direct_IO() copying and
initialising the bio's bvec. However, if we already have a bvec in the input
iterator, it can be reused in some cases, i.e. when the new
ITER_BVEC_FLAG_FIXED flag is set. Simple tests show a considerable
performance boost, and it also reduces the memory footprint.

Suggested-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
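Note for reviewers: below is a minimal, illustrative sketch (not part of the
patch) of a submitter that satisfies the new lifetime rule documented in the
porting.rst hunk. The names my_req, my_req_complete and my_submit are
hypothetical, the pages are assumed to have been pinned beforehand with one
reference per segment, and marking the iterator per the new
ITER_BVEC_FLAG_FIXED semantics from the earlier patches in this series is
assumed to happen wherever the iterator is built. A matching completion-side
sketch follows the diff.

#include <linux/bvec.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uio.h>

struct my_req {
	struct kiocb	iocb;
	struct bio_vec	*bvec;		/* caller-owned, reused by the bio */
	unsigned int	nr_segs;
};

static void my_req_complete(struct kiocb *iocb, long ret, long ret2)
{
	struct my_req *req = container_of(iocb, struct my_req, iocb);
	unsigned int i;

	/* Only now may the bvec array and its page references be dropped. */
	for (i = 0; i < req->nr_segs; i++)
		put_page(req->bvec[i].bv_page);
	kfree(req->bvec);
	kfree(req);
}

static ssize_t my_submit(struct file *file, struct my_req *req, size_t len)
{
	struct iov_iter iter;
	ssize_t ret;

	req->iocb.ki_filp = file;
	req->iocb.ki_pos = 0;
	req->iocb.ki_flags = IOCB_DIRECT;
	req->iocb.ki_complete = my_req_complete;	/* async completion */

	iov_iter_bvec(&iter, WRITE, req->bvec, req->nr_segs, len);
	ret = call_write_iter(file, &req->iocb, &iter);
	/* Anything but -EIOCBQUEUED means ->ki_complete() won't be called. */
	if (ret != -EIOCBQUEUED)
		my_req_complete(&req->iocb, ret, 0);
	return ret;
}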
Documentation/filesystems/porting.rst | 9 ++++
block/bio.c | 64 +++++++++++----------------
include/linux/bio.h | 3 ++
3 files changed, 38 insertions(+), 38 deletions(-)

diff --git a/Documentation/filesystems/porting.rst b/Documentation/filesystems/porting.rst
index 867036aa90b8..47a622879952 100644
--- a/Documentation/filesystems/porting.rst
+++ b/Documentation/filesystems/porting.rst
@@ -865,3 +865,12 @@ no matter what. Everything is handled by the caller.
 
 clone_private_mount() returns a longterm mount now, so the proper destructor of
 its result is kern_unmount() or kern_unmount_array().
+
+---
+
+**mandatory**
+
+For bvec-based iterators bio_iov_iter_get_pages() now doesn't copy bvecs but
+uses the one provided. Anyone issuing kiocb-I/O should ensure that the bvec and
+page references stay until I/O has completed, i.e. until ->ki_complete() has
+been called or the submission has returned a non -EIOCBQUEUED code.
diff --git a/block/bio.c b/block/bio.c
index 3192358c411f..f8229be24562 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -960,25 +960,16 @@ void bio_release_pages(struct bio *bio, bool mark_dirty)
 }
 EXPORT_SYMBOL_GPL(bio_release_pages);
 
-static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter)
+static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
 {
-	const struct bio_vec *bv = iter->bvec;
-	struct page *page = bv->bv_page;
-	bool same_page = false;
-	unsigned int off, len;
-
-	if (WARN_ON_ONCE(iter->iov_offset > bv->bv_len))
-		return -EINVAL;
-
-	len = min_t(size_t, bv->bv_len - iter->iov_offset, iter->count);
-	off = bv->bv_offset + iter->iov_offset;
-
-	if (!__bio_try_merge_page(bio, page, len, off, &same_page)) {
-		if (bio_full(bio, len))
-			return -EINVAL;
-		bio_add_page_noaccount(bio, page, len, off);
-	}
-	iov_iter_advance(iter, len);
+	WARN_ON_ONCE(BVEC_POOL_IDX(bio) != 0);
+	bio->bi_vcnt = iter->nr_segs;
+	bio->bi_max_vecs = iter->nr_segs;
+	bio->bi_io_vec = (struct bio_vec *)iter->bvec;
+	bio->bi_iter.bi_bvec_done = iter->iov_offset;
+	bio->bi_iter.bi_size = iter->count;
+
+	iov_iter_advance(iter, iter->count);
 	return 0;
 }

@@ -1092,12 +1083,13 @@ static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
  * This takes either an iterator pointing to user memory, or one pointing to
  * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
  * map them into the kernel. On IO completion, the caller should put those
- * pages. If we're adding kernel pages, and the caller told us it's safe to
- * do so, we just have to add the pages to the bio directly. We don't grab an
- * extra reference to those pages (the user should already have that), and we
- * don't put the page on IO completion. The caller needs to check if the bio is
- * flagged BIO_NO_PAGE_REF on IO completion. If it isn't, then pages should be
- * released.
+ * pages. If we're adding kernel pages, no extra page references are taken and
+ * the provided bvec is reused, so the caller must ensure that the bvec isn't
+ * freed and that its page references stay held until I/O has completed. If
+ * the I/O is completed asynchronously, the bvec must not be freed before
+ * ->ki_complete() has been called. The caller needs to check if the bio is
+ * flagged BIO_NO_PAGE_REF on IO completion. If it isn't, then pages should
+ * be released.
  *
  * The function tries, but does not guarantee, to pin as many pages as
  * fit into the bio, or are requested in @iter, whatever is smaller. If
@@ -1109,27 +1101,23 @@ static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
  */
 int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 {
-	const bool is_bvec = iov_iter_is_bvec(iter);
 	int ret;
 
-	if (WARN_ON_ONCE(bio->bi_vcnt))
-		return -EINVAL;
+	if (iov_iter_is_bvec(iter)) {
+		if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
+			return -EINVAL;
+		bio_iov_bvec_set(bio, iter);
+		bio_set_flag(bio, BIO_NO_PAGE_REF);
+		return 0;
+	}
 
 	do {
-		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
-			if (WARN_ON_ONCE(is_bvec))
-				return -EINVAL;
+		if (bio_op(bio) == REQ_OP_ZONE_APPEND)
 			ret = __bio_iov_append_get_pages(bio, iter);
-		} else {
-			if (is_bvec)
-				ret = __bio_iov_bvec_add_pages(bio, iter);
-			else
-				ret = __bio_iov_iter_get_pages(bio, iter);
-		}
+		else
+			ret = __bio_iov_iter_get_pages(bio, iter);
 	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
 
-	if (is_bvec)
-		bio_set_flag(bio, BIO_NO_PAGE_REF);
 	return bio->bi_vcnt ? 0 : ret;
 }
 EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 2a9f3f0bbe0a..337f4280b639 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -444,6 +444,9 @@ static inline void bio_wouldblock_error(struct bio *bio)
 
 static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
 {
+	/* reuse iter->bvec */
+	if (iov_iter_is_bvec(iter))
+		return 0;
 	return iov_iter_npages(iter, max_segs);
 }
 
--
2.24.0
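P.S. for reviewers: a minimal completion-side sketch, illustrative only and not
part of the patch; my_dio, my_dio_done and my_dio_end_io are made-up names, and
the kiocb pointer is assumed to have been stashed in the dio at submission
time. With this change a bio built from a bvec iterator is flagged
BIO_NO_PAGE_REF, so bio_release_pages() in the end_io handler returns early for
it: the block layer neither puts nor dirties the pages, and the submitter drops
its own references from ->ki_complete(), as in the sketch above.

#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/slab.h>

struct my_dio {
	struct kiocb	*iocb;		/* set up at submission time */
	bool		should_dirty;
};

static void my_dio_done(struct my_dio *dio, long ret)
{
	/*
	 * Hand the result back; ->ki_complete() is the point after which the
	 * submitter may finally free the bvec and drop its page references.
	 * A real handler would pass the number of bytes transferred on
	 * success rather than 0/-errno only.
	 */
	dio->iocb->ki_complete(dio->iocb, ret, 0);
	kfree(dio);
}

static void my_dio_end_io(struct bio *bio)
{
	struct my_dio *dio = bio->bi_private;
	long ret = blk_status_to_errno(bio->bi_status);

	/*
	 * bio_release_pages() checks BIO_NO_PAGE_REF internally; for a bio
	 * set up by bio_iov_bvec_set() it is a no-op, because no extra page
	 * references were taken at submission time.
	 */
	bio_release_pages(bio, dio->should_dirty);
	bio_put(bio);
	my_dio_done(dio, ret);
}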