From: Jeffle Xu <jefflexu@linux.alibaba.com>
Subject: [RFC 07/19] netfs: add netfs_readpage_demand()
Date: Fri, 10 Dec 2021
netfs_readpage_demand() is the demand-read version of
netfs_readpage().

When the netfs API works in demand-read mode, filesystems using fscache
should call netfs_readpage_demand() instead of netfs_readpage().
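
As an illustrative sketch only (not part of this patch;
demandfs_readpage() and demandfs_req_ops are hypothetical names), a
filesystem working in demand-read mode might wire this up in its
->readpage() as follows:

	static int demandfs_readpage(struct file *file, struct page *page)
	{
		struct folio *folio = page_folio(page);

		/*
		 * The read is driven on demand, so only the folio, the
		 * netfs ops table and an optional private pointer are
		 * passed in; no struct file is required.
		 */
		return netfs_readpage_demand(folio, &demandfs_req_ops, NULL);
	}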

Signed-off-by: Jeffle Xu <jefflexu@linux.alibaba.com>
---
 fs/netfs/read_helper.c | 63 ++++++++++++++++++++++++++++++++++++++++++
 include/linux/netfs.h  |  3 ++
 2 files changed, 66 insertions(+)
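
On the netfs side, each slice that the submit loop below does not route
to the cache backend is handed to the filesystem's ->issue_op(). As a
minimal sketch, assuming the 5.16-era netfs_read_request_ops layout
(demandfs_issue_op() is a hypothetical name):

	static void demandfs_issue_op(struct netfs_read_subrequest *subreq)
	{
		/*
		 * A real implementation would read subreq->len bytes from
		 * offset subreq->start in the backing store and then report
		 * the number of bytes transferred (or an error) via
		 * netfs_subreq_terminated(); here every subrequest simply
		 * fails.
		 */
		netfs_subreq_terminated(subreq, -EIO, false);
	}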

diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c
index 9240b85548e4..26fa688f6300 100644
--- a/fs/netfs/read_helper.c
+++ b/fs/netfs/read_helper.c
@@ -1022,6 +1022,69 @@ int netfs_readpage(struct file *file,
 }
 EXPORT_SYMBOL(netfs_readpage);
 
+int netfs_readpage_demand(struct folio *folio,
+			  const struct netfs_read_request_ops *ops,
+			  void *netfs_priv)
+{
+	struct netfs_read_request *rreq;
+	unsigned int debug_index = 0;
+	int ret;
+
+	_enter("%lx", folio_index(folio));
+
+	rreq = __netfs_alloc_read_request(ops, netfs_priv, NULL);
+	if (!rreq) {
+		if (netfs_priv)
+			ops->cleanup(netfs_priv, folio_file_mapping(folio));
+		folio_unlock(folio);
+		return -ENOMEM;
+	}
+	rreq->type = NETFS_TYPE_DEMAND;
+	rreq->folio = folio;
+	rreq->start = folio_file_pos(folio);
+	rreq->len = folio_size(folio);
+	__set_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags);
+
+	if (ops->begin_cache_operation) {
+		ret = ops->begin_cache_operation(rreq);
+		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) {
+			folio_unlock(folio);
+			goto out;
+		}
+	}
+
+	netfs_stat(&netfs_n_rh_readpage);
+	trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
+
+	netfs_get_read_request(rreq);
+
+	atomic_set(&rreq->nr_rd_ops, 1);
+	do {
+		if (!netfs_rreq_submit_slice(rreq, &debug_index))
+			break;
+	} while (rreq->submitted < rreq->len);
+
+	/* Keep nr_rd_ops incremented so that the ref always belongs to us, and
+	 * the service code isn't punted off to a random thread pool to
+	 * process.
+	 */
+	do {
+		wait_var_event(&rreq->nr_rd_ops, atomic_read(&rreq->nr_rd_ops) == 1);
+		netfs_rreq_assess(rreq, false);
+	} while (test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags));
+
+	ret = rreq->error;
+	if (ret == 0 && rreq->submitted < rreq->len) {
+		trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_readpage);
+		ret = -EIO;
+	}
+out:
+	netfs_put_read_request(rreq, false);
+	return ret;
+}
+EXPORT_SYMBOL(netfs_readpage_demand);
+
 /*
  * Prepare a folio for writing without reading first
  * @folio: The folio being prepared
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index 638ea5d63869..de6948bcc80a 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -261,6 +261,9 @@ extern int netfs_readpage(struct file *,
			   struct folio *,
			   const struct netfs_read_request_ops *,
			   void *);
+extern int netfs_readpage_demand(struct folio *,
+				 const struct netfs_read_request_ops *,
+				 void *);
 extern int netfs_write_begin(struct file *, struct address_space *,
			      loff_t, unsigned int, unsigned int, struct folio **,
			      void **,
--
2.27.0