Subject: [PATCH RFC 6/9] cachefiles: check content map on read/write
cachefiles_find_next_granule()/cachefiles_find_next_hole() are used to
check whether the requested range has been cached. The return convention
of these two functions mimics that of SEEK_DATA/SEEK_HOLE, so that the
existing code can be reused as much as possible.
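
For illustration only (not part of this patch): a minimal sketch of how a
caller could walk the cached extents of an object with the new helpers,
relying on their SEEK_DATA/SEEK_HOLE-like return convention, i.e. -ENXIO
when no cached data remains past @start, and the hole lookup being clamped
to the object size. The helper name below is hypothetical.

static void cachefiles_dump_cached_ranges(struct cachefiles_object *object)
{
	loff_t start = 0, end;

	for (;;) {
		/* Next cached granule at or after @start; -ENXIO if none. */
		start = cachefiles_find_next_granule(object, start);
		if (start == -ENXIO)
			break;

		/* End of this cached run, clamped to the object size. */
		end = cachefiles_find_next_hole(object, start);

		pr_debug("cached range: [%llx, %llx)\n",
			 (unsigned long long)start, (unsigned long long)end);

		if (end >= object->cookie->object_size)
			break;
		start = end;
	}
}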

Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
---
 fs/cachefiles/content-map.c | 30 ++++++++++++++++++++++++++++++
 fs/cachefiles/internal.h    |  4 ++++
 fs/cachefiles/io.c          | 36 +++++++++++++++++++++++++++++++-----
 3 files changed, 65 insertions(+), 5 deletions(-)

diff --git a/fs/cachefiles/content-map.c b/fs/cachefiles/content-map.c
index 877ff79e181b..949ec5d9e4c9 100644
--- a/fs/cachefiles/content-map.c
+++ b/fs/cachefiles/content-map.c
@@ -220,3 +220,33 @@ void cachefiles_mark_content_map(struct cachefiles_object *object,
 	read_unlock_bh(&object->content_map_lock);
 }
 
+loff_t cachefiles_find_next_granule(struct cachefiles_object *object,
+				    loff_t start)
+{
+	unsigned long size, granule = start / CACHEFILES_GRAN_SIZE;
+	loff_t result;
+
+	read_lock_bh(&object->content_map_lock);
+	size = object->content_map_size * BITS_PER_BYTE;
+	result = find_next_bit(object->content_map, size, granule);
+	read_unlock_bh(&object->content_map_lock);
+
+	if (result == size)
+		return -ENXIO;
+	return result * CACHEFILES_GRAN_SIZE;
+}
+
+loff_t cachefiles_find_next_hole(struct cachefiles_object *object,
+				 loff_t start)
+{
+	unsigned long size, granule = start / CACHEFILES_GRAN_SIZE;
+	loff_t result;
+
+	read_lock_bh(&object->content_map_lock);
+	size = object->content_map_size * BITS_PER_BYTE;
+	result = find_next_zero_bit(object->content_map, size, granule);
+	read_unlock_bh(&object->content_map_lock);
+
+	return min_t(loff_t, result * CACHEFILES_GRAN_SIZE,
+		     object->cookie->object_size);
+}
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index c252746c8f9b..506700809a6d 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -183,6 +183,10 @@ extern int cachefiles_load_content_map(struct cachefiles_object *object);
 extern void cachefiles_save_content_map(struct cachefiles_object *object);
 extern void cachefiles_mark_content_map(struct cachefiles_object *object,
 					loff_t start, loff_t len);
+extern loff_t cachefiles_find_next_granule(struct cachefiles_object *object,
+					   loff_t start);
+extern loff_t cachefiles_find_next_hole(struct cachefiles_object *object,
+					loff_t start);

/*
* daemon.c
diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c
index 27171fac649e..5c7c84cdafea 100644
--- a/fs/cachefiles/io.c
+++ b/fs/cachefiles/io.c
@@ -30,6 +30,32 @@ struct cachefiles_kiocb {
 	u64			b_writing;
 };

+static loff_t cachefiles_seek_data(struct cachefiles_object *object,
+				   struct file *file, loff_t start)
+{
+	switch (object->content_info) {
+	case CACHEFILES_CONTENT_MAP:
+		return cachefiles_find_next_granule(object, start);
+	case CACHEFILES_CONTENT_BACKFS_MAP:
+		return vfs_llseek(file, start, SEEK_DATA);
+	default:
+		return -EINVAL;
+	}
+}
+
+static loff_t cachefiles_seek_hole(struct cachefiles_object *object,
+				   struct file *file, loff_t start)
+{
+	switch (object->content_info) {
+	case CACHEFILES_CONTENT_MAP:
+		return cachefiles_find_next_hole(object, start);
+	case CACHEFILES_CONTENT_BACKFS_MAP:
+		return vfs_llseek(file, start, SEEK_HOLE);
+	default:
+		return -EINVAL;
+	}
+}
+
 static inline void cachefiles_put_kiocb(struct cachefiles_kiocb *ki)
 {
 	if (refcount_dec_and_test(&ki->ki_refcnt)) {
@@ -103,7 +129,7 @@ static int cachefiles_read(struct netfs_cache_resources *cres,

 	off2 = cachefiles_inject_read_error();
 	if (off2 == 0)
-		off2 = vfs_llseek(file, off, SEEK_DATA);
+		off2 = cachefiles_seek_data(object, file, off);
 	if (off2 < 0 && off2 >= (loff_t)-MAX_ERRNO && off2 != -ENXIO) {
 		skipped = 0;
 		ret = off2;
@@ -442,7 +468,7 @@ static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *
 retry:
 	off = cachefiles_inject_read_error();
 	if (off == 0)
-		off = vfs_llseek(file, subreq->start, SEEK_DATA);
+		off = cachefiles_seek_data(object, file, subreq->start);
 	if (off < 0 && off >= (loff_t)-MAX_ERRNO) {
 		if (off == (loff_t)-ENXIO) {
 			why = cachefiles_trace_read_seek_nxio;
@@ -468,7 +494,7 @@ static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *

 	to = cachefiles_inject_read_error();
 	if (to == 0)
-		to = vfs_llseek(file, subreq->start, SEEK_HOLE);
+		to = cachefiles_seek_hole(object, file, subreq->start);
 	if (to < 0 && to >= (loff_t)-MAX_ERRNO) {
 		trace_cachefiles_io_error(object, file_inode(file), to,
 					  cachefiles_trace_seek_error);
@@ -537,7 +563,7 @@ int __cachefiles_prepare_write(struct cachefiles_object *object,

 	pos = cachefiles_inject_read_error();
 	if (pos == 0)
-		pos = vfs_llseek(file, *_start, SEEK_DATA);
+		pos = cachefiles_seek_data(object, file, *_start);
 	if (pos < 0 && pos >= (loff_t)-MAX_ERRNO) {
 		if (pos == -ENXIO)
 			goto check_space; /* Unallocated tail */
@@ -558,7 +584,7 @@ int __cachefiles_prepare_write(struct cachefiles_object *object,

 	pos = cachefiles_inject_read_error();
 	if (pos == 0)
-		pos = vfs_llseek(file, *_start, SEEK_HOLE);
+		pos = cachefiles_seek_hole(object, file, *_start);
 	if (pos < 0 && pos >= (loff_t)-MAX_ERRNO) {
 		trace_cachefiles_io_error(object, file_inode(file), pos,
 					  cachefiles_trace_seek_error);
--
2.27.0