From: Shiyang Ruan <ruansy.fnst@fujitsu.com>
Subject: [PATCH v9 10/10] fsdax: set a CoW flag when associate reflink mappings
Introduce a FS_DAX_MAPPING_COW flag to support association with CoW file
mappings. In this case, the dax rmap already takes responsibility for
looking up the files that share a given dax page, so page->mapping is no
longer used for rmap; instead it marks that this dax page is shared. To
make sure disassociation still works, page->index is used as a reference
count, and page->mapping is cleared back to its initial state when
page->index drops to 0.

With the help of this new flag, we can distinguish the normal case from
the CoW case, and keep the sanity warning for the normal case.

Signed-off-by: Shiyang Ruan <ruansy.fnst@fujitsu.com>
---
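Reviewer note, not part of the commit message: below is a minimal userspace
sketch that models the scheme described above, where the low bit of
page->mapping marks a CoW-shared page and page->index acts as a share count.
The struct toy_page/toy_mapping types and the toy_* helpers are hypothetical
stand-ins for illustration only; they are not the kernel structures touched
by this patch.

/* Userspace model only: the toy_* names are hypothetical, not kernel code. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define TOY_MAPPING_COW 1UL                      /* models FS_DAX_MAPPING_COW */

struct toy_mapping { const char *name; };

struct toy_page {
	struct toy_mapping *mapping;             /* models page->mapping */
	unsigned long index;                     /* models page->index */
};

static bool toy_mapping_is_cow(struct toy_mapping *m)
{
	return ((unsigned long)m & TOY_MAPPING_COW) == TOY_MAPPING_COW;
}

/* Returns true if a previously normal association was upgraded to CoW. */
static bool toy_set_cow(struct toy_page *page)
{
	bool was_normal = page->mapping && !toy_mapping_is_cow(page->mapping);

	page->mapping = (struct toy_mapping *)TOY_MAPPING_COW;
	return was_normal;
}

static void toy_associate(struct toy_page *page, struct toy_mapping *mapping,
			  unsigned long index, bool cow)
{
	if (cow) {
		if (toy_set_cow(page))
			page->index = 2;         /* was normal, now shared by two owners */
		else
			page->index++;           /* newly shared, or one more owner */
	} else {
		page->mapping = mapping;
		page->index = index;
	}
}

static void toy_disassociate(struct toy_page *page)
{
	if (toy_mapping_is_cow(page->mapping)) {
		/* keep the CoW marker while the page is still shared */
		if (page->index-- > 0)
			return;
	}
	page->mapping = NULL;
	page->index = 0;
}

int main(void)
{
	struct toy_mapping file_a = { "A" };
	struct toy_page page = { NULL, 0 };

	toy_associate(&page, &file_a, 7, false); /* normal, single-owner mapping */
	toy_associate(&page, NULL, 0, true);     /* a second file reflinks the block */
	assert(toy_mapping_is_cow(page.mapping) && page.index == 2);

	toy_disassociate(&page);                 /* one owner drops, still shared */
	assert(toy_mapping_is_cow(page.mapping) && page.index == 1);

	printf("page is %s, share count %lu\n",
	       toy_mapping_is_cow(page.mapping) ? "CoW-shared" : "normal",
	       page.index);
	return 0;
}

Built with a plain `cc sketch.c`, the model keeps the CoW marker as long as
the share count stays above zero, mirroring the dax_disassociate_entry()
behaviour in the diff below.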
fs/dax.c | 66 ++++++++++++++++++++++++++++++++++++++++++++++++--------
1 file changed, 57 insertions(+), 9 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index ad8ceea1f54c..e72ec712c002 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -335,12 +335,46 @@ static unsigned long dax_end_pfn(void *entry)
pfn < dax_end_pfn(entry); pfn++)

/*
- * TODO: for reflink+dax we need a way to associate a single page with
- * multiple address_space instances at different linear_page_index()
- * offsets.
+ * Set the FS_DAX_MAPPING_COW flag on the lowest bit of page->mapping to
+ * indicate a reflink case. In this case, page->mapping is only associated
+ * with a file mapping the first time, and only once.
+ */
+#define FS_DAX_MAPPING_COW 1UL
+
+#define MAPPING_SET_COW(m) (m = (struct address_space *)FS_DAX_MAPPING_COW)
+#define MAPPING_TEST_COW(m) (((unsigned long)m & FS_DAX_MAPPING_COW) == \
+ FS_DAX_MAPPING_COW)
+
+/*
+ * Set or update page->mapping with the FS_DAX_MAPPING_COW flag.
+ * Return true if it is an update (the page already had a normal association).
+ */
+static inline bool dax_mapping_set_cow(struct page *page)
+{
+ if (page->mapping) {
+ /* flag already set */
+ if (MAPPING_TEST_COW(page->mapping))
+ return false;
+
+ /*
+ * This page was mapped before it became shared; we just need to
+ * set the FS_DAX_MAPPING_COW flag here.
+ */
+ MAPPING_SET_COW(page->mapping);
+ return true;
+ }
+ /* Newly associate CoW mapping */
+ MAPPING_SET_COW(page->mapping);
+ return false;
+}
+
+/*
+ * When called from dax_insert_entry(), the cow flag indicates whether this
+ * entry is shared by multiple files. If so, set page->mapping to
+ * FS_DAX_MAPPING_COW and use page->index as a refcount.
*/
static void dax_associate_entry(void *entry, struct address_space *mapping,
- struct vm_area_struct *vma, unsigned long address)
+ struct vm_area_struct *vma, unsigned long address, bool cow)
{
unsigned long size = dax_entry_size(entry), pfn, index;
int i = 0;
@@ -352,9 +386,17 @@ static void dax_associate_entry(void *entry, struct address_space *mapping,
for_each_mapped_pfn(entry, pfn) {
struct page *page = pfn_to_page(pfn);

- WARN_ON_ONCE(page->mapping);
- page->mapping = mapping;
- page->index = index + i++;
+ if (cow) {
+ if (dax_mapping_set_cow(page)) {
+ /* Was normal, now updated to CoW */
+ page->index = 2;
+ } else
+ page->index++;
+ } else {
+ WARN_ON_ONCE(page->mapping);
+ page->mapping = mapping;
+ page->index = index + i++;
+ }
}
}

@@ -370,7 +412,12 @@ static void dax_disassociate_entry(void *entry, struct address_space *mapping,
struct page *page = pfn_to_page(pfn);

WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
- WARN_ON_ONCE(page->mapping && page->mapping != mapping);
+ if (MAPPING_TEST_COW(page->mapping)) {
+ /* keep the CoW flag if this page is still shared */
+ if (page->index-- > 0)
+ continue;
+ } else
+ WARN_ON_ONCE(page->mapping && page->mapping != mapping);
page->mapping = NULL;
page->index = 0;
}
@@ -829,7 +876,8 @@ static void *dax_insert_entry(struct xa_state *xas,
void *old;

dax_disassociate_entry(entry, mapping, false);
- dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
+ dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
+ false);
/*
* Only swap our new entry into the page cache if the current
* entry is a zero page or an empty entry. If a normal PTE or
--
2.34.1

