Subject: [PATCH v4 4/5] mm/shmem: fix infinite loop when swap in shmem error at swapoff time
When a swapin error hits a shmem page at swapoff time, the while loop in
shmem_unuse_inode() becomes an infinite loop. This is because the swapin
error is deliberately ignored at present, so info->swapped never reaches 0
and we can never escape the loop in shmem_unuse().
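
For illustration, the hang boils down to something like the following
(a deliberately simplified sketch of the control flow described above,
not the actual kernel code):

	/* Simplified sketch only: swapoff cannot finish for this inode. */
	while (info->swapped) {			/* never reaches 0 ...        */
		shmem_unuse_inode(inode, type);	/* ... because the swapin     */
						/* error is silently ignored  */
						/* and the bad entry stays    */
						/* counted in info->swapped   */
	}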

To fix the issue, store a swapin_error entry in the mapping when a swapin
error occurs. The swapcache page can then be freed and the user won't be
stuck with a swap device that can never be swapped off just because one of
its sectors is bad. If the page is accessed later, the user process is
killed so that corrupted data is never consumed. On the other hand, if the
page is never accessed, the user won't even notice it.
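
For illustration only, here is a minimal sketch of how the error entry
round-trips through the mapping, built from the helpers this patch already
uses (make_swapin_error_entry(), swp_to_radix_entry(), radix_to_swp_entry(),
is_swapin_error_entry()); the function name is made up for the sketch, and
locking and the swapcache cleanup are left out:

	/* Sketch only, not part of the patch. */
	static int sketch_swapin_error_roundtrip(struct address_space *mapping,
						 pgoff_t index,
						 struct folio *folio,
						 swp_entry_t swap)
	{
		swp_entry_t err = make_swapin_error_entry(&folio->page);

		/* Swap the failed entry for the error marker, if still there. */
		if (xa_cmpxchg_irq(&mapping->i_pages, index,
				   swp_to_radix_entry(swap),
				   swp_to_radix_entry(err), 0) !=
		    swp_to_radix_entry(swap))
			return 0;

		/* Any later lookup sees the marker and refuses the page. */
		swap = radix_to_swp_entry(xa_load(&mapping->i_pages, index));
		if (is_swapin_error_entry(swap))
			return -EIO;	/* the fault path turns this into SIGBUS */

		return 0;
	}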

Reported-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
---
mm/shmem.c | 39 +++++++++++++++++++++++++++++++++++++++
1 file changed, 39 insertions(+)

diff --git a/mm/shmem.c b/mm/shmem.c
index d3c7970e0179..d55dd972023a 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1175,6 +1175,10 @@ static int shmem_find_swap_entries(struct address_space *mapping,
 			continue;
 
 		entry = radix_to_swp_entry(folio);
+		/*
+		 * swapin error entries can be found in the mapping. But they're
+		 * deliberately ignored here as we've done everything we can do.
+		 */
 		if (swp_type(entry) != type)
 			continue;

@@ -1672,6 +1676,36 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
 	return error;
 }
 
+static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
+					 struct folio *folio, swp_entry_t swap)
+{
+	struct address_space *mapping = inode->i_mapping;
+	struct shmem_inode_info *info = SHMEM_I(inode);
+	swp_entry_t swapin_error;
+	void *old;
+
+	swapin_error = make_swapin_error_entry(&folio->page);
+	old = xa_cmpxchg_irq(&mapping->i_pages, index,
+			     swp_to_radix_entry(swap),
+			     swp_to_radix_entry(swapin_error), 0);
+	if (old != swp_to_radix_entry(swap))
+		return;
+
+	folio_wait_writeback(folio);
+	delete_from_swap_cache(&folio->page);
+	spin_lock_irq(&info->lock);
+	/*
+	 * Don't treat the swapin error folio as alloced. Otherwise inode->i_blocks
+	 * won't be 0 when the inode is released, which would trigger
+	 * WARN_ON(inode->i_blocks) in shmem_evict_inode().
+	 */
+	info->alloced--;
+	info->swapped--;
+	shmem_recalc_inode(inode);
+	spin_unlock_irq(&info->lock);
+	swap_free(swap);
+}
+
 /*
  * Swap in the page pointed to by *pagep.
  * Caller has to make sure that *pagep contains a valid swapped page.
@@ -1695,6 +1729,9 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 	swap = radix_to_swp_entry(*foliop);
 	*foliop = NULL;
 
+	if (is_swapin_error_entry(swap))
+		return -EIO;
+
 	/* Look it up and read it in.. */
 	page = lookup_swap_cache(swap, NULL, 0);
 	if (!page) {
@@ -1762,6 +1799,8 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 failed:
 	if (!shmem_confirm_swap(mapping, index, swap))
 		error = -EEXIST;
+	if (error == -EIO)
+		shmem_set_folio_swapin_error(inode, index, folio, swap);
 unlock:
 	if (folio) {
 		folio_unlock(folio);
--
2.23.0