Subject: [mm-unstable PATCH v7 4/8] mm, hwpoison: make unpoison aware of raw error info in hwpoisoned hugepage
From: Naoya Horiguchi <naoya.horiguchi@nec.com>

The raw error info list needs to be removed when a hwpoisoned hugetlb
page is unpoisoned, and the unpoison handler needs to know how many
errors are recorded in the target hugepage so that num_poisoned_pages
can be decreased by that count. Add both.

HPageVmemmapOptimized(hpage) and HPageRawHwpUnreliable(hpage) hugepages
sometimes can't be unpoisoned, so skip them: HPageRawHwpUnreliable pages
are never unpoisoned, and HPageVmemmapOptimized pages only refuse the
case where the raw error bits would have to be moved back to the tail
subpages, whose struct pages don't exist.
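
For reference, unpoison_memory() is reached from the hwpoison-inject
debugfs interface. Below is a rough userspace sketch of the round trip,
illustrative only and not part of this patch; it assumes root,
CONFIG_MEMORY_FAILURE and CONFIG_HWPOISON_INJECT, and uses a normal
anonymous page for brevity (a hugetlb mapping would go through the same
debugfs write):

/*
 * Illustrative sketch: poison one anonymous page with MADV_HWPOISON,
 * then request software unpoison for its pfn via
 * /sys/kernel/debug/hwpoison/unpoison-pfn.
 */
#define _DEFAULT_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, psize, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	uint64_t ent, pfn;
	int fd;
	FILE *f;

	if (p == MAP_FAILED)
		return 1;
	p[0] = 1;	/* fault the page in so pagemap reports a pfn */

	/* /proc/self/pagemap: one u64 per virtual page, pfn in bits 0-54 */
	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0 || pread(fd, &ent, sizeof(ent),
			    ((uintptr_t)p / psize) * sizeof(ent)) != sizeof(ent))
		return 1;
	close(fd);
	pfn = ent & ((1ULL << 55) - 1);

	/* software-inject a memory error on the page */
	if (madvise(p, psize, MADV_HWPOISON))
		perror("madvise(MADV_HWPOISON)");

	/* undo it through the hwpoison-inject debugfs interface */
	f = fopen("/sys/kernel/debug/hwpoison/unpoison-pfn", "w");
	if (f) {
		fprintf(f, "%llu\n", (unsigned long long)pfn);
		fclose(f);
	}
	return 0;
}

With this patch, unpoisoning a hugetlb page through that interface also
frees every entry on its raw_hwp_page list and subtracts that count from
num_poisoned_pages instead of decrementing it by one.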

Signed-off-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
Reported-by: kernel test robot <lkp@intel.com>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
---
v6 -> v7:
- free_raw_hwp_pages() checks HPageVmemmapOptimized(hpage) only when
move_flag is true so that unpoison works for HPageVmemmapOptimized pages.

v5 -> v6:
- set type of return value of hugetlb_clear_page_hwpoison() to void,
- change type of return value of free_raw_hwp_pages() and
  __free_raw_hwp_pages() to unsigned long.

v4 -> v5:
- fix type of return value of free_raw_hwp_pages()
(found by kernel test robot),
- prevent unpoison for HPageVmemmapOptimized and HPageRawHwpUnreliable.
---
include/linux/swapops.h | 9 +++++++
mm/memory-failure.c | 52 +++++++++++++++++++++++++++++++++++++----
2 files changed, 56 insertions(+), 5 deletions(-)

diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index a01aeb3fcc0b..ddc98f96ad2c 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -498,6 +498,11 @@ static inline void num_poisoned_pages_dec(void)
 	atomic_long_dec(&num_poisoned_pages);
 }
 
+static inline void num_poisoned_pages_sub(long i)
+{
+	atomic_long_sub(i, &num_poisoned_pages);
+}
+
 #else
 
 static inline swp_entry_t make_hwpoison_entry(struct page *page)
@@ -518,6 +523,10 @@ static inline struct page *hwpoison_entry_to_page(swp_entry_t entry)
 static inline void num_poisoned_pages_inc(void)
 {
 }
+
+static inline void num_poisoned_pages_sub(long i)
+{
+}
 #endif
 
 static inline int non_swap_entry(swp_entry_t entry)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index fa29849769ed..8b9c0d228549 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1680,19 +1680,23 @@ static inline struct llist_head *raw_hwp_list_head(struct page *hpage)
 	return (struct llist_head *)&page_private(hpage + SUBPAGE_INDEX_HWPOISON);
 }
 
-static void __free_raw_hwp_pages(struct page *hpage)
+static unsigned long __free_raw_hwp_pages(struct page *hpage, bool move_flag)
 {
 	struct llist_head *head;
 	struct llist_node *t, *tnode;
+	unsigned long count = 0;
 
 	head = raw_hwp_list_head(hpage);
 	llist_for_each_safe(tnode, t, head->first) {
 		struct raw_hwp_page *p = container_of(tnode, struct raw_hwp_page, node);
 
-		SetPageHWPoison(p->page);
+		if (move_flag)
+			SetPageHWPoison(p->page);
 		kfree(p);
+		count++;
 	}
 	llist_del_all(head);
+	return count;
 }
 
 static int hugetlb_set_page_hwpoison(struct page *hpage, struct page *page)
@@ -1735,17 +1739,36 @@ static int hugetlb_set_page_hwpoison(struct page *hpage, struct page *page)
 		 * Once HPageRawHwpUnreliable is set, raw_hwp_page is not
 		 * used any more, so free it.
 		 */
-		__free_raw_hwp_pages(hpage);
+		__free_raw_hwp_pages(hpage, false);
 	}
 	return ret;
 }
 
+static unsigned long free_raw_hwp_pages(struct page *hpage, bool move_flag)
+{
+	/*
+	 * HPageVmemmapOptimized hugepages can't be freed because struct
+	 * pages for tail pages are required but they don't exist.
+	 */
+	if (move_flag && HPageVmemmapOptimized(hpage))
+		return 0;
+
+	/*
+	 * HPageRawHwpUnreliable hugepages shouldn't be unpoisoned by
+	 * definition.
+	 */
+	if (HPageRawHwpUnreliable(hpage))
+		return 0;
+
+	return __free_raw_hwp_pages(hpage, move_flag);
+}
+
 void hugetlb_clear_page_hwpoison(struct page *hpage)
 {
 	if (HPageRawHwpUnreliable(hpage))
 		return;
 	ClearPageHWPoison(hpage);
-	__free_raw_hwp_pages(hpage);
+	free_raw_hwp_pages(hpage, true);
 }
 
 /*
@@ -1889,6 +1912,10 @@ static inline int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *
 	return 0;
 }
 
+static inline unsigned long free_raw_hwp_pages(struct page *hpage, bool flag)
+{
+	return 0;
+}
 #endif /* CONFIG_HUGETLB_PAGE */
 
 static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
@@ -2294,6 +2321,7 @@ int unpoison_memory(unsigned long pfn)
 	struct page *p;
 	int ret = -EBUSY;
 	int freeit = 0;
+	unsigned long count = 1;
 	static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
 					DEFAULT_RATELIMIT_BURST);
 
@@ -2341,6 +2369,13 @@ int unpoison_memory(unsigned long pfn)

 	ret = get_hwpoison_page(p, MF_UNPOISON);
 	if (!ret) {
+		if (PageHuge(p)) {
+			count = free_raw_hwp_pages(page, false);
+			if (count == 0) {
+				ret = -EBUSY;
+				goto unlock_mutex;
+			}
+		}
 		ret = TestClearPageHWPoison(page) ? 0 : -EBUSY;
 	} else if (ret < 0) {
 		if (ret == -EHWPOISON) {
@@ -2349,6 +2384,13 @@ int unpoison_memory(unsigned long pfn)
unpoison_pr_info("Unpoison: failed to grab page %#lx\n",
pfn, &unpoison_rs);
} else {
+ if (PageHuge(p)) {
+ count = free_raw_hwp_pages(page, false);
+ if (count == 0) {
+ ret = -EBUSY;
+ goto unlock_mutex;
+ }
+ }
freeit = !!TestClearPageHWPoison(p);

put_page(page);
@@ -2361,7 +2403,7 @@ int unpoison_memory(unsigned long pfn)
 unlock_mutex:
 	mutex_unlock(&mf_mutex);
 	if (!ret || freeit) {
-		num_poisoned_pages_dec();
+		num_poisoned_pages_sub(count);
 		unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n",
 				 page_to_pfn(p), &unpoison_rs);
 	}
--
2.25.1