    From: Muchun Song <songmuchun@bytedance.com>
    Subject: [PATCH v3 16/21] mm/hugetlb: Set the PageHWPoison to the raw error page
    Date: Sun, 8 Nov 2020

    Because we reuse the first tail page, the struct pages of a series of
    tail pages may be backed by the same vmemmap page, so setting
    PageHWPoison on one tail page would effectively set it on all of
    them. Instead, use head[4].private to record the index of the raw
    error page, and set PageHWPoison on the raw error page later, once
    the vmemmap pages have been re-allocated.
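
    The patch therefore splits the work into a "record" step
    (set_subpage_hwpoison(), called from dissolve_free_huge_page() while
    the vmemmap pages are still shared) and a "deliver" step
    (subpage_hwpoison_deliver(), called from __free_hugepage() right
    after alloc_huge_page_vmemmap() gives each subpage its own struct
    page back). A minimal userspace sketch of this record-then-deliver
    pattern, with invented struct and helper names rather than kernel
    interfaces:

    /*
     * Standalone illustration of the record-then-deliver pattern.
     * "fake_page", "record_hwpoison" and "deliver_hwpoison" are
     * invented for this sketch; they are not kernel interfaces.
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct fake_page {
    	bool hwpoison;		/* stands in for the PageHWPoison flag */
    	unsigned long private;	/* stands in for page->private */
    };

    /* Record: tail struct pages may be shared, so only touch the head
     * and remember the error page's index in head[4].private. */
    static void record_hwpoison(struct fake_page *head, struct fake_page *page)
    {
    	head->hwpoison = true;
    	head[4].private = (unsigned long)(page - head);
    }

    /* Deliver: once every subpage has a private struct page again, move
     * the flag from the head page to the raw error page. */
    static void deliver_hwpoison(struct fake_page *head)
    {
    	struct fake_page *page = head;

    	if (head->hwpoison)
    		page = head + head[4].private;

    	if (page != head) {
    		page->hwpoison = true;
    		head->hwpoison = false;
    	}
    }

    int main(void)
    {
    	struct fake_page huge[512] = { 0 };	/* 2MB huge page: 512 subpages */

    	record_hwpoison(huge, &huge[137]);	/* poison hits subpage 137 */
    	deliver_hwpoison(huge);

    	printf("head: %d, subpage 137: %d\n",
    	       huge[0].hwpoison, huge[137].hwpoison);
    	return 0;
    }

    The sketch prints "head: 0, subpage 137: 1": after delivery only the
    raw error page stays marked, and every other subpage is reusable.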

    Signed-off-by: Muchun Song <songmuchun@bytedance.com>
    ---
    mm/hugetlb.c | 50 ++++++++++++++++++++++++++++++++++++++++++--------
    1 file changed, 42 insertions(+), 8 deletions(-)

    diff --git a/mm/hugetlb.c b/mm/hugetlb.c
    index 5aaa274b0684..00a6e97629aa 100644
    --- a/mm/hugetlb.c
    +++ b/mm/hugetlb.c
    @@ -1794,6 +1794,29 @@ static inline void free_gigantic_page(struct hstate *h, struct page *page)
     {
     	__free_gigantic_page(page, huge_page_order(h));
     }
    +
    +static inline void subpage_hwpoison_deliver(struct page *head)
    +{
    +	struct page *page = head;
    +
    +	if (PageHWPoison(head))
    +		page = head + page_private(head + 4);
    +
    +	/*
    +	 * Move PageHWPoison flag from head page to the raw error page,
    +	 * which makes any subpages rather than the error page reusable.
    +	 */
    +	if (page != head) {
    +		SetPageHWPoison(page);
    +		ClearPageHWPoison(head);
    +	}
    +}
    +
    +static inline void set_subpage_hwpoison(struct page *head, struct page *page)
    +{
    +	if (PageHWPoison(head))
    +		set_page_private(head + 4, page - head);
    +}
     #else
     static inline void hugetlb_vmemmap_init(struct hstate *h)
     {
    @@ -1841,6 +1864,22 @@ static inline void free_gigantic_page(struct hstate *h, struct page *page)
     	__free_gigantic_page(page, huge_page_order(h));
     	spin_lock(&hugetlb_lock);
     }
    +
    +static inline void subpage_hwpoison_deliver(struct page *head)
    +{
    +}
    +
    +static inline void set_subpage_hwpoison(struct page *head, struct page *page)
    +{
    +	/*
    +	 * Move PageHWPoison flag from head page to the raw error page,
    +	 * which makes any subpages rather than the error page reusable.
    +	 */
    +	if (PageHWPoison(head) && page != head) {
    +		SetPageHWPoison(page);
    +		ClearPageHWPoison(head);
    +	}
    +}
     #endif
     
     static void update_and_free_page(struct hstate *h, struct page *page)
    @@ -1859,6 +1898,7 @@ static void __free_hugepage(struct hstate *h, struct page *page)
     	int i;
     
     	alloc_huge_page_vmemmap(h, page);
    +	subpage_hwpoison_deliver(page);
     
     	for (i = 0; i < pages_per_huge_page(h); i++) {
     		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
    @@ -2416,14 +2456,8 @@ int dissolve_free_huge_page(struct page *page)
     		int nid = page_to_nid(head);
     		if (h->free_huge_pages - h->resv_huge_pages == 0)
     			goto out;
    -		/*
    -		 * Move PageHWPoison flag from head page to the raw error page,
    -		 * which makes any subpages rather than the error page reusable.
    -		 */
    -		if (PageHWPoison(head) && page != head) {
    -			SetPageHWPoison(page);
    -			ClearPageHWPoison(head);
    -		}
    +
    +		set_subpage_hwpoison(head, page);
     		list_del(&head->lru);
     		h->free_huge_pages--;
     		h->free_huge_pages_node[nid]--;
    --
    2.11.0