Subject: [RFC PATCH v2 10/16] mm,hwpoison: Rework soft offline for free pages
    When trying to soft-offline a free page, we need to first take it off
    the buddy allocator.
Once we know it is out of reach, we can safely flag it as poisoned.
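
In short, the flow for a free page becomes (a condensed sketch of the
soft_offline_free_page() hunk below):

	rc = -EBUSY;
	if (!dissolve_free_huge_page(page) &&	/* hugepage dissolved, or not a hugepage */
	    take_page_off_buddy(page)) {	/* detached from the freelists */
		page_handle_poison(page);	/* PG_hwpoison + refcount pin */
		rc = 0;
	}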

    take_page_off_buddy will be used to take a page meant to be poisoned
    off the buddy allocator.
    take_page_off_buddy calls break_down_buddy_pages, which splits a
    higher-order page in case our page belongs to one.
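
As an illustration (hypothetical pfn values, not taken from this patch),
isolating a target page at pfn 0x1001 out of a free order-2 block starting
at pfn 0x1000 proceeds as follows:

	/*
	 * high=1: block 0x1000-0x1003, target in the lower half:
	 *         put the upper half 0x1002-0x1003 back as an order-1 buddy.
	 * high=0: block 0x1000-0x1001, target in the upper half:
	 *         put 0x1000 back as an order-0 buddy.
	 *
	 * Only the order-0 target page at 0x1001 stays off the freelists.
	 */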

Once the page is under our control, we call page_handle_poison to set it
as poisoned and grab a refcount on it.
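
Afterwards the page is expected to satisfy the following (illustrative
assertions, not part of the patch):

	VM_BUG_ON_PAGE(!PageHWPoison(page), page);	/* flagged as poisoned */
	VM_BUG_ON_PAGE(page_count(page) != 1, page);	/* pinned, never freed back */
	VM_BUG_ON_PAGE(PageBuddy(page), page);		/* off the buddy freelists */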

    Signed-off-by: Oscar Salvador <osalvador@suse.de>
    ---
    mm/memory-failure.c | 20 +++++++++++-----
    mm/page_alloc.c | 68 +++++++++++++++++++++++++++++++++++++++++++++++++++++
    2 files changed, 82 insertions(+), 6 deletions(-)

    diff --git a/mm/memory-failure.c b/mm/memory-failure.c
    index 37b230b8cfe7..1d986580522d 100644
    --- a/mm/memory-failure.c
    +++ b/mm/memory-failure.c
    @@ -78,6 +78,15 @@ EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
 EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
 EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);

    +extern bool take_page_off_buddy(struct page *page);
    +
+static void page_handle_poison(struct page *page)
+{
+	SetPageHWPoison(page);
+	page_ref_inc(page);
+	num_poisoned_pages_inc();
+}
    +
 static int hwpoison_filter_dev(struct page *p)
 {
 	struct address_space *mapping;
    @@ -1830,14 +1839,13 @@ static int soft_offline_in_use_page(struct page *page)

 static int soft_offline_free_page(struct page *page)
 {
-	int rc = dissolve_free_huge_page(page);
+	int rc = -EBUSY;
 
-	if (!rc) {
-		if (set_hwpoison_free_buddy_page(page))
-			num_poisoned_pages_inc();
-		else
-			rc = -EBUSY;
+	if (!dissolve_free_huge_page(page) && take_page_off_buddy(page)) {
+		page_handle_poison(page);
+		rc = 0;
 	}
+
 	return rc;
 }

    diff --git a/mm/page_alloc.c b/mm/page_alloc.c
    index cd1dd0712624..255df0c76a40 100644
    --- a/mm/page_alloc.c
    +++ b/mm/page_alloc.c
    @@ -8632,6 +8632,74 @@ bool is_free_buddy_page(struct page *page)

 #ifdef CONFIG_MEMORY_FAILURE
 /*
+ * Break down a higher-order page into sub-pages, and keep our target page
+ * out of the buddy allocator.
+ */
+static void break_down_buddy_pages(struct zone *zone, struct page *page,
+				   struct page *target, int low, int high,
+				   struct free_area *area, int migratetype)
+{
+	unsigned long size = 1 << high;
+	struct page *current_buddy, *next_page;
+
+	while (high > low) {
+		area--;
+		high--;
+		size >>= 1;
+
+		if (target >= &page[size]) {
+			next_page = page + size;
+			current_buddy = page;
+		} else {
+			next_page = page;
+			current_buddy = page + size;
+		}
+
+		if (set_page_guard(zone, current_buddy, high, migratetype))
+			continue;
+
+		if (current_buddy != target) {
+			add_to_free_area(current_buddy, area, migratetype);
+			set_page_order(current_buddy, high);
+			page = next_page;
+		}
+	}
+}
    +
    +/*
    + * Take a page that will be marked as poisoned off the buddy allocator.
    + */
+bool take_page_off_buddy(struct page *page)
+{
+	struct zone *zone = page_zone(page);
+	unsigned long pfn = page_to_pfn(page);
+	unsigned long flags;
+	unsigned int order;
+	bool ret = false;
+
+	spin_lock_irqsave(&zone->lock, flags);
+	for (order = 0; order < MAX_ORDER; order++) {
+		struct page *page_head = page - (pfn & ((1 << order) - 1));
+		int buddy_order = page_order(page_head);
+		struct free_area *area = &(zone->free_area[buddy_order]);
+
+		if (PageBuddy(page_head) && buddy_order >= order) {
+			unsigned long pfn_head = page_to_pfn(page_head);
+			int migratetype = get_pfnblock_migratetype(page_head,
+								   pfn_head);
+
+			del_page_from_free_area(page_head, area);
+			break_down_buddy_pages(zone, page_head, page, 0,
+					       buddy_order, area, migratetype);
+			ret = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&zone->lock, flags);
+	return ret;
+}
    +
    +/*
  * Set PG_hwpoison flag if a given page is confirmed to be a free page. This
  * test is performed under the zone lock to prevent a race against page
  * allocation.
    --
    2.12.3