From: Huang Ying <ying.huang@intel.com>
Subject: [PATCH -mm -v4 16/21] mm, THP, swap: Free PMD swap mapping when zap_huge_pmd()

For a PMD swap mapping, zap_huge_pmd() clears the PMD and calls
free_swap_and_cache() to decrease the swap reference count and, when
possible, free or split the huge swap cluster and the THP in the swap
cache.
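
In outline, when the PMD is not present, the new code dispatches on the
entry type as in the simplified sketch below (condensed from the
zap_huge_pmd() hunk in this patch, not verbatim kernel code;
thp_swap_supported() and the two-argument free_swap_and_cache() are
introduced earlier in this series):

	swp_entry_t entry = pmd_to_swp_entry(orig_pmd);

	if (thp_migration_supported() && is_migration_entry(entry)) {
		/* PMD migration entry: the THP is still in memory */
		page = pfn_to_page(swp_offset(entry));
	} else if (thp_swap_supported() && !non_swap_entry(entry)) {
		/*
		 * PMD swap mapping: decrease the swap reference count
		 * and possibly free or split the huge swap cluster and
		 * the THP in the swap cache.
		 */
		free_swap_and_cache(entry, true);
	}

Any other non-present PMD is a bug: the code warns once and bails out
to the unlock label.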

    Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
    Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
    Cc: Andrea Arcangeli <aarcange@redhat.com>
    Cc: Michal Hocko <mhocko@suse.com>
    Cc: Johannes Weiner <hannes@cmpxchg.org>
    Cc: Shaohua Li <shli@kernel.org>
    Cc: Hugh Dickins <hughd@google.com>
    Cc: Minchan Kim <minchan@kernel.org>
    Cc: Rik van Riel <riel@redhat.com>
    Cc: Dave Hansen <dave.hansen@linux.intel.com>
    Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
    Cc: Zi Yan <zi.yan@cs.rutgers.edu>
    Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
    ---
    mm/huge_memory.c | 32 +++++++++++++++++++++-----------
    1 file changed, 21 insertions(+), 11 deletions(-)

    diff --git a/mm/huge_memory.c b/mm/huge_memory.c
    index 38c247a38f67..6b9ca1c14500 100644
    --- a/mm/huge_memory.c
    +++ b/mm/huge_memory.c
@@ -2007,7 +2007,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		spin_unlock(ptl);
 		if (is_huge_zero_pmd(orig_pmd))
 			tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
-	} else if (is_huge_zero_pmd(orig_pmd)) {
+	} else if (pmd_present(orig_pmd) && is_huge_zero_pmd(orig_pmd)) {
 		zap_deposited_table(tlb->mm, pmd);
 		spin_unlock(ptl);
 		tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
@@ -2020,17 +2020,27 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			page_remove_rmap(page, true);
 			VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
 			VM_BUG_ON_PAGE(!PageHead(page), page);
-		} else if (thp_migration_supported()) {
-			swp_entry_t entry;
-
-			VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
-			entry = pmd_to_swp_entry(orig_pmd);
-			page = pfn_to_page(swp_offset(entry));
+		} else {
+			swp_entry_t entry = pmd_to_swp_entry(orig_pmd);
+
+			if (thp_migration_supported() &&
+			    is_migration_entry(entry))
+				page = pfn_to_page(swp_offset(entry));
+			else if (thp_swap_supported() &&
+				 !non_swap_entry(entry))
+				free_swap_and_cache(entry, true);
+			else {
+				WARN_ONCE(1,
+"Non present huge pmd without pmd migration or swap enabled!");
+				goto unlock;
+			}
 			flush_needed = 0;
-		} else
-			WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
+		}
 
-		if (PageAnon(page)) {
+		if (!page) {
+			zap_deposited_table(tlb->mm, pmd);
+			add_mm_counter(tlb->mm, MM_SWAPENTS, -HPAGE_PMD_NR);
+		} else if (PageAnon(page)) {
 			zap_deposited_table(tlb->mm, pmd);
 			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
 		} else {
@@ -2038,7 +2048,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 				zap_deposited_table(tlb->mm, pmd);
 			add_mm_counter(tlb->mm, MM_FILEPAGES, -HPAGE_PMD_NR);
 		}
-
+unlock:
 		spin_unlock(ptl);
 		if (flush_needed)
 			tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
    --
    2.16.4
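
For a zapped PMD swap mapping, page stays NULL, so the new !page branch
frees the deposited page table and decreases MM_SWAPENTS by
HPAGE_PMD_NR (512 for a 2MB THP with 4KB base pages on x86_64),
mirroring the per-entry MM_SWAPENTS accounting that zap_pte_range()
does for normal PTE swap entries.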