From: Mel Gorman <mgorman@suse.de>
Subject: [PATCH 05/15] mm: numa: Call MMU notifiers on THP migration
Date: Tue, 3 Dec 2013
MMU notifiers must be called on THP page migration, otherwise secondary
MMUs (e.g. KVM or IOMMU drivers that register an mmu_notifier on the mm)
are left with stale translations for the migrated huge page.

Signed-off-by: Mel Gorman <mgorman@suse.de>
---
 mm/migrate.c | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)
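
The notifier calls added below follow the usual bracketing pattern: every
write to the PMD happens between invalidate_range_start() and
invalidate_range_end(), so a secondary MMU never relies on the window in
which the mapping is transient. As a minimal sketch of that pattern (not
part of the patch; remap_huge_pmd() is a made-up helper, and the notifier,
locking and page-table calls use the same 3.13-era signatures as the diff
below):

#include <linux/mm.h>
#include <linux/huge_mm.h>
#include <linux/mmu_notifier.h>
#include <asm/tlbflush.h>

static void remap_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			   unsigned long address, pmd_t entry)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long mmun_start = address & HPAGE_PMD_MASK;
	unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;

	/* Warn secondary MMUs that the range is about to change... */
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	spin_lock(&mm->page_table_lock);

	pmdp_clear_flush(vma, mmun_start, pmd);	/* clear PMD, flush TLB */
	set_pmd_at(mm, mmun_start, pmd, entry);
	flush_tlb_range(vma, mmun_start, mmun_end);

	spin_unlock(&mm->page_table_lock);
	/* ...and tell them when it is safe to re-fault the range. */
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
}

Renaming haddr to mmun_start, as the patch does, keeps the naming
convention used by the other mmu_notifier call sites in mm/.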

diff --git a/mm/migrate.c b/mm/migrate.c
index c4743d6..3a87511 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -36,6 +36,7 @@
 #include <linux/hugetlb_cgroup.h>
 #include <linux/gfp.h>
 #include <linux/balloon_compaction.h>
+#include <linux/mmu_notifier.h>
 
 #include <asm/tlbflush.h>

@@ -1703,12 +1704,13 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 				unsigned long address,
 				struct page *page, int node)
 {
-	unsigned long haddr = address & HPAGE_PMD_MASK;
 	pg_data_t *pgdat = NODE_DATA(node);
 	int isolated = 0;
 	struct page *new_page = NULL;
 	struct mem_cgroup *memcg = NULL;
 	int page_lru = page_is_file_cache(page);
+	unsigned long mmun_start = address & HPAGE_PMD_MASK;
+	unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
 	pmd_t orig_entry;
/*
@@ -1750,10 +1752,12 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	WARN_ON(PageLRU(new_page));
 
 	/* Recheck the target PMD */
+	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 	spin_lock(&mm->page_table_lock);
 	if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) {
 fail_putback:
 		spin_unlock(&mm->page_table_lock);
+		mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
 		/* Reverse changes made by migrate_page_copy() */
 		if (TestClearPageActive(new_page))
@@ -1794,10 +1798,11 @@ fail_putback:
 	 * The SetPageUptodate on the new page and page_add_new_anon_rmap
 	 * guarantee the copy is visible before the pagetable update.
 	 */
-	flush_cache_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
-	page_add_new_anon_rmap(new_page, vma, haddr);
-	pmdp_clear_flush(vma, haddr, pmd);
-	set_pmd_at(mm, haddr, pmd, entry);
+	flush_cache_range(vma, mmun_start, mmun_end);
+	page_add_new_anon_rmap(new_page, vma, mmun_start);
+	pmdp_clear_flush(vma, mmun_start, pmd);
+	set_pmd_at(mm, mmun_start, pmd, entry);
+	flush_tlb_range(vma, mmun_start, mmun_end);
 	update_mmu_cache_pmd(vma, address, &entry);
 
 	if (page_count(page) != 2) {
@@ -1817,6 +1822,7 @@ fail_putback:
 	 */
 	mem_cgroup_end_migration(memcg, page, new_page, true);
 	spin_unlock(&mm->page_table_lock);
+	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
 	unlock_page(new_page);
 	unlock_page(page);
@@ -1837,7 +1843,7 @@ out_dropref:
 	spin_lock(&mm->page_table_lock);
 	if (pmd_same(*pmd, entry)) {
 		entry = pmd_mknonnuma(entry);
-		set_pmd_at(mm, haddr, pmd, entry);
+		set_pmd_at(mm, mmun_start, pmd, entry);
 		update_mmu_cache_pmd(vma, address, &entry);
 	}
 	spin_unlock(&mm->page_table_lock);
--
1.8.4
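
For reference, the consumer side of these notifications is a driver that
registers an mmu_notifier on the mm; the callbacks below are what the new
invalidate_range_start/end calls reach. A minimal sketch with a
hypothetical "example" driver (3.13-era callback signatures; without this
patch, these callbacks were never invoked for THP migration):

#include <linux/mm_types.h>
#include <linux/mmu_notifier.h>

static void example_invalidate_range_start(struct mmu_notifier *mn,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	/* Shoot down secondary-MMU translations for [start, end). */
}

static void example_invalidate_range_end(struct mmu_notifier *mn,
					 struct mm_struct *mm,
					 unsigned long start,
					 unsigned long end)
{
	/* Primary page tables are stable again; re-faulting is safe. */
}

static const struct mmu_notifier_ops example_mn_ops = {
	.invalidate_range_start	= example_invalidate_range_start,
	.invalidate_range_end	= example_invalidate_range_end,
};

static struct mmu_notifier example_mn = {
	.ops = &example_mn_ops,
};

/* Attach to an address space; callbacks fire until unregistered. */
static int example_attach(struct mm_struct *mm)
{
	return mmu_notifier_register(&example_mn, mm);
}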

