From: Nadav Amit <namit@vmware.com>
Subject: [RFC 06/20] fs/task_mmu: use mmu_gather interface of clear-soft-dirty

Use the mmu_gather interface in task_mmu instead of
{inc|dec}_tlb_flush_pending(). This allows the code to be consolidated
and avoids potential bugs.
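
For reference, the before/after flushing pattern looks roughly like the
sketch below. This is an illustration only, not code from the patch:
example_clear_range_*() is a made-up caller, and the two-argument
tlb_gather_mmu()/one-argument tlb_finish_mmu() signatures are the ones
this series builds on:

	#include <linux/mm.h>	/* flush-pending helpers, PAGE_SIZE */
	#include <asm/tlb.h>	/* struct mmu_gather, tlb_*() helpers */

	/* Old pattern: advertise a pending flush, do the PTE work, then
	 * flush the entire mm unconditionally. */
	static void example_clear_range_old(struct mm_struct *mm)
	{
		inc_tlb_flush_pending(mm);
		/* ... clear soft-dirty bits on the PTEs ... */
		flush_tlb_mm(mm);
		dec_tlb_flush_pending(mm);
	}

	/* New pattern: let mmu_gather record which ranges were touched
	 * and issue the (possibly narrower) flush in tlb_finish_mmu(). */
	static void example_clear_range_new(struct mm_struct *mm,
					    unsigned long addr)
	{
		struct mmu_gather tlb;

		tlb_gather_mmu(&tlb, mm);
		/* ... for each PTE whose soft-dirty bit is cleared: */
		tlb_flush_pte_range(&tlb, addr, PAGE_SIZE);
		tlb_finish_mmu(&tlb);
	}

tlb_gather_mmu()/tlb_finish_mmu() do the {inc|dec}_tlb_flush_pending()
bookkeeping themselves, which is why the manual calls can go away.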

Signed-off-by: Nadav Amit <namit@vmware.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Nick Piggin <npiggin@gmail.com>
Cc: x86@kernel.org
---
fs/proc/task_mmu.c | 27 ++++++++++++++++++++++++---
1 file changed, 24 insertions(+), 3 deletions(-)

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 3cec6fbef725..4cd048ffa0f6 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1032,8 +1032,25 @@ enum clear_refs_types {
 
 struct clear_refs_private {
 	enum clear_refs_types type;
+	struct mmu_gather tlb;
 };
 
+static int tlb_pre_vma(unsigned long start, unsigned long end,
+		       struct mm_walk *walk)
+{
+	struct clear_refs_private *cp = walk->private;
+
+	tlb_start_vma(&cp->tlb, walk->vma);
+	return 0;
+}
+
+static void tlb_post_vma(struct mm_walk *walk)
+{
+	struct clear_refs_private *cp = walk->private;
+
+	tlb_end_vma(&cp->tlb, walk->vma);
+}
+
 #ifdef CONFIG_MEM_SOFT_DIRTY
 
 #define is_cow_mapping(flags) (((flags) & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE)
@@ -1140,6 +1157,7 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 		/* Clear accessed and referenced bits. */
 		pmdp_test_and_clear_young(vma, addr, pmd);
 		test_and_clear_page_young(page);
+		tlb_flush_pmd_range(&cp->tlb, addr, HPAGE_PMD_SIZE);
 		ClearPageReferenced(page);
 out:
 		spin_unlock(ptl);
@@ -1155,6 +1173,7 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 
 		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
 			clear_soft_dirty(vma, addr, pte);
+			tlb_flush_pte_range(&cp->tlb, addr, PAGE_SIZE);
 			continue;
 		}
 
@@ -1168,6 +1187,7 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 		/* Clear accessed and referenced bits. */
 		ptep_test_and_clear_young(vma, addr, pte);
 		test_and_clear_page_young(page);
+		tlb_flush_pte_range(&cp->tlb, addr, PAGE_SIZE);
 		ClearPageReferenced(page);
 	}
 	pte_unmap_unlock(pte - 1, ptl);
@@ -1198,6 +1218,8 @@ static int clear_refs_test_walk(unsigned long start, unsigned long end,
 }
 
 static const struct mm_walk_ops clear_refs_walk_ops = {
+	.pre_vma		= tlb_pre_vma,
+	.post_vma		= tlb_post_vma,
 	.pmd_entry		= clear_refs_pte_range,
 	.test_walk		= clear_refs_test_walk,
 };
@@ -1248,6 +1270,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 			goto out_unlock;
 		}
 
+		tlb_gather_mmu(&cp.tlb, mm);
 		if (type == CLEAR_REFS_SOFT_DIRTY) {
 			for (vma = mm->mmap; vma; vma = vma->vm_next) {
 				if (!(vma->vm_flags & VM_SOFTDIRTY))
@@ -1256,7 +1279,6 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 				vma_set_page_prot(vma);
 			}
 
-			inc_tlb_flush_pending(mm);
 			mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
 						0, NULL, mm, 0, -1UL);
 			mmu_notifier_invalidate_range_start(&range);
@@ -1265,10 +1287,9 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 				&cp);
 		if (type == CLEAR_REFS_SOFT_DIRTY) {
 			mmu_notifier_invalidate_range_end(&range);
-			flush_tlb_mm(mm);
-			dec_tlb_flush_pending(mm);
 		}
 out_unlock:
+	tlb_finish_mmu(&cp.tlb);
 	mmap_write_unlock(mm);
 out_mm:
 	mmput(mm);
--
2.25.1