From: Michel Lespinasse <michel@lespinasse.org>
Subject: [RFC PATCH 14/37] mm: add pte_map_lock() and pte_spinlock()

pte_map_lock() and pte_spinlock() are used by fault handlers to ensure
the pte is mapped and locked before they commit the faulted page to the
mm's address space at the end of the fault.

The functions differ in their preconditions; pte_map_lock() expects
the pte to be unmapped prior to the call, while pte_spinlock() expects
it to be already mapped.
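
For illustration, a minimal call-site sketch (the handler name is
hypothetical and not part of this patch; pte_unmap_unlock() is the
existing kernel helper):

	static vm_fault_t do_example_fault(struct vm_fault *vmf)
	{
		/* vmf->pte must be NULL here, per the pte_map_lock() precondition. */
		if (!pte_map_lock(vmf))
			return VM_FAULT_RETRY;
		/* ... install the faulted page, e.g. via set_pte_at() ... */
		pte_unmap_unlock(vmf->pte, vmf->ptl);
		return 0;
	}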

In the speculative fault case, the functions verify, after locking the pte,
that the mmap sequence count has not changed since the start of the fault,
and thus that no mmap lock writers have been running concurrently with
the fault. After that point the page table lock serializes any further
races with concurrent mmap lock writers.
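
The check itself is introduced earlier in this series; its expected
semantics are roughly as sketched below (assuming a per-mm counter
named mm->mmap_seq — this is an illustration of the contract, not the
actual implementation):

	static inline bool mmap_seq_read_check(struct mm_struct *mm,
					       unsigned long seq)
	{
		/* Pairs with the counter updates done by mmap lock writers. */
		smp_rmb();
		return seq == READ_ONCE(mm->mmap_seq);
	}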

If the mmap sequence count check fails, both functions return false
and leave the pte unmapped and unlocked.
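
Callers are therefore expected to abort the speculative fault on
failure, along these lines (hypothetical sketch):

	if (!pte_spinlock(vmf))
		return VM_FAULT_RETRY;	/* retry the fault under the mmap lock */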

Signed-off-by: Michel Lespinasse <michel@lespinasse.org>
---
 include/linux/mm.h | 34 ++++++++++++++++++++++
 mm/memory.c        | 71 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 105 insertions(+)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index dee8a4833779..f26490aff514 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3183,5 +3183,39 @@ extern int sysctl_nr_trim_pages;

void mem_dump_obj(void *object);

+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+
+bool __pte_map_lock(struct vm_fault *vmf);
+
+static inline bool pte_map_lock(struct vm_fault *vmf)
+{
+	VM_BUG_ON(vmf->pte);
+	return __pte_map_lock(vmf);
+}
+
+static inline bool pte_spinlock(struct vm_fault *vmf)
+{
+	VM_BUG_ON(!vmf->pte);
+	return __pte_map_lock(vmf);
+}
+
+#else /* !CONFIG_SPECULATIVE_PAGE_FAULT */
+
+static inline bool pte_map_lock(struct vm_fault *vmf)
+{
+	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
+				       &vmf->ptl);
+	return true;
+}
+
+static inline bool pte_spinlock(struct vm_fault *vmf)
+{
+	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
+	spin_lock(vmf->ptl);
+	return true;
+}
+
+#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
+
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
diff --git a/mm/memory.c b/mm/memory.c
index a17704aac019..3e192d5f89a6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2566,6 +2566,77 @@ int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
}
EXPORT_SYMBOL_GPL(apply_to_existing_page_range);

+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+
+bool __pte_map_lock(struct vm_fault *vmf)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	pmd_t pmdval;
+#endif
+	pte_t *pte = vmf->pte;
+	spinlock_t *ptl;
+
+	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
+		vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
+		if (!pte)
+			vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
+		spin_lock(vmf->ptl);
+		return true;
+	}
+
+	local_irq_disable();
+	if (!mmap_seq_read_check(vmf->vma->vm_mm, vmf->seq))
+		goto fail;
+	/*
+	 * The mmap sequence count check guarantees that the page
+	 * tables are still valid at that point, and having IRQs
+	 * disabled ensures that they stay around (see Fast GUP
+	 * comment in mm/gup.c).
+	 */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	/*
+	 * Check that the pmd value is still the same, to ensure that no
+	 * huge collapse operation is in progress behind our back.
+	 */
+	pmdval = READ_ONCE(*vmf->pmd);
+	if (!pmd_same(pmdval, vmf->orig_pmd))
+		goto fail;
+#endif
+	ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
+	if (!pte)
+		pte = pte_offset_map(vmf->pmd, vmf->address);
+	/*
+	 * Try locking the page table.
+	 *
+	 * Note that we might race against zap_pte_range() which
+	 * invalidates TLBs while holding the page table lock.
+	 * We still have local IRQs disabled here to prevent the
+	 * page table from being reclaimed, and zap_pte_range() could
+	 * thus deadlock with us if we tried using spin_lock() here.
+	 *
+	 * We also don't want to retry until spin_trylock() succeeds,
+	 * because of the starvation potential against a stream of lockers.
+	 */
+	if (unlikely(!spin_trylock(ptl)))
+		goto fail;
+	if (!mmap_seq_read_check(vmf->vma->vm_mm, vmf->seq))
+		goto unlock_fail;
+	local_irq_enable();
+	vmf->pte = pte;
+	vmf->ptl = ptl;
+	return true;
+
+unlock_fail:
+	spin_unlock(ptl);
+fail:
+	if (pte)
+		pte_unmap(pte);
+	local_irq_enable();
+	return false;
+}
+
+#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
+
/*
* handle_pte_fault chooses page fault handler according to an entry which was
* read non-atomically. Before making any commitment, on those architectures
--
2.20.1