Date: 21 Apr 2020
Subject: [PATCH v5 09/10] mmap locking API: add mmap_assert_locked
From: Michel Lespinasse <walken@google.com>
Add mmap_assert_locked() to assert that mmap_sem is held. When lockdep is
enabled, the assertion verifies that the calling task actually holds the
lock; otherwise it falls back to rwsem_is_locked(), which can only check
that the lock is held by someone.

Signed-off-by: Michel Lespinasse <walken@google.com>
---
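Usage sketch (illustrative only, not part of the diff below;
walk_vmas_example() is an invented name): any code path that requires
mmap_sem to be held can now state and enforce that precondition in one line:

#include <linux/mmap_lock.h>
#include <linux/mm_types.h>

/* Hypothetical caller: requires mmap_sem held (read or write). */
static void walk_vmas_example(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	mmap_assert_locked(mm);	/* fires VM_BUG_ON_MM() if mmap_sem is not held */
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		; /* ... inspect each vma safely ... */
}
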
 fs/userfaultfd.c          |  6 +++---
 include/linux/mmap_lock.h | 10 ++++++++++
 mm/gup.c                  |  2 +-
 mm/memory.c               |  2 +-
 4 files changed, 15 insertions(+), 5 deletions(-)

diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 9c645eee1a59..12b492409040 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -234,7 +234,7 @@ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
 	pte_t *ptep, pte;
 	bool ret = true;
 
-	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+	mmap_assert_locked(mm);
 
 	ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
 
@@ -286,7 +286,7 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
 	pte_t *pte;
 	bool ret = true;
 
-	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+	mmap_assert_locked(mm);
 
 	pgd = pgd_offset(mm, address);
 	if (!pgd_present(*pgd))
@@ -405,7 +405,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
 	 * Coredumping runs without mmap_sem so we can only check that
 	 * the mmap_sem is held, if PF_DUMPCORE was not set.
 	 */
-	WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem));
+	mmap_assert_locked(mm);
 
 	ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
 	if (!ctx)
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index 9e104835a0d1..f7a3a9550cc5 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -1,6 +1,8 @@
 #ifndef _LINUX_MMAP_LOCK_H
 #define _LINUX_MMAP_LOCK_H
 
+#include <linux/mmdebug.h>
+
 #define MMAP_LOCK_INITIALIZER(name) \
 	.mmap_sem = __RWSEM_INITIALIZER(name.mmap_sem),
 
@@ -73,4 +75,12 @@ static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
 	up_read_non_owner(&mm->mmap_sem);
 }
 
+static inline void mmap_assert_locked(struct mm_struct *mm)
+{
+	if (IS_ENABLED(CONFIG_LOCKDEP) && debug_locks)
+		VM_BUG_ON_MM(!lockdep_is_held(&mm->mmap_sem), mm);
+	else
+		VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
+}
+
 #endif /* _LINUX_MMAP_LOCK_H */
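
Note on the helper above: lockdep_is_held() is the stronger check, verifying
that the *current* task holds mmap_sem, while the rwsem_is_locked() fallback
only verifies that somebody does. A standalone userspace sketch of the same
two-tier pattern (pthreads stand in for the rwsem, a thread-local flag stands
in for lockdep; all names here are invented for illustration):

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

#define HAVE_LOCK_TRACKING 1	/* stand-in for CONFIG_LOCKDEP && debug_locks */

static pthread_rwlock_t mmap_lock = PTHREAD_RWLOCK_INITIALIZER;
static __thread bool lock_held_by_me;	/* poor man's lockdep_is_held() */

static void lock_read(void)
{
	pthread_rwlock_rdlock(&mmap_lock);
	lock_held_by_me = true;
}

static void unlock_read(void)
{
	lock_held_by_me = false;
	pthread_rwlock_unlock(&mmap_lock);
}

/* Weak check, like rwsem_is_locked(): true if *anyone* holds the lock. */
static bool is_locked(void)
{
	if (pthread_rwlock_trywrlock(&mmap_lock) == 0) {
		pthread_rwlock_unlock(&mmap_lock);
		return false;
	}
	return true;
}

/* Analogue of mmap_assert_locked(). */
static void assert_locked(void)
{
	if (HAVE_LOCK_TRACKING)
		assert(lock_held_by_me);	/* strong: this thread holds it */
	else
		assert(is_locked());		/* weak: someone holds it */
}

int main(void)
{
	lock_read();
	assert_locked();	/* passes: we hold the lock for read */
	unlock_read();
	return 0;
}

With HAVE_LOCK_TRACKING set to 0, assert_locked() would still pass even when
the asserting thread does not hold the lock, as long as some other thread
does; that missed violation is exactly what the lockdep branch catches.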
diff --git a/mm/gup.c b/mm/gup.c
index 0404e52513b2..e12993ceb711 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1403,7 +1403,7 @@ long populate_vma_page_range(struct vm_area_struct *vma,
 	VM_BUG_ON(end & ~PAGE_MASK);
 	VM_BUG_ON_VMA(start < vma->vm_start, vma);
 	VM_BUG_ON_VMA(end > vma->vm_end, vma);
-	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
+	mmap_assert_locked(mm);
 
 	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
 	if (vma->vm_flags & VM_LOCKONFAULT)
diff --git a/mm/memory.c b/mm/memory.c
index e6dd3309c5a3..20f98ea8968e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1214,7 +1214,7 @@ static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
 		next = pud_addr_end(addr, end);
 		if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
 			if (next - addr != HPAGE_PUD_SIZE) {
-				VM_BUG_ON_VMA(!rwsem_is_locked(&tlb->mm->mmap_sem), vma);
+				mmap_assert_locked(tlb->mm);
 				split_huge_pud(vma, pud, addr);
 			} else if (zap_huge_pud(tlb, vma, pud, addr))
 				goto next;
--
2.26.1.301.g55bc3eb7cb9-goog