 
From: Jason Gunthorpe <jgg@mellanox.com>
Subject: [PATCH v3 hmm 02/11] mm/mmu_notifiers: do not speculatively allocate a mmu_notifier_mm
Date: 6 Aug 2019

A prior commit, e0f3c3f78da2 ("mm/mmu_notifier: init notifier if necessary"),
made an attempt at this, but it had to be reverted because calling the
GFP_KERNEL allocator under the i_mmap_mutex causes a deadlock; see
commit 35cfa2b0b491 ("mm/mmu_notifier: allocate mmu_notifier in advance").

However, we can avoid that problem by doing the allocation only under
the mmap_sem, which this path already holds for write.

Since all writers to mm->mmu_notifier_mm hold the write side of the
mmap_sem, reading it under that sem is deterministic, and we can use that
to decide whether the allocation path is required, without speculation.

The actual update to mm->mmu_notifier_mm must still be done under
mm_take_all_locks() to ensure read-side coherency.
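
To make the ordering concrete, here is a minimal userspace sketch of the
same pattern, assuming pthreads stand in for the kernel primitives:
outer_rwlock plays the role of the mmap_sem, inner_lock plays the role of
the mm_take_all_locks() region (where, by the rules of this sketch,
blocking allocation is forbidden), and lazy_state / register_user are
invented names for illustration only:

/* Hedged sketch, not kernel code: pthreads approximate the locking. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct lazy_state {
	pthread_mutex_t lock;	/* stands in for mmu_notifier_mm->lock */
	int nr_items;		/* stands in for the notifier hlist */
};

/* outer_rwlock ~ mmap_sem, inner_lock ~ mm_take_all_locks() region */
static pthread_rwlock_t outer_rwlock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t inner_lock = PTHREAD_MUTEX_INITIALIZER;
static struct lazy_state *state;	/* ~ mm->mmu_notifier_mm */

static int register_user(void)
{
	struct lazy_state *new_state = NULL;

	pthread_rwlock_wrlock(&outer_rwlock);

	/*
	 * All writers of 'state' hold the write side of outer_rwlock,
	 * so this read is deterministic: allocate only when needed, and
	 * do it before taking inner_lock, under which allocation is not
	 * allowed.
	 */
	if (!state) {
		new_state = malloc(sizeof(*new_state));
		if (!new_state) {
			pthread_rwlock_unlock(&outer_rwlock);
			return -1;
		}
		pthread_mutex_init(&new_state->lock, NULL);
		new_state->nr_items = 0;
	}

	pthread_mutex_lock(&inner_lock);
	if (new_state)
		state = new_state;	/* publish under the inner lock */
	state->nr_items++;
	pthread_mutex_unlock(&inner_lock);

	pthread_rwlock_unlock(&outer_rwlock);
	return 0;
}

int main(void)
{
	/* the second call finds 'state' already set and skips the malloc */
	if (register_user() || register_user())
		return 1;
	printf("one allocation, nr_items=%d\n", state->nr_items);
	return 0;
}

The point of the sketch is only the ordering: the sleeping allocation
happens while just the outer write lock is held, and the pointer is
published inside the inner critical section, so it is never read
half-initialized.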

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
---
 mm/mmu_notifier.c | 34 ++++++++++++++++++++++------------
 1 file changed, 22 insertions(+), 12 deletions(-)

diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 218a6f108bc2d0..696810f632ade1 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -242,27 +242,32 @@ EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range);
  */
 int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
 {
-	struct mmu_notifier_mm *mmu_notifier_mm;
+	struct mmu_notifier_mm *mmu_notifier_mm = NULL;
 	int ret;
 
 	lockdep_assert_held_write(&mm->mmap_sem);
 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
 
-	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
-	if (unlikely(!mmu_notifier_mm))
-		return -ENOMEM;
+	if (!mm->mmu_notifier_mm) {
+		/*
+		 * kmalloc cannot be called under mm_take_all_locks(), but we
+		 * know that mm->mmu_notifier_mm can't change while we hold
+		 * the write side of the mmap_sem.
+		 */
+		mmu_notifier_mm =
+			kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
+		if (!mmu_notifier_mm)
+			return -ENOMEM;
+
+		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
+		spin_lock_init(&mmu_notifier_mm->lock);
+	}
 
 	ret = mm_take_all_locks(mm);
 	if (unlikely(ret))
 		goto out_clean;
 
-	if (!mm_has_notifiers(mm)) {
-		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
-		spin_lock_init(&mmu_notifier_mm->lock);
-
-		mm->mmu_notifier_mm = mmu_notifier_mm;
-		mmu_notifier_mm = NULL;
-	}
+	/* Pairs with the mmdrop in mmu_notifier_unregister_* */
 	mmgrab(mm);
 
 	/*
@@ -273,14 +278,19 @@ int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
 	 * We can't race against any other mmu notifier method either
 	 * thanks to mm_take_all_locks().
 	 */
+	if (mmu_notifier_mm)
+		mm->mmu_notifier_mm = mmu_notifier_mm;
+
 	spin_lock(&mm->mmu_notifier_mm->lock);
 	hlist_add_head_rcu(&mn->hlist, &mm->mmu_notifier_mm->list);
 	spin_unlock(&mm->mmu_notifier_mm->lock);
 
 	mm_drop_all_locks(mm);
+	BUG_ON(atomic_read(&mm->mm_users) <= 0);
+	return 0;
+
 out_clean:
 	kfree(mmu_notifier_mm);
-	BUG_ON(atomic_read(&mm->mm_users) <= 0);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(__mmu_notifier_register);
--
2.22.0