From: Daniel Jordan <daniel.m.jordan@oracle.com>
Subject: [PATCH 3/6] vfio/spapr_tce: drop mmap_sem now that locked_vm is atomic
Date: Tue, 2 Apr 2019
With locked_vm now an atomic, there is no need to take mmap_sem as
writer. Delete and refactor accordingly.
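
The pattern both helpers move to is a speculative charge on the atomic
counter, undone if the limit would be exceeded.  A minimal sketch,
assuming mm->locked_vm is already an atomic64_t (patch 1/6 of this
series); the helper name and locals here are illustrative only, the
real code is in the diff below:

	/*
	 * Sketch only: charge npages up front, then check the limit and
	 * back the charge out on failure.  No mmap_sem is taken; the
	 * atomic64_add_return() is the only synchronization needed.
	 */
	static long try_charge_locked_vm(struct mm_struct *mm, long npages)
	{
		unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
		s64 locked = atomic64_add_return(npages, &mm->locked_vm);

		if (locked > limit && !capable(CAP_IPC_LOCK)) {
			atomic64_sub(npages, &mm->locked_vm);
			return -ENOMEM;
		}
		return 0;
	}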

Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Alexey Kardashevskiy <aik@ozlabs.ru>
Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: <linux-mm@kvack.org>
Cc: <kvm@vger.kernel.org>
Cc: <linux-kernel@vger.kernel.org>
---
drivers/vfio/vfio_iommu_spapr_tce.c | 36 ++++++++++++-----------------
1 file changed, 15 insertions(+), 21 deletions(-)

diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index e7d787e5d839..7675a3b28410 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -36,8 +36,9 @@ static void tce_iommu_detach_group(void *iommu_data,
 
 static long try_increment_locked_vm(struct mm_struct *mm, long npages)
 {
-	long ret = 0, lock_limit;
+	long ret = 0;
 	s64 locked;
+	unsigned long lock_limit;
 
 	if (WARN_ON_ONCE(!mm))
 		return -EPERM;
@@ -45,39 +46,32 @@ static long try_increment_locked_vm(struct mm_struct *mm, long npages)
 	if (!npages)
 		return 0;
 
-	down_write(&mm->mmap_sem);
-	locked = atomic64_read(&mm->locked_vm) + npages;
+	locked = atomic64_add_return(npages, &mm->locked_vm);
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-	if (locked > lock_limit && !capable(CAP_IPC_LOCK))
+	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
 		ret = -ENOMEM;
-	else
-		atomic64_add(npages, &mm->locked_vm);
-
-	pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
-			npages << PAGE_SHIFT,
-			atomic64_read(&mm->locked_vm) << PAGE_SHIFT,
-			rlimit(RLIMIT_MEMLOCK),
-			ret ? " - exceeded" : "");
+		atomic64_sub(npages, &mm->locked_vm);
+	}
 
-	up_write(&mm->mmap_sem);
+	pr_debug("[%d] RLIMIT_MEMLOCK +%ld %lld/%lu%s\n", current->pid,
+			npages << PAGE_SHIFT, locked << PAGE_SHIFT,
+			lock_limit << PAGE_SHIFT, ret ? " - exceeded" : "");

 	return ret;
 }
 
 static void decrement_locked_vm(struct mm_struct *mm, long npages)
 {
+	s64 locked;
+
 	if (!mm || !npages)
 		return;
 
-	down_write(&mm->mmap_sem);
-	if (WARN_ON_ONCE(npages > atomic64_read(&mm->locked_vm)))
-		npages = atomic64_read(&mm->locked_vm);
-	atomic64_sub(npages, &mm->locked_vm);
-	pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
-			npages << PAGE_SHIFT,
-			atomic64_read(&mm->locked_vm) << PAGE_SHIFT,
+	locked = atomic64_sub_return(npages, &mm->locked_vm);
+	WARN_ON_ONCE(locked < 0);
+	pr_debug("[%d] RLIMIT_MEMLOCK -%ld %lld/%lu\n", current->pid,
+			npages << PAGE_SHIFT, locked << PAGE_SHIFT,
 			rlimit(RLIMIT_MEMLOCK));
-	up_write(&mm->mmap_sem);
 }
 
 /*
--
2.21.0