Date: Fri, 30 Jul 2021
From: Hugh Dickins <hughd@google.com>
Subject: [PATCH 14/16] mm: user_shm_lock(,,getuc) and user_shm_unlock(,,putuc)

user_shm_lock() and user_shm_unlock() have to get and put a reference on
the ucounts structure, and the get can fail on refcount overflow. That
will be awkward for the next commit (shrinking ought not to fail), so add
a boolean argument to each (always true in this commit) to make that get
and put conditional. It would be even easier to do the put_ucounts()
separately when unlocking, but messy for the get_ucounts() when locking:
better to keep them symmetric.
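
For illustration only, here is a tiny standalone C model of the
conditional get/put that this patch introduces. It is not kernel code:
the ucounts refcount and the UCOUNT_RLIMIT_MEMLOCK charge are mocked
with plain counters, and the shmlock_user_lock spinlock is omitted, so
the sketch compiles and runs on its own.

/*
 * Standalone sketch of the getuc/putuc pattern; the real kernel
 * functions take a spinlock and use inc/dec_rlimit_ucounts().
 */
#include <stdbool.h>
#include <stdio.h>

struct ucounts {
	int refcount;		/* stands in for the real counted reference */
	long locked_pages;	/* stands in for UCOUNT_RLIMIT_MEMLOCK usage */
};

static bool get_ucounts(struct ucounts *uc)
{
	uc->refcount++;		/* the real get_ucounts() can fail on overflow */
	return true;
}

static void put_ucounts(struct ucounts *uc)
{
	uc->refcount--;
}

/* getuc: take a reference only when establishing a new locked charge */
static bool user_shm_lock(long pages, struct ucounts *uc, bool getuc)
{
	uc->locked_pages += pages;		/* charge first, as the kernel does */
	if (getuc && !get_ucounts(uc)) {
		uc->locked_pages -= pages;	/* roll the charge back on failure */
		return false;
	}
	return true;
}

/* putuc: drop the reference only when the locked charge goes away */
static void user_shm_unlock(long pages, struct ucounts *uc, bool putuc)
{
	uc->locked_pages -= pages;
	if (putuc)
		put_ucounts(uc);
}

int main(void)
{
	struct ucounts uc = { .refcount = 1, .locked_pages = 0 };

	/* every caller converted by this patch passes true: no behaviour change */
	if (user_shm_lock(4, &uc, true))
		user_shm_unlock(4, &uc, true);

	printf("refcount=%d locked_pages=%ld\n", uc.refcount, uc.locked_pages);
	return 0;
}

Every call site converted here pairs the get with the put exactly as
before, while a future caller that only grows or shrinks an existing
charge can pass false and never risk a refcount failure.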

Signed-off-by: Hugh Dickins <hughd@google.com>
---
 fs/hugetlbfs/inode.c | 4 ++--
 include/linux/mm.h   | 4 ++--
 ipc/shm.c            | 4 ++--
 mm/mlock.c           | 9 +++++----
 mm/shmem.c           | 6 +++---
 5 files changed, 14 insertions(+), 13 deletions(-)

diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index cdfb1ae78a3f..381902288f4d 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -1465,7 +1465,7 @@ struct file *hugetlb_file_setup(const char *name, size_t size,
 
 	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
 		*ucounts = current_ucounts();
-		if (user_shm_lock(size, *ucounts)) {
+		if (user_shm_lock(size, *ucounts, true)) {
 			task_lock(current);
 			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
 				current->comm, current->pid);
@@ -1499,7 +1499,7 @@ struct file *hugetlb_file_setup(const char *name, size_t size,
 	iput(inode);
 out:
 	if (*ucounts) {
-		user_shm_unlock(size, *ucounts);
+		user_shm_unlock(size, *ucounts, true);
 		*ucounts = NULL;
 	}
 	return file;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f1be2221512b..43cb5a6f97ff 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1713,8 +1713,8 @@ extern bool can_do_mlock(void);
 #else
 static inline bool can_do_mlock(void) { return false; }
 #endif
-extern bool user_shm_lock(loff_t size, struct ucounts *ucounts);
-extern void user_shm_unlock(loff_t size, struct ucounts *ucounts);
+extern bool user_shm_lock(loff_t size, struct ucounts *ucounts, bool getuc);
+extern void user_shm_unlock(loff_t size, struct ucounts *ucounts, bool putuc);
 
 /*
  * Parameter block passed down to zap_pte_range in exceptional cases.
diff --git a/ipc/shm.c b/ipc/shm.c
index 748933e376ca..3e63809d38b7 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -289,7 +289,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
 		shmem_lock(shm_file, 0, shp->mlock_ucounts);
 	else if (shp->mlock_ucounts)
 		user_shm_unlock(i_size_read(file_inode(shm_file)),
-				shp->mlock_ucounts);
+				shp->mlock_ucounts, true);
 	fput(shm_file);
 	ipc_update_pid(&shp->shm_cprid, NULL);
 	ipc_update_pid(&shp->shm_lprid, NULL);
@@ -699,7 +699,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
 	ipc_update_pid(&shp->shm_cprid, NULL);
 	ipc_update_pid(&shp->shm_lprid, NULL);
 	if (is_file_hugepages(file) && shp->mlock_ucounts)
-		user_shm_unlock(size, shp->mlock_ucounts);
+		user_shm_unlock(size, shp->mlock_ucounts, true);
 	fput(file);
 	ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
 	return error;
diff --git a/mm/mlock.c b/mm/mlock.c
index 7df88fce0fc9..5afa3eba9a13 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -818,7 +818,7 @@ SYSCALL_DEFINE0(munlockall)
  */
 static DEFINE_SPINLOCK(shmlock_user_lock);
 
-bool user_shm_lock(loff_t size, struct ucounts *ucounts)
+bool user_shm_lock(loff_t size, struct ucounts *ucounts, bool getuc)
 {
 	unsigned long lock_limit, locked;
 	long memlock;
@@ -836,7 +836,7 @@ bool user_shm_lock(loff_t size, struct ucounts *ucounts)
 		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
 		goto out;
 	}
-	if (!get_ucounts(ucounts)) {
+	if (getuc && !get_ucounts(ucounts)) {
 		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
 		goto out;
 	}
@@ -846,10 +846,11 @@ bool user_shm_lock(loff_t size, struct ucounts *ucounts)
 	return allowed;
 }
 
-void user_shm_unlock(loff_t size, struct ucounts *ucounts)
+void user_shm_unlock(loff_t size, struct ucounts *ucounts, bool putuc)
 {
 	spin_lock(&shmlock_user_lock);
 	dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
 	spin_unlock(&shmlock_user_lock);
-	put_ucounts(ucounts);
+	if (putuc)
+		put_ucounts(ucounts);
 }
diff --git a/mm/shmem.c b/mm/shmem.c
index 35c0f5c7120e..1ddb910e976c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1163,7 +1163,7 @@ static void shmem_evict_inode(struct inode *inode)
 
 	if (shmem_mapping(inode->i_mapping)) {
 		if (info->mlock_ucounts) {
-			user_shm_unlock(inode->i_size, info->mlock_ucounts);
+			user_shm_unlock(inode->i_size, info->mlock_ucounts, true);
 			info->mlock_ucounts = NULL;
 		}
 		shmem_unacct_size(info->flags, inode->i_size);
@@ -2276,13 +2276,13 @@ int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
 	 * no serialization needed when called from shm_destroy().
 	 */
 	if (lock && !(info->flags & VM_LOCKED)) {
-		if (!user_shm_lock(inode->i_size, ucounts))
+		if (!user_shm_lock(inode->i_size, ucounts, true))
 			goto out_nomem;
 		info->flags |= VM_LOCKED;
 		mapping_set_unevictable(file->f_mapping);
 	}
 	if (!lock && (info->flags & VM_LOCKED) && ucounts) {
-		user_shm_unlock(inode->i_size, ucounts);
+		user_shm_unlock(inode->i_size, ucounts, true);
 		info->flags &= ~VM_LOCKED;
 		mapping_clear_unevictable(file->f_mapping);
 	}
--
2.26.2