Subject: [PATCH v5 3/4] kernfs: Introduce interface to access global kernfs_open_file_mutex.
This allows changing the underlying mutex locking without needing to change
the users of the lock. For example, the next patch modifies this interface to
use hashed mutexes in place of the single global kernfs_open_file_mutex, as
sketched below.

Signed-off-by: Imran Khan <imran.f.khan@oracle.com>
Acked-by: Tejun Heo <tj@kernel.org>
---
fs/kernfs/file.c | 60 ++++++++++++++++++++++++++++++++----------------
1 file changed, 40 insertions(+), 20 deletions(-)

diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index 0cc288dcf51d9..205b5c71f30cb 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -49,6 +49,22 @@ struct kernfs_open_node {

static LLIST_HEAD(kernfs_notify_list);

+static inline struct mutex *kernfs_open_file_mutex_ptr(struct kernfs_node *kn)
+{
+ return &kernfs_open_file_mutex;
+}
+
+static inline struct mutex *kernfs_open_file_mutex_lock(struct kernfs_node *kn)
+{
+ struct mutex *lock;
+
+ lock = kernfs_open_file_mutex_ptr(kn);
+
+ mutex_lock(lock);
+
+ return lock;
+}
+
/**
* kernfs_deref_open_node - Get kernfs_open_node corresponding to @kn.
*
@@ -56,7 +72,7 @@ static LLIST_HEAD(kernfs_notify_list);
* @kn: target kernfs_node.
*
* Fetch and return ->attr.open of @kn if @of->list is not empty or if
- * kernfs_open_file_mutex is held.
+ * kernfs_open_file_mutex_ptr(kn) is held.
*
* We can rely on either of the following 2 conditions to ensure that
* dereferencing ->attr.open outside RCU read-side critical section is safe.
@@ -68,13 +84,13 @@ static LLIST_HEAD(kernfs_notify_list);
*
* or
*
- * 2. Update of ->attr.open happens under kernfs_open_file_mutex. So as long as
- * the current updater (caller) is holding this mutex, other updaters will not
- * be able to change ->attr.open and this means that we can safely deref
- * ->attr.open outside RCU read-side critical section.
+ * 2. Update of ->attr.open happens under kernfs_open_file_mutex_ptr(kn). So as
+ * long as the current updater (caller) holds this mutex, other updaters can't
+ * change ->attr.open and this means that we can safely deref ->attr.open
+ * outside RCU read-side critical section.
*
* The caller needs to make sure that either @of->list is not empty or
- * kernfs_open_file_mutex is held.
+ * kernfs_open_file_mutex_ptr(kn) is held.
*/
static struct kernfs_open_node *
kernfs_deref_open_node(struct kernfs_open_file *of, struct kernfs_node *kn)
@@ -83,7 +99,7 @@ kernfs_deref_open_node(struct kernfs_open_file *of, struct kernfs_node *kn)
bool deref_ok;

deref_ok = (of ? !list_empty(&of->list) : false) ||
- lockdep_is_held(&kernfs_open_file_mutex);
+ lockdep_is_held(kernfs_open_file_mutex_ptr(kn));

on = rcu_dereference_check(kn->attr.open, deref_ok);

@@ -570,19 +586,20 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
struct kernfs_open_file *of)
{
struct kernfs_open_node *on, *new_on = NULL;
+ struct mutex *mutex = NULL;

- mutex_lock(&kernfs_open_file_mutex);
+ mutex = kernfs_open_file_mutex_lock(kn);
on = kernfs_deref_open_node(NULL, kn);

if (on) {
list_add_tail(&of->list, &on->files);
- mutex_unlock(&kernfs_open_file_mutex);
+ mutex_unlock(mutex);
return 0;
} else {
/* not there, initialize a new one */
new_on = kmalloc(sizeof(*new_on), GFP_KERNEL);
if (!new_on) {
- mutex_unlock(&kernfs_open_file_mutex);
+ mutex_unlock(mutex);
return -ENOMEM;
}
atomic_set(&new_on->event, 1);
@@ -591,7 +608,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
list_add_tail(&of->list, &new_on->files);
rcu_assign_pointer(kn->attr.open, new_on);
}
- mutex_unlock(&kernfs_open_file_mutex);
+ mutex_unlock(mutex);

return 0;
}
@@ -613,12 +630,13 @@ static void kernfs_unlink_open_file(struct kernfs_node *kn,
struct kernfs_open_file *of)
{
struct kernfs_open_node *on;
+ struct mutex *mutex = NULL;

- mutex_lock(&kernfs_open_file_mutex);
+ mutex = kernfs_open_file_mutex_lock(kn);

on = kernfs_deref_open_node(NULL, kn);
if (!on) {
- mutex_unlock(&kernfs_open_file_mutex);
+ mutex_unlock(mutex);
return;
}

@@ -630,7 +648,7 @@ static void kernfs_unlink_open_file(struct kernfs_node *kn,
kfree_rcu(on, rcu_head);
}

- mutex_unlock(&kernfs_open_file_mutex);
+ mutex_unlock(mutex);
}

static int kernfs_fop_open(struct inode *inode, struct file *file)
@@ -772,7 +790,7 @@ static void kernfs_release_file(struct kernfs_node *kn,
* here because drain path may be called from places which can
* cause circular dependency.
*/
- lockdep_assert_held(&kernfs_open_file_mutex);
+ lockdep_assert_held(kernfs_open_file_mutex_ptr(kn));

if (!of->released) {
/*
@@ -789,11 +807,12 @@ static int kernfs_fop_release(struct inode *inode, struct file *filp)
{
struct kernfs_node *kn = inode->i_private;
struct kernfs_open_file *of = kernfs_of(filp);
+ struct mutex *mutex = NULL;

if (kn->flags & KERNFS_HAS_RELEASE) {
- mutex_lock(&kernfs_open_file_mutex);
+ mutex = kernfs_open_file_mutex_lock(kn);
kernfs_release_file(kn, of);
- mutex_unlock(&kernfs_open_file_mutex);
+ mutex_unlock(mutex);
}

kernfs_unlink_open_file(kn, of);
@@ -808,6 +827,7 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
{
struct kernfs_open_node *on;
struct kernfs_open_file *of;
+ struct mutex *mutex = NULL;

if (!(kn->flags & (KERNFS_HAS_MMAP | KERNFS_HAS_RELEASE)))
return;
@@ -823,10 +843,10 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
if (!rcu_access_pointer(kn->attr.open))
return;

- mutex_lock(&kernfs_open_file_mutex);
+ mutex = kernfs_open_file_mutex_lock(kn);
on = kernfs_deref_open_node(NULL, kn);
if (!on) {
- mutex_unlock(&kernfs_open_file_mutex);
+ mutex_unlock(mutex);
return;
}

@@ -840,7 +860,7 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
kernfs_release_file(kn, of);
}

- mutex_unlock(&kernfs_open_file_mutex);
+ mutex_unlock(mutex);
}

/*
--
2.30.2