From: Dawei Li <set_pte_at@outlook.com>
Subject: [PATCH 3/6] ksmbd: replace rwlock with rcu for concurrent access on conn list
Date: 2023-01-15
Currently, concurrent access to the conn list is protected by an
rwlock. Replace it with RCU primitives, which let readers traverse
the list without taking any lock and thus scale better.

Signed-off-by: Dawei Li <set_pte_at@outlook.com>
---
 fs/ksmbd/connection.c | 30 +++++++++++++++---------------
 fs/ksmbd/connection.h |  1 -
 fs/ksmbd/smb2pdu.c    | 14 +++++++-------
 3 files changed, 22 insertions(+), 23 deletions(-)
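
For reference, the general shape of this rwlock-to-RCU conversion is
sketched below: writers still serialize against each other on a plain
spinlock and publish/unpublish entries with the _rcu list helpers,
while readers take only an RCU read-side critical section. This is a
minimal illustration with made-up names (struct obj, obj_add, etc.),
not ksmbd code; note in particular that an unlinked entry may only be
freed after a grace period has elapsed (synchronize_rcu() or
kfree_rcu()).

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static LIST_HEAD(obj_list);
static DEFINE_SPINLOCK(obj_list_lock);	/* serializes writers only */

struct obj {
	struct list_head node;
	int key;
};

/* Writer: exclude other writers with the spinlock; list_add_rcu()
 * publishes the entry so lockless readers see it fully initialized. */
static void obj_add(struct obj *o)
{
	spin_lock(&obj_list_lock);
	list_add_rcu(&o->node, &obj_list);
	spin_unlock(&obj_list_lock);
}

/* Writer: unlink under the lock, then wait out a grace period before
 * freeing so that in-flight readers never touch freed memory. */
static void obj_del(struct obj *o)
{
	spin_lock(&obj_list_lock);
	list_del_rcu(&o->node);
	spin_unlock(&obj_list_lock);
	synchronize_rcu();
	kfree(o);
}

/* Reader: no lock at all, just rcu_read_lock() around the traversal. */
static bool obj_exists(int key)
{
	struct obj *o;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(o, &obj_list, node) {
		if (o->key == key) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

The win over an rwlock is that rcu_read_lock() is nearly free on most
configurations and never blocks writers, at the cost of deferred
freeing and of readers possibly observing a list that is mid-update.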

diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
index fd0a288af299..36d1da273edd 100644
--- a/fs/ksmbd/connection.c
+++ b/fs/ksmbd/connection.c
@@ -20,7 +20,7 @@ static DEFINE_MUTEX(init_lock);
static struct ksmbd_conn_ops default_conn_ops;

LIST_HEAD(conn_list);
-DEFINE_RWLOCK(conn_list_lock);
+DEFINE_SPINLOCK(conn_list_lock);

/**
* ksmbd_conn_free() - free resources of the connection instance
@@ -32,9 +32,9 @@ DEFINE_RWLOCK(conn_list_lock);
*/
void ksmbd_conn_free(struct ksmbd_conn *conn)
{
- write_lock(&conn_list_lock);
- list_del(&conn->conns_list);
- write_unlock(&conn_list_lock);
+ spin_lock(&conn_list_lock);
+ list_del_rcu(&conn->conns_list);
+ spin_unlock(&conn_list_lock);

xa_destroy(&conn->sessions);
kvfree(conn->request_buf);
@@ -84,9 +84,9 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
spin_lock_init(&conn->llist_lock);
INIT_LIST_HEAD(&conn->lock_list);

- write_lock(&conn_list_lock);
- list_add(&conn->conns_list, &conn_list);
- write_unlock(&conn_list_lock);
+ spin_lock(&conn_list_lock);
+ list_add_rcu(&conn->conns_list, &conn_list);
+ spin_unlock(&conn_list_lock);
return conn;
}

@@ -95,15 +95,15 @@ bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c)
struct ksmbd_conn *t;
bool ret = false;

- read_lock(&conn_list_lock);
- list_for_each_entry(t, &conn_list, conns_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(t, &conn_list, conns_list) {
if (memcmp(t->ClientGUID, c->ClientGUID, SMB2_CLIENT_GUID_SIZE))
continue;

ret = true;
break;
}
- read_unlock(&conn_list_lock);
+ rcu_read_unlock();
return ret;
}

@@ -402,8 +402,8 @@ static void stop_sessions(void)
struct ksmbd_transport *t;

again:
- read_lock(&conn_list_lock);
- list_for_each_entry(conn, &conn_list, conns_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(conn, &conn_list, conns_list) {
struct task_struct *task;

t = conn->transport;
@@ -413,12 +413,12 @@ static void stop_sessions(void)
task->comm, task_pid_nr(task));
conn->status = KSMBD_SESS_EXITING;
if (t->ops->shutdown) {
- read_unlock(&conn_list_lock);
+ rcu_read_unlock();
t->ops->shutdown(t);
- read_lock(&conn_list_lock);
+ rcu_read_lock();
}
}
- read_unlock(&conn_list_lock);
+ rcu_read_unlock();

if (!list_empty(&conn_list)) {
schedule_timeout_interruptible(HZ / 10); /* 100ms */
diff --git a/fs/ksmbd/connection.h b/fs/ksmbd/connection.h
index 3643354a3fa7..e41f33a2da7c 100644
--- a/fs/ksmbd/connection.h
+++ b/fs/ksmbd/connection.h
@@ -139,7 +139,6 @@ struct ksmbd_transport {
#define KSMBD_TCP_PEER_SOCKADDR(c) ((struct sockaddr *)&((c)->peer_addr))

extern struct list_head conn_list;
-extern rwlock_t conn_list_lock;

bool ksmbd_conn_alive(struct ksmbd_conn *conn);
void ksmbd_conn_wait_idle(struct ksmbd_conn *conn);
diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
index 771e045c8d4a..c60cfb360f42 100644
--- a/fs/ksmbd/smb2pdu.c
+++ b/fs/ksmbd/smb2pdu.c
@@ -6915,8 +6915,8 @@ int smb2_lock(struct ksmbd_work *work)

nolock = 1;
/* check locks in connection list */
- read_lock(&conn_list_lock);
- list_for_each_entry(conn, &conn_list, conns_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(conn, &conn_list, conns_list) {
spin_lock(&conn->llist_lock);
list_for_each_entry_safe(cmp_lock, tmp2, &conn->lock_list, clist) {
if (file_inode(cmp_lock->fl->fl_file) !=
@@ -6932,7 +6932,7 @@ int smb2_lock(struct ksmbd_work *work)
list_del(&cmp_lock->flist);
list_del(&cmp_lock->clist);
spin_unlock(&conn->llist_lock);
- read_unlock(&conn_list_lock);
+ rcu_read_unlock();

locks_free_lock(cmp_lock->fl);
kfree(cmp_lock);
@@ -6954,7 +6954,7 @@ int smb2_lock(struct ksmbd_work *work)
cmp_lock->start > smb_lock->start &&
cmp_lock->start < smb_lock->end) {
spin_unlock(&conn->llist_lock);
- read_unlock(&conn_list_lock);
+ rcu_read_unlock();
pr_err("previous lock conflict with zero byte lock range\n");
goto out;
}
@@ -6963,7 +6963,7 @@ int smb2_lock(struct ksmbd_work *work)
smb_lock->start > cmp_lock->start &&
smb_lock->start < cmp_lock->end) {
spin_unlock(&conn->llist_lock);
- read_unlock(&conn_list_lock);
+ rcu_read_unlock();
pr_err("current lock conflict with zero byte lock range\n");
goto out;
}
@@ -6974,14 +6974,14 @@ int smb2_lock(struct ksmbd_work *work)
cmp_lock->end >= smb_lock->end)) &&
!cmp_lock->zero_len && !smb_lock->zero_len) {
spin_unlock(&conn->llist_lock);
- read_unlock(&conn_list_lock);
+ rcu_read_unlock();
pr_err("Not allow lock operation on exclusive lock range\n");
goto out;
}
}
spin_unlock(&conn->llist_lock);
}
- read_unlock(&conn_list_lock);
+ rcu_read_unlock();
out_check_cl:
if (smb_lock->fl->fl_type == F_UNLCK && nolock) {
pr_err("Try to unlock nolocked range\n");
--
2.25.1