From: "Uladzislau Rezki (Sony)" <urezki@gmail.com>
Subject: [PATCH 1/1] rcu/tree: Drop the lock before entering to page allocator
Date: 2020-07-15
If the kernel is built with the CONFIG_PROVE_RAW_LOCK_NESTING
option, lockdep complains about a violation of the lock nesting
rules. It performs the raw_spinlock vs. spinlock nesting checks,
i.e. it is not legal to acquire a spinlock_t, which becomes a
sleeping lock on PREEMPT_RT, while holding a raw_spinlock_t.
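
As a minimal sketch of the nesting this option flags (the locks
below are made up for the example, they are not taken from the
kernel):

static DEFINE_RAW_SPINLOCK(outer_raw_lock);
static DEFINE_SPINLOCK(inner_lock);

static void bad_nesting(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&outer_raw_lock, flags);

	/*
	 * Invalid: on PREEMPT_RT a spinlock_t is a sleeping lock,
	 * so acquiring it under a raw_spinlock_t violates the
	 * nesting rules and produces a lockdep splat.
	 */
	spin_lock(&inner_lock);
	spin_unlock(&inner_lock);

	raw_spin_unlock_irqrestore(&outer_raw_lock, flags);
}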

Internally kfree_rcu() uses a raw_spinlock_t, whereas the page
allocator internally takes a spinlock_t to access its zones.

In order to prevent the violation in question, we can drop the
internal raw_spinlock_t before entering the page allocator.
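
Schematically the fix looks as follows; preemption is disabled
across the unlocked section so the task cannot migrate to another
CPU while its per-CPU krc lock is dropped (a simplified sketch of
the pattern, not the exact patch code):

	preempt_disable();
	krc_this_cpu_unlock(*krcp, *flags);	/* drop the raw_spinlock_t */

	/* The page allocator may now take its spinlock_t internally. */
	bnode = (struct kvfree_rcu_bulk_data *)
		__get_free_page(GFP_NOWAIT | __GFP_NOWARN);

	*krcp = krc_this_cpu_lock(flags);	/* re-take the raw lock */
	preempt_enable();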

Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
---
kernel/rcu/tree.c | 36 +++++++++++++++++++-----------------
1 file changed, 19 insertions(+), 17 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 21c2fa5bd8c3..7469bd1e5c2c 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3278,21 +3278,22 @@ static void kfree_rcu_monitor(struct work_struct *work)
 }
 
 static inline bool
-kvfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp, void *ptr)
+add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
+	unsigned long *flags, void *ptr)
 {
 	struct kvfree_rcu_bulk_data *bnode;
 	int idx;
 
-	if (unlikely(!krcp->initialized))
+	*krcp = krc_this_cpu_lock(flags);
+	if (unlikely(!(*krcp)->initialized))
 		return false;
 
-	lockdep_assert_held(&krcp->lock);
 	idx = !!is_vmalloc_addr(ptr);
 
 	/* Check if a new block is required. */
-	if (!krcp->bkvhead[idx] ||
-			krcp->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
-		bnode = get_cached_bnode(krcp);
+	if (!(*krcp)->bkvhead[idx] ||
+			(*krcp)->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
+		bnode = get_cached_bnode(*krcp);
 		if (!bnode) {
 			/*
 			 * To keep this path working on raw non-preemptible
@@ -3306,6 +3307,9 @@ kvfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp, void *ptr)
 			if (IS_ENABLED(CONFIG_PREEMPT_RT))
 				return false;
 
+			preempt_disable();
+			krc_this_cpu_unlock(*krcp, *flags);
+
 			/*
 			 * NOTE: For one argument of kvfree_rcu() we can
 			 * drop the lock and get the page in sleepable
@@ -3315,6 +3319,9 @@ kvfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp, void *ptr)
 			 */
 			bnode = (struct kvfree_rcu_bulk_data *)
 				__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
+
+			*krcp = krc_this_cpu_lock(flags);
+			preempt_enable();
 		}
 
 		/* Switch to emergency path. */
@@ -3323,15 +3330,15 @@ kvfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp, void *ptr)
 
 		/* Initialize the new block. */
 		bnode->nr_records = 0;
-		bnode->next = krcp->bkvhead[idx];
+		bnode->next = (*krcp)->bkvhead[idx];
 
 		/* Attach it to the head. */
-		krcp->bkvhead[idx] = bnode;
+		(*krcp)->bkvhead[idx] = bnode;
 	}
 
 	/* Finally insert. */
-	krcp->bkvhead[idx]->records
-		[krcp->bkvhead[idx]->nr_records++] = ptr;
+	(*krcp)->bkvhead[idx]->records
+		[(*krcp)->bkvhead[idx]->nr_records++] = ptr;
 
 	return true;
 }
@@ -3369,24 +3376,19 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 		ptr = (unsigned long *) func;
 	}
 
-	krcp = krc_this_cpu_lock(&flags);
-
 	// Queue the object but don't yet schedule the batch.
 	if (debug_rcu_head_queue(ptr)) {
 		// Probable double kfree_rcu(), just leak.
 		WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
 			  __func__, head);
-
-		// Mark as success and leave.
-		success = true;
-		goto unlock_return;
+		return;
 	}
 
 	/*
 	 * Under high memory pressure GFP_NOWAIT can fail,
 	 * in that case the emergency path is maintained.
 	 */
-	success = kvfree_call_rcu_add_ptr_to_bulk(krcp, ptr);
+	success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr);
 	if (!success) {
 		if (head == NULL)
 			// Inline if kvfree_rcu(one_arg) call.
--
2.20.1