Subject: Re: [PATCH 2/8] radix-tree: Use local_lock for protection

On Tue, May 19, 2020 at 10:19:06PM +0200, Sebastian Andrzej Siewior wrote:
> @@ -64,6 +64,7 @@ struct radix_tree_preload {
>  	struct radix_tree_node *nodes;
>  };
>  static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
> +static DEFINE_LOCAL_LOCK(radix_tree_preloads_lock);
> 
>  static inline struct radix_tree_node *entry_to_node(void *ptr)
>  {

So I'm all with Andrew on the naming and pass-by-pointer thing, but
also, the above is pretty crap. You want the local_lock to be in the
structure you're actually protecting, and there really isn't anything
stopping you from doing that.
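
That is, in miniature (a made-up per-cpu counter, written against the
pass-by-pointer macros in the patch below; foo/foos/foo_inc are
hypothetical names):

	struct foo {
		struct local_lock lock;	/* protects the fields below */
		unsigned long count;
	};
	static DEFINE_PER_CPU(struct foo, foos) = {
		.lock = INIT_LOCAL_LOCK(lock),
	};

	static void foo_inc(void)
	{
		local_lock(&foos.lock);		/* pin the CPU, acquire the lock */
		__this_cpu_inc(foos.count);
		local_unlock(&foos.lock);
	}

The lock then lives right next to the data it serializes, like every
other lock type we have.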

The patch below builds just fine and is ever so much more sensible.

--- a/include/linux/locallock_internal.h
+++ b/include/linux/locallock_internal.h
@@ -19,7 +19,7 @@ struct local_lock {
 # define LL_DEP_MAP_INIT(lockname)
 #endif
 
-#define INIT_LOCAL_LOCK(lockname)	LL_DEP_MAP_INIT(lockname)
+#define INIT_LOCAL_LOCK(lockname)	{ LL_DEP_MAP_INIT(lockname) }
 
 #define local_lock_lockdep_init(lock)				\
 	do {							\
@@ -63,35 +63,35 @@ static inline void local_lock_release(st
 #define __local_lock(lock)					\
 	do {							\
 		preempt_disable();				\
-		local_lock_acquire(this_cpu_ptr(&(lock)));	\
+		local_lock_acquire(this_cpu_ptr(lock));		\
 	} while (0)
 
 #define __local_lock_irq(lock)					\
 	do {							\
 		local_irq_disable();				\
-		local_lock_acquire(this_cpu_ptr(&(lock)));	\
+		local_lock_acquire(this_cpu_ptr(lock));		\
 	} while (0)
 
 #define __local_lock_irqsave(lock, flags)			\
 	do {							\
 		local_irq_save(flags);				\
-		local_lock_acquire(this_cpu_ptr(&(lock)));	\
+		local_lock_acquire(this_cpu_ptr(lock));		\
 	} while (0)
 
 #define __local_unlock(lock)					\
 	do {							\
-		local_lock_release(this_cpu_ptr(&lock));	\
+		local_lock_release(this_cpu_ptr(lock));		\
 		preempt_enable();				\
 	} while (0)
 
 #define __local_unlock_irq(lock)				\
 	do {							\
-		local_lock_release(this_cpu_ptr(&lock));	\
+		local_lock_release(this_cpu_ptr(lock));		\
 		local_irq_enable();				\
 	} while (0)
 
 #define __local_unlock_irqrestore(lock, flags)			\
 	do {							\
-		local_lock_release(this_cpu_ptr(&lock));	\
+		local_lock_release(this_cpu_ptr(lock));		\
 		local_irq_restore(flags);			\
 	} while (0)
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -59,12 +59,14 @@ struct kmem_cache *radix_tree_node_cache
  * Per-cpu pool of preloaded nodes
  */
 struct radix_tree_preload {
+	struct local_lock lock;
 	unsigned nr;
 	/* nodes->parent points to next preallocated node */
 	struct radix_tree_node *nodes;
 };
-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
-static DEFINE_LOCAL_LOCK(radix_tree_preloads_lock);
+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) =
+	{ .lock = INIT_LOCAL_LOCK(lock),
+	  .nr = 0, };
 
 static inline struct radix_tree_node *entry_to_node(void *ptr)
 {
@@ -333,14 +335,14 @@ static __must_check int __radix_tree_pre
 	 */
 	gfp_mask &= ~__GFP_ACCOUNT;
 
-	local_lock(radix_tree_preloads_lock);
+	local_lock(&radix_tree_preloads.lock);
 	rtp = this_cpu_ptr(&radix_tree_preloads);
 	while (rtp->nr < nr) {
-		local_unlock(radix_tree_preloads_lock);
+		local_unlock(&radix_tree_preloads.lock);
 		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
 		if (node == NULL)
 			goto out;
-		local_lock(radix_tree_preloads_lock);
+		local_lock(&radix_tree_preloads.lock);
 		rtp = this_cpu_ptr(&radix_tree_preloads);
 		if (rtp->nr < nr) {
 			node->parent = rtp->nodes;
@@ -382,14 +384,14 @@ int radix_tree_maybe_preload(gfp_t gfp_m
 	if (gfpflags_allow_blocking(gfp_mask))
 		return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
 	/* Preloading doesn't help anything with this gfp mask, skip it */
-	local_lock(radix_tree_preloads_lock);
+	local_lock(&radix_tree_preloads.lock);
 	return 0;
 }
 EXPORT_SYMBOL(radix_tree_maybe_preload);
 
 void radix_tree_preload_end(void)
 {
-	local_unlock(radix_tree_preloads_lock);
+	local_unlock(&radix_tree_preloads.lock);
 }
 EXPORT_SYMBOL(radix_tree_preload_end);

@@ -1477,13 +1479,13 @@ EXPORT_SYMBOL(radix_tree_tagged);
 void idr_preload(gfp_t gfp_mask)
 {
 	if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
-		local_lock(radix_tree_preloads_lock);
+		local_lock(&radix_tree_preloads.lock);
 }
 EXPORT_SYMBOL(idr_preload);
 
 void idr_preload_end(void)
 {
-	local_unlock(radix_tree_preloads_lock);
+	local_unlock(&radix_tree_preloads.lock);
 }
 EXPORT_SYMBOL(idr_preload_end);
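
And note the callers don't change at all; a (hypothetical) insert path
still looks like:

	static int foo_insert(struct radix_tree_root *root, unsigned long index,
			      void *item)
	{
		int ret;

		ret = radix_tree_preload(GFP_KERNEL);
		if (ret)
			return ret;
		/* preload succeeded: radix_tree_preloads.lock is held on this CPU */
		ret = radix_tree_insert(root, index, item);
		radix_tree_preload_end();

		return ret;
	}

Only where the lock lives changes, not how the API is used.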
