Date: Tue, 30 Jul 2019
From: Thomas Gleixner <tglx@linutronix.de>
Subject: [patch 3/4] fs/buffer: Substitute BH_Uptodate_Lock for RT and bit spinlock debugging
Bit spinlocks are problematic if PREEMPT_RT is enabled. They disable
preemption, which is undesired for latency reasons, and they break when a
regular spinlock is taken within the bit_spinlock locked region, because
regular spinlocks are converted to 'sleeping spinlocks' on RT and sleeping
is not allowed with preemption disabled.
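
For illustration, a minimal sketch of the pattern that breaks on RT. The
bit_spin_lock()/spin_lock() calls are the real kernel APIs; the function
and 'example_lock' are hypothetical:

#include <linux/bit_spinlock.h>
#include <linux/buffer_head.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void example(struct buffer_head *bh)
{
        /* Implicitly disables preemption */
        bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);

        /*
         * On PREEMPT_RT a regular spinlock is a sleeping lock (rtmutex),
         * so this can sleep while preemption is disabled: a bug on RT.
         */
        spin_lock(&example_lock);
        spin_unlock(&example_lock);

        bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
}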

Substitute the BH_Uptodate_Lock bit spinlock with a regular spinlock for
PREEMPT_RT enabled kernels.
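
The substitution is transparent to callers; a minimal usage sketch (the
function is hypothetical, the helpers are the ones added by this patch):

static void example_update(struct buffer_head *bh)
{
        unsigned long flags;

        flags = bh_uptodate_lock_irqsave(bh);
        /* b_state manipulation serialized against other CPUs */
        bh_uptodate_unlock_irqrestore(bh, flags);
}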

Bit spinlocks are also not covered by lock debugging, e.g. lockdep. With
the spinlock substitution in place, they can be exposed via
CONFIG_DEBUG_BIT_SPINLOCKS.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: linux-fsdevel@vger.kernel.org
---
fs/buffer.c | 1 +
include/linux/buffer_head.h | 31 +++++++++++++++++++++++++++++++
2 files changed, 32 insertions(+)

--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3360,6 +3360,7 @@ struct buffer_head *alloc_buffer_head(gf
 	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
 	if (ret) {
 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
+		buffer_head_init_locks(ret);
 		preempt_disable();
 		__this_cpu_inc(bh_accounting.nr);
 		recalc_bh_state();
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -76,8 +76,35 @@ struct buffer_head {
 	struct address_space *b_assoc_map;	/* mapping this buffer is
 						   associated with */
 	atomic_t b_count;		/* users using this buffer_head */
+
+#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_DEBUG_BIT_SPINLOCKS)
+	spinlock_t b_uptodate_lock;
+#endif
 };
 
+#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_DEBUG_BIT_SPINLOCKS)
+
+static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&bh->b_uptodate_lock, flags);
+	return flags;
+}
+
+static inline void
+bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags)
+{
+	spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
+}
+
+static inline void buffer_head_init_locks(struct buffer_head *bh)
+{
+	spin_lock_init(&bh->b_uptodate_lock);
+}
+
+#else /* PREEMPT_RT || DEBUG_BIT_SPINLOCKS */
+
 static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
 {
 	unsigned long flags;
@@ -94,6 +121,10 @@ bh_uptodate_unlock_irqrestore(struct buf
 	local_irq_restore(flags);
 }
 
+static inline void buffer_head_init_locks(struct buffer_head *bh) { }
+
+#endif /* !PREEMPT_RT && !DEBUG_BIT_SPINLOCKS */
+
 /*
  * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
  * and buffer_foo() functions.
