From: Frederic Weisbecker <>
Subject: [PATCH 05/32] locking/lockdep: Prepare valid_state() to handle plain masks
Date: Tue, 12 Feb 2019 18:13:56 +0100
mark_lock_irq() is going to deal with lock usages that gather multiple softirq vectors at once. Therefore the validation through valid_state() will need to handle expanded usage masks.
So enhance valid_state() to take whole usage masks instead of single usage bits.
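
To make the new mask layout concrete, here is a minimal standalone sketch (userspace C, not the kernel code below; LOCK_FLAG(), expand_usage_mask() and the example values are illustrative placeholders) of the expansion that lock_usage_mask() performs in this patch: a softirq usage bit is replicated into one 4-bit usage group per softirq vector, which is why valid_state() has to compare whole masks.

/* Standalone sketch, not kernel code: mimics lock_usage_mask() below. */
#include <stdio.h>
#include <stdint.h>

#define LOCK_FLAG(bit)  (1ULL << (bit))

static uint64_t expand_usage_mask(int usage_bit, uint64_t vectors)
{
        uint64_t mask = 0;
        int nr = 0;

        if (!vectors)           /* non-softirq usage: plain single bit */
                return LOCK_FLAG(usage_bit);

        while (vectors) {
                /* __builtin_ffsll() == __ffs64() + 1: 1-based lowest set bit */
                int fs = __builtin_ffsll(vectors);

                vectors >>= fs;
                nr += fs;
                /* each softirq vector owns its own 4-bit usage group */
                mask |= LOCK_FLAG(usage_bit) << (4 * (nr - 1));
        }
        return mask;
}

int main(void)
{
        /* hypothetical: usage bit 0, softirq vectors 0 and 2 pending */
        printf("0x%llx\n", (unsigned long long)expand_usage_mask(0, 0x5));
        return 0;
}

Built with gcc, expand_usage_mask(0, 0x5) yields 0x101: the same usage flag appears in the 4-bit groups of vectors 0 and 2.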
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Mauro Carvalho Chehab <mchehab@s-opensource.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Pavan Kondeti <pkondeti@codeaurora.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
---
 kernel/locking/lockdep.c | 44 +++++++++++++++++++++++++++++++++++-----
 1 file changed, 39 insertions(+), 5 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 578dd57f70d3..6b625b70598a 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2538,7 +2538,26 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
 
 static u64 lock_usage_mask(struct lock_usage *usage)
 {
-        return BIT(usage->bit);
+        int nr = 0;
+        u64 vectors = usage->vector;
+        u64 mask = 0ULL;
+
+        if (!vectors)
+                return lock_flag(usage->bit);
+
+        /* Only softirqs can have non-zero vectors */
+        WARN_ON_ONCE(usage->bit < LOCK_USED_IN_SOFTIRQ ||
+                     usage->bit > LOCK_ENABLED_SOFTIRQ_READ);
+
+        while (vectors) {
+                long fs = __ffs64(vectors) + 1;
+
+                vectors >>= fs;
+                nr += fs;
+                mask |= lock_flag(usage->bit) << (4 * (nr - 1));
+        }
+
+        return mask;
 }
 
 /*
@@ -2546,10 +2565,23 @@ static u64 lock_usage_mask(struct lock_usage *usage)
  */
 static inline int
 valid_state(struct task_struct *curr, struct held_lock *this,
-            enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
+            u64 new_mask, u64 bad_mask)
 {
-        if (unlikely(hlock_class(this)->usage_mask & lock_flag(bad_bit)))
+        u64 bad_intersec;
+
+        bad_intersec = hlock_class(this)->usage_mask & bad_mask;
+
+        if (unlikely(bad_intersec)) {
+                enum lock_usage_bit new_bit, bad_bit;
+                int err;
+
+                err = find_exclusive_match(new_mask,
+                                           bad_intersec, &new_bit, &bad_bit);
+                if (WARN_ON_ONCE(err < 0))
+                        return err;
+
                 return print_usage_bug(curr, this, bad_bit, new_bit);
+        }
         return 1;
 }
 
@@ -2746,7 +2778,8 @@ mark_lock_irq(struct task_struct *curr, struct held_lock *this,
          * Validate that this particular lock does not have conflicting
          * usage states.
          */
-        if (!valid_state(curr, this, new_usage->bit, excl_usage.bit))
+        if (!valid_state(curr, this, lock_usage_mask(new_usage),
+                         lock_usage_mask(&excl_usage)))
                 return 0;
 
         /*
@@ -2762,7 +2795,8 @@ mark_lock_irq(struct task_struct *curr, struct held_lock *this,
          */
         if (!read) {
                 excl_usage.bit += LOCK_USAGE_READ_MASK;
-                if (!valid_state(curr, this, new_usage->bit, excl_usage.bit))
+                if (!valid_state(curr, this, lock_usage_mask(new_usage),
+                                 lock_usage_mask(&excl_usage)))
                         return 0;
 
                 if (STRICT_READ_CHECKS &&
-- 
2.17.1
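
As a design note, the point of passing expanded masks to valid_state() is that a single AND catches a conflict on any softirq vector in one pass; the precise conflicting bit pair is only recovered when a bug has to be reported. A toy userspace sketch of that shape (find_exclusive_match() and print_usage_bug() are lockdep internals not shown in this patch; the printf stands in for both, and all mask values are hypothetical):

/* Toy sketch, not lockdep code: the mask-based check of valid_state(). */
#include <stdio.h>
#include <stdint.h>

static int valid_state(uint64_t usage_mask, uint64_t new_mask,
                       uint64_t bad_mask)
{
        uint64_t bad_intersec = usage_mask & bad_mask;

        if (bad_intersec) {
                /* stand-in for find_exclusive_match() + print_usage_bug():
                 * report the lowest conflicting bit */
                printf("usage bug: new mask 0x%llx conflicts at bit %d\n",
                       (unsigned long long)new_mask,
                       __builtin_ctzll(bad_intersec));
                return 0;
        }
        return 1;
}

int main(void)
{
        /* hypothetical masks: the class already carries bit 2 and the
         * exclusive (bad) mask carries it too -> one AND finds the bug */
        printf("valid: %d\n", valid_state(0x4, 0x1, 0x4));
        return 0;
}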