Date:	Sun, 01 Nov 2020 13:56:01 -0000
From:	Thomas Gleixner <>
Subject:	[GIT pull] locking/urgent for v5.10-rc2
Linus,
please pull the latest locking/urgent branch from:
git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking-urgent-2020-11-01
up to: 1a39340865ce: lockdep: Fix nr_unused_locks accounting
A couple of locking fixes:
- Fix incorrect failure injection handling in the futex code (the error-path pattern is sketched right after this list)
- Prevent a preemption warning in lockdep when tracking local_irq_enable() and interrupts are already enabled
- Remove more raw_cpu_read() usage from lockdep, which causes state corruption on !X86 architectures (the per-cpu read distinction is sketched after this list as well).
- Make the nr_unused_locks accounting in lockdep correct again.
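
For illustration, a minimal userspace sketch (made-up names, not the
kernel code) of the error-path pattern the futex fix restores: once the
injected failure sets the error code, the function has to leave through
the unlock path instead of falling through to the update, which would
clobber it:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static int inject_fault = 1;		/* stand-in for should_fail_futex() */

static int demo_update(int *val, int newval)
{
	int ret = 0;

	pthread_mutex_lock(&demo_lock);

	if (inject_fault) {
		ret = -1;		/* injected failure */
		goto out_unlock;	/* the fix: bail out, don't fall through */
	}

	/* Without the goto, this would run anyway and defeat the injection. */
	*val = newval;

out_unlock:
	pthread_mutex_unlock(&demo_lock);
	return ret;
}

int main(void)
{
	int v = 0;
	int ret = demo_update(&v, 42);

	printf("ret=%d v=%d\n", ret, v);	/* prints ret=-1 v=0 */
	return 0;
}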
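And a kernel-style sketch of the per-cpu read distinction behind the
raw_cpu_read() removal; demo_counter and demo_active() are invented for
the example, and the comments paraphrase my understanding of the
problem rather than the patch itself:

#include <linux/percpu.h>

static DEFINE_PER_CPU(int, demo_counter);	/* made-up per-cpu variable */

static bool demo_active(void)
{
	/*
	 * raw_cpu_read(demo_counter) expands to "compute the address of
	 * this CPU's slot, then load it". On most !X86 architectures
	 * those are separate instructions, so the task can be preempted
	 * and migrated in between and then load the slot of the CPU it
	 * left, i.e. the wrong state.
	 *
	 * this_cpu_read() keeps address generation and load on the same
	 * CPU: a single instruction on x86, an access wrapped in
	 * preemption protection elsewhere.
	 */
	return this_cpu_read(demo_counter) != 0;
}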
Thanks,
tglx
------------------>
Mateusz Nosek (1):
      futex: Fix incorrect should_fail_futex() handling

Peter Zijlstra (3):
      lockdep: Fix preemption WARN for spurious IRQ-enable
      locking/lockdep: Remove more raw_cpu_read() usage
      lockdep: Fix nr_unused_locks accounting


 kernel/futex.c           |  4 +++-
 kernel/locking/lockdep.c | 20 +++++++-------------
 2 files changed, 10 insertions(+), 14 deletions(-)
diff --git a/kernel/futex.c b/kernel/futex.c
index a5876694a60e..39681bf8b06c 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1502,8 +1502,10 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
 	 */
 	newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
 
-	if (unlikely(should_fail_futex(true)))
+	if (unlikely(should_fail_futex(true))) {
 		ret = -EFAULT;
+		goto out_unlock;
+	}
 
 	ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
 	if (!ret && (curval != uval)) {
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 3e99dfef8408..b71ad8d9f1c9 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -84,7 +84,7 @@ static inline bool lockdep_enabled(void)
 	if (!debug_locks)
 		return false;
 
-	if (raw_cpu_read(lockdep_recursion))
+	if (this_cpu_read(lockdep_recursion))
 		return false;
 
 	if (current->lockdep_recursion)
@@ -4057,7 +4057,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip)
 	if (unlikely(in_nmi()))
 		return;
 
-	if (unlikely(__this_cpu_read(lockdep_recursion)))
+	if (unlikely(this_cpu_read(lockdep_recursion)))
 		return;
 
 	if (unlikely(lockdep_hardirqs_enabled())) {
@@ -4126,7 +4126,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
 		goto skip_checks;
 	}
 
-	if (unlikely(__this_cpu_read(lockdep_recursion)))
+	if (unlikely(this_cpu_read(lockdep_recursion)))
 		return;
 
 	if (lockdep_hardirqs_enabled()) {
@@ -4396,6 +4396,9 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 	if (unlikely(hlock_class(this)->usage_mask & new_mask))
 		goto unlock;
 
+	if (!hlock_class(this)->usage_mask)
+		debug_atomic_dec(nr_unused_locks);
+
 	hlock_class(this)->usage_mask |= new_mask;
 
 	if (new_bit < LOCK_TRACE_STATES) {
@@ -4403,19 +4406,10 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 		return 0;
 	}
 
-	switch (new_bit) {
-	case 0 ... LOCK_USED-1:
+	if (new_bit < LOCK_USED) {
 		ret = mark_lock_irq(curr, this, new_bit);
 		if (!ret)
 			return 0;
-		break;
-
-	case LOCK_USED:
-		debug_atomic_dec(nr_unused_locks);
-		break;
-
-	default:
-		break;
 	}
 
 unlock:
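
As a footnote to the last hunk: the nr_unused_locks decrement now
happens on the first usage bit a class ever gets, not on LOCK_USED
specifically. A standalone sketch of that accounting rule (illustrative
names, plain C):

#include <stdio.h>

static int nr_unused = 2;		/* two classes, both still unused */

static void mark_usage(unsigned int *mask, unsigned int bit)
{
	unsigned int new = 1u << bit;

	if (*mask & new)		/* bit already set: nothing to do */
		return;
	if (!*mask)			/* first bit ever: class leaves "unused" */
		nr_unused--;
	*mask |= new;
}

int main(void)
{
	unsigned int class_a = 0, class_b = 0;

	mark_usage(&class_a, 3);	/* first use: nr_unused 2 -> 1 */
	mark_usage(&class_a, 5);	/* already used: no change */
	mark_usage(&class_b, 0);	/* first use: nr_unused 1 -> 0 */
	printf("nr_unused = %d\n", nr_unused);	/* prints 0 */
	return 0;
}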