From: Bart Van Assche <>
Subject: [PATCH v6 09/16] locking/lockdep: Reuse list entries that are no longer in use
Date: Wed, 9 Jan 2019 13:01:57 -0800
Instead of abandoning elements of list_entries[] that are no longer in use, make alloc_list_entry() reuse array elements that have been freed.
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Waiman Long <longman@redhat.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
---
 kernel/locking/lockdep.c | 48 +++++++++++++++++++++++++++++++---------
 1 file changed, 38 insertions(+), 10 deletions(-)
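As background, here is a minimal userspace sketch of the bitmap-backed reuse
scheme that alloc_list_entry() adopts in the diff below. It is an illustration
only: the names (NSLOTS, struct slot, slot_alloc, slot_free, first_zero_bit)
are invented for this sketch, while the kernel code uses find_first_zero_bit()
and __set_bit() on the list_entries_in_use bitmap.

/*
 * Sketch: a static array whose free elements are tracked in a bitmap,
 * so freed slots are handed out again instead of being abandoned.
 */
#include <limits.h>
#include <stdio.h>

#define NSLOTS		8	/* stands in for MAX_LOCKDEP_ENTRIES */
#define BITS_PER_WORD	(sizeof(unsigned long) * CHAR_BIT)
#define BITMAP_WORDS	((NSLOTS + BITS_PER_WORD - 1) / BITS_PER_WORD)

struct slot { int payload; };	/* stands in for struct lock_list */

static struct slot slots[NSLOTS];
static unsigned long in_use[BITMAP_WORDS];

/* Analogue of find_first_zero_bit(): lowest clear bit, or NSLOTS. */
static int first_zero_bit(void)
{
	int i;

	for (i = 0; i < NSLOTS; i++)
		if (!(in_use[i / BITS_PER_WORD] & (1UL << (i % BITS_PER_WORD))))
			return i;
	return NSLOTS;
}

static struct slot *slot_alloc(void)
{
	int idx = first_zero_bit();

	if (idx >= NSLOTS)	/* pool exhausted */
		return NULL;
	in_use[idx / BITS_PER_WORD] |= 1UL << (idx % BITS_PER_WORD);
	return &slots[idx];
}

static void slot_free(struct slot *s)
{
	int idx = (int)(s - slots);

	in_use[idx / BITS_PER_WORD] &= ~(1UL << (idx % BITS_PER_WORD));
}

int main(void)
{
	struct slot *a = slot_alloc();

	slot_free(a);
	/* The freed slot is reused instead of being abandoned. */
	printf("reused: %d\n", slot_alloc() == a);	/* prints "reused: 1" */
	return 0;
}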
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 5b142f699503..5e8a3a17bb94 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -45,6 +45,7 @@
 #include <linux/hash.h>
 #include <linux/ftrace.h>
 #include <linux/stringify.h>
+#include <linux/bitmap.h>
 #include <linux/bitops.h>
 #include <linux/gfp.h>
 #include <linux/random.h>
@@ -132,6 +133,7 @@ static inline int debug_locks_off_graph_unlock(void)
 
 unsigned long nr_list_entries;
 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
+static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);
 
 /*
  * All data structures here are protected by the global debug_lock.
@@ -296,6 +298,7 @@ static struct pending_free {
 	struct list_head zapped_classes;
 	struct rcu_head rcu_head;
 	bool scheduled;
+	DECLARE_BITMAP(list_entries_being_freed, MAX_LOCKDEP_ENTRIES);
 } pending_free[2];
 static DECLARE_WAIT_QUEUE_HEAD(rcu_cb);
 
@@ -756,6 +759,19 @@ static bool assign_lock_key(struct lockdep_map *lock)
 	return true;
 }
+static bool list_entry_being_freed(int list_entry_idx)
+{
+	struct pending_free *pf;
+	int i;
+
+	for (i = 0, pf = pending_free; i < ARRAY_SIZE(pending_free);
+	     i++, pf++)
+		if (test_bit(list_entry_idx, pf->list_entries_being_freed))
+			return true;
+
+	return false;
+}
+
 /*
  * Initialize the lock_classes[] array elements, the free_lock_classes list
  * and also the pending_free[] array.
 */
@@ -896,7 +912,10 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
  */
 static struct lock_list *alloc_list_entry(void)
 {
-	if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
+	int idx = find_first_zero_bit(list_entries_in_use,
+				      ARRAY_SIZE(list_entries));
+
+	if (idx >= ARRAY_SIZE(list_entries)) {
 		if (!debug_locks_off_graph_unlock())
 			return NULL;
 
@@ -904,7 +923,9 @@ static struct lock_list *alloc_list_entry(void)
 		dump_stack();
 		return NULL;
 	}
-	return list_entries + nr_list_entries++;
+	nr_list_entries++;
+	__set_bit(idx, list_entries_in_use);
+	return list_entries + idx;
 }
 
 /*
@@ -1008,7 +1029,7 @@ static inline void mark_lock_accessed(struct lock_list *lock,
 	unsigned long nr;
 
 	nr = lock - list_entries;
-	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
+	WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
 	lock->parent = parent;
 	lock->class->dep_gen_id = lockdep_dependency_gen_id;
 }
@@ -1018,7 +1039,7 @@ static inline unsigned long lock_accessed(struct lock_list *lock)
 	unsigned long nr;
 
 	nr = lock - list_entries;
-	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
+	WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
 	return lock->class->dep_gen_id == lockdep_dependency_gen_id;
 }
 
@@ -4277,13 +4298,15 @@ static void zap_class(struct pending_free *pf, struct lock_class *class)
 	 * Remove all dependencies this lock is
	 * involved in:
	 */
-	for (i = 0, entry = list_entries; i < nr_list_entries; i++, entry++) {
+	for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
+		entry = list_entries + i;
 		if (entry->class != class && entry->links_to != class)
 			continue;
+		if (list_entry_being_freed(i))
+			continue;
+		set_bit(i, pf->list_entries_being_freed);
+		nr_list_entries--;
 		list_del_rcu(&entry->entry);
-		/* Clear .class and .links_to to avoid double removal. */
-		WRITE_ONCE(entry->class, NULL);
-		WRITE_ONCE(entry->links_to, NULL);
 	}
 	if (list_empty(&class->locks_after) &&
 	    list_empty(&class->locks_before)) {
@@ -4325,8 +4348,9 @@ static bool inside_selftest(void)
 }
 
 /*
- * Free all lock classes that are on the pf->zapped_classes list. May be called
- * from RCU callback context.
+ * Free all lock classes that are on the pf->zapped_classes list and also all
+ * list entries that have been marked as being freed. May be called from RCU
+ * callback context.
  */
 static void free_zapped_classes(struct rcu_head *ch)
 {
@@ -4342,6 +4366,9 @@ static void free_zapped_classes(struct rcu_head *ch)
 		reinit_class(class);
 	}
 	list_splice_init(&pf->zapped_classes, &free_lock_classes);
+	bitmap_andnot(list_entries_in_use, list_entries_in_use,
+		      pf->list_entries_being_freed, ARRAY_SIZE(list_entries));
+	bitmap_clear(pf->list_entries_being_freed, 0, ARRAY_SIZE(list_entries));
 	graph_unlock();
 restore_irqs:
 	raw_local_irq_restore(flags);
@@ -4626,6 +4653,7 @@ void __init lockdep_init(void)
 
 	printk(" memory used by lock dependency info: %zu kB\n",
 	       (sizeof(list_entries) +
+		sizeof(list_entries_in_use) +
 		sizeof(lock_classes) +
 		sizeof(classhash_table) +
 		sizeof(chainhash_table) +
-- 
2.20.1.97.g81188d93c3-goog
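The deferred-release half of the patch can be sketched the same way. zap_class()
only marks entries in pf->list_entries_being_freed; after the RCU grace period,
free_zapped_classes() clears those bits out of list_entries_in_use in one pass.
A rough userspace equivalent of that final step, again with invented names
(BPW, mark_being_freed, release_pending) and a direct call standing in for the
RCU callback:

#include <limits.h>

#define BPW	(sizeof(unsigned long) * CHAR_BIT)	/* bits per word */

static unsigned long in_use[2];		/* allocated slots */
static unsigned long being_freed[2];	/* slots queued for release */

/* Zap side: queue one slot, cf. set_bit() in zap_class(). */
static void mark_being_freed(unsigned int idx)
{
	being_freed[idx / BPW] |= 1UL << (idx % BPW);
}

/*
 * Grace-period side: release everything queued in one pass, the moral
 * equivalent of the bitmap_andnot() + bitmap_clear() pair in
 * free_zapped_classes(). In the kernel this runs from an RCU callback;
 * here it is a plain function call.
 */
static void release_pending(void)
{
	unsigned int i;

	for (i = 0; i < 2; i++) {
		in_use[i] &= ~being_freed[i];	/* bitmap_andnot() */
		being_freed[i] = 0;		/* bitmap_clear() */
	}
}

int main(void)
{
	in_use[0] = 0x7;	/* slots 0..2 allocated */
	mark_being_freed(1);	/* zap slot 1 */
	release_pending();	/* in_use[0] is now 0x5 */
	return 0;
}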