    From: Bart Van Assche <bvanassche@acm.org>
    Subject: [PATCH 24/27] locking/lockdep: Introduce __lockdep_free_key_range()
    Date: 29 Nov 2018
    This patch splits lockdep_free_key_range() into a helper,
    __lockdep_free_key_range(), that moves the zapped lock classes onto a
    caller-provided list, and a wrapper that frees them afterwards. It does
    not change any functionality but makes the next patch in this series
    easier to read.

    Signed-off-by: Bart Van Assche <bvanassche@acm.org>
    ---
    kernel/locking/lockdep.c | 35 +++++++++++++++++++++++------------
    1 file changed, 23 insertions(+), 12 deletions(-)
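
    Note (not part of the patch): the loop below zaps only those classes whose
    key or name falls inside [start, start + size), using lockdep.c's existing
    within() helper. As a rough sketch of what that helper does (the exact code
    in the tree may differ slightly):

    static inline int within(const void *addr, void *start, unsigned long size)
    {
    	/* True iff addr lies in the half-open range [start, start + size). */
    	return addr >= start && addr < (start + size);
    }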

    diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
    index 141bb0662ff5..0e273731d028 100644
    --- a/kernel/locking/lockdep.c
    +++ b/kernel/locking/lockdep.c
    @@ -4459,18 +4459,16 @@ static void free_zapped_classes(struct list_head *zapped_classes)
    }

    /*
    - * Used in module.c to remove lock classes from memory that is going to be
    - * freed; and possibly re-used by other modules.
    - *
    - * We will have had one sync_sched() before getting here, so we're guaranteed
    - * nobody will look up these exact classes -- they're properly dead but still
    - * allocated.
    + * Remove all lock classes from the class hash table and from the
    + * all_lock_classes list whose key or name is in the address range
    + * [start, start + size). Move these lock classes to the
    + * @zapped_classes list.
    */
    -void lockdep_free_key_range(void *start, unsigned long size)
    +static void __lockdep_free_key_range(struct list_head *zapped_classes,
    + void *start, unsigned long size)
    {
    struct lock_class *class;
    struct hlist_head *head;
    - LIST_HEAD(zapped_classes);
    unsigned long flags;
    int i;
    int locked;
    @@ -4478,9 +4476,8 @@ void lockdep_free_key_range(void *start, unsigned long size)
    raw_local_irq_save(flags);
    locked = graph_lock();

    - /*
    - * Unhash all classes that were created by this module:
    - */
    + INIT_LIST_HEAD(zapped_classes);
    +
    for (i = 0; i < CLASSHASH_SIZE; i++) {
    head = classhash_table + i;
    hlist_for_each_entry_rcu(class, head, hash_entry) {
    @@ -4488,14 +4485,28 @@ void lockdep_free_key_range(void *start, unsigned long size)
    (!within(class->key, start, size) &&
    !within(class->name, start, size)))
    continue;
    - zap_class(&zapped_classes, class);
    + zap_class(zapped_classes, class);
    }
    }

    if (locked)
    graph_unlock();
    raw_local_irq_restore(flags);
    +}
    +
    +/*
    + * Used in module.c to remove lock classes from memory that is going to be
    + * freed; and possibly re-used by other modules.
    + *
    + * We will have had one sync_sched() before getting here, so we're guaranteed
    + * nobody will look up these exact classes -- they're properly dead but still
    + * allocated.
    + */
    +void lockdep_free_key_range(void *start, unsigned long size)
    +{
    + LIST_HEAD(zapped_classes);

    + __lockdep_free_key_range(&zapped_classes, start, size);
    free_zapped_classes(&zapped_classes);
    }

    --
    2.20.0.rc0.387.gc7a69e6b6c-goog