Subject: [tip: locking/core] lockdep: Extend __bfs() to work with multiple types of dependencies
    The following commit has been merged into the locking/core branch of tip:

    Commit-ID: 6971c0f345620aae5e6172207a57b7524603a34e
    Gitweb: https://git.kernel.org/tip/6971c0f345620aae5e6172207a57b7524603a34e
    Author: Boqun Feng <boqun.feng@gmail.com>
    AuthorDate: Fri, 07 Aug 2020 15:42:26 +08:00
    Committer: Peter Zijlstra <peterz@infradead.org>
    CommitterDate: Wed, 26 Aug 2020 12:42:04 +02:00

    lockdep: Extend __bfs() to work with multiple types of dependencies

    Now we have four types of dependencies in the dependency graph, and not
    all the paths carry real dependencies (the dependencies that may cause
    a deadlock), for example:

    Given locks A and B, if we have:

    	CPU1			CPU2
    	=============		==============
    	write_lock(A);		read_lock(B);
    	read_lock(B);		write_lock(A);

    (assuming read_lock(B) is a recursive reader)

    then we have the dependencies A -(ER)-> B and B -(SN)-> A, and hence a
    dependency path A -(ER)-> B -(SN)-> A. (In the -(XY)-> notation, X is
    how the earlier lock is held, E for exclusive or S for shared, i.e. by
    a recursive reader, and Y is how the later lock is acquired, N for
    non-recursive or R for recursive read; see
    Documentation/locking/lockdep-design.rst.)

    In lockdep w/o recursive locks, a dependency path from A to A
    means a deadlock. However, the above case is obviously not a
    deadlock, because no one holds B exclusively, therefore no one
    waits for the other to release B, so whichever CPU gets A first
    will run without blocking.

    As a result, the dependency path A -(ER)-> B -(SN)-> A is not a
    real/strong dependency that could cause a deadlock.

    From the observation above, we know that for a dependency path to be
    real/strong, no two adjacent dependencies can be -(*R)-> followed by
    -(S*)->.
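
    As an illustration, here is a minimal, self-contained user-space C
    sketch of this rule (not kernel code; the names dep_edge and
    path_is_strong are invented for this example):

#include <stdbool.h>
#include <stdio.h>

/*
 * One edge of a dependency path in the -(XY)-> notation:
 * X (prev): how the earlier lock is held, S (shared, i.e. by a
 * recursive reader) or E (exclusive); Y (next): how the later lock
 * is acquired, R (recursive read) or N (non-recursive).
 */
struct dep_edge {
	bool shared_prev;	/* true for -(S*)-> */
	bool recursive_next;	/* true for -(*R)-> */
};

/*
 * A path is strong iff no -(*R)-> edge is immediately followed by a
 * -(S*)-> edge: such a middle lock is read-held recursively on one
 * side and read-acquired recursively on the other, so neither side
 * blocks the other.
 */
static bool path_is_strong(const struct dep_edge *path, int len)
{
	for (int i = 0; i + 1 < len; i++) {
		if (path[i].recursive_next && path[i + 1].shared_prev)
			return false;
	}
	return true;
}

int main(void)
{
	/* the example above: A -(ER)-> B -(SN)-> A */
	struct dep_edge path[] = {
		{ .shared_prev = false, .recursive_next = true  },
		{ .shared_prev = true,  .recursive_next = false },
	};

	/* prints "strong? no": the path cannot cause a deadlock */
	printf("strong? %s\n", path_is_strong(path, 2) ? "yes" : "no");
	return 0;
}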

    Now our mission is to make __bfs() traverse only the strong dependency
    paths, which is simple: we record in lock_list::only_xr whether the
    previous lock_list of the path has only -(*R)-> dependencies, and when
    we pick a dependency during the traversal, we 1) filter out -(S*)->
    dependencies if the previous lock_list has only -(*R)-> dependencies
    (i.e. ->only_xr is true), and 2) set the next lock_list::only_xr to
    true if we only have -(*R)-> dependencies left after the filtering
    in 1), otherwise set it to false.
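
    Condensed into a self-contained user-space sketch, steps 1) and 2)
    look as follows (the function name bfs_pick_edge is invented here,
    and the DEP_*_MASK values are local stand-ins for the lock_list::dep
    bit encoding introduced earlier in this series, so treat the exact
    bit positions as illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;

/* one bit per dependency type (illustrative values) */
#define DEP_SR_MASK (1U << 0)	/* -(SR)-> */
#define DEP_ER_MASK (1U << 1)	/* -(ER)-> */
#define DEP_SN_MASK (1U << 2)	/* -(SN)-> */
#define DEP_EN_MASK (1U << 3)	/* -(EN)-> */

/*
 * One BFS step: @dep is the bitmap of dependencies of the edge we are
 * about to take, @prev_only_xr tells whether the path so far ends in
 * only -(*R)->. Returns false if the edge must be skipped; otherwise
 * sets *@next_only_xr for the following step.
 */
static bool bfs_pick_edge(u8 dep, bool prev_only_xr, bool *next_only_xr)
{
	/* 1) previous step has only -(*R)->: mask out all -(S*)-> */
	if (prev_only_xr)
		dep &= ~(DEP_SR_MASK | DEP_SN_MASK);

	/* nothing left: no strong dependency along this edge */
	if (!dep)
		return false;

	/* 2) only -(*R)-> left: the next step must avoid -(S*)-> */
	*next_only_xr = !(dep & (DEP_SN_MASK | DEP_EN_MASK));
	return true;
}

int main(void)
{
	bool next;

	/*
	 * After taking A -(ER)-> B, only -(*R)-> was left, so
	 * prev_only_xr is true; the B -(SN)-> A edge is then filtered
	 * out and the bogus "deadlock" A -> B -> A is never reported.
	 */
	if (!bfs_pick_edge(DEP_SN_MASK, true, &next))
		printf("edge skipped: path would not be strong\n");
	return 0;
}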

    With this extension of __bfs(), we now need to initialize the root of
    __bfs() properly (with a correct ->only_xr). To do so, we introduce
    some helper functions, which also clean up the __bfs() root
    initialization code a little.

    Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
    Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
    Link: https://lkml.kernel.org/r/20200807074238.1632519-8-boqun.feng@gmail.com
    ---
    include/linux/lockdep.h | 2 ++
    kernel/locking/lockdep.c | 113 +++++++++++++++++++++++++++++++-------
    2 files changed, 96 insertions(+), 19 deletions(-)

    diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
    index 35c8bb0..57d642d 100644
    --- a/include/linux/lockdep.h
    +++ b/include/linux/lockdep.h
    @@ -57,6 +57,8 @@ struct lock_list {
     	u16				distance;
     	/* bitmap of different dependencies from head to this */
     	u8				dep;
    +	/* used by BFS to record whether "prev -> this" only has -(*R)-> */
    +	u8				only_xr;
     
     	/*
     	 * The parent field is used to implement breadth-first search, and the
    diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
    index 16ad1b7..5abc227 100644
    --- a/kernel/locking/lockdep.c
    +++ b/kernel/locking/lockdep.c
    @@ -1551,8 +1551,72 @@ static inline u8 calc_depb(struct held_lock *prev, struct held_lock *next)
     }
     
     /*
    - * Forward- or backward-dependency search, used for both circular dependency
    - * checking and hardirq-unsafe/softirq-unsafe checking.
    + * Initialize a lock_list entry @lock belonging to @class as the root for a BFS
    + * search.
    + */
    +static inline void __bfs_init_root(struct lock_list *lock,
    +				   struct lock_class *class)
    +{
    +	lock->class = class;
    +	lock->parent = NULL;
    +	lock->only_xr = 0;
    +}
    +
    +/*
    + * Initialize a lock_list entry @lock based on a lock acquisition @hlock as the
    + * root for a BFS search.
    + *
    + * ->only_xr of the initial lock node is set to @hlock->read == 2, to make sure
    + * that <prev> -> @hlock and @hlock -> <whatever __bfs() found> is not -(*R)->
    + * and -(S*)->.
    + */
    +static inline void bfs_init_root(struct lock_list *lock,
    +				 struct held_lock *hlock)
    +{
    +	__bfs_init_root(lock, hlock_class(hlock));
    +	lock->only_xr = (hlock->read == 2);
    +}
    +
    +/*
    + * Similar to bfs_init_root() but initialize the root for backwards BFS.
    + *
    + * ->only_xr of the initial lock node is set to @hlock->read != 0, to make sure
    + * that <next> -> @hlock and @hlock -> <whatever backwards BFS found> is not
    + * -(*S)-> and -(R*)-> (reverse order of -(*R)-> and -(S*)->).
    + */
    +static inline void bfs_init_rootb(struct lock_list *lock,
    +				  struct held_lock *hlock)
    +{
    +	__bfs_init_root(lock, hlock_class(hlock));
    +	lock->only_xr = (hlock->read != 0);
    +}
    +
    +/*
    + * Breadth-First Search to find a strong path in the dependency graph.
    + *
    + * @source_entry: the source of the path we are searching for.
    + * @data: data used for the second parameter of @match function
    + * @match: match function for the search
    + * @target_entry: pointer to the target of a matched path
    + * @offset: the offset to struct lock_class to determine whether it is
    + *          locks_after or locks_before
    + *
    + * We may have multiple edges (considering different kinds of dependencies,
    + * e.g. ER and SN) between two nodes in the dependency graph. But
    + * only the strong dependency path in the graph is relevant to deadlocks. A
    + * strong dependency path is a dependency path that doesn't have two adjacent
    + * dependencies as -(*R)-> -(S*)->, please see:
    + *
    + *         Documentation/locking/lockdep-design.rst
    + *
    + * for more explanation of the definition of strong dependency paths
    + *
    + * In __bfs(), we only traverse in the strong dependency path:
    + *
    + *     In lock_list::only_xr, we record whether the previous dependency only
    + *     has -(*R)-> in the search, and if it does (prev only has -(*R)->), we
    + *     filter out any -(S*)-> in the current dependency and after that, the
    + *     ->only_xr is set according to whether we only have -(*R)-> left.
      */
     static enum bfs_result __bfs(struct lock_list *source_entry,
     			     void *data,
    @@ -1582,6 +1646,7 @@ static enum bfs_result __bfs(struct lock_list *source_entry,
     	__cq_enqueue(cq, source_entry);
     
     	while ((lock = __cq_dequeue(cq))) {
    +		bool prev_only_xr;
     
     		if (!lock->class) {
     			ret = BFS_EINVALIDNODE;
    @@ -1602,10 +1667,26 @@ static enum bfs_result __bfs(struct lock_list *source_entry,

     		head = get_dep_list(lock, offset);
     
    -		DEBUG_LOCKS_WARN_ON(!irqs_disabled());
    +		prev_only_xr = lock->only_xr;
     
     		list_for_each_entry_rcu(entry, head, entry) {
     			unsigned int cq_depth;
    +			u8 dep = entry->dep;
    +
    +			/*
    +			 * Mask out all -(S*)-> if we only have *R in previous
    +			 * step, because -(*R)-> -(S*)-> don't make up a strong
    +			 * dependency.
    +			 */
    +			if (prev_only_xr)
    +				dep &= ~(DEP_SR_MASK | DEP_SN_MASK);
    +
    +			/* If nothing left, we skip */
    +			if (!dep)
    +				continue;
    +
    +			/* If there are only -(*R)-> left, set that for the next step */
    +			entry->only_xr = !(dep & (DEP_SN_MASK | DEP_EN_MASK));
     
     			visit_lock_entry(entry, lock);
     			if (match(entry, data)) {
    @@ -1827,8 +1908,7 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
     	unsigned long ret, flags;
     	struct lock_list this;
     
    -	this.parent = NULL;
    -	this.class = class;
    +	__bfs_init_root(&this, class);
     
     	raw_local_irq_save(flags);
     	lockdep_lock();
    @@ -1854,8 +1934,7 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
     	unsigned long ret, flags;
     	struct lock_list this;
     
    -	this.parent = NULL;
    -	this.class = class;
    +	__bfs_init_root(&this, class);
     
     	raw_local_irq_save(flags);
     	lockdep_lock();
    @@ -1898,10 +1977,9 @@ check_noncircular(struct held_lock *src, struct held_lock *target,
     {
     	enum bfs_result ret;
     	struct lock_list *target_entry;
    -	struct lock_list src_entry = {
    -		.class = hlock_class(src),
    -		.parent = NULL,
    -	};
    +	struct lock_list src_entry;
    +
    +	bfs_init_root(&src_entry, src);
     
     	debug_atomic_inc(nr_cyclic_checks);

    @@ -1937,10 +2015,9 @@ check_redundant(struct held_lock *src, struct held_lock *target)
     {
     	enum bfs_result ret;
     	struct lock_list *target_entry;
    -	struct lock_list src_entry = {
    -		.class = hlock_class(src),
    -		.parent = NULL,
    -	};
    +	struct lock_list src_entry;
    +
    +	bfs_init_root(&src_entry, src);
     
     	debug_atomic_inc(nr_redundant_checks);

    @@ -3556,8 +3633,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
     	struct lock_list root;
     	struct lock_list *target_entry;
     
    -	root.parent = NULL;
    -	root.class = hlock_class(this);
    +	bfs_init_root(&root, this);
     	ret = find_usage_forwards(&root, lock_flag(bit), &target_entry);
     	if (bfs_error(ret)) {
     		print_bfs_bug(ret);
    @@ -3583,8 +3659,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
     	struct lock_list root;
     	struct lock_list *target_entry;
     
    -	root.parent = NULL;
    -	root.class = hlock_class(this);
    +	bfs_init_rootb(&root, this);
     	ret = find_usage_backwards(&root, lock_flag(bit), &target_entry);
     	if (bfs_error(ret)) {
     		print_bfs_bug(ret);