From: Tadeusz Struk <tadeusz.struk@linaro.org>
Subject: [PATCH v2] cgroup: serialize css kill and release paths
Date: 3 Jun 2022
Syzbot found a corrupted list bug that can be triggered from
cgroup_subtree_control_write(cgrp). The reproducer writes to the
cgroup.subtree_control file, which invokes:
cgroup_apply_control_enable()->css_create()->css_populate_dir(), and the
last call fails with a fault-injected -ENOMEM.
In this scenario css_killed_work_fn() gets enqueued via
cgroup_apply_control_disable(cgrp)->kill_css(css), and the handler bails
out to cgroup_kn_unlock(). cgroup_kn_unlock() then calls
cgroup_put(cgrp)->css_put(&cgrp->self), which tries to enqueue
css_release_work_fn() for the same css instance. Both work functions
use the same embedded css->destroy_work item, so this amounts to a
double queue_work() of one work struct, causing the list_add
corruption bug seen in the syzkaller report [1].

Fix this by serializing the css kill and release paths. css_release()
now checks whether css_killed_work_fn() has already been scheduled for
the css and enqueues css_release_work_fn() only if it hasn't. Otherwise
css_release() sets the CSS_REL_LATER flag for that css, which causes
css_release_work_fn() to be enqueued after css_killed_work_fn() has
finished.

Two css flags have been introduced to implement this serialization
mechanism (sketched in user-space form below):

* CSS_KILL_ENQED, which is set when css_killed_work_fn() is enqueued, and
* CSS_REL_LATER, which, when set, causes css_release_work_fn() to be
  scheduled after css_killed_work_fn() has finished.

There is also a new per-css spinlock, which protects the integrity of
these flags.
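
Reduced to a user-space analogue (illustration only: kill_path(),
release_path(), kill_work_done(), and the pthread mutex are invented
stand-ins for the patched kernel paths and are not part of the patch),
the protocol looks like this:

#include <pthread.h>
#include <stdio.h>

#define KILL_ENQED (1 << 0)     /* analogue of CSS_KILL_ENQED */
#define REL_LATER  (1 << 1)     /* analogue of CSS_REL_LATER */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int flags;

static void enqueue(const char *what)
{
        printf("enqueued %s\n", what);  /* stands in for INIT_WORK() + queue_work() */
}

/* analogue of css_killed_ref_fn(): claim the shared work item */
static void kill_path(void)
{
        pthread_mutex_lock(&lock);
        flags |= KILL_ENQED;
        enqueue("kill work");
        pthread_mutex_unlock(&lock);
}

/* analogue of css_release(): enqueue only if the work item is free */
static void release_path(void)
{
        pthread_mutex_lock(&lock);
        if (!(flags & KILL_ENQED))
                enqueue("release work");
        else
                flags |= REL_LATER;     /* defer until the kill work finishes */
        pthread_mutex_unlock(&lock);
}

/* analogue of the tail of css_killed_work_fn(): replay a deferred release */
static void kill_work_done(void)
{
        pthread_mutex_lock(&lock);
        flags &= ~KILL_ENQED;
        if (flags & REL_LATER) {
                flags &= ~REL_LATER;
                enqueue("release work (deferred)");
        }
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        kill_path();            /* the kill path wins the race */
        release_path();         /* the release is deferred, not double-queued */
        kill_work_done();       /* the deferred release is queued exactly once */
        return 0;
}

Because both paths take the same lock, the shared destroy_work item is
handed over cleanly and is never queued twice.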

[1] https://syzkaller.appspot.com/bug?id=e26e54d6eac9d9fb50b221ec3e4627b327465dbd

Cc: Tejun Heo <tj@kernel.org>
Cc: Michal Koutny <mkoutny@suse.com>
Cc: Zefan Li <lizefan.x@bytedance.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Martin KaFai Lau <kafai@fb.com>
Cc: Song Liu <songliubraving@fb.com>
Cc: Yonghong Song <yhs@fb.com>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: KP Singh <kpsingh@kernel.org>
Cc: <cgroups@vger.kernel.org>
Cc: <netdev@vger.kernel.org>
Cc: <bpf@vger.kernel.org>
Cc: <stable@vger.kernel.org>
Cc: <linux-kernel@vger.kernel.org>

Reported-and-tested-by: syzbot+e42ae441c3b10acf9e9d@syzkaller.appspotmail.com
Fixes: 8f36aaec9c92 ("cgroup: Use rcu_work instead of explicit rcu and work item")
Signed-off-by: Tadeusz Struk <tadeusz.struk@linaro.org>
---
v2: Use correct lock in css_killed_work_fn()
---
 include/linux/cgroup-defs.h |  4 ++++
 kernel/cgroup/cgroup.c      | 35 ++++++++++++++++++++++++++++++++---
 2 files changed, 36 insertions(+), 3 deletions(-)

diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 1bfcfb1af352..8dc8b4edb242 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -53,6 +53,8 @@ enum {
         CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */
         CSS_VISIBLE = (1 << 3), /* css is visible to userland */
         CSS_DYING = (1 << 4), /* css is dying */
+        CSS_KILL_ENQED = (1 << 5), /* kill work enqueued for the css */
+        CSS_REL_LATER = (1 << 6), /* release needs to be done after kill */
 };
 
 /* bits in struct cgroup flags field */
@@ -162,6 +164,8 @@ struct cgroup_subsys_state {
          */
         int id;
 
+        /* lock to protect flags */
+        spinlock_t lock;
         unsigned int flags;
 
         /*
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 1779ccddb734..b1bbd438d426 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -5210,8 +5210,23 @@ static void css_release(struct percpu_ref *ref)
         struct cgroup_subsys_state *css =
                 container_of(ref, struct cgroup_subsys_state, refcnt);
 
-        INIT_WORK(&css->destroy_work, css_release_work_fn);
-        queue_work(cgroup_destroy_wq, &css->destroy_work);
+        spin_lock_bh(&css->lock);
+
+        /*
+         * Check if the css_killed_work_fn work has been scheduled for this
+         * css and enqueue css_release_work_fn only if it wasn't.
+         * Otherwise set the CSS_REL_LATER flag, which will cause the
+         * release to be enqueued after css_killed_work_fn is finished.
+         * This is to prevent list corruption by enqueuing two instances
+         * of the same work struct on the same WQ, namely cgroup_destroy_wq.
+         */
+        if (!(css->flags & CSS_KILL_ENQED)) {
+                INIT_WORK(&css->destroy_work, css_release_work_fn);
+                queue_work(cgroup_destroy_wq, &css->destroy_work);
+        } else {
+                css->flags |= CSS_REL_LATER;
+        }
+        spin_unlock_bh(&css->lock);
 }
 
 static void init_and_link_css(struct cgroup_subsys_state *css,
@@ -5230,6 +5245,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
         INIT_LIST_HEAD(&css->rstat_css_node);
         css->serial_nr = css_serial_nr_next++;
         atomic_set(&css->online_cnt, 0);
+        spin_lock_init(&css->lock);
 
         if (cgroup_parent(cgrp)) {
                 css->parent = cgroup_css(cgroup_parent(cgrp), ss);
@@ -5545,10 +5561,12 @@ int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode)
  */
 static void css_killed_work_fn(struct work_struct *work)
 {
-        struct cgroup_subsys_state *css =
+        struct cgroup_subsys_state *css_killed, *css =
                 container_of(work, struct cgroup_subsys_state, destroy_work);
 
         mutex_lock(&cgroup_mutex);
+        css_killed = css;
+        css_killed->flags &= ~CSS_KILL_ENQED;
 
         do {
                 offline_css(css);
@@ -5557,6 +5575,14 @@ static void css_killed_work_fn(struct work_struct *work)
                 css = css->parent;
         } while (css && atomic_dec_and_test(&css->online_cnt));
 
+        spin_lock_bh(&css_killed->lock);
+        if (css_killed->flags & CSS_REL_LATER) {
+                /* If css_release work was delayed for the css, enqueue it now. */
+                INIT_WORK(&css_killed->destroy_work, css_release_work_fn);
+                queue_work(cgroup_destroy_wq, &css_killed->destroy_work);
+                css_killed->flags &= ~CSS_REL_LATER;
+        }
+        spin_unlock_bh(&css_killed->lock);
         mutex_unlock(&cgroup_mutex);
 }
 
@@ -5566,10 +5592,13 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
         struct cgroup_subsys_state *css =
                 container_of(ref, struct cgroup_subsys_state, refcnt);
 
+        spin_lock_bh(&css->lock);
         if (atomic_dec_and_test(&css->online_cnt)) {
+                css->flags |= CSS_KILL_ENQED;
                 INIT_WORK(&css->destroy_work, css_killed_work_fn);
                 queue_work(cgroup_destroy_wq, &css->destroy_work);
         }
+        spin_unlock_bh(&css->lock);
 }
 
 /**
-- 
2.36.1