Subject: [PATCH 5.4 024/215] cgroup: fix cgroup_sk_alloc() for sk_clone_lock()
    From: Cong Wang <xiyou.wangcong@gmail.com>

    [ Upstream commit ad0f75e5f57ccbceec13274e1e242f2b5a6397ed ]

    When we clone a socket in sk_clone_lock(), its sk_cgrp_data is
    copied, so the cgroup refcnt must be taken too. And, unlike the
    sk_alloc() path, sock_update_netprioidx() is not called here.
    Therefore, it is safe and necessary to grab the cgroup refcnt
    even when cgroup_sk_alloc is disabled.
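A rough userspace illustration of that reasoning (a sketch with made-up
fake_cgroup/fake_sock types, not kernel code): a plain struct copy
duplicates the embedded cgroup pointer, so the clone has to take its own
reference for the later puts to balance.

#include <assert.h>
#include <stdio.h>

struct fake_cgroup { int refcnt; };
struct fake_sock { struct fake_cgroup *cgrp; };

static void cgrp_get(struct fake_cgroup *c) { c->refcnt++; }
static void cgrp_put(struct fake_cgroup *c) { c->refcnt--; }

int main(void)
{
	struct fake_cgroup cg = { .refcnt = 1 };    /* ref held by the parent socket */
	struct fake_sock parent = { .cgrp = &cg };
	struct fake_sock child = parent;            /* like sk_clone_lock(): shallow copy */

	cgrp_get(child.cgrp);     /* the clone takes its own reference */

	cgrp_put(parent.cgrp);    /* parent socket released */
	cgrp_put(child.cgrp);     /* child socket released */

	assert(cg.refcnt == 0);   /* balanced; without the get it would go negative */
	printf("refcnt after both puts: %d\n", cg.refcnt);
	return 0;
}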

sk_clone_lock() is called in BH context anyway; the in_interrupt() check
would make cgroup_sk_alloc() return early if it were called from there.
And for sk_alloc() skcd->val is always zero. So it is safe to factor out
the code to make it more readable.

    The global variable 'cgroup_sk_alloc_disabled' is used to determine
    whether to take these reference counts. It is impossible to make
    the reference counting correct unless we save this bit of information
    in skcd->val. So, add a new bit there to record whether the socket
    has already taken the reference counts. This obviously relies on
    kmalloc() to align cgroup pointers to at least 4 bytes,
    ARCH_KMALLOC_MINALIGN is certainly larger than that.
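To see why the flag bits and a stored cgroup pointer can share skcd->val,
here is a hedged userspace sketch (the fake_skcd union and aligned_alloc()
stand in for the real sock_cgroup_data layout and the kmalloc() alignment
guarantee): a pointer aligned to at least 4 bytes always has its two low
bits clear, so a non-zero (val & 3) can only mean the flag/inline-data
layout is in use, which is exactly what the patched sock_cgroup_ptr()
tests below.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-in for the little-endian sock_cgroup_data layout. */
union fake_skcd {
	struct {
		uint8_t is_data   : 1;
		uint8_t no_refcnt : 1;
	};
	uint64_t val;
};

int main(void)
{
	union fake_skcd skcd = { .val = 0 };
	void *cgrp = aligned_alloc(4, 64);  /* stand-in for a kmalloc'd cgroup */

	/* Pointer mode: alignment guarantees the two low bits are clear. */
	skcd.val = (uintptr_t)cgrp;
	assert((skcd.val & 3) == 0);        /* sock_cgroup_ptr() sees a pointer */

	/* Flag mode: either bit makes (val & 3) non-zero. */
	skcd.val = 0;
	skcd.no_refcnt = 1;
	assert((skcd.val & 3) != 0);        /* sock_cgroup_ptr() falls back to
	                                       the default root cgroup */

	printf("flag bits and pointer bits never alias\n");
	free(cgrp);
	return 0;
}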

This bug seems to have been present since the beginning; commit
d979a39d7242 ("cgroup: duplicate cgroup reference when cloning sockets")
tried to fix it, but not completely. It seems it was not easy to trigger
until the recent commit 090e28b229af
("netprio_cgroup: Fix unlimited memory leak of v2 cgroups") was merged.

    Fixes: bd1060a1d671 ("sock, cgroup: add sock->sk_cgroup")
    Reported-by: Cameron Berkenpas <cam@neo-zeon.de>
    Reported-by: Peter Geis <pgwipeout@gmail.com>
    Reported-by: Lu Fengqi <lufq.fnst@cn.fujitsu.com>
    Reported-by: Daniël Sonck <dsonck92@gmail.com>
    Reported-by: Zhang Qiang <qiang.zhang@windriver.com>
    Tested-by: Cameron Berkenpas <cam@neo-zeon.de>
    Tested-by: Peter Geis <pgwipeout@gmail.com>
    Tested-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
    Cc: Daniel Borkmann <daniel@iogearbox.net>
    Cc: Zefan Li <lizefan@huawei.com>
    Cc: Tejun Heo <tj@kernel.org>
    Cc: Roman Gushchin <guro@fb.com>
    Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>
    Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
    ---
 include/linux/cgroup-defs.h |    6 ++++--
 include/linux/cgroup.h      |    4 +++-
 kernel/cgroup/cgroup.c      |   31 +++++++++++++++++++------------
 net/core/sock.c             |    2 +-
 4 files changed, 27 insertions(+), 16 deletions(-)

--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -797,7 +797,8 @@ struct sock_cgroup_data {
 	union {
 #ifdef __LITTLE_ENDIAN
 		struct {
-			u8	is_data;
+			u8	is_data : 1;
+			u8	no_refcnt : 1;
 			u8	padding;
 			u16	prioidx;
 			u32	classid;
@@ -807,7 +808,8 @@ struct sock_cgroup_data {
 			u32	classid;
 			u16	prioidx;
 			u8	padding;
-			u8	is_data;
+			u8	no_refcnt : 1;
+			u8	is_data : 1;
 		} __packed;
 #endif
 		u64	val;
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -822,6 +822,7 @@ extern spinlock_t cgroup_sk_update_lock;
 
 void cgroup_sk_alloc_disable(void);
 void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
+void cgroup_sk_clone(struct sock_cgroup_data *skcd);
 void cgroup_sk_free(struct sock_cgroup_data *skcd);
 
 static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
@@ -835,7 +836,7 @@ static inline struct cgroup *sock_cgroup
 	 */
 	v = READ_ONCE(skcd->val);
 
-	if (v & 1)
+	if (v & 3)
 		return &cgrp_dfl_root.cgrp;
 
 	return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
@@ -847,6 +848,7 @@ static inline struct cgroup *sock_cgroup
 #else	/* CONFIG_SOCK_CGROUP_DATA */
 
 static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
+static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {}
 static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}
 
 #endif	/* CONFIG_SOCK_CGROUP_DATA */
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -6379,18 +6379,8 @@ void cgroup_sk_alloc_disable(void)
 
 void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
 {
-	if (cgroup_sk_alloc_disabled)
-		return;
-
-	/* Socket clone path */
-	if (skcd->val) {
-		/*
-		 * We might be cloning a socket which is left in an empty
-		 * cgroup and the cgroup might have already been rmdir'd.
-		 * Don't use cgroup_get_live().
-		 */
-		cgroup_get(sock_cgroup_ptr(skcd));
-		cgroup_bpf_get(sock_cgroup_ptr(skcd));
+	if (cgroup_sk_alloc_disabled) {
+		skcd->no_refcnt = 1;
 		return;
 	}
 
@@ -6415,10 +6405,27 @@ void cgroup_sk_alloc(struct sock_cgroup_
 	rcu_read_unlock();
 }
 
+void cgroup_sk_clone(struct sock_cgroup_data *skcd)
+{
+	if (skcd->val) {
+		if (skcd->no_refcnt)
+			return;
+		/*
+		 * We might be cloning a socket which is left in an empty
+		 * cgroup and the cgroup might have already been rmdir'd.
+		 * Don't use cgroup_get_live().
+		 */
+		cgroup_get(sock_cgroup_ptr(skcd));
+		cgroup_bpf_get(sock_cgroup_ptr(skcd));
+	}
+}
+
 void cgroup_sk_free(struct sock_cgroup_data *skcd)
 {
 	struct cgroup *cgrp = sock_cgroup_ptr(skcd);
 
+	if (skcd->no_refcnt)
+		return;
 	cgroup_bpf_put(cgrp);
 	cgroup_put(cgrp);
 }
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1837,7 +1837,7 @@ struct sock *sk_clone_lock(const struct
 		/* sk->sk_memcg will be populated at accept() time */
 		newsk->sk_memcg = NULL;
 
-		cgroup_sk_alloc(&newsk->sk_cgrp_data);
+		cgroup_sk_clone(&newsk->sk_cgrp_data);
 
 		rcu_read_lock();
 		filter = rcu_dereference(sk->sk_filter);
