Subject: [PATCH 4.14 04/37] perf/core: Fix data race between perf_event_set_output() and perf_mmap_close()
Date: 2022-07-27
    From: Peter Zijlstra <peterz@infradead.org>

    [ Upstream commit 68e3c69803dada336893640110cb87221bb01dcf ]

Yang Jihong reported a race between perf_event_set_output() and
perf_mmap_close():

  CPU1                                          CPU2

  perf_mmap_close(e2)
    if (atomic_dec_and_test(&e2->rb->mmap_count)) // 1 -> 0
      detach_rest = true

                                                ioctl(e1, IOC_SET_OUTPUT, e2)
                                                  perf_event_set_output(e1, e2)

    ...
    list_for_each_entry_rcu(e, &e2->rb->event_list, rb_entry)
      ring_buffer_attach(e, NULL);
      // e1 isn't yet added and
      // therefore not detached

                                                    ring_buffer_attach(e1, e2->rb)
                                                      list_add_rcu(&e1->rb_entry,
                                                                   &e2->rb->event_list)

After this, e1 is attached to an unmapped rb and a subsequent
perf_mmap() will loop forever:

  again:
          mutex_lock(&e->mmap_mutex);
          if (event->rb) {
                  ...
                  if (!atomic_inc_not_zero(&e->rb->mmap_count)) {
                          ...
                          mutex_unlock(&e->mmap_mutex);
                          goto again;
                  }
          }
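
To see why the retry can never succeed on its own, here is a minimal
userspace model of the inc-not-zero refcount pattern (an illustration
only; it uses C11 <stdatomic.h> as a stand-in for the kernel's
atomic_t). Once the count has dropped to zero, an inc-not-zero style
increment refuses to resurrect it, so the goto-again loop spins until
something detaches the event from the dead rb:

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdio.h>

  /* Userspace analogue of atomic_inc_not_zero(): take a reference
   * only if the count has not already hit zero. */
  static bool inc_not_zero(atomic_int *count)
  {
          int old = atomic_load(count);

          while (old != 0) {
                  if (atomic_compare_exchange_weak(count, &old, old + 1))
                          return true;    /* got a reference */
          }
          return false;                   /* refcount is dead */
  }

  int main(void)
  {
          atomic_int mmap_count = 1;

          /* perf_mmap_close(): 1 -> 0, the refcount is now dead */
          atomic_fetch_sub(&mmap_count, 1);

          /* perf_mmap()'s "goto again" path: this can never succeed now */
          for (int i = 0; i < 3; i++)
                  printf("attempt %d: %s\n", i,
                         inc_not_zero(&mmap_count) ? "got ref"
                                                   : "dead rb, retry");
          return 0;
  }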

    The loop in perf_mmap_close() holds e2->mmap_mutex, while the attach
    in perf_event_set_output() holds e1->mmap_mutex. As such there is no
    serialization to avoid this race.
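
For illustration, a hypothetical userspace sketch of the shape of the
racing pair of operations (an assumption-laden sketch, not the
reporter's reproducer and not a reliable trigger; the race window is
only a few instructions wide, and the helper names here are
illustrative):

  #include <linux/perf_event.h>
  #include <sys/ioctl.h>
  #include <sys/mman.h>
  #include <sys/syscall.h>
  #include <pthread.h>
  #include <string.h>
  #include <unistd.h>

  static int e1, e2;              /* two software perf events */
  static size_t len;

  static int open_event(void)
  {
          struct perf_event_attr attr;

          memset(&attr, 0, sizeof(attr));
          attr.size = sizeof(attr);
          attr.type = PERF_TYPE_SOFTWARE;
          attr.config = PERF_COUNT_SW_CPU_CLOCK;
          attr.disabled = 1;
          return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
  }

  /* CPU1: map and unmap e2's rb; munmap() drops rb->mmap_count 1 -> 0 */
  static void *mmap_unmap_loop(void *arg)
  {
          for (;;) {
                  void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                                 MAP_SHARED, e2, 0);
                  if (p != MAP_FAILED)
                          munmap(p, len);
          }
          return NULL;            /* not reached */
  }

  int main(void)
  {
          pthread_t t;

          len = 9 * sysconf(_SC_PAGESIZE); /* header + 2^3 data pages */
          e1 = open_event();
          e2 = open_event();
          if (e1 < 0 || e2 < 0)
                  return 1;

          pthread_create(&t, NULL, mmap_unmap_loop, NULL);

          /* CPU2: keep redirecting e1's output into e2's rb */
          for (;;)
                  ioctl(e1, PERF_EVENT_IOC_SET_OUTPUT, e2);
  }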

Change perf_event_set_output() to take both e1->mmap_mutex and
e2->mmap_mutex to alleviate that problem. Additionally, have the loop
in perf_mmap() detach the rb directly; this avoids having to wait for
the concurrent perf_mmap_close() to get around to doing it in order
to make progress.
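
The double-lock relies on the usual address-ordering idiom: acquiring
the two mutexes in a globally consistent order means two racing callers
can never deadlock against each other. A minimal pthreads analogue
(assumption: this mirrors the kernel's mutex_lock_double() from the
diff below, minus the mutex_lock_nested() annotation that keeps lockdep
happy):

  #include <pthread.h>

  /* Take two mutexes in a globally consistent (address) order. */
  static void lock_double(pthread_mutex_t *a, pthread_mutex_t *b)
  {
          if (b < a) {                    /* lower address first */
                  pthread_mutex_t *tmp = a;
                  a = b;
                  b = tmp;
          }
          pthread_mutex_lock(a);
          pthread_mutex_lock(b);
  }

  static void unlock_double(pthread_mutex_t *a, pthread_mutex_t *b)
  {
          /* unlock order is irrelevant for deadlock avoidance */
          pthread_mutex_unlock(a);
          pthread_mutex_unlock(b);
  }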

    Fixes: 9bb5d40cd93c ("perf: Fix mmap() accounting hole")
    Reported-by: Yang Jihong <yangjihong1@huawei.com>
    Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
    Tested-by: Yang Jihong <yangjihong1@huawei.com>
    Link: https://lkml.kernel.org/r/YsQ3jm2GR38SW7uD@worktop.programming.kicks-ass.net
    Signed-off-by: Sasha Levin <sashal@kernel.org>
    ---
    kernel/events/core.c | 45 ++++++++++++++++++++++++++++++--------------
    1 file changed, 31 insertions(+), 14 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 93e21e319d70..2ad8acff03db 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5429,10 +5429,10 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)

                 if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
                         /*
-                        * Raced against perf_mmap_close() through
-                        * perf_event_set_output(). Try again, hope for better
-                        * luck.
+                        * Raced against perf_mmap_close(); remove the
+                        * event and try again.
                         */
+                       ring_buffer_attach(event, NULL);
                        mutex_unlock(&event->mmap_mutex);
                        goto again;
                }
@@ -9859,14 +9859,25 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
        goto out;
 }

+static void mutex_lock_double(struct mutex *a, struct mutex *b)
+{
+       if (b < a)
+               swap(a, b);
+
+       mutex_lock(a);
+       mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
+}
+
 static int
 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 {
        struct ring_buffer *rb = NULL;
        int ret = -EINVAL;

-       if (!output_event)
+       if (!output_event) {
+               mutex_lock(&event->mmap_mutex);
                goto set;
+       }

        /* don't allow circular references */
        if (event == output_event)
@@ -9904,8 +9915,15 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
            event->pmu != output_event->pmu)
                goto out;

+       /*
+        * Hold both mmap_mutex to serialize against perf_mmap_close(). Since
+        * output_event is already on rb->event_list, and the list iteration
+        * restarts after every removal, it is guaranteed this new event is
+        * observed *OR* if output_event is already removed, it's guaranteed we
+        * observe !rb->mmap_count.
+        */
+       mutex_lock_double(&event->mmap_mutex, &output_event->mmap_mutex);
 set:
-       mutex_lock(&event->mmap_mutex);
        /* Can't redirect output if we've got an active mmap() */
        if (atomic_read(&event->mmap_count))
                goto unlock;
@@ -9915,6 +9933,12 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
                rb = ring_buffer_get(output_event);
                if (!rb)
                        goto unlock;
+
+               /* did we race against perf_mmap_close() */
+               if (!atomic_read(&rb->mmap_count)) {
+                       ring_buffer_put(rb);
+                       goto unlock;
+               }
        }

        ring_buffer_attach(event, rb);
@@ -9922,20 +9946,13 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
        ret = 0;
 unlock:
        mutex_unlock(&event->mmap_mutex);
+       if (output_event)
+               mutex_unlock(&output_event->mmap_mutex);

 out:
        return ret;
 }

-static void mutex_lock_double(struct mutex *a, struct mutex *b)
-{
-       if (b < a)
-               swap(a, b);
-
-       mutex_lock(a);
-       mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
-}
-
 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
 {
        bool nmi_safe = false;
    --
    2.35.1

