Subject: [PATCH 3.14 06/18] perf: Fix PERF_EVENT_IOC_PERIOD migration race
3.14-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Peter Zijlstra <peterz@infradead.org>

commit c7999c6f3fed9e383d3131474588f282ae6d56b9 upstream.

I ran the perf fuzzer, which triggered some WARN()s caused by trying to
stop/restart an event on the wrong CPU.

Use the normal IPI pattern to ensure we run the code on the correct CPU.
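
For context, the "normal IPI pattern" means packaging the arguments in a
small struct and running the update via a cross-call on the CPU that owns
the event, retrying for task-bound events that may migrate in the meantime.
Below is a minimal sketch of that pattern, assumed to live alongside the
patch's code in kernel/events/core.c. period_xcall and __xcall_set_period()
are hypothetical names standing in for the patch's period_event and
__perf_event_period(), and the one-line update stands in for the full
stop/update/restart sequence shown in the patch:

struct period_xcall {
	struct perf_event *event;
	u64 value;
};

/*
 * Cross-call target: runs on the CPU that owns the event, so a
 * pmu->stop()/pmu->start() sequence cannot race with migration.
 */
static int __xcall_set_period(void *info)
{
	struct period_xcall *px = info;
	struct perf_event_context *ctx = px->event->ctx;

	raw_spin_lock(&ctx->lock);	/* IPIs run with IRQs disabled */
	px->event->attr.sample_period = px->value; /* simplified update */
	raw_spin_unlock(&ctx->lock);
	return 0;
}

static int xcall_set_period(struct perf_event *event, u64 value)
{
	struct period_xcall px = { .event = event, .value = value };
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/* CPU-bound event: IPI the owning CPU. */
		cpu_function_call(event->cpu, __xcall_set_period, &px);
		return 0;
	}
retry:
	/* Task-bound event: run wherever the task is currently on-CPU. */
	if (!task_function_call(task, __xcall_set_period, &px))
		return 0;

	/*
	 * The task was not running; take the lock and re-check, since it
	 * may have been scheduled back in between the call and this point.
	 */
	raw_spin_lock_irq(&ctx->lock);
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		task = ctx->task;
		goto retry;
	}
	/*
	 * Context inactive everywhere: update in place, inline, because
	 * __xcall_set_period() takes ctx->lock itself.
	 */
	event->attr.sample_period = value;
	raw_spin_unlock_irq(&ctx->lock);
	return 0;
}

Note that the inactive-context fallback in the sketch does the update inline
under ctx->lock rather than calling __xcall_set_period() again, precisely
because that helper acquires ctx->lock on its own.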

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: bad7192b842c ("perf: Fix PERF_EVENT_IOC_PERIOD to force-reset the period")
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 kernel/events/core.c | 75 +++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 55 insertions(+), 20 deletions(-)

--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3562,28 +3562,21 @@ static void perf_event_for_each(struct p
 	mutex_unlock(&ctx->mutex);
 }
 
-static int perf_event_period(struct perf_event *event, u64 __user *arg)
-{
-	struct perf_event_context *ctx = event->ctx;
-	int ret = 0, active;
+struct period_event {
+	struct perf_event *event;
 	u64 value;
+};
 
-	if (!is_sampling_event(event))
-		return -EINVAL;
-
-	if (copy_from_user(&value, arg, sizeof(value)))
-		return -EFAULT;
-
-	if (!value)
-		return -EINVAL;
+static int __perf_event_period(void *info)
+{
+	struct period_event *pe = info;
+	struct perf_event *event = pe->event;
+	struct perf_event_context *ctx = event->ctx;
+	u64 value = pe->value;
+	bool active;
 
-	raw_spin_lock_irq(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	if (event->attr.freq) {
-		if (value > sysctl_perf_event_sample_rate) {
-			ret = -EINVAL;
-			goto unlock;
-		}
-
 		event->attr.sample_freq = value;
 	} else {
 		event->attr.sample_period = value;
@@ -3602,11 +3595,53 @@ static int perf_event_period(struct perf
 		event->pmu->start(event, PERF_EF_RELOAD);
 		perf_pmu_enable(ctx->pmu);
 	}
+	raw_spin_unlock(&ctx->lock);
+
+	return 0;
+}
+
+static int perf_event_period(struct perf_event *event, u64 __user *arg)
+{
+	struct period_event pe = { .event = event, };
+	struct perf_event_context *ctx = event->ctx;
+	struct task_struct *task;
+	u64 value;
+
+	if (!is_sampling_event(event))
+		return -EINVAL;
+
+	if (copy_from_user(&value, arg, sizeof(value)))
+		return -EFAULT;
+
+	if (!value)
+		return -EINVAL;
+
+	if (event->attr.freq && value > sysctl_perf_event_sample_rate)
+		return -EINVAL;
+
+	task = ctx->task;
+	pe.value = value;
+
+	if (!task) {
+		cpu_function_call(event->cpu, __perf_event_period, &pe);
+		return 0;
+	}
+
+retry:
+	if (!task_function_call(task, __perf_event_period, &pe))
+		return 0;
+
+	raw_spin_lock_irq(&ctx->lock);
+	if (ctx->is_active) {
+		raw_spin_unlock_irq(&ctx->lock);
+		task = ctx->task;
+		goto retry;
+	}
 
-unlock:
+	__perf_event_period(&pe);
 	raw_spin_unlock_irq(&ctx->lock);
 
-	return ret;
+	return 0;
 }
 
 static const struct file_operations perf_fops;


