Subject: [PATCH 5.8 100/633] perf/x86: Fix n_pair for cancelled txn
    [ Upstream commit 871a93b0aad65a7f44ee25f2d17932ef6d559850 ]

Kan reported that n_metric gets corrupted for cancelled transactions;
a similar issue exists for n_pair for AMD's Large Increment per Cycle feature.
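
To see the accounting in isolation, here is a minimal user-space C
sketch (not kernel code; the field names only mirror struct
cpu_hw_events, and per-CPU state and n_added are omitted) of the
pattern the patch applies: every per-transaction increment needs a
matching shadow counter so a cancelled transaction can be rolled
back exactly.

	#include <stdio.h>

	struct cpu_hw_events {
		int n_events;   /* events currently collected      */
		int n_pair;     /* events using a counter pair     */
		int n_txn;      /* events added in the current txn */
		int n_txn_pair; /* paired events in the current txn */
	};

	static void start_txn(struct cpu_hw_events *c)
	{
		c->n_txn = 0;
		c->n_txn_pair = 0;	/* the fix: reset the shadow counter */
	}

	static void collect_event(struct cpu_hw_events *c, int is_pair)
	{
		c->n_events++;
		c->n_txn++;
		if (is_pair) {
			c->n_pair++;
			c->n_txn_pair++;	/* the fix: track pairs per txn */
		}
	}

	static void cancel_txn(struct cpu_hw_events *c)
	{
		c->n_events -= c->n_txn;
		c->n_pair -= c->n_txn_pair;	/* without this, n_pair leaks */
	}

	int main(void)
	{
		struct cpu_hw_events c = { 0 };

		start_txn(&c);
		collect_event(&c, 1);	/* one paired event */
		collect_event(&c, 0);
		cancel_txn(&c);		/* scheduling failed: roll back */

		/* before the fix, n_pair would still be 1 here */
		printf("n_events=%d n_pair=%d\n", c.n_events, c.n_pair);
		return 0;
	}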

The problem was confirmed, and the fix verified, by Kim using:

    sudo perf stat -e "{cycles,cycles,cycles,cycles}:D" -a sleep 10 &

    # should succeed:
    sudo perf stat -e "{fp_ret_sse_avx_ops.all}:D" -a workload

    # should fail:
    sudo perf stat -e "{fp_ret_sse_avx_ops.all,fp_ret_sse_avx_ops.all,cycles}:D" -a workload

    # previously failed, now succeeds with this patch:
    sudo perf stat -e "{fp_ret_sse_avx_ops.all}:D" -a workload
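
The pinned (:D) background group ties up counters on every CPU, so the
oversized group above fails to schedule, and each failed attempt goes
through the start_txn()/cancel_txn() path. Since n_pair is used to
shrink the counter budget during scheduling (each Large Increment
event occupies a counter pair), every cancelled transaction that
leaked into n_pair made that budget smaller, until even a single
paired event could no longer be scheduled.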

    Fixes: 5738891229a2 ("perf/x86/amd: Add support for Large Increment per Cycle Events")
    Reported-by: Kan Liang <kan.liang@linux.intel.com>
    Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
    Tested-by: Kim Phillips <kim.phillips@amd.com>
    Link: https://lkml.kernel.org/r/20201005082516.GG2628@hirez.programming.kicks-ass.net
    Signed-off-by: Sasha Levin <sashal@kernel.org>
    ---
 arch/x86/events/core.c       | 6 +++++-
 arch/x86/events/perf_event.h | 1 +
 2 files changed, 6 insertions(+), 1 deletion(-)

    diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
    index 4103665c6e032..29640b4079af0 100644
    --- a/arch/x86/events/core.c
    +++ b/arch/x86/events/core.c
@@ -1087,8 +1087,10 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
 
 		cpuc->event_list[n] = event;
 		n++;
-		if (is_counter_pair(&event->hw))
+		if (is_counter_pair(&event->hw)) {
 			cpuc->n_pair++;
+			cpuc->n_txn_pair++;
+		}
 	}
 	return n;
 }
@@ -1953,6 +1955,7 @@ static void x86_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
 
 	perf_pmu_disable(pmu);
 	__this_cpu_write(cpu_hw_events.n_txn, 0);
+	__this_cpu_write(cpu_hw_events.n_txn_pair, 0);
 }
 
 /*
@@ -1978,6 +1981,7 @@ static void x86_pmu_cancel_txn(struct pmu *pmu)
 	 */
 	__this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
 	__this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
+	__this_cpu_sub(cpu_hw_events.n_pair, __this_cpu_read(cpu_hw_events.n_txn_pair));
 	perf_pmu_enable(pmu);
 }
 
    diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
    index e17a3d8a47ede..d4d482d16fe18 100644
    --- a/arch/x86/events/perf_event.h
    +++ b/arch/x86/events/perf_event.h
@@ -198,6 +198,7 @@ struct cpu_hw_events {
 					     they've never been enabled yet */
 	int			n_txn;    /* the # last events in the below arrays;
 					     added in the current transaction */
+	int			n_txn_pair;
 	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
 	u64			tags[X86_PMC_IDX_MAX];

    --
    2.25.1
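
Note that only the cancel path needs the rollback: on a successful
commit the collected events remain on the PMU, so the n_pair they
contributed stays valid. The new n_txn_pair field simply mirrors the
existing n_txn pattern, which cancel_txn() already uses to unwind
n_added and n_events.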

