From: Andi Kleen <ak@linux.intel.com>
Subject: [PATCH 02/22] perf/x86/intel: Extract memory code PEBS parser for reuse
Date: Mon, 18 Mar 2019

Extract the memory-profiling code from the PEBS record parser into
separate helper functions, so that the upcoming adaptive PEBS parser
can reuse it. No functional changes.

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
---
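[Note for reviewers, not for the commit message: a rough sketch of how
the upcoming adaptive PEBS parser might reuse the helpers extracted
here. The function name setup_adaptive_pebs_mem_data, the struct name
pebs_meminfo, and its fields are illustrative assumptions, not part of
this patch.]

static void setup_adaptive_pebs_mem_data(struct perf_event *event,
					 struct perf_sample_data *data,
					 struct pebs_meminfo *meminfo,
					 u64 ax)
{
	u64 sample_type = event->attr.sample_type;

	/*
	 * Because the helpers now take plain u64 values instead of a
	 * struct pebs_record_skl *, any record layout carrying the
	 * same fields can feed them directly.
	 */
	if (sample_type & PERF_SAMPLE_DATA_SRC)
		data->data_src.val = get_data_src(event, meminfo->aux);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		data->weight = intel_hsw_weight(meminfo->tsx_tuning);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		data->txn = intel_hsw_transaction(meminfo->tsx_tuning, ax);
}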
 arch/x86/events/intel/ds.c | 63 ++++++++++++++++++++------------------
 1 file changed, 34 insertions(+), 29 deletions(-)

diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 10c99ce1fead..4a2206876baa 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1125,34 +1125,50 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
 	return 0;
 }
 
-static inline u64 intel_hsw_weight(struct pebs_record_skl *pebs)
+static inline u64 intel_hsw_weight(u64 tsx_tuning)
 {
-	if (pebs->tsx_tuning) {
-		union hsw_tsx_tuning tsx = { .value = pebs->tsx_tuning };
+	if (tsx_tuning) {
+		union hsw_tsx_tuning tsx = { .value = tsx_tuning };
 		return tsx.cycles_last_block;
 	}
 	return 0;
 }
 
-static inline u64 intel_hsw_transaction(struct pebs_record_skl *pebs)
+static u64 intel_hsw_transaction(u64 tsx_tuning, u64 ax)
 {
-	u64 txn = (pebs->tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32;
+	u64 txn = (tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32;
 
 	/* For RTM XABORTs also log the abort code from AX */
-	if ((txn & PERF_TXN_TRANSACTION) && (pebs->ax & 1))
-		txn |= ((pebs->ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
+	if ((txn & PERF_TXN_TRANSACTION) && (ax & 1))
+		txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
 	return txn;
 }
 
+#define PERF_X86_EVENT_PEBS_HSW_PREC \
+	(PERF_X86_EVENT_PEBS_ST_HSW | \
+	 PERF_X86_EVENT_PEBS_LD_HSW | \
+	 PERF_X86_EVENT_PEBS_NA_HSW)
+
+static u64 get_data_src(struct perf_event *event, u64 aux)
+{
+	u64 val = PERF_MEM_NA;
+	int fl = event->hw.flags;
+	bool fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);
+
+	if (fl & PERF_X86_EVENT_PEBS_LDLAT)
+		val = load_latency_data(aux);
+	else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
+		val = precise_datala_hsw(event, aux);
+	else if (fst)
+		val = precise_store_data(aux);
+	return val;
+}
+
 static void setup_pebs_sample_data(struct perf_event *event,
 				   struct pt_regs *iregs, void *__pebs,
 				   struct perf_sample_data *data,
 				   struct pt_regs *regs)
 {
-#define PERF_X86_EVENT_PEBS_HSW_PREC \
-	(PERF_X86_EVENT_PEBS_ST_HSW | \
-	 PERF_X86_EVENT_PEBS_LD_HSW | \
-	 PERF_X86_EVENT_PEBS_NA_HSW)
 	/*
 	 * We cast to the biggest pebs_record but are careful not to
 	 * unconditionally access the 'extra' entries.
@@ -1160,17 +1176,13 @@ static void setup_pebs_sample_data(struct perf_event *event,
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct pebs_record_skl *pebs = __pebs;
 	u64 sample_type;
-	int fll, fst, dsrc;
-	int fl = event->hw.flags;
+	int fll;
 
 	if (pebs == NULL)
 		return;
 
 	sample_type = event->attr.sample_type;
-	dsrc = sample_type & PERF_SAMPLE_DATA_SRC;
-
-	fll = fl & PERF_X86_EVENT_PEBS_LDLAT;
-	fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);
+	fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT;
 
 	perf_sample_data_init(data, 0, event->hw.last_period);
 
@@ -1185,16 +1197,8 @@ static void setup_pebs_sample_data(struct perf_event *event,
 	/*
 	 * data.data_src encodes the data source
 	 */
-	if (dsrc) {
-		u64 val = PERF_MEM_NA;
-		if (fll)
-			val = load_latency_data(pebs->dse);
-		else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
-			val = precise_datala_hsw(event, pebs->dse);
-		else if (fst)
-			val = precise_store_data(pebs->dse);
-		data->data_src.val = val;
-	}
+	if (sample_type & PERF_SAMPLE_DATA_SRC)
+		data->data_src.val = get_data_src(event, pebs->dse);
 
 	/*
 	 * We must however always use iregs for the unwinder to stay sane; the
@@ -1281,10 +1285,11 @@ static void setup_pebs_sample_data(struct perf_event *event,
 	if (x86_pmu.intel_cap.pebs_format >= 2) {
 		/* Only set the TSX weight when no memory weight. */
 		if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll)
-			data->weight = intel_hsw_weight(pebs);
+			data->weight = intel_hsw_weight(pebs->tsx_tuning);
 
 		if (sample_type & PERF_SAMPLE_TRANSACTION)
-			data->txn = intel_hsw_transaction(pebs);
+			data->txn = intel_hsw_transaction(pebs->tsx_tuning,
+							  pebs->ax);
 	}
 
 	/*
-- 
2.17.1