From: Adrian Hunter <adrian.hunter@intel.com>
Subject: [PATCH V4 20/24] perf tools: Output sample flags and insn_len from intel_bts
Date: 2015-04-30

intel_bts synthesizes samples.  Fill in the new flags and
insn_len members with instruction information.
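
A minimal sketch, added for illustration and not part of the patch: how the
new per-sample flags are meant to interact with the branches_filter that this
patch derives from the itrace 'calls'/'returns' options.  As in
intel_bts_process_buffer(), a synthesized branch sample is dropped when a
non-zero filter does not intersect its flags.  The PERF_IP_FLAG_* values below
are placeholders, not perf's real definitions.

#include <stdio.h>

#define PERF_IP_FLAG_BRANCH    (1u << 0)	/* placeholder value */
#define PERF_IP_FLAG_CALL      (1u << 1)	/* placeholder value */
#define PERF_IP_FLAG_RETURN    (1u << 2)	/* placeholder value */
#define PERF_IP_FLAG_ASYNC     (1u << 3)	/* placeholder value */
#define PERF_IP_FLAG_TRACE_END (1u << 4)	/* placeholder value */

/* Same test as intel_bts_process_buffer(): a zero filter keeps everything. */
static int sample_kept(unsigned int filter, unsigned int flags)
{
	return !filter || (filter & flags);
}

int main(void)
{
	/* filter as set up for the itrace 'calls' option in this patch */
	unsigned int filter = PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
			      PERF_IP_FLAG_TRACE_END;

	unsigned int call_flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL;
	unsigned int ret_flags  = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN;

	printf("call sample kept:   %d\n", sample_kept(filter, call_flags));
	printf("return sample kept: %d\n", sample_kept(filter, ret_flags));
	return 0;
}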

    Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
    ---
    tools/perf/util/intel-bts.c | 127 ++++++++++++++++++++++++++++++++++++++++++--
    1 file changed, 123 insertions(+), 4 deletions(-)
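
Another illustrative sketch, not part of the patch: the heuristic
intel_bts_get_branch_type() uses to spot asynchronous branches into the
kernel.  A branch whose source is in user space and whose destination is in
kernel space, and whose decoded type is not a syscall-style call, is
re-flagged as an async call/interrupt.  is_kernel_ip() and the flag values
are stand-ins for illustration; the real code uses machine__kernel_ip() and
perf's PERF_IP_FLAG_* definitions.

#include <stdio.h>
#include <stdint.h>

#define PERF_IP_FLAG_BRANCH     (1u << 0)	/* placeholder value */
#define PERF_IP_FLAG_CALL       (1u << 1)	/* placeholder value */
#define PERF_IP_FLAG_SYSCALLRET (1u << 2)	/* placeholder value */
#define PERF_IP_FLAG_ASYNC      (1u << 3)	/* placeholder value */
#define PERF_IP_FLAG_INTERRUPT  (1u << 4)	/* placeholder value */

/* Hypothetical stand-in for machine__kernel_ip() on x86_64. */
static int is_kernel_ip(uint64_t ip)
{
	return ip >= 0xffff800000000000ULL;
}

static unsigned int fixup_flags(uint64_t from, uint64_t to, unsigned int flags)
{
	/* Same condition as the patch: user -> kernel, not a syscall */
	if (!is_kernel_ip(from) && is_kernel_ip(to) &&
	    flags != (PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
		      PERF_IP_FLAG_SYSCALLRET))
		flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
			PERF_IP_FLAG_ASYNC | PERF_IP_FLAG_INTERRUPT;
	return flags;
}

int main(void)
{
	/* e.g. a user-space branch that was interrupted into the kernel */
	unsigned int flags = fixup_flags(0x400123, 0xffffffff81000010ULL,
					 PERF_IP_FLAG_BRANCH);

	printf("async call into kernel: %d\n",
	       !!(flags & PERF_IP_FLAG_ASYNC));	/* prints 1 */
	return 0;
}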

    diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c
    index e664f54..fe777c1 100644
    --- a/tools/perf/util/intel-bts.c
    +++ b/tools/perf/util/intel-bts.c
    @@ -30,6 +30,7 @@
    #include "debug.h"
    #include "tsc.h"
    #include "auxtrace.h"
    +#include "intel-pt-decoder/intel-pt-insn-decoder.h"
    #include "intel-bts.h"

    #define MAX_TIMESTAMP (~0ULL)
    @@ -55,6 +56,7 @@ struct intel_bts {
    bool cap_user_time_zero;
    struct itrace_synth_opts synth_opts;
    bool sample_branches;
    + u32 branches_filter;
    u64 branches_sample_type;
    u64 branches_id;
    size_t branches_event_size;
    @@ -71,6 +73,8 @@ struct intel_bts_queue {
    pid_t tid;
    int cpu;
    u64 time;
    + struct intel_pt_insn intel_pt_insn;
    + u32 sample_flags;
    };

    struct branch {
    @@ -278,6 +282,8 @@ static int intel_bts_synth_branch_sample(struct intel_bts_queue *btsq,
    sample.stream_id = btsq->bts->branches_id;
    sample.period = 1;
    sample.cpu = btsq->cpu;
    + sample.flags = btsq->sample_flags;
    + sample.insn_len = btsq->intel_pt_insn.length;

    if (bts->synth_opts.inject) {
    event.sample.header.size = bts->branches_event_size;
    @@ -297,11 +303,116 @@ static int intel_bts_synth_branch_sample(struct intel_bts_queue *btsq,
    return ret;
    }

    +static int intel_bts_get_next_insn(struct intel_bts_queue *btsq, u64 ip)
    +{
    + struct machine *machine = btsq->bts->machine;
    + struct thread *thread;
    + struct addr_location al;
    + unsigned char buf[1024];
    + size_t bufsz;
    + ssize_t len;
    + int x86_64;
    + uint8_t cpumode;
    +
    + bufsz = intel_pt_insn_max_size();
    +
    + if (machine__kernel_ip(machine, ip))
    + cpumode = PERF_RECORD_MISC_KERNEL;
    + else
    + cpumode = PERF_RECORD_MISC_USER;
    +
    + thread = machine__find_thread(machine, -1, btsq->tid);
    + if (!thread)
    + return -1;
    +
    + thread__find_addr_map(thread, cpumode, MAP__FUNCTION, ip, &al);
    + if (!al.map || !al.map->dso)
    + return -1;
    +
    + len = dso__data_read_addr(al.map->dso, al.map, machine, ip, buf, bufsz);
    + if (len <= 0)
    + return -1;
    +
    + /* Load maps to ensure dso->is_64_bit has been updated */
    + map__load(al.map, machine->symbol_filter);
    +
    + x86_64 = al.map->dso->is_64_bit;
    +
    + if (intel_pt_get_insn(buf, len, x86_64, &btsq->intel_pt_insn))
    + return -1;
    +
    + return 0;
    +}
    +
    +static int intel_bts_synth_error(struct intel_bts *bts, int cpu, pid_t pid,
    + pid_t tid, u64 ip)
    +{
    + union perf_event event;
    + const char *msg = "Failed to get instruction";
    + int code = EILSEQ;
    + int err;
    +
    + auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
    + code, cpu, pid, tid, ip, msg);
    +
    + err = perf_session__deliver_synth_event(bts->session, &event, NULL);
    + if (err)
    + pr_err("Intel BTS: failed to deliver error event, error %d\n",
    + err);
    +
    + return err;
    +}
    +
    +static int intel_bts_get_branch_type(struct intel_bts_queue *btsq,
    + struct branch *branch)
    +{
    + int err;
    +
    + if (!branch->from) {
    + if (branch->to)
    + btsq->sample_flags = PERF_IP_FLAG_BRANCH |
    + PERF_IP_FLAG_TRACE_BEGIN;
    + else
    + btsq->sample_flags = 0;
    + btsq->intel_pt_insn.length = 0;
    + } else if (!branch->to) {
    + btsq->sample_flags = PERF_IP_FLAG_BRANCH |
    + PERF_IP_FLAG_TRACE_END;
    + btsq->intel_pt_insn.length = 0;
    + } else {
    + err = intel_bts_get_next_insn(btsq, branch->from);
    + if (err) {
    + btsq->sample_flags = 0;
    + btsq->intel_pt_insn.length = 0;
    + if (!btsq->bts->synth_opts.errors)
    + return 0;
    + err = intel_bts_synth_error(btsq->bts, btsq->cpu,
    + btsq->pid, btsq->tid,
    + branch->from);
    + return err;
    + }
    + btsq->sample_flags = intel_pt_insn_type(btsq->intel_pt_insn.op);
    + /* Check for an async branch into the kernel */
    + if (!machine__kernel_ip(btsq->bts->machine, branch->from) &&
    + machine__kernel_ip(btsq->bts->machine, branch->to) &&
    + btsq->sample_flags != (PERF_IP_FLAG_BRANCH |
    + PERF_IP_FLAG_CALL |
    + PERF_IP_FLAG_SYSCALLRET))
    + btsq->sample_flags = PERF_IP_FLAG_BRANCH |
    + PERF_IP_FLAG_CALL |
    + PERF_IP_FLAG_ASYNC |
    + PERF_IP_FLAG_INTERRUPT;
    + }
    +
    + return 0;
    +}
    +
    static int intel_bts_process_buffer(struct intel_bts_queue *btsq,
    struct auxtrace_buffer *buffer)
    {
    struct branch *branch;
    - size_t sz;
    + size_t sz, bsz = sizeof(struct branch);
    + u32 filter = btsq->bts->branches_filter;
    int err = 0;

    if (buffer->use_data) {
    @@ -315,14 +426,15 @@ static int intel_bts_process_buffer(struct intel_bts_queue *btsq,
    if (!btsq->bts->sample_branches)
    return 0;

    - while (sz > sizeof(struct branch)) {
    + for (; sz > bsz; branch += 1, sz -= bsz) {
    if (!branch->from && !branch->to)
    continue;
    + intel_bts_get_branch_type(btsq, branch);
    + if (filter && !(filter & btsq->sample_flags))
    + continue;
    err = intel_bts_synth_branch_sample(btsq, branch);
    if (err)
    break;
    - branch += 1;
    - sz -= sizeof(struct branch);
    }
    return err;
    }
    @@ -768,6 +880,13 @@ int intel_bts_process_auxtrace_info(union perf_event *event,
    else
    itrace_synth_opts__set_default(&bts->synth_opts);

    + if (bts->synth_opts.calls)
    + bts->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
    + PERF_IP_FLAG_TRACE_END;
    + if (bts->synth_opts.returns)
    + bts->branches_filter |= PERF_IP_FLAG_RETURN |
    + PERF_IP_FLAG_TRACE_BEGIN;
    +
    err = intel_bts_synth_events(bts, session);
    if (err)
    goto err_free_queues;
    --
    1.9.1

