Diffstat (limited to 'tools/perf/util')
28 files changed, 391 insertions(+), 174 deletions(-)
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c index 425df5c86c9c..425597186677 100644 --- a/tools/perf/util/dso.c +++ b/tools/perf/util/dso.c @@ -249,6 +249,8 @@ int __kmod_path__parse(struct kmod_path *m, const char *path, if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) || (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) || (strncmp(name, "[vdso]", 6) == 0) || + (strncmp(name, "[vdso32]", 8) == 0) || + (strncmp(name, "[vdsox32]", 9) == 0) || (strncmp(name, "[vsyscall]", 10) == 0)) { m->kmod = false; diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 956187bf1a85..46af9dde11e2 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -234,8 +234,8 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool, if (machine__is_default_guest(machine)) return 0; - snprintf(filename, sizeof(filename), "%s/proc/%d/maps", - machine->root_dir, pid); + snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps", + machine->root_dir, pid, pid); fp = fopen(filename, "r"); if (fp == NULL) { @@ -416,7 +416,7 @@ static int __event__synthesize_thread(union perf_event *comm_event, { char filename[PATH_MAX]; DIR *tasks; - struct dirent dirent, *next; + struct dirent *dirent; pid_t tgid, ppid; int rc = 0; @@ -445,11 +445,11 @@ static int __event__synthesize_thread(union perf_event *comm_event, return 0; } - while (!readdir_r(tasks, &dirent, &next) && next) { + while ((dirent = readdir(tasks)) != NULL) { char *end; pid_t _pid; - _pid = strtol(dirent.d_name, &end, 10); + _pid = strtol(dirent->d_name, &end, 10); if (*end) continue; @@ -558,7 +558,7 @@ int perf_event__synthesize_threads(struct perf_tool *tool, { DIR *proc; char proc_path[PATH_MAX]; - struct dirent dirent, *next; + struct dirent *dirent; union perf_event *comm_event, *mmap_event, *fork_event; int err = -1; @@ -583,9 +583,9 @@ int perf_event__synthesize_threads(struct perf_tool *tool, if (proc == NULL) goto out_free_fork; - while (!readdir_r(proc, &dirent, &next) && next) { + while ((dirent = readdir(proc)) != NULL) { char *end; - pid_t pid = strtol(dirent.d_name, &end, 10); + pid_t pid = strtol(dirent->d_name, &end, 10); if (*end) /* only interested in proper numerical dirents */ continue; diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 397fb4ed3c97..0ec76a4af77a 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -624,13 +624,13 @@ static void apply_config_terms(struct perf_evsel *evsel, struct perf_evsel_config_term *term; struct list_head *config_terms = &evsel->config_terms; struct perf_event_attr *attr = &evsel->attr; - struct callchain_param param; + /* callgraph default */ + struct callchain_param param = { + .record_mode = callchain_param.record_mode, + }; u32 dump_size = 0; char *callgraph_buf = NULL; - /* callgraph default */ - param.record_mode = callchain_param.record_mode; - list_for_each_entry(term, config_terms, list) { switch (term->type) { case PERF_EVSEL__CONFIG_TERM_PERIOD: @@ -2313,12 +2313,15 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target, case EPERM: case EACCES: return scnprintf(msg, size, - "You may not have permission to collect %sstats.\n" - "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n" - " -1 - Not paranoid at all\n" - " 0 - Disallow raw tracepoint access for unpriv\n" - " 1 - Disallow cpu events for unpriv\n" - " 2 - Disallow kernel profiling for unpriv", + "You may not have permission to collect %sstats.\n\n" + "Consider tweaking /proc/sys/kernel/perf_event_paranoid,\n" + "which 
controls use of the performance events system by\n" + "unprivileged users (without CAP_SYS_ADMIN).\n\n" + "The default value is 1:\n\n" + " -1: Allow use of (almost) all events by all users\n" + ">= 0: Disallow raw tracepoint access by users without CAP_IOC_LOCK\n" + ">= 1: Disallow CPU event access by users without CAP_SYS_ADMIN\n" + ">= 2: Disallow kernel profiling by users without CAP_SYS_ADMIN", target->system_wide ? "system-wide " : ""); case ENOENT: return scnprintf(msg, size, "The %s event is not supported.", diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 43838003c1a1..304f5d710143 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -1258,8 +1258,16 @@ static int __event_process_build_id(struct build_id_event *bev, dso__set_build_id(dso, &bev->build_id); - if (!is_kernel_module(filename, cpumode)) - dso->kernel = dso_type; + if (dso_type != DSO_TYPE_USER) { + struct kmod_path m = { .name = NULL, }; + + if (!kmod_path__parse_name(&m, filename) && m.kmod) + dso__set_short_name(dso, strdup(m.name), true); + else + dso->kernel = dso_type; + + free(m.name); + } build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id); diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 4fd37d6708cb..f6720afa9f34 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -720,7 +720,7 @@ iter_prepare_cumulative_entry(struct hist_entry_iter *iter, * cumulated only one time to prevent entries more than 100% * overhead. */ - he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1)); + he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1)); if (he_cache == NULL) return -ENOMEM; @@ -881,8 +881,6 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al, if (err) return err; - iter->max_stack = max_stack_depth; - err = iter->ops->prepare_entry(iter, al); if (err) goto out; diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index a48a2078d288..46b7591acd9c 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -91,7 +91,6 @@ struct hist_entry_iter { int curr; bool hide_unresolved; - int max_stack; struct perf_evsel *evsel; struct perf_sample *sample; diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c index 9409d014b46c..dc17c881275d 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c @@ -22,6 +22,7 @@ #include <errno.h> #include <stdint.h> #include <inttypes.h> +#include <linux/compiler.h> #include "../cache.h" #include "../util.h" @@ -63,6 +64,25 @@ enum intel_pt_pkt_state { INTEL_PT_STATE_FUP_NO_TIP, }; +static inline bool intel_pt_sample_time(enum intel_pt_pkt_state pkt_state) +{ + switch (pkt_state) { + case INTEL_PT_STATE_NO_PSB: + case INTEL_PT_STATE_NO_IP: + case INTEL_PT_STATE_ERR_RESYNC: + case INTEL_PT_STATE_IN_SYNC: + case INTEL_PT_STATE_TNT: + return true; + case INTEL_PT_STATE_TIP: + case INTEL_PT_STATE_TIP_PGD: + case INTEL_PT_STATE_FUP: + case INTEL_PT_STATE_FUP_NO_TIP: + return false; + default: + return true; + }; +} + #ifdef INTEL_PT_STRICT #define INTEL_PT_STATE_ERR1 INTEL_PT_STATE_NO_PSB #define INTEL_PT_STATE_ERR2 INTEL_PT_STATE_NO_PSB @@ -89,6 +109,9 @@ struct intel_pt_decoder { bool pge; bool have_tma; bool have_cyc; + bool fixup_last_mtc; + bool have_last_ip; + enum intel_pt_param_flags flags; uint64_t pos; uint64_t last_ip; uint64_t ip; @@ -96,6 +119,7 @@ struct intel_pt_decoder { uint64_t timestamp; uint64_t 
tsc_timestamp; uint64_t ref_timestamp; + uint64_t sample_timestamp; uint64_t ret_addr; uint64_t ctc_timestamp; uint64_t ctc_delta; @@ -123,8 +147,6 @@ struct intel_pt_decoder { bool have_calc_cyc_to_tsc; int exec_mode; unsigned int insn_bytes; - uint64_t sign_bit; - uint64_t sign_bits; uint64_t period; enum intel_pt_period_type period_type; uint64_t tot_insn_cnt; @@ -138,6 +160,7 @@ struct intel_pt_decoder { unsigned int fup_tx_flags; unsigned int tx_flags; uint64_t timestamp_insn_cnt; + uint64_t sample_insn_cnt; uint64_t stuck_ip; int no_progress; int stuck_ip_prd; @@ -191,8 +214,7 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params) decoder->data = params->data; decoder->return_compression = params->return_compression; - decoder->sign_bit = (uint64_t)1 << 47; - decoder->sign_bits = ~(((uint64_t)1 << 48) - 1); + decoder->flags = params->flags; decoder->period = params->period; decoder->period_type = params->period_type; @@ -362,21 +384,30 @@ int intel_pt__strerror(int code, char *buf, size_t buflen) return 0; } -static uint64_t intel_pt_calc_ip(struct intel_pt_decoder *decoder, - const struct intel_pt_pkt *packet, +static uint64_t intel_pt_calc_ip(const struct intel_pt_pkt *packet, uint64_t last_ip) { uint64_t ip; switch (packet->count) { - case 2: + case 1: ip = (last_ip & (uint64_t)0xffffffffffff0000ULL) | packet->payload; break; - case 4: + case 2: ip = (last_ip & (uint64_t)0xffffffff00000000ULL) | packet->payload; break; + case 3: + ip = packet->payload; + /* Sign-extend 6-byte ip */ + if (ip & (uint64_t)0x800000000000ULL) + ip |= (uint64_t)0xffff000000000000ULL; + break; + case 4: + ip = (last_ip & (uint64_t)0xffff000000000000ULL) | + packet->payload; + break; case 6: ip = packet->payload; break; @@ -384,16 +415,13 @@ static uint64_t intel_pt_calc_ip(struct intel_pt_decoder *decoder, return 0; } - if (ip & decoder->sign_bit) - return ip | decoder->sign_bits; - return ip; } static inline void intel_pt_set_last_ip(struct intel_pt_decoder *decoder) { - decoder->last_ip = intel_pt_calc_ip(decoder, &decoder->packet, - decoder->last_ip); + decoder->last_ip = intel_pt_calc_ip(&decoder->packet, decoder->last_ip); + decoder->have_last_ip = true; } static inline void intel_pt_set_ip(struct intel_pt_decoder *decoder) @@ -584,10 +612,31 @@ struct intel_pt_calc_cyc_to_tsc_info { uint64_t tsc_timestamp; uint64_t timestamp; bool have_tma; + bool fixup_last_mtc; bool from_mtc; double cbr_cyc_to_tsc; }; +/* + * MTC provides a 8-bit slice of CTC but the TMA packet only provides the lower + * 16 bits of CTC. If mtc_shift > 8 then some of the MTC bits are not in the CTC + * provided by the TMA packet. Fix-up the last_mtc calculated from the TMA + * packet by copying the missing bits from the current MTC assuming the least + * difference between the two, and that the current MTC comes after last_mtc. 
+ */ +static void intel_pt_fixup_last_mtc(uint32_t mtc, int mtc_shift, + uint32_t *last_mtc) +{ + uint32_t first_missing_bit = 1U << (16 - mtc_shift); + uint32_t mask = ~(first_missing_bit - 1); + + *last_mtc |= mtc & mask; + if (*last_mtc >= mtc) { + *last_mtc -= first_missing_bit; + *last_mtc &= 0xff; + } +} + static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info) { struct intel_pt_decoder *decoder = pkt_info->decoder; @@ -617,6 +666,11 @@ static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info) return 0; mtc = pkt_info->packet.payload; + if (decoder->mtc_shift > 8 && data->fixup_last_mtc) { + data->fixup_last_mtc = false; + intel_pt_fixup_last_mtc(mtc, decoder->mtc_shift, + &data->last_mtc); + } if (mtc > data->last_mtc) mtc_delta = mtc - data->last_mtc; else @@ -685,6 +739,7 @@ static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info) data->ctc_delta = 0; data->have_tma = true; + data->fixup_last_mtc = true; return 0; @@ -751,6 +806,7 @@ static void intel_pt_calc_cyc_to_tsc(struct intel_pt_decoder *decoder, .tsc_timestamp = decoder->tsc_timestamp, .timestamp = decoder->timestamp, .have_tma = decoder->have_tma, + .fixup_last_mtc = decoder->fixup_last_mtc, .from_mtc = from_mtc, .cbr_cyc_to_tsc = 0, }; @@ -866,6 +922,7 @@ static int intel_pt_walk_insn(struct intel_pt_decoder *decoder, decoder->tot_insn_cnt += insn_cnt; decoder->timestamp_insn_cnt += insn_cnt; + decoder->sample_insn_cnt += insn_cnt; decoder->period_insn_cnt += insn_cnt; if (err) { @@ -956,6 +1013,15 @@ out_no_progress: return err; } +static inline bool intel_pt_fup_with_nlip(struct intel_pt_decoder *decoder, + struct intel_pt_insn *intel_pt_insn, + uint64_t ip, int err) +{ + return decoder->flags & INTEL_PT_FUP_WITH_NLIP && !err && + intel_pt_insn->branch == INTEL_PT_BR_INDIRECT && + ip == decoder->ip + intel_pt_insn->length; +} + static int intel_pt_walk_fup(struct intel_pt_decoder *decoder) { struct intel_pt_insn intel_pt_insn; @@ -968,7 +1034,8 @@ static int intel_pt_walk_fup(struct intel_pt_decoder *decoder) err = intel_pt_walk_insn(decoder, &intel_pt_insn, ip); if (err == INTEL_PT_RETURN) return 0; - if (err == -EAGAIN) { + if (err == -EAGAIN || + intel_pt_fup_with_nlip(decoder, &intel_pt_insn, ip, err)) { if (decoder->set_fup_tx_flags) { decoder->set_fup_tx_flags = false; decoder->tx_flags = decoder->fup_tx_flags; @@ -978,7 +1045,7 @@ static int intel_pt_walk_fup(struct intel_pt_decoder *decoder) decoder->state.flags = decoder->fup_tx_flags; return 0; } - return err; + return -EAGAIN; } decoder->set_fup_tx_flags = false; if (err) @@ -1214,8 +1281,8 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder) { intel_pt_log("ERROR: Buffer overflow\n"); intel_pt_clear_tx_flags(decoder); - decoder->have_tma = false; decoder->cbr = 0; + decoder->timestamp_insn_cnt = 0; decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC; decoder->overflow = true; return -EOVERFLOW; @@ -1241,6 +1308,7 @@ static void intel_pt_calc_tma(struct intel_pt_decoder *decoder) } decoder->ctc_delta = 0; decoder->have_tma = true; + decoder->fixup_last_mtc = true; intel_pt_log("CTC timestamp " x64_fmt " last MTC %#x CTC rem %#x\n", decoder->ctc_timestamp, decoder->last_mtc, ctc_rem); } @@ -1255,6 +1323,12 @@ static void intel_pt_calc_mtc_timestamp(struct intel_pt_decoder *decoder) mtc = decoder->packet.payload; + if (decoder->mtc_shift > 8 && decoder->fixup_last_mtc) { + decoder->fixup_last_mtc = false; + intel_pt_fixup_last_mtc(mtc, decoder->mtc_shift, + &decoder->last_mtc); + } + if (mtc > decoder->last_mtc) 
mtc_delta = mtc - decoder->last_mtc; else @@ -1323,6 +1397,8 @@ static void intel_pt_calc_cyc_timestamp(struct intel_pt_decoder *decoder) timestamp, decoder->timestamp); else decoder->timestamp = timestamp; + + decoder->timestamp_insn_cnt = 0; } /* Walk PSB+ packets when already in sync. */ @@ -1375,7 +1451,8 @@ static int intel_pt_walk_psbend(struct intel_pt_decoder *decoder) case INTEL_PT_FUP: decoder->pge = true; - intel_pt_set_last_ip(decoder); + if (decoder->packet.count) + intel_pt_set_last_ip(decoder); break; case INTEL_PT_MODE_TSX: @@ -1422,14 +1499,18 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder) case INTEL_PT_PSB: case INTEL_PT_TSC: case INTEL_PT_TMA: - case INTEL_PT_CBR: case INTEL_PT_MODE_TSX: case INTEL_PT_BAD: case INTEL_PT_PSBEND: intel_pt_log("ERROR: Missing TIP after FUP\n"); decoder->pkt_state = INTEL_PT_STATE_ERR3; + decoder->pkt_step = 0; return -ENOENT; + case INTEL_PT_CBR: + intel_pt_calc_cbr(decoder); + break; + case INTEL_PT_OVF: return intel_pt_overflow(decoder); @@ -1579,6 +1660,8 @@ next: break; case INTEL_PT_PSB: + decoder->last_ip = 0; + decoder->have_last_ip = true; intel_pt_clear_stack(&decoder->stack); err = intel_pt_walk_psbend(decoder); if (err == -EAGAIN) @@ -1657,6 +1740,13 @@ next: } } +static inline bool intel_pt_have_ip(struct intel_pt_decoder *decoder) +{ + return decoder->packet.count && + (decoder->have_last_ip || decoder->packet.count == 3 || + decoder->packet.count == 6); +} + /* Walk PSB+ packets to get in sync. */ static int intel_pt_walk_psb(struct intel_pt_decoder *decoder) { @@ -1670,6 +1760,7 @@ static int intel_pt_walk_psb(struct intel_pt_decoder *decoder) switch (decoder->packet.type) { case INTEL_PT_TIP_PGD: decoder->continuous_period = false; + __fallthrough; case INTEL_PT_TIP_PGE: case INTEL_PT_TIP: intel_pt_log("ERROR: Unexpected packet\n"); @@ -1677,8 +1768,7 @@ static int intel_pt_walk_psb(struct intel_pt_decoder *decoder) case INTEL_PT_FUP: decoder->pge = true; - if (decoder->last_ip || decoder->packet.count == 6 || - decoder->packet.count == 0) { + if (intel_pt_have_ip(decoder)) { uint64_t current_ip = decoder->ip; intel_pt_set_ip(decoder); @@ -1724,6 +1814,8 @@ static int intel_pt_walk_psb(struct intel_pt_decoder *decoder) decoder->pge = false; decoder->continuous_period = false; intel_pt_clear_tx_flags(decoder); + __fallthrough; + case INTEL_PT_TNT: decoder->have_tma = false; intel_pt_log("ERROR: Unexpected packet\n"); @@ -1764,27 +1856,21 @@ static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder) switch (decoder->packet.type) { case INTEL_PT_TIP_PGD: decoder->continuous_period = false; + __fallthrough; case INTEL_PT_TIP_PGE: case INTEL_PT_TIP: decoder->pge = decoder->packet.type != INTEL_PT_TIP_PGD; - if (decoder->last_ip || decoder->packet.count == 6 || - decoder->packet.count == 0) + if (intel_pt_have_ip(decoder)) intel_pt_set_ip(decoder); if (decoder->ip) return 0; break; case INTEL_PT_FUP: - if (decoder->overflow) { - if (decoder->last_ip || - decoder->packet.count == 6 || - decoder->packet.count == 0) - intel_pt_set_ip(decoder); - if (decoder->ip) - return 0; - } - if (decoder->packet.count) - intel_pt_set_last_ip(decoder); + if (intel_pt_have_ip(decoder)) + intel_pt_set_ip(decoder); + if (decoder->ip) + return 0; break; case INTEL_PT_MTC: @@ -1833,6 +1919,9 @@ static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder) break; case INTEL_PT_PSB: + decoder->last_ip = 0; + decoder->have_last_ip = true; + intel_pt_clear_stack(&decoder->stack); err = intel_pt_walk_psb(decoder); if (err) 
return err; @@ -1858,6 +1947,8 @@ static int intel_pt_sync_ip(struct intel_pt_decoder *decoder) { int err; + decoder->set_fup_tx_flags = false; + intel_pt_log("Scanning for full IP\n"); err = intel_pt_walk_to_ip(decoder); if (err) @@ -1966,6 +2057,7 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder) decoder->pge = false; decoder->continuous_period = false; + decoder->have_last_ip = false; decoder->last_ip = 0; decoder->ip = 0; intel_pt_clear_stack(&decoder->stack); @@ -1974,6 +2066,7 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder) if (err) return err; + decoder->have_last_ip = true; decoder->pkt_state = INTEL_PT_STATE_NO_IP; err = intel_pt_walk_psb(decoder); @@ -1992,7 +2085,7 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder) static uint64_t intel_pt_est_timestamp(struct intel_pt_decoder *decoder) { - uint64_t est = decoder->timestamp_insn_cnt << 1; + uint64_t est = decoder->sample_insn_cnt << 1; if (!decoder->cbr || !decoder->max_non_turbo_ratio) goto out; @@ -2000,7 +2093,7 @@ static uint64_t intel_pt_est_timestamp(struct intel_pt_decoder *decoder) est *= decoder->max_non_turbo_ratio; est /= decoder->cbr; out: - return decoder->timestamp + est; + return decoder->sample_timestamp + est; } const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder) @@ -2016,7 +2109,9 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder) err = intel_pt_sync(decoder); break; case INTEL_PT_STATE_NO_IP: + decoder->have_last_ip = false; decoder->last_ip = 0; + decoder->ip = 0; /* Fall through */ case INTEL_PT_STATE_ERR_RESYNC: err = intel_pt_sync_ip(decoder); @@ -2053,26 +2148,27 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder) } } while (err == -ENOLINK); - decoder->state.err = err ? intel_pt_ext_err(err) : 0; - decoder->state.timestamp = decoder->timestamp; + if (err) { + decoder->state.err = intel_pt_ext_err(err); + decoder->state.from_ip = decoder->ip; + decoder->sample_timestamp = decoder->timestamp; + decoder->sample_insn_cnt = decoder->timestamp_insn_cnt; + } else { + decoder->state.err = 0; + if (intel_pt_sample_time(decoder->pkt_state)) { + decoder->sample_timestamp = decoder->timestamp; + decoder->sample_insn_cnt = decoder->timestamp_insn_cnt; + } + } + + decoder->state.timestamp = decoder->sample_timestamp; decoder->state.est_timestamp = intel_pt_est_timestamp(decoder); decoder->state.cr3 = decoder->cr3; decoder->state.tot_insn_cnt = decoder->tot_insn_cnt; - if (err) - decoder->state.from_ip = decoder->ip; - return &decoder->state; } -static bool intel_pt_at_psb(unsigned char *buf, size_t len) -{ - if (len < INTEL_PT_PSB_LEN) - return false; - return memmem(buf, INTEL_PT_PSB_LEN, INTEL_PT_PSB_STR, - INTEL_PT_PSB_LEN); -} - /** * intel_pt_next_psb - move buffer pointer to the start of the next PSB packet. * @buf: pointer to buffer pointer @@ -2161,6 +2257,7 @@ static unsigned char *intel_pt_last_psb(unsigned char *buf, size_t len) * @buf: buffer * @len: size of buffer * @tsc: TSC value returned + * @rem: returns remaining size when TSC is found * * Find a TSC packet in @buf and return the TSC value. This function assumes * that @buf starts at a PSB and that PSB+ will contain TSC and so stops if a @@ -2168,7 +2265,8 @@ static unsigned char *intel_pt_last_psb(unsigned char *buf, size_t len) * * Return: %true if TSC is found, false otherwise. 
*/ -static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc) +static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc, + size_t *rem) { struct intel_pt_pkt packet; int ret; @@ -2179,6 +2277,7 @@ static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc) return false; if (packet.type == INTEL_PT_TSC) { *tsc = packet.payload; + *rem = len; return true; } if (packet.type == INTEL_PT_PSBEND) @@ -2229,6 +2328,8 @@ static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2) * @len_a: size of first buffer * @buf_b: second buffer * @len_b: size of second buffer + * @consecutive: returns true if there is data in buf_b that is consecutive + * to buf_a * * If the trace contains TSC we can look at the last TSC of @buf_a and the * first TSC of @buf_b in order to determine if the buffers overlap, and then @@ -2241,33 +2342,41 @@ static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2) static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a, size_t len_a, unsigned char *buf_b, - size_t len_b) + size_t len_b, bool *consecutive) { uint64_t tsc_a, tsc_b; unsigned char *p; - size_t len; + size_t len, rem_a, rem_b; p = intel_pt_last_psb(buf_a, len_a); if (!p) return buf_b; /* No PSB in buf_a => no overlap */ len = len_a - (p - buf_a); - if (!intel_pt_next_tsc(p, len, &tsc_a)) { + if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a)) { /* The last PSB+ in buf_a is incomplete, so go back one more */ len_a -= len; p = intel_pt_last_psb(buf_a, len_a); if (!p) return buf_b; /* No full PSB+ => assume no overlap */ len = len_a - (p - buf_a); - if (!intel_pt_next_tsc(p, len, &tsc_a)) + if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a)) return buf_b; /* No TSC in buf_a => assume no overlap */ } while (1) { /* Ignore PSB+ with no TSC */ - if (intel_pt_next_tsc(buf_b, len_b, &tsc_b) && - intel_pt_tsc_cmp(tsc_a, tsc_b) < 0) - return buf_b; /* tsc_a < tsc_b => no overlap */ + if (intel_pt_next_tsc(buf_b, len_b, &tsc_b, &rem_b)) { + int cmp = intel_pt_tsc_cmp(tsc_a, tsc_b); + + /* Same TSC, so buffers are consecutive */ + if (!cmp && rem_b >= rem_a) { + *consecutive = true; + return buf_b + len_b - (rem_b - rem_a); + } + if (cmp < 0) + return buf_b; /* tsc_a < tsc_b => no overlap */ + } if (!intel_pt_step_psb(&buf_b, &len_b)) return buf_b + len_b; /* No PSB in buf_b => no data */ @@ -2281,6 +2390,8 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a, * @buf_b: second buffer * @len_b: size of second buffer * @have_tsc: can use TSC packets to detect overlap + * @consecutive: returns true if there is data in buf_b that is consecutive + * to buf_a * * When trace samples or snapshots are recorded there is the possibility that * the data overlaps. 
Note that, for the purposes of decoding, data is only @@ -2291,7 +2402,7 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a, */ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a, unsigned char *buf_b, size_t len_b, - bool have_tsc) + bool have_tsc, bool *consecutive) { unsigned char *found; @@ -2303,7 +2414,8 @@ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a, return buf_b; /* No overlap */ if (have_tsc) { - found = intel_pt_find_overlap_tsc(buf_a, len_a, buf_b, len_b); + found = intel_pt_find_overlap_tsc(buf_a, len_a, buf_b, len_b, + consecutive); if (found) return found; } @@ -2318,28 +2430,16 @@ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a, } /* Now len_b >= len_a */ - if (len_b > len_a) { - /* The leftover buffer 'b' must start at a PSB */ - while (!intel_pt_at_psb(buf_b + len_a, len_b - len_a)) { - if (!intel_pt_step_psb(&buf_a, &len_a)) - return buf_b; /* No overlap */ - } - } - while (1) { /* Potential overlap so check the bytes */ found = memmem(buf_a, len_a, buf_b, len_a); - if (found) + if (found) { + *consecutive = true; return buf_b + len_a; + } /* Try again at next PSB in buffer 'a' */ if (!intel_pt_step_psb(&buf_a, &len_a)) return buf_b; /* No overlap */ - - /* The leftover buffer 'b' must start at a PSB */ - while (!intel_pt_at_psb(buf_b + len_a, len_b - len_a)) { - if (!intel_pt_step_psb(&buf_a, &len_a)) - return buf_b; /* No overlap */ - } } } diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h index 02c38fec1c37..e420bd3be159 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h @@ -53,6 +53,14 @@ enum { INTEL_PT_ERR_MAX, }; +enum intel_pt_param_flags { + /* + * FUP packet can contain next linear instruction pointer instead of + * current linear instruction pointer. 
+ */ + INTEL_PT_FUP_WITH_NLIP = 1 << 0, +}; + struct intel_pt_state { enum intel_pt_sample_type type; int err; @@ -91,6 +99,7 @@ struct intel_pt_params { unsigned int mtc_period; uint32_t tsc_ctc_ratio_n; uint32_t tsc_ctc_ratio_d; + enum intel_pt_param_flags flags; }; struct intel_pt_decoder; @@ -102,7 +111,7 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder); unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a, unsigned char *buf_b, size_t len_b, - bool have_tsc); + bool have_tsc, bool *consecutive); int intel_pt__strerror(int code, char *buf, size_t buflen); diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c index b1257c816310..e5c6caf913f3 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c @@ -17,6 +17,7 @@ #include <string.h> #include <endian.h> #include <byteswap.h> +#include <linux/compiler.h> #include "intel-pt-pkt-decoder.h" @@ -280,7 +281,7 @@ static int intel_pt_get_cyc(unsigned int byte, const unsigned char *buf, if (len < offs) return INTEL_PT_NEED_MORE_BYTES; byte = buf[offs++]; - payload |= (byte >> 1) << shift; + payload |= ((uint64_t)byte >> 1) << shift; } packet->type = INTEL_PT_CYC; @@ -292,36 +293,46 @@ static int intel_pt_get_ip(enum intel_pt_pkt_type type, unsigned int byte, const unsigned char *buf, size_t len, struct intel_pt_pkt *packet) { - switch (byte >> 5) { + int ip_len; + + packet->count = byte >> 5; + + switch (packet->count) { case 0: - packet->count = 0; + ip_len = 0; break; case 1: if (len < 3) return INTEL_PT_NEED_MORE_BYTES; - packet->count = 2; + ip_len = 2; packet->payload = le16_to_cpu(*(uint16_t *)(buf + 1)); break; case 2: if (len < 5) return INTEL_PT_NEED_MORE_BYTES; - packet->count = 4; + ip_len = 4; packet->payload = le32_to_cpu(*(uint32_t *)(buf + 1)); break; case 3: - case 6: + case 4: if (len < 7) return INTEL_PT_NEED_MORE_BYTES; - packet->count = 6; + ip_len = 6; memcpy_le64(&packet->payload, buf + 1, 6); break; + case 6: + if (len < 9) + return INTEL_PT_NEED_MORE_BYTES; + ip_len = 8; + packet->payload = le64_to_cpu(*(uint64_t *)(buf + 1)); + break; default: return INTEL_PT_BAD_PACKET; } packet->type = type; - return packet->count + 1; + return ip_len + 1; } static int intel_pt_get_mode(const unsigned char *buf, size_t len, @@ -488,6 +499,7 @@ int intel_pt_pkt_desc(const struct intel_pt_pkt *packet, char *buf, case INTEL_PT_FUP: if (!(packet->count)) return snprintf(buf, buf_len, "%s no ip", name); + __fallthrough; case INTEL_PT_CYC: case INTEL_PT_VMCS: case INTEL_PT_MTC: diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c index 9227c2f076c3..c8f2d084a8ce 100644 --- a/tools/perf/util/intel-pt.c +++ b/tools/perf/util/intel-pt.c @@ -125,6 +125,7 @@ struct intel_pt_queue { bool stop; bool step_through_buffers; bool use_buffer_pid_tid; + bool sync_switch; pid_t pid, tid; int cpu; int switch_state; @@ -188,14 +189,17 @@ static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf, static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a, struct auxtrace_buffer *b) { + bool consecutive = false; void *start; start = intel_pt_find_overlap(a->data, a->size, b->data, b->size, - pt->have_tsc); + pt->have_tsc, &consecutive); if (!start) return -EINVAL; b->use_size = b->data + b->size - start; b->use_data = start; + if (b->use_size && consecutive) + b->consecutive = true; return 0; } @@ -238,7 +242,7 
@@ static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data) } queue = &ptq->pt->queues.queue_array[ptq->queue_nr]; - +next: buffer = auxtrace_buffer__next(queue, buffer); if (!buffer) { if (old_buffer) @@ -261,9 +265,6 @@ static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data) intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer)) return -ENOMEM; - if (old_buffer) - auxtrace_buffer__drop_data(old_buffer); - if (buffer->use_data) { b->len = buffer->use_size; b->buf = buffer->use_data; @@ -273,6 +274,16 @@ static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data) } b->ref_timestamp = buffer->reference; + /* + * If in snapshot mode and the buffer has no usable data, get next + * buffer and again check overlap against old_buffer. + */ + if (ptq->pt->snapshot_mode && !b->len) + goto next; + + if (old_buffer) + auxtrace_buffer__drop_data(old_buffer); + if (!old_buffer || ptq->pt->sampling_mode || (ptq->pt->snapshot_mode && !buffer->consecutive)) { b->consecutive = false; @@ -665,6 +676,7 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt, unsigned int queue_nr) { struct intel_pt_params params = { .get_trace = 0, }; + struct perf_env *env = pt->machine->env; struct intel_pt_queue *ptq; ptq = zalloc(sizeof(struct intel_pt_queue)); @@ -742,6 +754,9 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt, } } + if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18)) + params.flags |= INTEL_PT_FUP_WITH_NLIP; + ptq->decoder = intel_pt_decoder_new(¶ms); if (!ptq->decoder) goto out_free; @@ -842,10 +857,12 @@ static int intel_pt_setup_queue(struct intel_pt *pt, if (pt->timeless_decoding || !pt->have_sched_switch) ptq->use_buffer_pid_tid = true; } + + ptq->sync_switch = pt->sync_switch; } if (!ptq->on_heap && - (!pt->sync_switch || + (!ptq->sync_switch || ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) { const struct intel_pt_state *state; int ret; @@ -1228,11 +1245,12 @@ static int intel_pt_sample(struct intel_pt_queue *ptq) if (pt->synth_opts.last_branch) intel_pt_update_last_branch_rb(ptq); - if (!pt->sync_switch) + if (!ptq->sync_switch) return 0; if (intel_pt_is_switch_ip(ptq, state->to_ip)) { switch (ptq->switch_state) { + case INTEL_PT_SS_NOT_TRACING: case INTEL_PT_SS_UNKNOWN: case INTEL_PT_SS_EXPECTING_SWITCH_IP: err = intel_pt_next_tid(pt, ptq); @@ -1309,6 +1327,21 @@ static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip) return switch_ip; } +static void intel_pt_enable_sync_switch(struct intel_pt *pt) +{ + unsigned int i; + + pt->sync_switch = true; + + for (i = 0; i < pt->queues.nr_queues; i++) { + struct auxtrace_queue *queue = &pt->queues.queue_array[i]; + struct intel_pt_queue *ptq = queue->priv; + + if (ptq) + ptq->sync_switch = true; + } +} + static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp) { const struct intel_pt_state *state = ptq->state; @@ -1325,7 +1358,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp) if (pt->switch_ip) { intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n", pt->switch_ip, pt->ptss_ip); - pt->sync_switch = true; + intel_pt_enable_sync_switch(pt); } } } @@ -1341,9 +1374,9 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp) if (state->err) { if (state->err == INTEL_PT_ERR_NODATA) return 1; - if (pt->sync_switch && + if (ptq->sync_switch && state->from_ip >= pt->kernel_start) { - pt->sync_switch = false; + ptq->sync_switch = false; intel_pt_next_tid(pt, ptq); } if 
(pt->synth_opts.errors) { @@ -1369,7 +1402,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp) state->timestamp, state->est_timestamp); ptq->timestamp = state->est_timestamp; /* Use estimated TSC in unknown switch state */ - } else if (pt->sync_switch && + } else if (ptq->sync_switch && ptq->switch_state == INTEL_PT_SS_UNKNOWN && intel_pt_is_switch_ip(ptq, state->to_ip) && ptq->next_tid == -1) { @@ -1516,7 +1549,7 @@ static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid, return 1; ptq = intel_pt_cpu_to_ptq(pt, cpu); - if (!ptq) + if (!ptq || !ptq->sync_switch) return 1; switch (ptq->switch_state) { diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c index b1b9e2385f4b..5e58149c4df2 100644 --- a/tools/perf/util/ordered-events.c +++ b/tools/perf/util/ordered-events.c @@ -79,7 +79,7 @@ static union perf_event *dup_event(struct ordered_events *oe, static void free_dup_event(struct ordered_events *oe, union perf_event *event) { - if (oe->copy_on_queue) { + if (event && oe->copy_on_queue) { oe->cur_alloc_size -= event->header.size; free(event); } @@ -150,6 +150,7 @@ void ordered_events__delete(struct ordered_events *oe, struct ordered_event *eve list_move(&event->list, &oe->cache); oe->nr_events--; free_dup_event(oe, event->event); + event->event = NULL; } int ordered_events__queue(struct ordered_events *oe, union perf_event *event, diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index a35db828bd0d..e81dfb2e239c 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -138,11 +138,11 @@ struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = { #define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE) #define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT) -#define for_each_subsystem(sys_dir, sys_dirent, sys_next) \ - while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next) \ - if (sys_dirent.d_type == DT_DIR && \ - (strcmp(sys_dirent.d_name, ".")) && \ - (strcmp(sys_dirent.d_name, ".."))) +#define for_each_subsystem(sys_dir, sys_dirent) \ + while ((sys_dirent = readdir(sys_dir)) != NULL) \ + if (sys_dirent->d_type == DT_DIR && \ + (strcmp(sys_dirent->d_name, ".")) && \ + (strcmp(sys_dirent->d_name, ".."))) static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir) { @@ -159,12 +159,12 @@ static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir) return 0; } -#define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) \ - while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next) \ - if (evt_dirent.d_type == DT_DIR && \ - (strcmp(evt_dirent.d_name, ".")) && \ - (strcmp(evt_dirent.d_name, "..")) && \ - (!tp_event_has_id(&sys_dirent, &evt_dirent))) +#define for_each_event(sys_dirent, evt_dir, evt_dirent) \ + while ((evt_dirent = readdir(evt_dir)) != NULL) \ + if (evt_dirent->d_type == DT_DIR && \ + (strcmp(evt_dirent->d_name, ".")) && \ + (strcmp(evt_dirent->d_name, "..")) && \ + (!tp_event_has_id(sys_dirent, evt_dirent))) #define MAX_EVENT_LENGTH 512 @@ -173,7 +173,7 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config) { struct tracepoint_path *path = NULL; DIR *sys_dir, *evt_dir; - struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; + struct dirent *sys_dirent, *evt_dirent; char id_buf[24]; int fd; u64 id; @@ -184,18 +184,18 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config) if (!sys_dir) return NULL; - for_each_subsystem(sys_dir, sys_dirent, sys_next) { + 
for_each_subsystem(sys_dir, sys_dirent) { snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, - sys_dirent.d_name); + sys_dirent->d_name); evt_dir = opendir(dir_path); if (!evt_dir) continue; - for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { + for_each_event(sys_dirent, evt_dir, evt_dirent) { snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path, - evt_dirent.d_name); + evt_dirent->d_name); fd = open(evt_path, O_RDONLY); if (fd < 0) continue; @@ -220,9 +220,9 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config) free(path); return NULL; } - strncpy(path->system, sys_dirent.d_name, + strncpy(path->system, sys_dirent->d_name, MAX_EVENT_LENGTH); - strncpy(path->name, evt_dirent.d_name, + strncpy(path->name, evt_dirent->d_name, MAX_EVENT_LENGTH); return path; } @@ -291,10 +291,11 @@ __add_event(struct list_head *list, int *idx, event_attr_init(attr); - evsel = perf_evsel__new_idx(attr, (*idx)++); + evsel = perf_evsel__new_idx(attr, *idx); if (!evsel) return NULL; + (*idx)++; evsel->cpus = cpu_map__get(cpus); evsel->own_cpus = cpu_map__get(cpus); @@ -1629,7 +1630,7 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob, bool name_only) { DIR *sys_dir, *evt_dir; - struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; + struct dirent *sys_dirent, *evt_dirent; char evt_path[MAXPATHLEN]; char dir_path[MAXPATHLEN]; char **evt_list = NULL; @@ -1647,20 +1648,20 @@ restart: goto out_close_sys_dir; } - for_each_subsystem(sys_dir, sys_dirent, sys_next) { + for_each_subsystem(sys_dir, sys_dirent) { if (subsys_glob != NULL && - !strglobmatch(sys_dirent.d_name, subsys_glob)) + !strglobmatch(sys_dirent->d_name, subsys_glob)) continue; snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, - sys_dirent.d_name); + sys_dirent->d_name); evt_dir = opendir(dir_path); if (!evt_dir) continue; - for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { + for_each_event(sys_dirent, evt_dir, evt_dirent) { if (event_glob != NULL && - !strglobmatch(evt_dirent.d_name, event_glob)) + !strglobmatch(evt_dirent->d_name, event_glob)) continue; if (!evt_num_known) { @@ -1669,7 +1670,7 @@ restart: } snprintf(evt_path, MAXPATHLEN, "%s:%s", - sys_dirent.d_name, evt_dirent.d_name); + sys_dirent->d_name, evt_dirent->d_name); evt_list[evt_i] = strdup(evt_path); if (evt_list[evt_i] == NULL) @@ -1722,7 +1723,7 @@ out_close_sys_dir: int is_valid_tracepoint(const char *event_string) { DIR *sys_dir, *evt_dir; - struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; + struct dirent *sys_dirent, *evt_dirent; char evt_path[MAXPATHLEN]; char dir_path[MAXPATHLEN]; @@ -1730,17 +1731,17 @@ int is_valid_tracepoint(const char *event_string) if (!sys_dir) return 0; - for_each_subsystem(sys_dir, sys_dirent, sys_next) { + for_each_subsystem(sys_dir, sys_dirent) { snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, - sys_dirent.d_name); + sys_dirent->d_name); evt_dir = opendir(dir_path); if (!evt_dir) continue; - for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { + for_each_event(sys_dirent, evt_dir, evt_dirent) { snprintf(evt_path, MAXPATHLEN, "%s:%s", - sys_dirent.d_name, evt_dirent.d_name); + sys_dirent->d_name, evt_dirent->d_name); if (!strcmp(evt_path, event_string)) { closedir(evt_dir); closedir(sys_dir); diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index 6f2a0279476c..593066c68e3d 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -153,7 +153,7 @@ static int perf_pmu__parse_unit(struct perf_pmu_alias *alias, char *dir, char *n if 
(fd == -1) return -1; - sret = read(fd, alias->unit, UNIT_MAX_LEN); + sret = read(fd, alias->unit, UNIT_MAX_LEN); if (sret < 0) goto error; diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 03875f9154e7..0195b7e8c54a 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -2349,6 +2349,14 @@ static int get_new_event_name(char *buf, size_t len, const char *base, out: free(nbase); + + /* Final validation */ + if (ret >= 0 && !is_c_func_name(buf)) { + pr_warning("Internal error: \"%s\" is an invalid event name.\n", + buf); + ret = -EINVAL; + } + return ret; } diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 05012bb178d7..fdd87c7e3e91 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -1460,16 +1460,12 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr, Dwarf_Addr _addr = 0, baseaddr = 0; const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp; int baseline = 0, lineno = 0, ret = 0; - bool reloc = false; -retry: + /* We always need to relocate the address for aranges */ + if (debuginfo__get_text_offset(dbg, &baseaddr) == 0) + addr += baseaddr; /* Find cu die */ if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) { - if (!reloc && debuginfo__get_text_offset(dbg, &baseaddr) == 0) { - addr += baseaddr; - reloc = true; - goto retry; - } pr_warning("Failed to find debug information for address %lx\n", addr); ret = -EINVAL; diff --git a/tools/perf/util/scripting-engines/Build b/tools/perf/util/scripting-engines/Build index 6516e220c247..82d28c67e0f3 100644 --- a/tools/perf/util/scripting-engines/Build +++ b/tools/perf/util/scripting-engines/Build @@ -1,6 +1,6 @@ libperf-$(CONFIG_LIBPERL) += trace-event-perl.o libperf-$(CONFIG_LIBPYTHON) += trace-event-python.o -CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-undef -Wno-switch-default +CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-nested-externs -Wno-undef -Wno-switch-default CFLAGS_trace-event-python.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 468de95bc8bb..0ae4f73dc8eb 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -135,8 +135,14 @@ struct perf_session *perf_session__new(struct perf_data_file *file, if (perf_session__open(session) < 0) goto out_close; - perf_session__set_id_hdr_size(session); - perf_session__set_comm_exec(session); + /* + * set session attributes that are present in perf.data + * but not in pipe-mode. + */ + if (!file->is_pipe) { + perf_session__set_id_hdr_size(session); + perf_session__set_comm_exec(session); + } } } else { session->machines.host.env = &perf_env; @@ -151,7 +157,11 @@ struct perf_session *perf_session__new(struct perf_data_file *file, pr_warning("Cannot read kernel map\n"); } - if (tool && tool->ordering_requires_timestamps && + /* + * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is + * processed, so perf_evlist__sample_id_all is not meaningful here. 
+ */ + if ((!file || !file->is_pipe) && tool && tool->ordering_requires_timestamps && tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) { dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n"); tool->ordered_events = false; @@ -1437,6 +1447,7 @@ static int __perf_session__process_pipe_events(struct perf_session *session) buf = malloc(cur_size); if (!buf) return -errno; + ordered_events__set_copy_on_queue(oe, true); more: event = buf; err = readn(fd, event, sizeof(struct perf_event_header)); diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 2d8ccd4d9e1b..87312056f75d 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -604,6 +604,9 @@ static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf, static int64_t sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right) { + if (!left->branch_info || !right->branch_info) + return cmp_null(left->branch_info, right->branch_info); + return left->branch_info->flags.cycles - right->branch_info->flags.cycles; } @@ -611,6 +614,8 @@ sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right) static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) { + if (!he->branch_info) + return scnprintf(bf, size, "%-.*s", width, "N/A"); if (he->branch_info->flags.cycles == 0) return repsep_snprintf(bf, size, "%-*s", width, "-"); return repsep_snprintf(bf, size, "%-*hd", width, diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index 4a3a72cb5805..6ce624cb7001 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c @@ -311,6 +311,16 @@ int perf_stat_process_counter(struct perf_stat_config *config, aggr->val = aggr->ena = aggr->run = 0; + /* + * We calculate counter's data every interval, + * and the display code shows ps->res_stats + * avg value. We need to zero the stats for + * interval mode, otherwise overall avg running + * averages will be shown for each interval. + */ + if (config->interval) + init_stats(ps->res_stats); + if (counter->per_pkg) zero_per_pkg(counter); diff --git a/tools/perf/util/strfilter.c b/tools/perf/util/strfilter.c index bcae659b6546..efb53772e0ec 100644 --- a/tools/perf/util/strfilter.c +++ b/tools/perf/util/strfilter.c @@ -269,6 +269,7 @@ static int strfilter_node__sprint(struct strfilter_node *node, char *buf) len = strfilter_node__sprint_pt(node->l, buf); if (len < 0) return len; + __fallthrough; case '!': if (buf) { *(buf + len++) = *node->p; diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c index fc8781de62db..accb7ece1d3c 100644 --- a/tools/perf/util/string.c +++ b/tools/perf/util/string.c @@ -21,6 +21,8 @@ s64 perf_atoll(const char *str) case 'b': case 'B': if (*p) goto out_err; + + __fallthrough; case '\0': return length; default: diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 475d88d0a1c9..7c97ecaeae48 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -488,6 +488,12 @@ int sysfs__read_build_id(const char *filename, void *build_id, size_t size) break; } else { int n = namesz + descsz; + + if (n > (int)sizeof(bf)) { + n = sizeof(bf); + pr_debug("%s: truncating reading of build id in sysfs file %s: n_namesz=%u, n_descsz=%u.\n", + __func__, filename, nhdr.n_namesz, nhdr.n_descsz); + } if (read(fd, bf, n) != n) break; } @@ -1091,9 +1097,8 @@ new_symbol: * For misannotated, zeroed, ASM function sizes. 
*/ if (nr > 0) { - if (!symbol_conf.allow_aliases) - symbols__fixup_duplicate(&dso->symbols[map->type]); symbols__fixup_end(&dso->symbols[map->type]); + symbols__fixup_duplicate(&dso->symbols[map->type]); if (kmap) { /* * We need to fixup this here too because we create new diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index cd08027a6d2c..415be561fad3 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -151,6 +151,9 @@ void symbols__fixup_duplicate(struct rb_root *symbols) struct rb_node *nd; struct symbol *curr, *next; + if (symbol_conf.allow_aliases) + return; + nd = rb_first(symbols); while (nd) { @@ -197,7 +200,7 @@ void symbols__fixup_end(struct rb_root *symbols) /* Last entry */ if (curr->end == curr->start) - curr->end = roundup(curr->start, 4096); + curr->end = roundup(curr->start, 4096) + 4096; } void __map_groups__fixup_end(struct map_groups *mg, enum map_type type) @@ -1275,8 +1278,8 @@ int dso__load_kallsyms(struct dso *dso, const char *filename, if (kallsyms__delta(map, filename, &delta)) return -1; - symbols__fixup_duplicate(&dso->symbols[map->type]); symbols__fixup_end(&dso->symbols[map->type]); + symbols__fixup_duplicate(&dso->symbols[map->type]); if (dso->kernel == DSO_TYPE_GUEST_KERNEL) dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS; diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 0a9ae8014729..829508a21448 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -227,7 +227,7 @@ void thread__find_cpumode_addr_location(struct thread *thread, struct addr_location *al) { size_t i; - const u8 const cpumodes[] = { + const u8 cpumodes[] = { PERF_RECORD_MISC_USER, PERF_RECORD_MISC_KERNEL, PERF_RECORD_MISC_GUEST_USER, diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c index 6ec3c5ca438f..4e666b95b87e 100644 --- a/tools/perf/util/thread_map.c +++ b/tools/perf/util/thread_map.c @@ -92,8 +92,8 @@ struct thread_map *thread_map__new_by_uid(uid_t uid) { DIR *proc; int max_threads = 32, items, i; - char path[256]; - struct dirent dirent, *next, **namelist = NULL; + char path[NAME_MAX + 1 + 6]; + struct dirent *dirent, **namelist = NULL; struct thread_map *threads = thread_map__alloc(max_threads); if (threads == NULL) @@ -106,16 +106,16 @@ struct thread_map *thread_map__new_by_uid(uid_t uid) threads->nr = 0; atomic_set(&threads->refcnt, 1); - while (!readdir_r(proc, &dirent, &next) && next) { + while ((dirent = readdir(proc)) != NULL) { char *end; bool grow = false; struct stat st; - pid_t pid = strtol(dirent.d_name, &end, 10); + pid_t pid = strtol(dirent->d_name, &end, 10); if (*end) /* only interested in proper numerical dirents */ continue; - snprintf(path, sizeof(path), "/proc/%s", dirent.d_name); + snprintf(path, sizeof(path), "/proc/%s", dirent->d_name); if (stat(path, &st) != 0) continue; diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c index 9df61059a85d..a2fd6e79d5a5 100644 --- a/tools/perf/util/trace-event-scripting.c +++ b/tools/perf/util/trace-event-scripting.c @@ -95,7 +95,8 @@ static void register_python_scripting(struct scripting_ops *scripting_ops) if (err) die("error registering py script extension"); - scripting_context = malloc(sizeof(struct scripting_context)); + if (scripting_context == NULL) + scripting_context = malloc(sizeof(*scripting_context)); } #ifdef NO_LIBPYTHON @@ -159,7 +160,8 @@ static void register_perl_scripting(struct scripting_ops *scripting_ops) if (err) die("error registering pl script extension"); - 
scripting_context = malloc(sizeof(struct scripting_context)); + if (scripting_context == NULL) + scripting_context = malloc(sizeof(*scripting_context)); } #ifdef NO_LIBPERL diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c index 2dcfe9a7c8d0..60edec383281 100644 --- a/tools/perf/util/unwind-libdw.c +++ b/tools/perf/util/unwind-libdw.c @@ -37,6 +37,14 @@ static int __report_module(struct addr_location *al, u64 ip, return 0; mod = dwfl_addrmodule(ui->dwfl, ip); + if (mod) { + Dwarf_Addr s; + + dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL); + if (s != al->map->start) + mod = 0; + } + if (!mod) mod = dwfl_report_elf(ui->dwfl, dso->short_name, dso->long_name, -1, al->map->start, diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c index 47b1e36c7ea0..9adc9af8b048 100644 --- a/tools/perf/util/util.c +++ b/tools/perf/util/util.c @@ -162,7 +162,7 @@ int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size) size -= ret; off_in += ret; - off_out -= ret; + off_out += ret; } munmap(ptr, off_in + size);
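
The hunks in event.c, parse-events.c and thread_map.c above all replace the deprecated readdir_r() with plain readdir(), collapsing the old "struct dirent dirent, *next" pair into a single "struct dirent *dirent" pointer. The fragment below is a minimal, self-contained sketch of that conversion pattern, not code from this patch (the function name and the printf output are hypothetical); it keeps the same numeric-dirent filter the perf sources use when scanning /proc.

#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>

/* Walk a directory with readdir() and keep only purely numerical entries,
 * e.g. pids under /proc.  readdir() needs no caller-supplied dirent buffer,
 * so the readdir_r() out-parameters simply disappear. */
static int for_each_numeric_dirent(const char *path)
{
	DIR *dir = opendir(path);
	struct dirent *dirent;

	if (dir == NULL)
		return -1;

	while ((dirent = readdir(dir)) != NULL) {
		char *end;
		long id = strtol(dirent->d_name, &end, 10);

		if (*end)	/* only interested in proper numerical dirents */
			continue;

		printf("%s/%ld\n", path, id);
	}

	closedir(dir);
	return 0;
}

In current glibc, concurrent readdir() calls on distinct DIR streams do not interfere with each other, which is the main reason readdir_r() was deprecated: per-entry state lives in the DIR handle rather than in a caller-supplied buffer of uncertain size.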
