/* Global callchain rendering parameters; default display mode is the
 * relative-percentage graph.  (Initializer is truncated in this excerpt.) */
14 struct callchain_param callchain_param = {
15 .mode = CHAIN_GRAPH_REL,
/* Return the display width currently recorded for column @col. */
19 u16 hists__col_len(struct hists *self, enum hist_column col)
21 return self->col_len[col];
/* Unconditionally set the display width recorded for column @col. */
24 void hists__set_col_len(struct hists *self, enum hist_column col, u16 len)
26 self->col_len[col] = len;
/* Widen column @col to @len if @len exceeds the current width.
 * Returns bool; the return statements fall outside this excerpt —
 * presumably true when the width grew (TODO confirm). */
29 bool hists__new_col_len(struct hists *self, enum hist_column col, u16 len)
31 if (len > hists__col_len(self, col)) {
32 hists__set_col_len(self, col, len);
/* Zero every column width so they can be recomputed from scratch. */
38 static void hists__reset_col_len(struct hists *self)
42 for (col = 0; col < HISTC_NR_COLS; ++col)
43 hists__set_col_len(self, col, 0);
/* Grow the symbol/comm/thread/dso column widths to fit entry @h.
 * NOTE(review): the NULL guards for h->ms.sym / h->ms.map visible in the
 * upstream file are outside this excerpt. */
46 static void hists__calc_col_len(struct hists *self, struct hist_entry *h)
51 hists__new_col_len(self, HISTC_SYMBOL, h->ms.sym->namelen);
53 len = thread__comm_len(h->thread);
/* Thread column is comm width plus room for the pid suffix (+6). */
54 if (hists__new_col_len(self, HISTC_COMM, len))
55 hists__set_col_len(self, HISTC_THREAD, len + 6);
58 len = dso__name_len(h->ms.map->dso);
59 hists__new_col_len(self, HISTC_DSO, len);
/* Accumulate @period into the per-cpumode bucket of @self, switching on
 * the PERF_RECORD_MISC_* cpumode of the sample.  (break statements and the
 * default arm are outside this excerpt.) */
63 static void hist_entry__add_cpumode_period(struct hist_entry *self,
64 unsigned int cpumode, u64 period)
67 case PERF_RECORD_MISC_KERNEL:
68 self->period_sys += period;
70 case PERF_RECORD_MISC_USER:
71 self->period_us += period;
73 case PERF_RECORD_MISC_GUEST_KERNEL:
74 self->period_guest_sys += period;
76 case PERF_RECORD_MISC_GUEST_USER:
77 self->period_guest_us += period;
85 * histogram, sorted on item, collects periods
/* Allocate a hist_entry, over-allocating trailing space for a
 * callchain_node when callchains are enabled, then initialize it from
 * @template.  (The template copy and NULL check are outside this excerpt.) */
88 static struct hist_entry *hist_entry__new(struct hist_entry *template)
90 size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_node) : 0;
91 struct hist_entry *self = malloc(sizeof(*self) + callchain_size);
96 if (symbol_conf.use_callchain)
97 callchain_init(self->callchain);
/* Account a newly inserted entry: update column widths for @h
 * (the nr_entries increment is outside this excerpt). */
103 static void hists__inc_nr_entries(struct hists *self, struct hist_entry *h)
106 hists__calc_col_len(self, h);
/* Return the HIST_FILTER__PARENT bit when "exclude other" is on and the
 * entry has no resolved parent symbol; 0 otherwise (the final return is
 * outside this excerpt). */
111 static u8 symbol__parent_filter(const struct symbol *parent)
113 if (symbol_conf.exclude_other && parent == NULL)
114 return 1 << HIST_FILTER__PARENT;
/* Insert-or-accumulate: walk the sorted rbtree of entries looking for one
 * that compares equal to the sample described by @al; if found, add
 * @period to it, otherwise allocate a new entry and link it in.  Finally
 * charge @period to the entry's cpumode bucket. */
118 struct hist_entry *__hists__add_entry(struct hists *self,
119 struct addr_location *al,
120 struct symbol *sym_parent, u64 period)
122 struct rb_node **p = &self->entries.rb_node;
123 struct rb_node *parent = NULL;
124 struct hist_entry *he;
/* Stack template used both for comparison and to seed a new entry. */
125 struct hist_entry entry = {
126 .thread = al->thread,
135 .parent = sym_parent,
136 .filtered = symbol__parent_filter(sym_parent),
142 he = rb_entry(parent, struct hist_entry, rb_node);
144 cmp = hist_entry__cmp(&entry, he);
/* Equal keys: fold the period into the existing entry. */
147 he->period += period;
158 he = hist_entry__new(&entry);
161 rb_link_node(&he->rb_node, parent, p);
162 rb_insert_color(&he->rb_node, &self->entries);
163 hists__inc_nr_entries(self, he);
165 hist_entry__add_cpumode_period(he, al->cpumode, period);
/* Compare two entries by applying each registered sort key in order;
 * the first non-zero comparison decides (early-exit is outside this
 * excerpt). */
170 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
172 struct sort_entry *se;
175 list_for_each_entry(se, &hist_entry__sort_list, list) {
176 cmp = se->se_cmp(left, right);
/* Like hist_entry__cmp() but prefers each sort key's collapse comparator
 * when one exists, falling back to its regular comparator. */
185 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
187 struct sort_entry *se;
190 list_for_each_entry(se, &hist_entry__sort_list, list) {
191 int64_t (*f)(struct hist_entry *, struct hist_entry *);
/* GNU "a ?: b" — use se_collapse if non-NULL, else se_cmp. */
193 f = se->se_collapse ?: se->se_cmp;
195 cmp = f(left, right);
/* Release a hist_entry (body outside this excerpt). */
203 void hist_entry__free(struct hist_entry *he)
209 * collapse the histogram
/* Insert @he into the collapse tree @root, merging (and freeing @he) when
 * an equal entry already exists.  Presumably returns whether @he was
 * actually inserted — TODO confirm, the returns are outside this excerpt. */
212 static bool collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
214 struct rb_node **p = &root->rb_node;
215 struct rb_node *parent = NULL;
216 struct hist_entry *iter;
221 iter = rb_entry(parent, struct hist_entry, rb_node);
223 cmp = hist_entry__collapse(iter, he);
/* Duplicate under the collapse keys: merge periods and drop @he. */
226 iter->period += he->period;
227 hist_entry__free(he);
237 rb_link_node(&he->rb_node, parent, p);
238 rb_insert_color(&he->rb_node, root);
/* Rebuild self->entries by moving every node into a temporary tree keyed
 * by the collapse comparators, merging duplicates.  No-op unless some sort
 * key requires collapsing. */
242 void hists__collapse_resort(struct hists *self)
245 struct rb_node *next;
246 struct hist_entry *n;
248 if (!sort__need_collapse)
/* Restart the statistics; they are re-accumulated per surviving entry. */
252 next = rb_first(&self->entries);
253 self->nr_entries = 0;
254 hists__reset_col_len(self);
257 n = rb_entry(next, struct hist_entry, rb_node);
/* Grab the successor before erasing the node from the old tree. */
258 next = rb_next(&n->rb_node);
260 rb_erase(&n->rb_node, &self->entries);
261 if (collapse__insert_entry(&tmp, n))
262 hists__inc_nr_entries(self, n);
269 * reverse the map, sort on period.
/* Insert @he into the output tree @entries ordered by descending period,
 * first sorting its callchain (pruned below @min_callchain_hits) when
 * callchains are enabled. */
272 static void __hists__insert_output_entry(struct rb_root *entries,
273 struct hist_entry *he,
274 u64 min_callchain_hits)
276 struct rb_node **p = &entries->rb_node;
277 struct rb_node *parent = NULL;
278 struct hist_entry *iter;
280 if (symbol_conf.use_callchain)
281 callchain_param.sort(&he->sorted_chain, he->callchain,
282 min_callchain_hits, &callchain_param);
286 iter = rb_entry(parent, struct hist_entry, rb_node);
/* Bigger periods go left so rb_first() yields the hottest entry. */
288 if (he->period > iter->period)
294 rb_link_node(&he->rb_node, parent, p);
295 rb_insert_color(&he->rb_node, entries);
/* Re-sort self->entries by period (descending) for display, recomputing
 * entry counts and column widths along the way. */
298 void hists__output_resort(struct hists *self)
301 struct rb_node *next;
302 struct hist_entry *n;
303 u64 min_callchain_hits;
/* Callchain branches below min_percent of the total are pruned. */
305 min_callchain_hits = self->stats.total_period * (callchain_param.min_percent / 100);
308 next = rb_first(&self->entries);
310 self->nr_entries = 0;
311 hists__reset_col_len(self);
314 n = rb_entry(next, struct hist_entry, rb_node);
315 next = rb_next(&n->rb_node);
317 rb_erase(&n->rb_node, &self->entries);
318 __hists__insert_output_entry(&tmp, n, min_callchain_hits);
319 hists__inc_nr_entries(self, n);
/* Emit the fixed leading pad plus @left_margin spaces; returns chars
 * printed. */
325 static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
328 int ret = fprintf(fp, " ");
330 for (i = 0; i < left_margin; i++)
331 ret += fprintf(fp, " ");
/* Print one separator line of the callgraph: a "|" at every depth whose
 * bit is set in @depth_mask, blanks elsewhere. */
336 static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
340 size_t ret = callchain__fprintf_left_margin(fp, left_margin);
342 for (i = 0; i < depth; i++)
343 if (depth_mask & (1 << i))
344 ret += fprintf(fp, "| ");
346 ret += fprintf(fp, " ");
348 ret += fprintf(fp, "\n");
/* Print one callchain entry line: the depth pipes, then either a
 * "--NN.NN%-- " branch percentage (first line of a new branch) or padding,
 * then the symbol name or the raw ip when no symbol is known. */
353 static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
354 int depth, int depth_mask, int period,
355 u64 total_samples, int hits,
361 ret += callchain__fprintf_left_margin(fp, left_margin);
362 for (i = 0; i < depth; i++) {
363 if (depth_mask & (1 << i))
364 ret += fprintf(fp, "|");
366 ret += fprintf(fp, " ");
/* Only the deepest column of a branch head carries the percentage. */
367 if (!period && i == depth - 1) {
370 percent = hits * 100.0 / total_samples;
371 ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
373 ret += fprintf(fp, "%s", " ");
376 ret += fprintf(fp, "%s\n", chain->ms.sym->name);
/* Unresolved address: fall back to printing the ip itself. */
378 ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);
/* Fake "[...]" symbol used to render filtered-out remaining hits. */
383 static struct symbol *rem_sq_bracket;
384 static struct callchain_list rem_hits;
/* Lazily allocate the "[...]" placeholder symbol for remaining hits.
 * The +6 over-allocation holds the 5-char name plus NUL in the symbol's
 * trailing name storage. */
386 static void init_rem_hits(void)
388 rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
389 if (!rem_sq_bracket) {
390 fprintf(stderr, "Not enough memory to display remaining hits\n");
394 strcpy(rem_sq_bracket->name, "[...]");
395 rem_hits.ms.sym = rem_sq_bracket;
/* Recursively print the callchain graph under @self.  In relative mode
 * percentages are taken against this node's children_hit; in absolute mode
 * against @total_samples.  Whatever is not printed is summarized as a
 * trailing "[...]" line. */
398 static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
399 u64 total_samples, int depth,
400 int depth_mask, int left_margin)
402 struct rb_node *node, *next;
403 struct callchain_node *child;
404 struct callchain_list *chain;
405 int new_depth_mask = depth_mask;
410 uint entries_printed = 0;
412 if (callchain_param.mode == CHAIN_GRAPH_REL)
413 new_total = self->children_hit;
415 new_total = total_samples;
/* Track hits not yet attributed to a printed child. */
417 remaining = new_total;
419 node = rb_first(&self->rb_root);
423 child = rb_entry(node, struct callchain_node, rb_node);
424 cumul = cumul_hits(child);
428 * The depth mask manages the output of pipes that show
429 * the depth. We don't want to keep the pipes of the current
430 * level for the last child of this depth.
431 * Except if we have remaining filtered hits. They will
432 * supersede the last child
434 next = rb_next(node);
435 if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
436 new_depth_mask &= ~(1 << (depth - 1));
439 * But we keep the older depth mask for the line separator
440 * to keep the level link until we reach the last child
442 ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
/* Print every frame stored in this child node. */
445 list_for_each_entry(chain, &child->val, list) {
446 ret += ipchain__fprintf_graph(fp, chain, depth,
/* Recurse one level deeper, extending the pipe mask. */
452 ret += __callchain__fprintf_graph(fp, child, new_total,
454 new_depth_mask | (1 << depth),
457 if (++entries_printed == callchain_param.print_limit)
/* Summarize unprinted/filtered hits as a "[...]" pseudo-entry. */
461 if (callchain_param.mode == CHAIN_GRAPH_REL &&
462 remaining && remaining != new_total) {
467 new_depth_mask &= ~(1 << (depth - 1));
469 ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
470 new_depth_mask, 0, new_total,
471 remaining, left_margin);
/* Print the top of a callchain graph: the root node's own frames
 * (with a "---" connector line), then recurse into the children via
 * __callchain__fprintf_graph(). */
477 static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
478 u64 total_samples, int left_margin)
480 struct callchain_list *chain;
481 bool printed = false;
484 u32 entries_printed = 0;
486 list_for_each_entry(chain, &self->val, list) {
/* When sorting by symbol the first frame duplicates the entry line. */
487 if (!i++ && sort__first_dimension == SORT_SYM)
491 ret += callchain__fprintf_left_margin(fp, left_margin);
492 ret += fprintf(fp, "|\n");
493 ret += callchain__fprintf_left_margin(fp, left_margin);
494 ret += fprintf(fp, "---");
499 ret += callchain__fprintf_left_margin(fp, left_margin);
502 ret += fprintf(fp, " %s\n", chain->ms.sym->name);
/* No symbol resolved: print the raw ip instead. */
504 ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);
506 if (++entries_printed == callchain_param.print_limit)
510 ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin);
/* Flat-mode callchain printing: recurse to the parent first so frames come
 * out root-to-leaf, then print this node's frames, skipping the
 * PERF_CONTEXT_* marker pseudo-addresses. */
515 static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
518 struct callchain_list *chain;
524 ret += callchain__fprintf_flat(fp, self->parent, total_samples);
527 list_for_each_entry(chain, &self->val, list) {
528 if (chain->ip >= PERF_CONTEXT_MAX)
531 ret += fprintf(fp, " %s\n", chain->ms.sym->name);
533 ret += fprintf(fp, " %p\n",
534 (void *)(long)chain->ip);
/* Print every callchain attached to @self, dispatching on the configured
 * display mode (flat vs. graph) and honoring the print limit. */
540 static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
541 u64 total_samples, int left_margin)
543 struct rb_node *rb_node;
544 struct callchain_node *chain;
546 u32 entries_printed = 0;
548 rb_node = rb_first(&self->sorted_chain);
552 chain = rb_entry(rb_node, struct callchain_node, rb_node);
553 percent = chain->hit * 100.0 / total_samples;
554 switch (callchain_param.mode) {
/* Flat mode: percentage header, then root-to-leaf frame list. */
556 ret += percent_color_fprintf(fp, " %6.2f%%\n",
558 ret += callchain__fprintf_flat(fp, chain, total_samples);
560 case CHAIN_GRAPH_ABS: /* Falldown */
561 case CHAIN_GRAPH_REL:
562 ret += callchain__fprintf_graph(fp, chain, total_samples,
568 ret += fprintf(fp, "\n");
569 if (++entries_printed == callchain_param.print_limit)
571 rb_node = rb_next(rb_node);
/* Format one histogram row into @s: overhead percentage (or raw period),
 * optional cpu-utilization / sample-count / diff-delta / displacement
 * columns, then every registered sort key.  When @pair_hists is set the
 * baseline values come from the paired entry (perf diff mode).  Returns
 * the number of characters written. */
577 int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
578 struct hists *hists, struct hists *pair_hists,
579 bool show_displacement, long displacement,
580 bool color, u64 session_total)
582 struct sort_entry *se;
583 u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
584 const char *sep = symbol_conf.field_sep;
587 if (symbol_conf.exclude_other && !self->parent)
/* Diff mode: baseline numbers come from the paired entry (0 if unpaired). */
591 period = self->pair ? self->pair->period : 0;
592 total = pair_hists->stats.total_period;
593 period_sys = self->pair ? self->pair->period_sys : 0;
594 period_us = self->pair ? self->pair->period_us : 0;
595 period_guest_sys = self->pair ? self->pair->period_guest_sys : 0;
596 period_guest_us = self->pair ? self->pair->period_guest_us : 0;
/* Normal mode: use this entry's own accumulated periods. */
598 period = self->period;
599 total = session_total;
600 period_sys = self->period_sys;
601 period_us = self->period_us;
602 period_guest_sys = self->period_guest_sys;
603 period_guest_us = self->period_guest_us;
/* Percentage column, colorized only when @color is requested. */
608 ret = percent_color_snprintf(s, size,
609 sep ? "%.2f" : " %6.2f%%",
610 (period * 100.0) / total);
612 ret = snprintf(s, size, sep ? "%.2f" : " %6.2f%%",
613 (period * 100.0) / total);
614 if (symbol_conf.show_cpu_utilization) {
615 ret += percent_color_snprintf(s + ret, size - ret,
616 sep ? "%.2f" : " %6.2f%%",
617 (period_sys * 100.0) / total);
618 ret += percent_color_snprintf(s + ret, size - ret,
619 sep ? "%.2f" : " %6.2f%%",
620 (period_us * 100.0) / total);
622 ret += percent_color_snprintf(s + ret,
624 sep ? "%.2f" : " %6.2f%%",
625 (period_guest_sys * 100.0) /
627 ret += percent_color_snprintf(s + ret,
629 sep ? "%.2f" : " %6.2f%%",
630 (period_guest_us * 100.0) /
/* Without a valid total, print the raw period instead of a percentage. */
635 ret = snprintf(s, size, sep ? "%lld" : "%12lld ", period);
637 if (symbol_conf.show_nr_samples) {
639 ret += snprintf(s + ret, size - ret, "%c%lld", *sep, period);
641 ret += snprintf(s + ret, size - ret, "%11lld", period);
/* perf-diff delta column: new% - baseline%, blank when below 0.01. */
646 double old_percent = 0, new_percent = 0, diff;
649 old_percent = (period * 100.0) / total;
650 if (session_total > 0)
651 new_percent = (self->period * 100.0) / session_total;
653 diff = new_percent - old_percent;
655 if (fabs(diff) >= 0.01)
656 snprintf(bf, sizeof(bf), "%+4.2F%%", diff);
658 snprintf(bf, sizeof(bf), " ");
661 ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
663 ret += snprintf(s + ret, size - ret, "%11.11s", bf);
665 if (show_displacement) {
667 snprintf(bf, sizeof(bf), "%+4ld", displacement);
669 snprintf(bf, sizeof(bf), " ");
672 ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
674 ret += snprintf(s + ret, size - ret, "%6.6s", bf);
/* Finally append every sort-key column via its se_snprintf callback. */
678 list_for_each_entry(se, &hist_entry__sort_list, list) {
682 ret += snprintf(s + ret, size - ret, "%s", sep ?: " ");
683 ret += se->se_snprintf(self, s + ret, size - ret,
684 hists__col_len(hists, se->se_width_idx));
/* Convenience wrapper: format the entry (colorized) into a local buffer
 * and write it to @fp followed by a newline. */
690 int hist_entry__fprintf(struct hist_entry *self, struct hists *hists,
691 struct hists *pair_hists, bool show_displacement,
692 long displacement, FILE *fp, u64 session_total)
695 hist_entry__snprintf(self, bf, sizeof(bf), hists, pair_hists,
696 show_displacement, displacement,
697 true, session_total);
698 return fprintf(fp, "%s\n", bf);
/* Print the callchains of @self, computing a left margin so graphs line
 * up under the symbol when the first sort key is the comm column. */
701 static size_t hist_entry__fprintf_callchain(struct hist_entry *self,
702 struct hists *hists, FILE *fp,
707 if (sort__first_dimension == SORT_COMM) {
708 struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
/* Margin = comm column width minus the actual comm length. */
710 left_margin = hists__col_len(hists, se->se_width_idx);
711 left_margin -= thread__comm_len(self->thread);
714 return hist_entry_callchain__fprintf(fp, self, session_total,
/* Print the whole histogram to @fp: a header row (honoring the optional
 * field separator and user column widths), a dotted underline row, then
 * one line per entry with optional callchains and diff/displacement
 * columns when @pair is set. */
718 size_t hists__fprintf(struct hists *self, struct hists *pair,
719 bool show_displacement, FILE *fp)
721 struct sort_entry *se;
724 unsigned long position = 1;
725 long displacement = 0;
727 const char *sep = symbol_conf.field_sep;
728 const char *col_width = symbol_conf.col_width_list_str;
/* --- header row --- */
732 fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");
734 if (symbol_conf.show_nr_samples) {
736 fprintf(fp, "%cSamples", *sep);
738 fputs(" Samples ", fp);
741 if (symbol_conf.show_cpu_utilization) {
743 ret += fprintf(fp, "%csys", *sep);
744 ret += fprintf(fp, "%cus", *sep);
746 ret += fprintf(fp, "%cguest sys", *sep);
747 ret += fprintf(fp, "%cguest us", *sep);
750 ret += fprintf(fp, " sys ");
751 ret += fprintf(fp, " us ");
753 ret += fprintf(fp, " guest sys ");
754 ret += fprintf(fp, " guest us ");
761 ret += fprintf(fp, "%cDelta", *sep);
763 ret += fprintf(fp, " Delta ");
765 if (show_displacement) {
767 ret += fprintf(fp, "%cDisplacement", *sep);
769 ret += fprintf(fp, " Displ");
/* One header cell per sort key; user-supplied widths (comma list) win. */
773 list_for_each_entry(se, &hist_entry__sort_list, list) {
777 fprintf(fp, "%c%s", *sep, se->se_header);
780 width = strlen(se->se_header);
781 if (symbol_conf.col_width_list_str) {
783 hists__set_col_len(self, se->se_width_idx,
785 col_width = strchr(col_width, ',');
790 if (!hists__new_col_len(self, se->se_width_idx, width))
791 width = hists__col_len(self, se->se_width_idx);
792 fprintf(fp, " %*s", width, se->se_header);
/* --- dotted underline row --- */
799 fprintf(fp, "# ........");
800 if (symbol_conf.show_nr_samples)
801 fprintf(fp, " ..........");
803 fprintf(fp, " ..........");
804 if (show_displacement)
805 fprintf(fp, " .....");
807 list_for_each_entry(se, &hist_entry__sort_list, list) {
814 width = hists__col_len(self, se->se_width_idx);
816 width = strlen(se->se_header);
817 for (i = 0; i < width; i++)
821 fprintf(fp, "\n#\n");
/* --- entry rows --- */
824 for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
825 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
827 if (show_displacement) {
/* Displacement = where the paired entry ranked vs. this position. */
829 displacement = ((long)h->pair->position -
835 ret += hist_entry__fprintf(h, self, pair, show_displacement,
836 displacement, fp, self->stats.total_period);
838 if (symbol_conf.use_callchain)
839 ret += hist_entry__fprintf_callchain(h, self, fp,
840 self->stats.total_period);
/* Debug aid: dump the thread's maps for unresolved entries. */
841 if (h->ms.map == NULL && verbose > 1) {
842 __map_groups__fprintf_maps(&h->thread->mg,
843 MAP__FUNCTION, verbose, fp);
844 fprintf(fp, "%.10s end\n", graph_dotted_line);
848 free(rem_sq_bracket);
854 * See hists__fprintf to match the column widths
/* Compute the total character width of one histogram row; the per-column
 * constants mirror the header widths printed by hists__fprintf(). */
856 unsigned int hists__sort_list_width(struct hists *self)
858 struct sort_entry *se;
859 int ret = 9; /* total % */
861 if (symbol_conf.show_cpu_utilization) {
862 ret += 7; /* count_sys % */
863 ret += 6; /* count_us % */
865 ret += 13; /* count_guest_sys % */
866 ret += 12; /* count_guest_us % */
870 if (symbol_conf.show_nr_samples)
/* Each sort column is its stored width plus a 2-space gap. */
873 list_for_each_entry(se, &hist_entry__sort_list, list)
875 ret += 2 + hists__col_len(self, se->se_width_idx);
/* Clear @filter's bit on @h and, when the entry becomes fully unfiltered,
 * re-account it in the hists statistics and column widths.  (The
 * still-filtered early-out is outside this excerpt.) */
880 static void hists__remove_entry_filter(struct hists *self, struct hist_entry *h,
881 enum hist_filter filter)
883 h->filtered &= ~(1 << filter);
888 self->stats.total_period += h->period;
889 self->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;
891 hists__calc_col_len(self, h);
/* Apply (or clear, when @dso is NULL) the DSO filter over every entry,
 * resetting and re-accumulating the hists statistics. */
894 void hists__filter_by_dso(struct hists *self, const struct dso *dso)
898 self->nr_entries = self->stats.total_period = 0;
899 self->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
900 hists__reset_col_len(self);
902 for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
903 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
905 if (symbol_conf.exclude_other && !h->parent)
/* Entry maps to a different (or no) dso: mark it filtered. */
908 if (dso != NULL && (h->ms.map == NULL || h->ms.map->dso != dso)) {
909 h->filtered |= (1 << HIST_FILTER__DSO);
913 hists__remove_entry_filter(self, h, HIST_FILTER__DSO);
/* Apply (or clear, when @thread is NULL) the thread filter over every
 * entry, resetting and re-accumulating the hists statistics. */
917 void hists__filter_by_thread(struct hists *self, const struct thread *thread)
921 self->nr_entries = self->stats.total_period = 0;
922 self->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
923 hists__reset_col_len(self);
925 for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
926 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
928 if (thread != NULL && h->thread != thread) {
929 h->filtered |= (1 << HIST_FILTER__THREAD);
933 hists__remove_entry_filter(self, h, HIST_FILTER__THREAD);
/* Allocate the per-symbol annotation histogram: header plus one u64
 * counter per byte of the symbol's address range.  Returns 0 on success,
 * -1 on allocation failure. */
937 static int symbol__alloc_hist(struct symbol *self)
939 struct sym_priv *priv = symbol__priv(self);
940 const int size = (sizeof(*priv->hist) +
941 (self->end - self->start) * sizeof(u64));
943 priv->hist = zalloc(size);
944 return priv->hist == NULL ? -1 : 0;
/* Record one sample at address @ip inside this entry's symbol for
 * annotation: bounds-check the offset against the symbol and bump the
 * per-offset counter (the actual increment is outside this excerpt). */
947 int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip)
949 unsigned int sym_size, offset;
950 struct symbol *sym = self->ms.sym;
951 struct sym_priv *priv;
/* Cannot annotate without a resolved symbol and map. */
954 if (!sym || !self->ms.map)
957 priv = symbol__priv(sym);
958 if (priv->hist == NULL && symbol__alloc_hist(sym) < 0)
961 sym_size = sym->end - sym->start;
962 offset = ip - sym->start;
964 pr_debug3("%s: ip=%#Lx\n", __func__, self->ms.map->unmap_ip(self->ms.map, ip));
/* Sample fell outside the symbol's range: ignore it. */
966 if (offset >= sym_size)
973 pr_debug3("%#Lx %s: period++ [ip: %#Lx, %#Lx] => %Ld\n", self->ms.sym->start,
974 self->ms.sym->name, ip, ip - self->ms.sym->start, h->ip[offset]);
/* Allocate an objdump_line holding @line at symbol-relative @offset
 * (-1 for non-instruction lines); NULL check and line assignment are
 * outside this excerpt. */
978 static struct objdump_line *objdump_line__new(s64 offset, char *line)
980 struct objdump_line *self = malloc(sizeof(*self));
983 self->offset = offset;
/* Release an objdump_line (body outside this excerpt). */
990 void objdump_line__free(struct objdump_line *self)
/* Append @line to the tail of the disassembly list @head. */
996 static void objdump__add_line(struct list_head *head, struct objdump_line *line)
998 list_add_tail(&line->node, head);
/* Starting after @pos, return the next list entry carrying a real
 * instruction (offset >= 0); source/blank lines have offset -1.
 * (The return statements are outside this excerpt.) */
1001 struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
1002 struct objdump_line *pos)
1004 list_for_each_entry_continue(pos, head, node)
1005 if (pos->offset >= 0)
/* Read one line of objdump output from @file, strip trailing whitespace,
 * parse an optional leading "hexaddr:" into a symbol-relative offset, and
 * append the result to @head as an objdump_line. */
1011 static int hist_entry__parse_objdump_line(struct hist_entry *self, FILE *file,
1012 struct list_head *head)
1014 struct symbol *sym = self->ms.sym;
1015 struct objdump_line *objdump_line;
1016 char *line = NULL, *tmp, *tmp2, *c;
1018 s64 line_ip, offset = -1;
1020 if (getline(&line, &line_len, file) < 0)
/* Trim trailing whitespace in place. */
1026 while (line_len != 0 && isspace(line[line_len - 1]))
1027 line[--line_len] = '\0';
1029 c = strchr(line, '\n');
1036 * Strip leading spaces:
1047 * Parse hexa addresses followed by ':'
1049 line_ip = strtoull(tmp, &tmp2, 16);
/* Not "addr:" — treat as a source/annotation line (line_ip stays -1). */
1050 if (*tmp2 != ':' || tmp == tmp2)
1054 if (line_ip != -1) {
/* Convert the objdump address into an offset from the symbol start. */
1055 u64 start = map__rip_2objdump(self->ms.map, sym->start);
1056 offset = line_ip - start;
1059 objdump_line = objdump_line__new(offset, line);
1060 if (objdump_line == NULL) {
1064 objdump__add_line(head, objdump_line);
/* Annotate this entry's symbol: locate a usable DSO image (build-id cache
 * first, falling back to the dso's long_name), run objdump over the
 * symbol's address range via popen(), and parse each output line into
 * @head. */
1069 int hist_entry__annotate(struct hist_entry *self, struct list_head *head)
1071 struct symbol *sym = self->ms.sym;
1072 struct map *map = self->ms.map;
1073 struct dso *dso = map->dso;
1074 char *filename = dso__build_id_filename(dso, NULL, 0);
1075 bool free_filename = true;
1076 char command[PATH_MAX * 2];
1081 if (filename == NULL) {
1082 if (dso->has_build_id) {
1083 pr_err("Can't annotate %s: not enough memory\n",
/* Build-id file unusable (kallsyms link or unreadable): fall back. */
1088 } else if (readlink(filename, command, sizeof(command)) < 0 ||
1089 strstr(command, "[kernel.kallsyms]") ||
1090 access(filename, R_OK)) {
1094 * If we don't have build-ids or the build-id file isn't in the
1095 * cache, or is just a kallsyms file, well, lets hope that this
1096 * DSO is the same as when 'perf record' ran.
1098 filename = dso->long_name;
1099 free_filename = false;
/* Kernel DSO without vmlinux cannot be disassembled: warn once. */
1102 if (dso->origin == DSO__ORIG_KERNEL) {
1103 if (dso->annotate_warned)
1104 goto out_free_filename;
1106 dso->annotate_warned = 1;
1107 pr_err("Can't annotate %s: No vmlinux file was found in the "
1108 "path\n", sym->name);
1109 goto out_free_filename;
1112 pr_debug("%s: filename=%s, sym=%s, start=%#Lx, end=%#Lx\n", __func__,
1113 filename, sym->name, map->unmap_ip(map, sym->start),
1114 map->unmap_ip(map, sym->end));
1116 len = sym->end - sym->start;
1118 pr_debug("annotating [%p] %30s : [%p] %30s\n",
1119 dso, dso->long_name, sym, sym->name);
/* Disassemble just this symbol's range; grep drops self-references. */
1121 snprintf(command, sizeof(command),
1122 "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS -C %s|grep -v %s|expand",
1123 map__rip_2objdump(map, sym->start),
1124 map__rip_2objdump(map, sym->end),
1125 filename, filename);
1127 pr_debug("Executing: %s\n", command);
1129 file = popen(command, "r");
1131 goto out_free_filename;
1134 if (hist_entry__parse_objdump_line(self, file, head) < 0)
/* Count an event of @type; slot 0 doubles as the grand total. */
1144 void hists__inc_nr_events(struct hists *self, u32 type)
1146 ++self->stats.nr_events[0];
1147 ++self->stats.nr_events[type];
1150 size_t hists__fprintf_nr_events(struct hists *self, FILE *fp)
1155 for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
1156 if (!event__name[i])
1158 ret += fprintf(fp, "%10s events: %10d\n",
1159 event__name[i], self->stats.nr_events[i]);