#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"

static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_session__read_header(self, self->fd) < 0)
			pr_err("incompatible file format (rerun with -v to learn more)");

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err("  (try 'perf record' first)");
		pr_err("\n");
		return -errno;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_session__read_header(self, self->fd) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_type(self->evlist)) {
		pr_err("non-matching sample_type");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_id_all(self->evlist)) {
		pr_err("non-matching sample_id_all");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

void perf_session__update_sample_type(struct perf_session *self)
{
	self->sample_type = perf_evlist__sample_type(self->evlist);
	self->sample_size = __perf_evsel__sample_size(self->sample_type);
	self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
	self->id_hdr_size = perf_evlist__id_hdr_size(self->evlist);
	self->host_machine.id_hdr_size = self->id_hdr_size;
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
	machine__destroy_kernel_maps(&self->host_machine);
	machines__destroy_guest_kernel_maps(&self->machines);
}

struct perf_session *perf_session__new(const char *filename, int mode,
				       bool force, bool repipe,
				       struct perf_tool *tool)
{
	struct perf_session *self;
	struct stat st;
	size_t len;

	if (!filename || !strlen(filename)) {
		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
			filename = "-";
		else
			filename = "perf.data";
	}

	len = strlen(filename);
	self = zalloc(sizeof(*self) + len + 1); /* +1 keeps filename NUL-terminated */
	if (self == NULL)
		goto out;

	memcpy(self->filename, filename, len);
	/*
	 * On 64-bit we can mmap the whole data file in one go; no need for
	 * tiny mmap slices. On 32-bit we use 32 MB windows.
	 */
#if BITS_PER_LONG == 64
	self->mmap_window = ULLONG_MAX;
#else
	self->mmap_window = 32 * 1024 * 1024ULL;
#endif
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples);
	INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
	INIT_LIST_HEAD(&self->ordered_samples.to_free);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
		perf_session__update_sample_type(self);
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	if (tool && tool->ordering_requires_timestamps &&
	    tool->ordered_samples && !self->sample_id_all) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_samples = false;
	}

out:
	return self;
out_delete:
	perf_session__delete(self);
	return NULL;
}

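/*
 * Illustrative sketch, not part of the original file: a typical read-mode
 * caller in the style of the perf report tools. The bare perf_tool here is
 * a placeholder; real callers fill in their event callbacks first.
 */
static int __used example__open_and_close(struct perf_tool *tool)
{
	struct perf_session *session;

	session = perf_session__new("perf.data", O_RDONLY,
				    false /* force */, false /* repipe */,
				    tool);
	if (session == NULL)
		return -ENOMEM;

	perf_session__delete(session);
	return 0;
}
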
static void machine__delete_dead_threads(struct machine *machine)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}

static void perf_session__delete_dead_threads(struct perf_session *session)
{
	machine__delete_dead_threads(&session->host_machine);
}

static void machine__delete_threads(struct machine *self)
{
	struct rb_node *nd = rb_first(&self->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		rb_erase(&t->rb_node, &self->threads);
		nd = rb_next(nd);
		thread__delete(t);
	}
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->host_machine);
}

void perf_session__delete(struct perf_session *self)
{
	perf_session__destroy_kernel_maps(self);
	perf_session__delete_dead_threads(self);
	perf_session__delete_threads(self);
	machine__exit(&self->host_machine);
	close(self->fd);
	free(self);
}

void machine__remove_thread(struct machine *self, struct thread *th)
{
	self->last_match = NULL;
	rb_erase(&th->rb_node, &self->threads);
	/*
	 * We may have references to this thread, for instance in some
	 * hist_entry instances, so just move it to a separate list.
	 */
	list_add_tail(&th->node, &self->dead_threads);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return true;

	return false;
}

static const u8 cpumodes[] = {
	PERF_RECORD_MISC_USER,
	PERF_RECORD_MISC_KERNEL,
	PERF_RECORD_MISC_GUEST_USER,
	PERF_RECORD_MISC_GUEST_KERNEL
};
#define NCPUMODES (sizeof(cpumodes)/sizeof(u8))

static void ip__resolve_ams(struct machine *self, struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;
	size_t i;
	u8 m;

	memset(&al, 0, sizeof(al));

	for (i = 0; i < NCPUMODES; i++) {
		m = cpumodes[i];
		/*
		 * We cannot use the header.misc hint to determine whether a
		 * branch stack address is user, kernel, guest, or hypervisor.
		 * Branches may straddle the kernel/user/hypervisor boundaries.
		 * Thus, we try each cpumode in turn until we find a match;
		 * otherwise the symbol stays unknown.
		 */
		thread__find_addr_location(thread, self, m, MAP__FUNCTION,
					   ip, &al, NULL);
		if (al.sym)
			break;
	}

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

struct branch_info *machine__resolve_bstack(struct machine *self,
					    struct thread *thr,
					    struct branch_stack *bs)
{
	struct branch_info *bi;
	unsigned int i;

	bi = calloc(bs->nr, sizeof(struct branch_info));
	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(self, thr, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(self, thr, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}

	return bi;
}

int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel,
			       struct thread *thread,
			       struct ip_callchain *chain,
			       struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	int err;

	callchain_cursor_reset(&evsel->hists.callchain_cursor);

	for (i = 0; i < chain->nr; i++) {
		u64 ip;
		struct addr_location al;

		if (callchain_param.order == ORDER_CALLEE)
			ip = chain->ips[i];
		else
			ip = chain->ips[chain->nr - i - 1];

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				break;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
					   MAP__FUNCTION, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
		}

		err = callchain_cursor_append(&evsel->hists.callchain_cursor,
					      ip, al.map, al.sym);
		if (err)
			return err;
	}

	return 0;
}

static int process_event_synth_tracing_data_stub(union perf_event *event __used,
						 struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(union perf_event *event __used,
					 struct perf_evlist **pevlist __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __used,
				     union perf_event *event __used,
				     struct perf_sample *sample __used,
				     struct perf_evsel *evsel __used,
				     struct machine *machine __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __used,
			      union perf_event *event __used,
			      struct perf_sample *sample __used,
			      struct machine *machine __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __used,
				       union perf_event *event __used,
				       struct perf_session *perf_session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_type_stub(struct perf_tool *tool __used,
				   union perf_event *event __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct perf_session *session);

static void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_type == NULL)
		tool->event_type = process_event_type_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_finished_round_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_samples)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
}

void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

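/*
 * Illustrative sketch, not part of the original file: mem_bswap_64() takes
 * a size in bytes, so a caller swapping an array of ids scales the element
 * count accordingly, exactly as perf_event__hdr_attr_swap() does further
 * down with the attr id array.
 */
static void __used example__bswap_ids(u64 *ids, size_t nr_ids)
{
	mem_bswap_64(ids, nr_ids * sizeof(u64));
}
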
static void perf_event__all64_swap(union perf_event *event)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);
}

static void perf_event__mmap_swap(union perf_event *event)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);
}

static void perf_event__task_swap(union perf_event *event)
{
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);
}

static void perf_event__read_swap(union perf_event *event)
{
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type = bswap_32(attr->type);
	attr->size = bswap_32(attr->size);
	attr->config = bswap_64(attr->config);
	attr->sample_period = bswap_64(attr->sample_period);
	attr->sample_type = bswap_64(attr->sample_type);
	attr->read_format = bswap_64(attr->read_format);
	attr->wakeup_events = bswap_32(attr->wakeup_events);
	attr->bp_type = bswap_32(attr->bp_type);
	attr->bp_addr = bswap_64(attr->bp_addr);
	attr->bp_len = bswap_64(attr->bp_len);
}

static void perf_event__hdr_attr_swap(union perf_event *event)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

typedef void (*perf_event__swap_op)(union perf_event *event);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};

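/*
 * Note (editorial): this table is indexed by event->header.type and is
 * consulted in perf_session__process_event() when session->header.needs_swap
 * is set; a NULL entry simply means there is nothing to byte-swap for that
 * record type.
 */
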
struct sample_queue {
	u64			timestamp;
	u64			file_offset;
	union perf_event	*event;
	struct list_head	list;
};

static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_samples *os = &session->ordered_samples;

	while (!list_empty(&os->to_free)) {
		struct sample_queue *sq;

		sq = list_entry(os->to_free.next, struct sample_queue, list);
		list_del(&sq->list);
		free(sq);
	}
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset);

static void flush_sample_queue(struct perf_session *s,
			       struct perf_tool *tool)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *head = &os->samples;
	struct sample_queue *tmp, *iter;
	struct perf_sample sample;
	u64 limit = os->next_flush;
	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
	unsigned idx = 0, progress_next = os->nr_samples / 16;
	int ret;

	if (!tool->ordered_samples || !limit)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			break;

		ret = perf_session__parse_sample(s, iter->event, &sample);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else
			perf_session_deliver_event(s, iter->event, &sample, tool,
						   iter->file_offset);

		os->last_flush = iter->timestamp;
		list_del(&iter->list);
		list_add(&iter->list, &os->sample_cache);
		if (++idx >= progress_next) {
			progress_next += os->nr_samples / 16;
			ui_progress__update(idx, os->nr_samples,
					    "Processing time ordered events...");
		}
	}

	if (list_empty(head)) {
		os->last_sample = NULL;
	} else if (last_ts <= limit) {
		os->last_sample =
			list_entry(head->prev, struct sample_queue, list);
	}
}

/*
 * When perf record finishes a pass on all buffers, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush all events
 * with timestamps below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush all events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush all events below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event __used,
				  struct perf_session *session)
{
	flush_sample_queue(session, tool);
	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return 0;
}

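/*
 * Illustrative sketch, not part of the original file: a tool opts into
 * time-ordered delivery simply by setting ->ordered_samples; the
 * process_finished_round() handler above is then wired up for it by
 * perf_tool__fill_defaults(). The .sample stub here is a placeholder for a
 * real tool's callback.
 */
static struct perf_tool example_ordered_tool __used = {
	.sample		 = process_event_sample_stub,
	.ordered_samples = true,
};
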
/* The queue is ordered by time */
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct sample_queue *sample = os->last_sample;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++os->nr_samples;
	os->last_sample = new;

	if (!sample) {
		list_add(&new->list, &os->samples);
		os->max_timestamp = timestamp;
		return;
	}

	/*
	 * last_sample might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (sample->timestamp <= timestamp) {
		while (sample->timestamp <= timestamp) {
			p = sample->list.next;
			if (p == &os->samples) {
				list_add_tail(&new->list, &os->samples);
				os->max_timestamp = timestamp;
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add_tail(&new->list, &sample->list);
	} else {
		while (sample->timestamp > timestamp) {
			p = sample->list.prev;
			if (p == &os->samples) {
				list_add(&new->list, &os->samples);
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add(&new->list, &sample->list);
	}
}

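/*
 * Illustrative sketch, not part of the original file: the insertion above
 * walks linearly from ->last_sample, so nearly-sorted input costs O(1) per
 * event. A hypothetical self-check of the resulting ordering invariant:
 */
static bool __used example__queue_is_sorted(struct ordered_samples *os)
{
	struct sample_queue *sq;
	u64 prev_ts = 0;

	list_for_each_entry(sq, &os->samples, list) {
		if (sq->timestamp < prev_ts)
			return false;	/* ordering invariant violated */
		prev_ts = sq->timestamp;
	}
	return true;
}
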
#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct sample_queue))

static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
				    struct perf_sample *sample, u64 file_offset)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *sc = &os->sample_cache;
	u64 timestamp = sample->time;
	struct sample_queue *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	if (!list_empty(sc)) {
		new = list_entry(sc->next, struct sample_queue, list);
		list_del(&new->list);
	} else if (os->sample_buffer) {
		new = os->sample_buffer + os->sample_buffer_idx;
		if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
			os->sample_buffer = NULL;
	} else {
		os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
		if (!os->sample_buffer)
			return -ENOMEM;
		list_add(&os->sample_buffer->list, &os->to_free);
		os->sample_buffer_idx = 2;
		new = os->sample_buffer + 1;
	}

	new->timestamp = timestamp;
	new->file_offset = file_offset;
	new->event = event;

	__queue_event(new, s);

	return 0;
}

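/*
 * Note (editorial): in the allocation path above, slot 0 of each freshly
 * malloc'ed sample_buffer chunk is donated to the ->to_free bookkeeping
 * list, which is why the first entry handed out is slot 1 and
 * sample_buffer_idx starts at 2.
 */
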
static void callchain__printf(struct perf_sample *sample)
{
	unsigned int i;

	printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

	for (i = 0; i < sample->callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, sample->callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
	uint64_t i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++)
		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
		       i, sample->branch_stack->entries[i].from,
		       sample->branch_stack->entries[i].to);
}

static void perf_session__print_tstamp(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !session->sample_id_all) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((session->sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (session->sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void dump_event(struct perf_session *session, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_session *session, union perf_event *event,
			struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	if (session->sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(sample);

	if (session->sample_type & PERF_SAMPLE_BRANCH_STACK)
		branch_stack__printf(sample);
}

static struct machine *
perf_session__find_machine_for_cpumode(struct perf_session *session,
				       union perf_event *event)
{
	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest)
		return perf_session__find_machine(session, event->ip.pid);

	return perf_session__find_host_machine(session);
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(session, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(session->evlist, sample->id);
	if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
		/*
		 * XXX We're leaving PERF_RECORD_SAMPLE unaccounted here
		 * because the tools right now may apply filters, discarding
		 * some of the samples. For consistency, in the future we
		 * should have something like nr_filtered_samples and remove
		 * the sample->period from total_sample_period, etc. KISS for
		 * now though.
		 *
		 * Also testing against NULL allows us to handle files without
		 * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the
		 * future probably it'll be a good idea to restrict event
		 * processing via perf_session to files with both set.
		 */
		hists__inc_nr_events(&evsel->hists, event->header.type);
	}

	machine = perf_session__find_machine_for_cpumode(session, event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(session, event, sample);
		if (evsel == NULL) {
			++session->hists.stats.nr_unknown_id;
			return 0;
		}
		if (machine == NULL) {
			++session->hists.stats.nr_unprocessable_samples;
			return 0;
		}
		return tool->sample(tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			session->hists.stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_READ:
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	default:
		++session->hists.stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__preprocess_sample(struct perf_session *session,
					   union perf_event *event, struct perf_sample *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE ||
	    !(session->sample_type & PERF_SAMPLE_CALLCHAIN))
		return 0;

	if (!ip_callchain__valid(sample->callchain, event)) {
		pr_debug("call-chain problem with event, skipping it.\n");
		++session->hists.stats.nr_invalid_chains;
		session->hists.stats.total_invalid_chains += sample->period;
		return -EINVAL;
	}
	return 0;
}

static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
					    struct perf_tool *tool, u64 file_offset)
{
	int err;

	dump_event(session, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(event, &session->evlist);
		if (err == 0)
			perf_session__update_sample_type(session);
		return err;
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return tool->event_type(tool, event);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(session->fd, file_offset, SEEK_SET);
		return tool->tracing_data(event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(tool, event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, session);
	default:
		return -EINVAL;
	}
}

static int perf_session__process_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	if (session->header.needs_swap &&
	    perf_event__swap_ops[event->header.type])
		perf_event__swap_ops[event->header.type](event);

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	hists__inc_nr_events(&session->hists, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, tool, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	ret = perf_session__parse_sample(session, event, &sample);
	if (ret)
		return ret;

	/* Preprocess sample records - precheck callchains */
	if (perf_session__preprocess_sample(session, event, &sample))
		return 0;

	if (tool->ordered_samples) {
		ret = perf_session_queue_event(session, event, &sample,
					       file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session_deliver_event(session, event, &sample, tool,
					  file_offset);
}

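/*
 * Note (editorial): perf_session_queue_event() returns -ETIME for events
 * that carry no timestamp; the fall-through above delivers those events
 * immediately instead of treating them as an error.
 */
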
void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->host_machine, pid);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

static void perf_session__warn_about_errors(const struct perf_session *session,
					    const struct perf_tool *tool)
{
	if (tool->lost == perf_event__process_lost &&
	    session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    session->hists.stats.nr_events[0],
			    session->hists.stats.nr_events[PERF_RECORD_LOST]);
	}

	if (session->hists.stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_unknown_events);
	}

	if (session->hists.stats.nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    session->hists.stats.nr_unknown_id);
	}

	if (session->hists.stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_invalid_chains,
			    session->hists.stats.nr_events[PERF_RECORD_SAMPLE]);
	}

	if (session->hists.stats.nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    session->hists.stats.nr_unprocessable_samples);
	}
}

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_tool *tool)
{
	union perf_event event;
	uint32_t size;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
more:
	err = readn(self->fd, &event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event.header);

	size = event.header.size;
	if (size == 0)
		size = 8;

	p = &event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(self->fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(self, &event, tool, head)) < 0) {
		dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
			    head, event.header.size, event.header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope of catching up again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	perf_session__warn_about_errors(self, tool);
	perf_session_free_sample_buffers(self);
	return err;
}

static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size)
		return NULL;

	return event;
}

int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_tool *tool)
{
	u64 head, page_offset, file_offset, file_pos, progress_next;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	size_t page_size, mmap_size;
	char *buf, *mmaps[8];
	union perf_event *event;
	uint32_t size;

	perf_tool__fill_defaults(tool);

	page_size = sysconf(_SC_PAGESIZE);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_offset + data_size < file_size)
		file_size = data_offset + data_size;

	progress_next = file_size / 16;

	mmap_size = session->mmap_window;
	if (mmap_size > file_size)
		mmap_size = file_size;

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size == 0 ||
	    perf_session__process_event(session, event, tool, file_pos) < 0) {
		dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
			    file_offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope of catching up again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;
	file_pos += size;

	if (file_pos >= progress_next) {
		progress_next += file_size / 16;
		ui_progress__update(file_pos, file_size,
				    "Processing events...");
	}

	if (file_pos < file_size)
		goto more;

	err = 0;
	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	flush_sample_queue(session, tool);
out_err:
	perf_session__warn_about_errors(session, tool);
	perf_session_free_sample_buffers(session);
	return err;
}

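/*
 * Note (editorial): mmaps[] above is a small ring of windows and map_idx
 * wraps via "& (ARRAY_SIZE(mmaps) - 1)", so up to eight windows can stay
 * mapped at once. Old windows are kept alive because queued (time-ordered)
 * events may still point into the mapping they were read from until they
 * are flushed.
 */
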
int perf_session__process_events(struct perf_session *self,
				 struct perf_tool *tool)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, tool);
	else
		err = __perf_session__process_pipe_events(self, tool);

	return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
				     const char *symbol_name, u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool with_hits)
{
	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = fprintf(fp, "Aggregated stats:\n");

	ret += hists__fprintf_nr_events(&session->hists, fp);

	list_for_each_entry(pos, &session->evlist->entries, node) {
		ret += fprintf(fp, "%s stats:\n", event_name(pos));
		ret += hists__fprintf_nr_events(&pos->hists, fp);
	}

	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->host_machine, fp);
}

void perf_session__remove_thread(struct perf_session *session,
				 struct thread *th)
{
	/*
	 * FIXME: This one makes no sense, we need to remove the thread from
	 * the machine it belongs to, perf_session can have many machines, so
	 * doing it always on ->host_machine is wrong. Fix when auditing all
	 * the 'perf kvm' code.
	 */
	machine__remove_thread(&session->host_machine, th);
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
						   unsigned int type)
{
	struct perf_evsel *pos;

	list_for_each_entry(pos, &session->evlist->entries, node) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}

void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
			  struct machine *machine, struct perf_evsel *evsel,
			  int print_sym, int print_dso, int print_symoffset)
{
	struct addr_location al;
	struct callchain_cursor *cursor = &evsel->hists.callchain_cursor;
	struct callchain_cursor_node *node;

	if (perf_event__preprocess_sample(event, machine, &al, sample,
					  NULL) < 0) {
		error("problem processing %d event, skipping it.\n",
		      event->header.type);
		return;
	}

	if (symbol_conf.use_callchain && sample->callchain) {

		if (machine__resolve_callchain(machine, evsel, al.thread,
					       sample->callchain, NULL) != 0) {
			if (verbose)
				error("Failed to resolve callchain. Skipping\n");
			return;
		}
		callchain_cursor_commit(cursor);

		while (1) {
			node = callchain_cursor_current(cursor);
			if (!node)
				break;

			printf("\t%16" PRIx64, node->ip);
			if (print_sym) {
				printf(" ");
				symbol__fprintf_symname(node->sym, stdout);
			}
			if (print_dso) {
				printf(" (");
				map__fprintf_dsoname(node->map, stdout);
				printf(")");
			}
			printf("\n");

			callchain_cursor_advance(cursor);
		}

	} else {
		printf("%16" PRIx64, sample->ip);
		if (print_sym) {
			printf(" ");
			if (print_symoffset)
				symbol__fprintf_symname_offs(al.sym, &al,
							     stdout);
			else
				symbol__fprintf_symname(al.sym, stdout);
		}

		if (print_dso) {
			printf(" (");
			map__fprintf_dsoname(al.map, stdout);
			printf(")");
		}
	}
}

int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -c option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			return -1;
		}

		set_bit(cpu, cpu_bitmap);
	}

	return 0;
}

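/*
 * Illustrative sketch, not part of the original file: how a report-style
 * tool might use the helper above for a user-supplied cpu list (the
 * conventional -C/--cpu option; treat the option name as an assumption).
 */
static int __used example__setup_cpu_filter(struct perf_session *session,
					    const char *cpu_list)
{
	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);

	bitmap_zero(cpu_bitmap, MAX_NR_CPUS);
	return perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
}
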
void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	struct stat st;
	int ret;

	if (session == NULL || fp == NULL)
		return;

	ret = fstat(session->fd, &st);
	if (ret == -1)
		return;

	fprintf(fp, "# ========\n");
	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}