/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <byteswap.h>
#include "asm/bug.h"
#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))
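/*
 * Both macros index into an xyarray: FD() yields this evsel's event fd
 * for a (cpu, thread) pair, and GROUP_FD() yields the group leader's fd
 * for a cpu (its thread 0 slot), used when opening grouped events.
 */
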
int __perf_evsel__sample_size(u64 sample_type)
{
        u64 mask = sample_type & PERF_SAMPLE_MASK;
        int size = 0;
        int i;

        for (i = 0; i < 64; i++) {
                if (mask & (1ULL << i))
                        size++;
        }

        size *= sizeof(u64);

        return size;
}
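/*
 * Example: sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID sets two bits
 * inside PERF_SAMPLE_MASK, so the fixed-size part of each sample is
 * 2 * sizeof(u64) = 16 bytes.
 */
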
static void hists__init(struct hists *hists)
{
        memset(hists, 0, sizeof(*hists));
        hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
        hists->entries_in = &hists->entries_in_array[0];
        hists->entries_collapsed = RB_ROOT;
        hists->entries = RB_ROOT;
        pthread_mutex_init(&hists->lock, NULL);
}

void perf_evsel__init(struct perf_evsel *evsel,
                      struct perf_event_attr *attr, int idx)
{
        evsel->idx  = idx;
        evsel->attr = *attr;
        INIT_LIST_HEAD(&evsel->node);
        hists__init(&evsel->hists);
}

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
        struct perf_evsel *evsel = zalloc(sizeof(*evsel));

        if (evsel != NULL)
                perf_evsel__init(evsel, attr, idx);

        return evsel;
}
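/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 *
 *      struct perf_event_attr attr = {
 *              .type   = PERF_TYPE_HARDWARE,
 *              .config = PERF_COUNT_HW_CPU_CYCLES,
 *      };
 *      struct perf_evsel *evsel = perf_evsel__new(&attr, 0);
 *
 * On success the caller owns the evsel and releases it with
 * perf_evsel__delete(); a NULL return means the allocation failed.
 */
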
void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
                        struct perf_evsel *first)
{
        struct perf_event_attr *attr = &evsel->attr;
        int track = !evsel->idx; /* only the first counter needs these */

        attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
        attr->inherit       = !opts->no_inherit;
        attr->read_format   = PERF_FORMAT_TOTAL_TIME_ENABLED |
                              PERF_FORMAT_TOTAL_TIME_RUNNING |
                              PERF_FORMAT_ID;

        attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;

        /*
         * We give some events a default interval of 1, but keep it a
         * weak assumption that the user can override.
         */
        if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
                                     opts->user_interval != ULLONG_MAX)) {
                if (opts->freq) {
                        attr->sample_type |= PERF_SAMPLE_PERIOD;
                        attr->freq        = 1;
                        attr->sample_freq = opts->freq;
                } else {
                        attr->sample_period = opts->default_interval;
                }
        }

        if (opts->no_samples)
                attr->sample_freq = 0;

        if (opts->inherit_stat)
                attr->inherit_stat = 1;

        if (opts->sample_address) {
                attr->sample_type |= PERF_SAMPLE_ADDR;
                attr->mmap_data = track;
        }

        if (opts->call_graph)
                attr->sample_type |= PERF_SAMPLE_CALLCHAIN;

        if (opts->system_wide)
                attr->sample_type |= PERF_SAMPLE_CPU;

        if (opts->period)
                attr->sample_type |= PERF_SAMPLE_PERIOD;

        if (!opts->sample_id_all_missing &&
            (opts->sample_time || opts->system_wide ||
             !opts->no_inherit || opts->cpu_list))
                attr->sample_type |= PERF_SAMPLE_TIME;

        if (opts->raw_samples) {
                attr->sample_type |= PERF_SAMPLE_TIME;
                attr->sample_type |= PERF_SAMPLE_RAW;
                attr->sample_type |= PERF_SAMPLE_CPU;
        }

        if (opts->no_delay) {
                attr->watermark = 0;
                attr->wakeup_events = 1;
        }

        if (opts->branch_stack) {
                attr->sample_type |= PERF_SAMPLE_BRANCH_STACK;
                attr->branch_sample_type = opts->branch_stack;
        }

        attr->mmap = track;
        attr->comm = track;

        if (!opts->target_pid && !opts->target_tid && !opts->system_wide &&
            (!opts->group || evsel == first)) {
                attr->disabled = 1;
                attr->enable_on_exec = 1;
        }
}
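/*
 * Note on the last block above: when perf forks the workload itself
 * (no target pid/tid, not system wide), the event is created disabled
 * with enable_on_exec set, so counting starts exactly at exec() of the
 * workload and the fork/exec preamble is never measured. For a group,
 * only the leader gets this treatment.
 */
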
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
        int cpu, thread;

        evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

        if (evsel->fd) {
                for (cpu = 0; cpu < ncpus; cpu++) {
                        for (thread = 0; thread < nthreads; thread++) {
                                FD(evsel, cpu, thread) = -1;
                        }
                }
        }

        return evsel->fd != NULL ? 0 : -ENOMEM;
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
        evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
        if (evsel->sample_id == NULL)
                return -ENOMEM;

        evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
        if (evsel->id == NULL) {
                xyarray__delete(evsel->sample_id);
                evsel->sample_id = NULL;
                return -ENOMEM;
        }

        return 0;
}

int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
        evsel->counts = zalloc((sizeof(*evsel->counts) +
                                (ncpus * sizeof(struct perf_counts_values))));
        return evsel->counts != NULL ? 0 : -ENOMEM;
}
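/*
 * evsel->counts ends in a flexible per-cpu array of struct
 * perf_counts_values, so a single zalloc() of header + ncpus entries
 * covers the aggregate and all per-cpu slots.
 */
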
void perf_evsel__free_fd(struct perf_evsel *evsel)
{
        xyarray__delete(evsel->fd);
        evsel->fd = NULL;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
        xyarray__delete(evsel->sample_id);
        evsel->sample_id = NULL;
        free(evsel->id);
        evsel->id = NULL;
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
        int cpu, thread;

        for (cpu = 0; cpu < ncpus; cpu++)
                for (thread = 0; thread < nthreads; ++thread) {
                        close(FD(evsel, cpu, thread));
                        FD(evsel, cpu, thread) = -1;
                }
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
        assert(list_empty(&evsel->node));
        xyarray__delete(evsel->fd);
        xyarray__delete(evsel->sample_id);
        free(evsel->id);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
        perf_evsel__exit(evsel);
        close_cgroup(evsel->cgrp);
        free(evsel->name);
        free(evsel);
}
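/*
 * The exit/delete split: perf_evsel__exit() releases only what the
 * evsel owns internally, for evsels embedded in a larger structure,
 * while perf_evsel__delete() additionally drops the cgroup reference
 * and frees the evsel allocated by perf_evsel__new().
 */
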
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
                              int cpu, int thread, bool scale)
{
        struct perf_counts_values count;
        size_t nv = scale ? 3 : 1;

        if (FD(evsel, cpu, thread) < 0)
                return -EINVAL;

        if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
                return -ENOMEM;

        if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
                return -errno;

        if (scale) {
                if (count.run == 0)
                        count.val = 0;
                else if (count.run < count.ena)
                        count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
        } else
                count.ena = count.run = 0;

        evsel->counts->cpu[cpu] = count;
        return 0;
}
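/*
 * Scaling example: if the event was enabled for ena = 1000 time units
 * but only ran for run = 250 (multiplexed with other events), a raw
 * val = 100 is extrapolated to 100 * 1000 / 250 = 400, with + 0.5 for
 * rounding.
 */
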
int __perf_evsel__read(struct perf_evsel *evsel,
                       int ncpus, int nthreads, bool scale)
{
        size_t nv = scale ? 3 : 1;
        int cpu, thread;
        struct perf_counts_values *aggr = &evsel->counts->aggr, count;

        aggr->val = aggr->ena = aggr->run = 0;

        for (cpu = 0; cpu < ncpus; cpu++) {
                for (thread = 0; thread < nthreads; thread++) {
                        if (FD(evsel, cpu, thread) < 0)
                                continue;

                        if (readn(FD(evsel, cpu, thread),
                                  &count, nv * sizeof(u64)) < 0)
                                return -errno;

                        aggr->val += count.val;
                        if (scale) {
                                aggr->ena += count.ena;
                                aggr->run += count.run;
                        }
                }
        }

        evsel->counts->scaled = 0;
        if (scale) {
                if (aggr->run == 0) {
                        evsel->counts->scaled = -1;
                        aggr->val = 0;
                        return 0;
                }

                if (aggr->run < aggr->ena) {
                        evsel->counts->scaled = 1;
                        aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
                }
        } else
                aggr->ena = aggr->run = 0;

        return 0;
}
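/*
 * counts->scaled thus encodes the read quality: -1 means the event
 * never ran (value forced to 0), 1 means the value was extrapolated
 * because of multiplexing, 0 means the count is exact.
 */
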
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
                              struct thread_map *threads, bool group,
                              struct xyarray *group_fds)
{
        int cpu, thread;
        unsigned long flags = 0;
        int pid = -1, err;

        if (evsel->fd == NULL &&
            perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
                return -ENOMEM;

        if (evsel->cgrp) {
                flags = PERF_FLAG_PID_CGROUP;
                pid = evsel->cgrp->fd;
        }

        for (cpu = 0; cpu < cpus->nr; cpu++) {
                int group_fd = group_fds ? GROUP_FD(group_fds, cpu) : -1;

                for (thread = 0; thread < threads->nr; thread++) {

                        if (!evsel->cgrp)
                                pid = threads->map[thread];

                        FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
                                                                     pid,
                                                                     cpus->map[cpu],
                                                                     group_fd, flags);
                        if (FD(evsel, cpu, thread) < 0) {
                                err = -errno;
                                goto out_close;
                        }

                        if (group && group_fd == -1)
                                group_fd = FD(evsel, cpu, thread);
                }
        }

        return 0;

out_close:
        do {
                while (--thread >= 0) {
                        close(FD(evsel, cpu, thread));
                        FD(evsel, cpu, thread) = -1;
                }
                thread = threads->nr;
        } while (--cpu >= 0);
        return err;
}
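/*
 * The out_close unwind above closes every fd opened before the
 * failure: first the threads already opened on the failing cpu, then
 * all threads of each earlier cpu, resetting each slot to -1 so the
 * xyarray is left in its freshly-allocated state.
 */
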
void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
        if (evsel->fd == NULL)
                return;

        perf_evsel__close_fd(evsel, ncpus, nthreads);
        perf_evsel__free_fd(evsel);
}

static struct {
        struct cpu_map map;
        int cpus[1];
} empty_cpu_map = {
        .map.nr = 1,
        .cpus   = { -1, },
};

static struct {
        struct thread_map map;
        int threads[1];
} empty_thread_map = {
        .map.nr  = 1,
        .threads = { -1, },
};

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
                     struct thread_map *threads, bool group,
                     struct xyarray *group_fd)
{
        if (cpus == NULL) {
                /* Work around old compiler warnings about strict aliasing */
                cpus = &empty_cpu_map.map;
        }

        if (threads == NULL)
                threads = &empty_thread_map.map;

        return __perf_evsel__open(evsel, cpus, threads, group, group_fd);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
                             struct cpu_map *cpus, bool group,
                             struct xyarray *group_fd)
{
        return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group,
                                  group_fd);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
                                struct thread_map *threads, bool group,
                                struct xyarray *group_fd)
{
        return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group,
                                  group_fd);
}
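/*
 * Per-thread counting sketch (hypothetical caller; assumes the
 * thread_map__new() helper declared in thread_map.h):
 *
 *      struct thread_map *threads = thread_map__new(-1, getpid());
 *
 *      if (threads != NULL &&
 *          perf_evsel__open_per_thread(evsel, threads, false, NULL) == 0)
 *              __perf_evsel__read_on_cpu(evsel, 0, 0, true);
 *
 * The empty_cpu_map used here stands in for "any cpu" (cpu == -1), so
 * per-thread events follow the threads wherever they run.
 */
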
static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
                                       struct perf_sample *sample)
{
        const u64 *array = event->sample.array;

        array += ((event->header.size -
                   sizeof(event->header)) / sizeof(u64)) - 1;

        if (type & PERF_SAMPLE_CPU) {
                u32 *p = (u32 *)array;
                sample->cpu = *p;
                array--;
        }

        if (type & PERF_SAMPLE_STREAM_ID) {
                sample->stream_id = *array;
                array--;
        }

        if (type & PERF_SAMPLE_ID) {
                sample->id = *array;
                array--;
        }

        if (type & PERF_SAMPLE_TIME) {
                sample->time = *array;
                array--;
        }

        if (type & PERF_SAMPLE_TID) {
                u32 *p = (u32 *)array;
                sample->pid = p[0];
                sample->tid = p[1];
        }

        return 0;
}
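/*
 * Note the direction: for non-sample events the kernel appends the id
 * fields at the tail of the record, so this parser starts at the last
 * u64 and walks backwards, undoing the order in which the fields were
 * written.
 */
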
static bool sample_overlap(const union perf_event *event,
                           const void *offset, u64 size)
{
        const void *base = event;

        if (offset + size > base + event->header.size)
                return true;

        return false;
}

int perf_event__parse_sample(const union perf_event *event, u64 type,
                             int sample_size, bool sample_id_all,
                             struct perf_sample *data, bool swapped)
{
        const u64 *array;

        /*
         * used for cross-endian analysis. See git commit 65014ab3
         * for why this goofiness is needed.
         */
        union u64_swap u;

        memset(data, 0, sizeof(*data));
        data->cpu = data->pid = data->tid = -1;
        data->stream_id = data->id = data->time = -1ULL;
        data->period = 1;

        if (event->header.type != PERF_RECORD_SAMPLE) {
                if (!sample_id_all)
                        return 0;
                return perf_event__parse_id_sample(event, type, data);
        }

        array = event->sample.array;

        if (sample_size + sizeof(event->header) > event->header.size)
                return -EFAULT;

        if (type & PERF_SAMPLE_IP) {
                data->ip = event->ip.ip;
                array++;
        }

        if (type & PERF_SAMPLE_TID) {
                u.val64 = *array;
                if (swapped) {
                        /* undo swap of u64, then swap on individual u32s */
                        u.val64 = bswap_64(u.val64);
                        u.val32[0] = bswap_32(u.val32[0]);
                        u.val32[1] = bswap_32(u.val32[1]);
                }

                data->pid = u.val32[0];
                data->tid = u.val32[1];
                array++;
        }

        if (type & PERF_SAMPLE_TIME) {
                data->time = *array;
                array++;
        }

        data->addr = 0;
        if (type & PERF_SAMPLE_ADDR) {
                data->addr = *array;
                array++;
        }

        data->id = -1ULL;
        if (type & PERF_SAMPLE_ID) {
                data->id = *array;
                array++;
        }

        if (type & PERF_SAMPLE_STREAM_ID) {
                data->stream_id = *array;
                array++;
        }

        if (type & PERF_SAMPLE_CPU) {
                u.val64 = *array;
                if (swapped) {
                        /* undo swap of u64, then swap on individual u32s */
                        u.val64 = bswap_64(u.val64);
                        u.val32[0] = bswap_32(u.val32[0]);
                }

                data->cpu = u.val32[0];
                array++;
        }

        if (type & PERF_SAMPLE_PERIOD) {
                data->period = *array;
                array++;
        }

        if (type & PERF_SAMPLE_READ) {
                fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
                return -1;
        }

        if (type & PERF_SAMPLE_CALLCHAIN) {
                if (sample_overlap(event, array, sizeof(data->callchain->nr)))
                        return -EFAULT;

                data->callchain = (struct ip_callchain *)array;

                if (sample_overlap(event, array, data->callchain->nr))
                        return -EFAULT;

                array += 1 + data->callchain->nr;
        }

        if (type & PERF_SAMPLE_RAW) {
                const u64 *pdata;

                u.val64 = *array;
                if (WARN_ONCE(swapped,
                              "Endianness of raw data not corrected!\n")) {
                        /* undo swap of u64, then swap on individual u32s */
                        u.val64 = bswap_64(u.val64);
                        u.val32[0] = bswap_32(u.val32[0]);
                        u.val32[1] = bswap_32(u.val32[1]);
                }

                if (sample_overlap(event, array, sizeof(u32)))
                        return -EFAULT;

                data->raw_size = u.val32[0];
                pdata = (void *) array + sizeof(u32);

                if (sample_overlap(event, pdata, data->raw_size))
                        return -EFAULT;

                data->raw_data = (void *) pdata;

                array = (void *)array + data->raw_size + sizeof(u32);
        }

        if (type & PERF_SAMPLE_BRANCH_STACK) {
                u64 sz;

                data->branch_stack = (struct branch_stack *)array;
                array++; /* nr */

                sz = data->branch_stack->nr * sizeof(struct branch_entry);
                sz /= sizeof(u64);
                array += sz;
        }
        return 0;
}
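/*
 * Unlike perf_event__parse_id_sample() above, this parser walks the
 * record front to back: fixed u64 fields first, in sample_type bit
 * order, then the variable-length callchain, raw data and branch stack
 * payloads, each bounds-checked with sample_overlap() before use.
 */
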
int perf_event__synthesize_sample(union perf_event *event, u64 type,
                                  const struct perf_sample *sample,
                                  bool swapped)
{
        u64 *array;

        /*
         * used for cross-endian analysis. See git commit 65014ab3
         * for why this goofiness is needed.
         */
        union u64_swap u;

        array = event->sample.array;

        if (type & PERF_SAMPLE_IP) {
                event->ip.ip = sample->ip;
                array++;
        }

        if (type & PERF_SAMPLE_TID) {
                u.val32[0] = sample->pid;
                u.val32[1] = sample->tid;
                if (swapped) {
                        /*
                         * Inverse of what is done in perf_event__parse_sample
                         */
                        u.val32[0] = bswap_32(u.val32[0]);
                        u.val32[1] = bswap_32(u.val32[1]);
                        u.val64 = bswap_64(u.val64);
                }

                *array = u.val64;
                array++;
        }

        if (type & PERF_SAMPLE_TIME) {
                *array = sample->time;
                array++;
        }

        if (type & PERF_SAMPLE_ADDR) {
                *array = sample->addr;
                array++;
        }

        if (type & PERF_SAMPLE_ID) {
                *array = sample->id;
                array++;
        }

        if (type & PERF_SAMPLE_STREAM_ID) {
                *array = sample->stream_id;
                array++;
        }

        if (type & PERF_SAMPLE_CPU) {
                u.val32[0] = sample->cpu;
                if (swapped) {
                        /*
                         * Inverse of what is done in perf_event__parse_sample
                         */
                        u.val32[0] = bswap_32(u.val32[0]);
                        u.val64 = bswap_64(u.val64);
                }
                *array = u.val64;
                array++;
        }

        if (type & PERF_SAMPLE_PERIOD) {
                *array = sample->period;
                array++;
        }

        return 0;
}
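/*
 * Round-trip note: a record synthesized here with a given sample_type
 * should parse back through perf_event__parse_sample() into the same
 * fixed fields, since both functions walk the u64 array in the same
 * sample_type bit order.
 */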