4 * @remark Copyright 2002 OProfile authors
5 * @remark Read the file COPYING
7 * @author John Levon <levon@movementarian.org>
9 * Modified by Aravind Menon and Jose Renato Santos for Xen
10 * These modifications are:
11 * Copyright (C) 2005 Hewlett-Packard Co.
13 * Separated out arch-generic part
14 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
15 * VA Linux Systems Japan K.K.
18 #include <linux/init.h>
19 #include <linux/notifier.h>
20 #include <linux/smp.h>
21 #include <linux/oprofile.h>
22 #include <linux/syscore_ops.h>
23 #include <linux/slab.h>
24 #include <linux/interrupt.h>
25 #include <linux/vmalloc.h>
26 #include <asm/pgtable.h>
27 #include <xen/evtchn.h>
28 #include <xen/xenoprof.h>
29 #include <xen/interface/xen.h>
30 #include <xen/interface/xenoprof.h>
31 #include "../../../drivers/oprofile/event_buffer.h"
/* Requested number of samples per shared buffer (passed to XENOPROF_get_buffer). */
33 #define MAX_XENOPROF_SAMPLES 16
35 /* sample buffers shared with Xen */
/* Per-VCPU pointer array into shared_buffer, indexed by vcpu_id. */
36 static xenoprof_buf_t **__read_mostly xenoprof_buf;
37 /* Shared buffer area */
38 static struct xenoprof_shared_buffer shared_buffer;
40 /* Passive sample buffers shared with Xen */
/* One per-VCPU pointer array per passive domain, indexed by vcpu_id. */
41 static xenoprof_buf_t **__read_mostly p_xenoprof_buf[MAX_OPROF_DOMAINS];
42 /* Passive shared buffer area */
43 static struct xenoprof_shared_buffer p_shared_buffer[MAX_OPROF_DOMAINS];
/* Forward declarations for the oprofile_operations callbacks below. */
45 static int xenoprof_start(void);
46 static void xenoprof_stop(void);
/* Set while profiling is active (between setup and shutdown). */
48 static int xenoprof_enabled = 0;
/* Non-zero when Xen reports this domain as the primary profiler (dom0). */
49 static int xenoprof_is_primary = 0;
/* Non-zero once an active-domain list has been programmed into Xen. */
50 static int active_defined;
/* Owned by the generic oprofile driver; forwarded to Xen for backtraces. */
52 extern unsigned long oprofile_backtrace_depth;
54 /* Number of buffers in shared area (one per VCPU) */
56 /* Mapping of VIRQ_XENOPROF to irq number */
57 static int ovf_irq = -1;
/* Tracks which CPUs currently have VIRQ_XENOPROF bound (see bind/unbind_virq). */
58 static cpumask_var_t ovf_irq_mapped;
59 /* cpu model type string - copied from Xen on XENOPROF_init command */
60 static char cpu_type[XENOPROF_CPU_TYPE_SIZE];
62 #ifdef CONFIG_PM_SLEEP
/* Syscore suspend hook: stop profiling across a suspend if it was running. */
64 static int xenoprof_suspend(void)
66 if (xenoprof_enabled == 1)
/* Syscore resume hook: restart profiling after resume if it was enabled. */
72 static void xenoprof_resume(void)
74 if (xenoprof_enabled == 1)
79 static struct syscore_ops oprofile_syscore_ops = {
80 .resume = xenoprof_resume,
81 .suspend = xenoprof_suspend
/* Register the suspend/resume hooks with the syscore framework. */
85 static int __init init_driverfs(void)
87 register_syscore_ops(&oprofile_syscore_ops);
/* Undo init_driverfs() on driver teardown. */
92 static void exit_driverfs(void)
94 unregister_syscore_ops(&oprofile_syscore_ops);
/* Without CONFIG_PM_SLEEP the hooks collapse to no-ops. */
98 #define init_driverfs() do { } while (0)
99 #define exit_driverfs() do { } while (0)
100 #endif /* CONFIG_PM_SLEEP */
/* Running counts of samples forwarded to oprofile (active vs. passive). */
102 static unsigned long long oprofile_samples;
103 static unsigned long long p_oprofile_samples;
/* Number of passive domains currently configured, and their descriptors. */
105 static unsigned int pdomains;
106 static struct xenoprof_passive passive_domains[MAX_OPROF_DOMAINS];
108 /* Check whether the given entry is an escape code */
109 static int xenoprof_is_escape(xenoprof_buf_t * buf, int tail)
/* NOTE(review): on 32-bit guests against pre-4.2 Xen the eip field is
 * presumably narrower, so the escape code must be compared truncated to
 * unsigned long as well — confirm against the compat ABI headers. */
111 #if CONFIG_XEN_COMPAT < 0x040200 && !defined(CONFIG_64BIT)
112 if (buf->event_log[tail].eip == (unsigned long)XENOPROF_ESCAPE_CODE)
115 return (buf->event_log[tail].eip == XENOPROF_ESCAPE_CODE);
118 /* Get the event at the given entry */
119 static uint8_t xenoprof_get_event(xenoprof_buf_t * buf, int tail)
121 return (buf->event_log[tail].event);
/*
 * Drain one shared sample ring and forward its entries to oprofile.
 * Consumes entries from event_tail up to event_head, handling escape
 * records (e.g. XENOPROF_TRACE_BEGIN) specially, and finally publishes
 * the new tail back into the Xen-shared buffer.  @is_passive selects
 * passive-domain sample accounting.
 */
124 static void xenoprof_add_pc(xenoprof_buf_t *buf, int is_passive)
126 int head, tail, size;
/* Snapshot the ring indices; Xen is the producer, we are the consumer. */
129 head = buf->event_head;
130 tail = buf->event_tail;
131 size = buf->event_size;
133 while (tail != head) {
134 if (xenoprof_is_escape(buf, tail) &&
135 xenoprof_get_event(buf, tail) == XENOPROF_TRACE_BEGIN) {
/* Escape record: emit the sample mode for the following trace. */
137 oprofile_add_mode(buf->event_log[tail].mode);
141 p_oprofile_samples++;
/* Ordinary sample: forward pc, mode and event to oprofile. */
144 oprofile_add_pc(buf->event_log[tail].eip,
145 buf->event_log[tail].mode,
146 buf->event_log[tail].event);
151 p_oprofile_samples++;
/* Publish consumed position so Xen can reuse the entries. */
159 buf->event_tail = tail;
/*
 * Drain the sample buffers of all configured passive domains.
 * For each domain with pending samples, a domain-switch record is
 * emitted before its samples; afterwards control is handed back to the
 * coordinator domain with a final COORDINATOR_DOMAIN switch.
 */
162 static void xenoprof_handle_passive(void)
165 int flag_domain, flag_switch = 0;
167 for (i = 0; i < pdomains; i++) {
169 for (j = 0; j < passive_domains[i].nbuf; j++) {
170 xenoprof_buf_t *buf = p_xenoprof_buf[i][j];
/* Skip empty rings without emitting a domain switch. */
171 if (buf->event_head == buf->event_tail)
174 if (!oprofile_add_domain_switch(
175 passive_domains[i].domain_id))
/* Forward this VCPU's samples as passive (is_passive = 1). */
179 xenoprof_add_pc(buf, 1);
/* Return attribution to the coordinator after passive processing. */
185 oprofile_add_domain_switch(COORDINATOR_DOMAIN);
/*
 * VIRQ_XENOPROF handler: drain this CPU's shared sample buffer, and on
 * the primary domain additionally drain the passive domains' buffers.
 * The static bit-flag serializes passive handling so only one CPU at a
 * time walks the passive buffers.
 */
188 static irqreturn_t xenoprof_ovf_interrupt(int irq, void *dev_id)
190 struct xenoprof_buf * buf;
191 static unsigned long flag;
/* Each CPU drains the buffer Xen assigned to its own VCPU. */
193 buf = xenoprof_buf[smp_processor_id()];
195 xenoprof_add_pc(buf, 0);
197 if (xenoprof_is_primary && !test_and_set_bit(0, &flag)) {
198 xenoprof_handle_passive();
/* NOTE(review): smp_mb__before_clear_bit() was replaced by
 * smp_mb__before_atomic() in later kernels — confirm the target
 * kernel version still provides this symbol. */
199 smp_mb__before_clear_bit();
/* Shared irqaction used when binding VIRQ_XENOPROF on each CPU.
 * NOTE(review): IRQF_DISABLED has been a no-op since 2.6.35 and was
 * removed in 4.1 — drop it when the minimum supported kernel allows. */
206 static struct irqaction ovf_action = {
207 .handler = xenoprof_ovf_interrupt,
208 .flags = IRQF_DISABLED,
/*
 * Unbind VIRQ_XENOPROF from every online CPU it was bound on.
 * ovf_irq_mapped records per-CPU bindings, so only CPUs that actually
 * succeeded in bind_virq() are unbound (test-and-clear keeps the mask
 * consistent even if called twice).
 */
212 static void unbind_virq(void)
216 for_each_online_cpu(i) {
217 if (cpumask_test_and_clear_cpu(i, ovf_irq_mapped))
218 unbind_from_per_cpu_irq(ovf_irq, i, &ovf_action);
/*
 * Bind VIRQ_XENOPROF to xenoprof_ovf_interrupt on every online CPU.
 * All CPUs must map to the same irq number (ovf_irq); a mismatch is
 * reported and treated as an error.  Successful bindings are recorded
 * in ovf_irq_mapped so unbind_virq() can undo exactly what was done.
 */
224 static int bind_virq(void)
229 for_each_online_cpu(i) {
230 result = bind_virq_to_irqaction(VIRQ_XENOPROF, i, &ovf_action);
/* Every CPU is expected to share the single ovf_irq number. */
239 else if (result != ovf_irq) {
241 pr_err("IRQ%d unexpected (should be %d)\n",
245 cpumask_set_cpu(i, ovf_irq_mapped);
252 static xenoprof_buf_t **get_buffer_array(unsigned int nbuf)
254 size_t size = nbuf * sizeof(xenoprof_buf_t);
256 if (size <= PAGE_SIZE)
257 return kmalloc(size, GFP_KERNEL);
258 return vmalloc(size);
261 static void release_buffer_array(xenoprof_buf_t **buf, unsigned int nbuf)
263 if (nbuf * sizeof(xenoprof_buf_t) <= PAGE_SIZE)
/*
 * Tear down all passive-domain resources: unmap each domain's shared
 * buffer from Xen and free the corresponding per-VCPU pointer array.
 * Presumably also resets pdomains to 0 in elided code — the visible
 * loop only releases the per-domain resources.
 */
270 static void unmap_passive_list(void)
273 for (i = 0; i < pdomains; i++) {
274 xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[i]);
275 release_buffer_array(p_xenoprof_buf[i],
276 passive_domains[i].nbuf);
/*
 * Map the Xen-shared sample buffer area into this domain and build the
 * per-VCPU lookup table xenoprof_buf[] (indexed by vcpu_id).
 * Idempotent: returns early if shared_buffer is already mapped.
 * Returns 0 on success or a negative error (elided paths).
 */
282 static int map_xenoprof_buffer(int max_samples)
284 struct xenoprof_get_buffer get_buffer;
285 struct xenoprof_buf *buf;
/* Already mapped — nothing to do. */
288 if ( shared_buffer.buffer )
291 get_buffer.max_samples = max_samples;
292 ret = xenoprof_arch_map_shared_buffer(&get_buffer, &shared_buffer);
/* Xen reports one buffer per VCPU; remember the count (file-scope nbuf). */
295 nbuf = get_buffer.nbuf;
297 xenoprof_buf = get_buffer_array(nbuf);
/* Allocation failed: undo the mapping (error return elided). */
299 xenoprof_arch_unmap_shared_buffer(&shared_buffer);
/* Buffers are laid out contiguously, bufsize bytes apart, each tagged
 * with the VCPU it belongs to. */
303 for (i=0; i< nbuf; i++) {
304 buf = (struct xenoprof_buf*)
305 &shared_buffer.buffer[i * get_buffer.bufsize];
306 BUG_ON(buf->vcpu_id >= nbuf);
307 xenoprof_buf[buf->vcpu_id] = buf;
/*
 * oprofile_operations.setup callback: map the shared buffers, bind the
 * overflow VIRQ on all CPUs, and (on the primary domain) program the
 * active-domain list, backtrace depth, counters and events into Xen,
 * then enable sample-overflow VIRQ delivery.
 * Returns 0 on success; error paths release what was acquired.
 */
314 static int xenoprof_setup(void)
318 if ( (ret = map_xenoprof_buffer(MAX_XENOPROF_SAMPLES)) )
321 if ( (ret = bind_virq()) ) {
322 release_buffer_array(xenoprof_buf, nbuf);
/* Only the primary profiling domain may configure Xen-side state. */
326 if (xenoprof_is_primary) {
327 /* Define dom0 as an active domain if not done yet */
328 if (!active_defined) {
330 ret = HYPERVISOR_xenoprof_op(
331 XENOPROF_reset_active_list, NULL);
335 ret = HYPERVISOR_xenoprof_op(
336 XENOPROF_set_active, &domid);
/* Forward the requested backtrace depth; on failure fall back to 0
 * (no backtraces) rather than failing setup. */
342 if (oprofile_backtrace_depth > 0) {
343 ret = HYPERVISOR_xenoprof_op(XENOPROF_set_backtrace,
344 &oprofile_backtrace_depth);
346 oprofile_backtrace_depth = 0;
349 ret = HYPERVISOR_xenoprof_op(XENOPROF_reserve_counters, NULL);
/* Arch hook programs the counter configuration before event setup. */
353 xenoprof_arch_counter();
354 ret = HYPERVISOR_xenoprof_op(XENOPROF_setup_events, NULL);
359 ret = HYPERVISOR_xenoprof_op(XENOPROF_enable_virq, NULL);
363 xenoprof_enabled = 1;
/* Error path: release the per-VCPU pointer array (further cleanup elided). */
367 release_buffer_array(xenoprof_buf, nbuf);
/*
 * oprofile_operations.shutdown callback: disable VIRQ delivery, release
 * Xen-side counters (primary only), unmap the shared buffers, tear down
 * passive domains and free the per-VCPU pointer array.
 */
372 static void xenoprof_shutdown(void)
374 xenoprof_enabled = 0;
376 WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_disable_virq, NULL));
378 if (xenoprof_is_primary) {
379 WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_release_counters,
386 xenoprof_arch_unmap_shared_buffer(&shared_buffer);
387 if (xenoprof_is_primary)
388 unmap_passive_list();
389 release_buffer_array(xenoprof_buf, nbuf);
/*
 * oprofile_operations.start callback: ask Xen to start profiling
 * (primary domain only) and then start the arch-specific machinery.
 * Returns 0 on success or the hypercall error.
 */
393 static int xenoprof_start(void)
397 if (xenoprof_is_primary)
398 ret = HYPERVISOR_xenoprof_op(XENOPROF_start, NULL);
400 xenoprof_arch_start();
/*
 * oprofile_operations.stop callback: mirror of xenoprof_start() —
 * stop Xen-side profiling (primary only), then the arch machinery.
 */
405 static void xenoprof_stop(void)
407 if (xenoprof_is_primary)
408 WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_stop, NULL));
409 xenoprof_arch_stop();
/*
 * Program the list of actively-profiled domains into Xen.
 * Only the primary domain may do this; at most MAX_OPROF_DOMAINS
 * entries are accepted.  dom0 is always made active even if absent
 * from the caller's list.  On failure the active list in Xen is reset.
 * Returns 0 on success or a hypercall/validation error.
 */
413 static int xenoprof_set_active(int * active_domains,
414 unsigned int adomains)
421 if (!xenoprof_is_primary)
424 if (adomains > MAX_OPROF_DOMAINS)
/* Start from a clean slate before programming the new list. */
427 ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL)
431 for (i=0; i<adomains; i++) {
432 domid = active_domains[i];
/* NOTE(review): this round-trip check presumably catches values that
 * do not fit the (narrower) domid type — confirm domid's declaration. */
433 if (domid != active_domains[i]) {
437 ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
440 if (active_domains[i] == 0)
443 /* dom0 must always be active but may not be in the list */
446 ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
/* On error, drop the partially-programmed list in Xen. */
451 WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list,
453 active_defined = !ret;
457 static int xenoprof_set_passive(int * p_domains,
462 struct xenoprof_buf *buf;
464 if (!xenoprof_is_primary)
467 if (pdoms > MAX_OPROF_DOMAINS)
470 ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_passive_list, NULL);
473 unmap_passive_list();
475 for (i = 0; i < pdoms; i++) {
476 passive_domains[i].domain_id = p_domains[i];
477 passive_domains[i].max_samples = 2048;
478 ret = xenoprof_arch_set_passive(&passive_domains[i],
479 &p_shared_buffer[i]);
483 p_xenoprof_buf[i] = get_buffer_array(passive_domains[i].nbuf);
484 if (!p_xenoprof_buf[i]) {
490 for (j = 0; j < passive_domains[i].nbuf; j++) {
491 buf = (struct xenoprof_buf *)
492 &p_shared_buffer[i].buffer[
493 j * passive_domains[i].bufsize];
494 BUG_ON(buf->vcpu_id >= passive_domains[i].nbuf);
495 p_xenoprof_buf[i][buf->vcpu_id] = buf;
503 for (j = 0; j < i; j++) {
504 xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[i]);
505 release_buffer_array(p_xenoprof_buf[i],
506 passive_domains[i].nbuf);
513 /* The dummy backtrace function to keep oprofile happy
514 * The real backtrace is done in xen
/* Installed as xenoprof_ops.backtrace; intentionally empty because Xen
 * produces the backtrace records itself (see XENOPROF_set_backtrace). */
516 static void xenoprof_dummy_backtrace(struct pt_regs * const regs,
519 /* this should never be called */
/* oprofile driver operations implemented by this file; handed to the
 * generic oprofile core from xenoprofile_init().  create_files is only
 * provided when the arch defines HAVE_XENOPROF_CREATE_FILES. */
525 static struct oprofile_operations xenoprof_ops = {
526 #ifdef HAVE_XENOPROF_CREATE_FILES
527 .create_files = xenoprof_create_files,
529 .set_active = xenoprof_set_active,
530 .set_passive = xenoprof_set_passive,
531 .setup = xenoprof_setup,
532 .shutdown = xenoprof_shutdown,
533 .start = xenoprof_start,
534 .stop = xenoprof_stop,
535 .backtrace = xenoprof_dummy_backtrace
539 /* in order to get driverfs right */
/* Set when init succeeds so exit knows whether to call exit_driverfs(). */
540 static int using_xenoprof;
/*
 * Driver entry point, called from the arch oprofile init code.
 * Performs the XENOPROF_init handshake with Xen, records whether this
 * domain is the primary profiler, copies the Xen-detected CPU type and
 * publishes xenoprof_ops through @ops (assignment elided from view).
 * Returns 0 on success or a negative error; the per-CPU IRQ mask is
 * freed on the failure path.
 */
542 int __init xenoprofile_init(struct oprofile_operations * ops)
544 struct xenoprof_init init;
547 if (!zalloc_cpumask_var(&ovf_irq_mapped, GFP_KERNEL))
/* Handshake with Xen: fills in cpu_type, num_events, is_primary. */
550 ret = HYPERVISOR_xenoprof_op(XENOPROF_init, &init);
552 xenoprof_arch_init_counter(&init);
553 xenoprof_is_primary = init.is_primary;
555 /* cpu_type is detected by Xen */
556 strlcpy(cpu_type, init.cpu_type, XENOPROF_CPU_TYPE_SIZE);
557 xenoprof_ops.cpu_type = cpu_type;
/* Failure path: release the cpumask allocated above. */
565 free_cpumask_var(ovf_irq_mapped);
567 pr_info("%s: ret %d, events %d, xenoprof_is_primary %d\n",
568 __func__, ret, init.num_events, xenoprof_is_primary);
/*
 * Driver teardown: unmap the shared buffer, tear down passive domains
 * and notify Xen of shutdown (primary only), then free the per-CPU IRQ
 * mask allocated in xenoprofile_init().
 */
573 void xenoprofile_exit(void)
578 xenoprof_arch_unmap_shared_buffer(&shared_buffer);
579 if (xenoprof_is_primary) {
580 unmap_passive_list();
581 WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_shutdown, NULL));
584 free_cpumask_var(ovf_irq_mapped);