4 * @remark Copyright 2002 OProfile authors
5 * @remark Read the file COPYING
7 * @author John Levon <levon@movementarian.org>
9 * Modified by Aravind Menon and Jose Renato Santos for Xen
10 * These modifications are:
11 * Copyright (C) 2005 Hewlett-Packard Co.
13 * Separated out arch-generic part
14 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
15 * VA Linux Systems Japan K.K.
18 #include <linux/init.h>
19 #include <linux/notifier.h>
20 #include <linux/smp.h>
21 #include <linux/oprofile.h>
22 #include <linux/sysdev.h>
23 #include <linux/slab.h>
24 #include <linux/interrupt.h>
25 #include <linux/vmalloc.h>
26 #include <asm/pgtable.h>
27 #include <xen/evtchn.h>
28 #include <xen/xenoprof.h>
29 #include <xen/driver_util.h>
30 #include <xen/interface/xen.h>
31 #include <xen/interface/xenoprof.h>
32 #include "../../../drivers/oprofile/event_buffer.h"
/*
 * File-scope state for the Xen oprofile driver.
 * NOTE(review): this excerpt elides some original lines (e.g. the
 * "nbuf" counter documented by the comment at original line 55) --
 * confirm against the full file.
 */
34 #define MAX_XENOPROF_SAMPLES 16
36 /* sample buffers shared with Xen */
37 static xenoprof_buf_t **__read_mostly xenoprof_buf;
38 /* Shared buffer area */
39 static struct xenoprof_shared_buffer shared_buffer;
41 /* Passive sample buffers shared with Xen */
42 static xenoprof_buf_t **__read_mostly p_xenoprof_buf[MAX_OPROF_DOMAINS];
43 /* Passive shared buffer area */
44 static struct xenoprof_shared_buffer p_shared_buffer[MAX_OPROF_DOMAINS];
/* Forward declarations for the oprofile_operations callbacks below. */
46 static int xenoprof_start(void);
47 static void xenoprof_stop(void);
/* Non-zero while profiling is active (set in xenoprof_setup()). */
49 static int xenoprof_enabled = 0;
/* Non-zero when this domain coordinates profiling (from XENOPROF_init). */
50 static int xenoprof_is_primary = 0;
/* Non-zero once an active-domain list has been programmed into Xen. */
51 static int active_defined;
/* Backtrace depth requested by the oprofile core (defined elsewhere). */
53 extern unsigned long oprofile_backtrace_depth;
55 /* Number of buffers in shared area (one per VCPU) */
57 /* Mappings of VIRQ_XENOPROF to irq number (per cpu) */
58 static int ovf_irq[NR_CPUS];
59 /* cpu model type string - copied from Xen on XENOPROF_init command */
60 static char cpu_type[XENOPROF_CPU_TYPE_SIZE];
/*
 * PM suspend hook: stop profiling if it is currently enabled.
 * NOTE(review): the body is truncated in this excerpt -- presumably it
 * calls xenoprof_stop() and returns 0; confirm against the full file.
 */
64 static int xenoprof_suspend(struct sys_device * dev, pm_message_t state)
66 	if (xenoprof_enabled == 1)
/*
 * PM resume hook: restart profiling if it was enabled at suspend time.
 * NOTE(review): body truncated in this excerpt -- presumably calls
 * xenoprof_start() and returns 0; confirm against the full file.
 */
72 static int xenoprof_resume(struct sys_device * dev)
74 	if (xenoprof_enabled == 1)
/*
 * sysdev class wiring the suspend/resume hooks above into the driver
 * model (CONFIG_PM builds; see the no-op stubs in the #else branch).
 * NOTE(review): the class-name initializer line is elided here.
 */
80 static struct sysdev_class oprofile_sysclass = {
82 	.resume = xenoprof_resume,
83 	.suspend = xenoprof_suspend
/* The single sys_device instance registered for this driver. */
87 static struct sys_device device_oprofile = {
89 	.cls = &oprofile_sysclass,
/*
 * Register the sysdev class, then the device.  The device is only
 * registered when class registration succeeded (note the assignment
 * inside the condition).  Returns 0 on success, an errno otherwise.
 */
93 static int __init init_driverfs(void)
96 	if (!(error = sysdev_class_register(&oprofile_sysclass)))
97 		error = sysdev_register(&device_oprofile);
/* Tear down in reverse order of init_driverfs(). */
102 static void exit_driverfs(void)
104 	sysdev_unregister(&device_oprofile);
105 	sysdev_class_unregister(&oprofile_sysclass);
/* !CONFIG_PM: the power-management hooks compile away to no-ops. */
109 #define init_driverfs() do { } while (0)
110 #define exit_driverfs() do { } while (0)
111 #endif /* CONFIG_PM */
/* Running totals of samples forwarded to oprofile (active / passive). */
113 static unsigned long long oprofile_samples;
114 static unsigned long long p_oprofile_samples;
/* Number of passive domains currently mapped, and their descriptors. */
116 static unsigned int pdomains;
117 static struct xenoprof_passive passive_domains[MAX_OPROF_DOMAINS];
119 /* Check whether the given entry is an escape code */
/* An escape entry stores XENOPROF_ESCAPE_CODE in its eip field; the
 * escape reason is then read from the event field (see
 * xenoprof_get_event()). */
120 static int xenoprof_is_escape(xenoprof_buf_t * buf, int tail)
122 	return (buf->event_log[tail].eip == XENOPROF_ESCAPE_CODE);
125 /* Get the event at the given entry */
/* For escape entries this is the escape reason (e.g.
 * XENOPROF_TRACE_BEGIN); for normal entries, the counter/event id. */
126 static uint8_t xenoprof_get_event(xenoprof_buf_t * buf, int tail)
128 	return (buf->event_log[tail].event);
/*
 * Drain one shared sample ring from event_tail up to event_head,
 * forwarding each entry to the oprofile core.  Escape entries carrying
 * XENOPROF_TRACE_BEGIN switch oprofile into trace mode via
 * oprofile_add_mode(); ordinary entries are forwarded as pc samples
 * via oprofile_add_pc().  @is_passive is non-zero when draining a
 * passive domain's buffer (the passive sample counter is bumped then).
 * NOTE(review): several lines (memory barriers, the tail advance and
 * wrap at event_size, the active-counter increments) are elided in
 * this excerpt -- confirm against the full file.
 */
131 static void xenoprof_add_pc(xenoprof_buf_t *buf, int is_passive)
133 	int head, tail, size;
136 	head = buf->event_head;
137 	tail = buf->event_tail;
138 	size = buf->event_size;
140 	while (tail != head) {
141 		if (xenoprof_is_escape(buf, tail) &&
142 		    xenoprof_get_event(buf, tail) == XENOPROF_TRACE_BEGIN) {
144 			oprofile_add_mode(buf->event_log[tail].mode);
148 				p_oprofile_samples++;
151 			oprofile_add_pc(buf->event_log[tail].eip,
152 					buf->event_log[tail].mode,
153 					buf->event_log[tail].event);
158 				p_oprofile_samples++;
	/* Publish the new consumer index back to the shared ring. */
166 	buf->event_tail = tail;
/*
 * Drain the sample buffers of every mapped passive domain.  For each
 * domain with pending samples a domain-switch record is emitted to
 * oprofile before its per-VCPU buffers are drained; afterwards a final
 * switch back to COORDINATOR_DOMAIN is emitted.  Runs on the primary
 * domain only (called from the VIRQ handler).
 * NOTE(review): the declarations of i/j and part of the
 * flag_domain/flag_switch handling are elided in this excerpt.
 */
169 static void xenoprof_handle_passive(void)
172 	int flag_domain, flag_switch = 0;
174 	for (i = 0; i < pdomains; i++) {
176 		for (j = 0; j < passive_domains[i].nbuf; j++) {
177 			xenoprof_buf_t *buf = p_xenoprof_buf[i][j];
			/* Skip VCPUs with no pending samples. */
178 			if (buf->event_head == buf->event_tail)
181 			if (!oprofile_add_domain_switch(
182 				passive_domains[i].domain_id))
186 			xenoprof_add_pc(buf, 1);
192 		oprofile_add_domain_switch(COORDINATOR_DOMAIN);
/*
 * VIRQ_XENOPROF overflow handler: drain this CPU's shared sample
 * buffer and, on the primary domain, additionally drain all
 * passive-domain buffers.  Bit 0 of the static 'flag' serializes
 * passive handling across CPUs: only the CPU that wins
 * test_and_set_bit() runs xenoprof_handle_passive().
 * NOTE(review): the matching clear_bit() (paired with
 * smp_mb__before_clear_bit()) and the IRQ_HANDLED return are elided
 * in this excerpt.
 */
195 static irqreturn_t xenoprof_ovf_interrupt(int irq, void *dev_id)
197 	struct xenoprof_buf * buf;
198 	static unsigned long flag;
200 	buf = xenoprof_buf[smp_processor_id()];
202 	xenoprof_add_pc(buf, 0);
204 	if (xenoprof_is_primary && !test_and_set_bit(0, &flag)) {
205 		xenoprof_handle_passive();
206 		smp_mb__before_clear_bit();
/* irqaction bound per-CPU to VIRQ_XENOPROF in bind_virq(). */
213 static struct irqaction ovf_action = {
214 	.handler = xenoprof_ovf_interrupt,
215 	.flags = IRQF_DISABLED,
/*
 * Unbind the overflow VIRQ from every online CPU that has one bound
 * (ovf_irq[i] >= 0).  Presumably resets ovf_irq[i] to -1 afterwards --
 * that line is elided in this excerpt; confirm against the full file.
 */
219 static void unbind_virq(void)
223 	for_each_online_cpu(i) {
224 		if (ovf_irq[i] >= 0) {
225 			unbind_from_per_cpu_irq(ovf_irq[i], i, &ovf_action);
/*
 * Bind VIRQ_XENOPROF to ovf_action on every online CPU.
 * NOTE(review): the error path (unwinding already-bound CPUs via
 * unbind_virq()) and the recording of the returned irq number into
 * ovf_irq[] are elided in this excerpt.
 */
232 static int bind_virq(void)
237 	for_each_online_cpu(i) {
238 		result = bind_virq_to_irqaction(VIRQ_XENOPROF, i, &ovf_action);
/*
 * Allocate the per-VCPU array of buffer pointers for nbuf buffers.
 * Small arrays come from kmalloc, larger ones from vmalloc; the same
 * size test in release_buffer_array() selects the matching free
 * routine.  NOTE(review): the size is computed with
 * sizeof(xenoprof_buf_t) -- the struct, not the pointer -- which
 * over-sizes the allocation.  Harmless as long as
 * release_buffer_array() uses the identical expression (it does), but
 * worth confirming/fixing in both places together.
 */
252 static xenoprof_buf_t **get_buffer_array(unsigned int nbuf)
254 	size_t size = nbuf * sizeof(xenoprof_buf_t);
256 	if (size <= PAGE_SIZE)
257 		return kmalloc(size, GFP_KERNEL);
258 	return vmalloc(size);
/*
 * Free an array obtained from get_buffer_array(); mirrors its
 * kmalloc/vmalloc size threshold so the matching kfree/vfree is used
 * (those calls are elided in this excerpt).
 */
261 static void release_buffer_array(xenoprof_buf_t **buf, unsigned int nbuf)
263 	if (nbuf * sizeof(xenoprof_buf_t) <= PAGE_SIZE)
/*
 * Unmap every passive domain's shared buffer and free its per-VCPU
 * pointer array.  Presumably resets pdomains to 0 afterwards -- that
 * line is elided in this excerpt; confirm against the full file.
 */
270 static void unmap_passive_list(void)
273 	for (i = 0; i < pdomains; i++) {
274 		xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[i]);
275 		release_buffer_array(p_xenoprof_buf[i],
276 				     passive_domains[i].nbuf);
/*
 * Map this domain's sample buffers from Xen (XENOPROF get-buffer, via
 * the arch helper) and build the per-VCPU pointer table, indexed by
 * each buffer's vcpu_id.  Idempotent: returns early if shared_buffer
 * is already mapped.  On allocation failure the shared buffer is
 * unmapped again.  NOTE(review): the early-return, the error checks
 * after the hypercall/allocation, and the final return are elided in
 * this excerpt.
 */
282 static int map_xenoprof_buffer(int max_samples)
284 	struct xenoprof_get_buffer get_buffer;
285 	struct xenoprof_buf *buf;
288 	if ( shared_buffer.buffer )
291 	get_buffer.max_samples = max_samples;
292 	ret = xenoprof_arch_map_shared_buffer(&get_buffer, &shared_buffer);
295 	nbuf = get_buffer.nbuf;
297 	xenoprof_buf = get_buffer_array(nbuf);
	/* Allocation of the pointer array failed: undo the mapping. */
299 		xenoprof_arch_unmap_shared_buffer(&shared_buffer);
303 	for (i=0; i< nbuf; i++) {
304 		buf = (struct xenoprof_buf*)
305 			&shared_buffer.buffer[i * get_buffer.bufsize];
306 		BUG_ON(buf->vcpu_id >= nbuf);
307 		xenoprof_buf[buf->vcpu_id] = buf;
/*
 * oprofile 'setup' callback.  Maps the sample buffers, binds the
 * overflow VIRQ on every CPU, and -- on the primary domain -- programs
 * Xen: default active list (this domain) if none was defined, the
 * requested backtrace depth, reserved counters, counter/event
 * configuration, and finally enables the VIRQ.  Sets xenoprof_enabled
 * on success; on failure the buffers and VIRQs are released again.
 * NOTE(review): several error-path gotos/returns and the unbind on
 * failure are elided in this excerpt.
 */
314 static int xenoprof_setup(void)
318 	if ( (ret = map_xenoprof_buffer(MAX_XENOPROF_SAMPLES)) )
321 	if ( (ret = bind_virq()) ) {
322 		release_buffer_array(xenoprof_buf, nbuf);
326 	if (xenoprof_is_primary) {
327 		/* Define dom0 as an active domain if not done yet */
328 		if (!active_defined) {
330 			ret = HYPERVISOR_xenoprof_op(
331 				XENOPROF_reset_active_list, NULL);
335 			ret = HYPERVISOR_xenoprof_op(
336 				XENOPROF_set_active, &domid);
342 		if (oprofile_backtrace_depth > 0) {
343 			ret = HYPERVISOR_xenoprof_op(XENOPROF_set_backtrace,
344 						     &oprofile_backtrace_depth);
			/* Depth rejected by Xen: fall back to no backtraces. */
346 				oprofile_backtrace_depth = 0;
349 		ret = HYPERVISOR_xenoprof_op(XENOPROF_reserve_counters, NULL);
353 	xenoprof_arch_counter();
354 	ret = HYPERVISOR_xenoprof_op(XENOPROF_setup_events, NULL);
359 	ret = HYPERVISOR_xenoprof_op(XENOPROF_enable_virq, NULL);
363 	xenoprof_enabled = 1;
	/* Error path: release what was set up above. */
367 	release_buffer_array(xenoprof_buf, nbuf);
/*
 * oprofile 'shutdown' callback: mirror of xenoprof_setup().  Disables
 * the VIRQ in Xen, releases the reserved counters (primary only),
 * unmaps the shared buffers (plus the passive list on the primary) and
 * frees the pointer array.  Hypercall failures are only warned about
 * since shutdown must proceed regardless.
 */
372 static void xenoprof_shutdown(void)
374 	xenoprof_enabled = 0;
376 	WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_disable_virq, NULL));
378 	if (xenoprof_is_primary) {
379 		WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_release_counters,
386 	xenoprof_arch_unmap_shared_buffer(&shared_buffer);
387 	if (xenoprof_is_primary)
388 		unmap_passive_list();
389 	release_buffer_array(xenoprof_buf, nbuf);
/*
 * oprofile 'start' callback: the primary domain tells Xen to start
 * counting, then the arch hook starts local collection.  Returns the
 * hypercall result (0 on non-primary domains, where ret is presumably
 * initialized to 0 -- the declaration is elided in this excerpt).
 */
393 static int xenoprof_start(void)
397 	if (xenoprof_is_primary)
398 		ret = HYPERVISOR_xenoprof_op(XENOPROF_start, NULL);
400 		xenoprof_arch_start();
/*
 * oprofile 'stop' callback: the primary domain tells Xen to stop
 * counting (failure only warned about), then the arch hook stops
 * local collection.
 */
405 static void xenoprof_stop(void)
407 	if (xenoprof_is_primary)
408 		WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_stop, NULL));
409 	xenoprof_arch_stop();
/*
 * Program Xen with a new list of actively-profiled domains (primary
 * domain only; at most MAX_OPROF_DOMAINS entries).  The list in Xen is
 * reset first, each requested id is registered individually, and dom0
 * is forced into the list if the caller omitted it.  The
 * "domid != active_domains[i]" comparison catches ids that do not
 * round-trip through domid_t (truncation).  On failure the active list
 * is reset again; active_defined records whether a valid list is now
 * in place.  NOTE(review): declarations, early returns and some error
 * branches are elided in this excerpt.
 */
413 static int xenoprof_set_active(int * active_domains,
414 			       unsigned int adomains)
421 	if (!xenoprof_is_primary)
424 	if (adomains > MAX_OPROF_DOMAINS)
427 	ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
431 	for (i=0; i<adomains; i++) {
432 		domid = active_domains[i];
433 		if (domid != active_domains[i]) {
437 		ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
440 		if (active_domains[i] == 0)
443 	/* dom0 must always be active but may not be in the list */
446 		ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
451 		WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list,
453 	active_defined = !ret;
/*
 * Map the sample buffers of up to MAX_OPROF_DOMAINS passive domains
 * (primary domain only).  Any previously mapped passive list is torn
 * down first; for each requested domain the arch helper maps its
 * shared buffer and a per-VCPU pointer table is built, indexed by
 * vcpu_id (mirrors map_xenoprof_buffer() for the local domain).
 * NOTE(review): the trailing loop is the error unwind for domains
 * mapped so far, but it iterates j over [0, i) while indexing
 * p_shared_buffer/p_xenoprof_buf/passive_domains with i, not j --
 * this looks like a bug (unmaps/frees the same failing entry i times
 * and leaks entries 0..i-1); confirm against the full file.
 */
457 static int xenoprof_set_passive(int * p_domains,
462 	struct xenoprof_buf *buf;
464 	if (!xenoprof_is_primary)
467 	if (pdoms > MAX_OPROF_DOMAINS)
470 	ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_passive_list, NULL);
473 	unmap_passive_list();
475 	for (i = 0; i < pdoms; i++) {
476 		passive_domains[i].domain_id = p_domains[i];
477 		passive_domains[i].max_samples = 2048;
478 		ret = xenoprof_arch_set_passive(&passive_domains[i],
479 						&p_shared_buffer[i]);
483 		p_xenoprof_buf[i] = get_buffer_array(passive_domains[i].nbuf);
484 		if (!p_xenoprof_buf[i]) {
490 		for (j = 0; j < passive_domains[i].nbuf; j++) {
491 			buf = (struct xenoprof_buf *)
492 				&p_shared_buffer[i].buffer[
493 				j * passive_domains[i].bufsize];
494 			BUG_ON(buf->vcpu_id >= passive_domains[i].nbuf);
495 			p_xenoprof_buf[i][buf->vcpu_id] = buf;
503 	for (j = 0; j < i; j++) {
504 		xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[i]);
505 		release_buffer_array(p_xenoprof_buf[i],
506 				     passive_domains[i].nbuf);
513 /* The dummy backtrace function to keep oprofile happy
514  * The real backtrace is done in xen
/* Registered as the .backtrace callback below; intentionally empty.
 * (Its second parameter, the depth, is elided in this excerpt.) */
516 static void xenoprof_dummy_backtrace(struct pt_regs * const regs,
519 	/* this should never be called */
/*
 * Callback table handed to the oprofile core by xenoprofile_init().
 * create_files is only wired up when the arch layer provides one.
 */
525 static struct oprofile_operations xenoprof_ops = {
526 #ifdef HAVE_XENOPROF_CREATE_FILES
527 	.create_files 	= xenoprof_create_files,
529 	.set_active	= xenoprof_set_active,
530 	.set_passive    = xenoprof_set_passive,
531 	.setup 		= xenoprof_setup,
532 	.shutdown	= xenoprof_shutdown,
533 	.start		= xenoprof_start,
534 	.stop		= xenoprof_stop,
535 	.backtrace	= xenoprof_dummy_backtrace
539 /* in order to get driverfs right */
/* Presumably set when xenoprofile_init() succeeds and checked in
 * xenoprofile_exit() before calling exit_driverfs() -- those lines are
 * elided in this excerpt; confirm against the full file. */
540 static int using_xenoprof;
/*
 * Driver entry point: issue XENOPROF_init to learn the CPU model
 * string, event count and whether this domain is the profiling
 * primary; initialize the arch counter state and publish the callback
 * table via *ops.  cpu_type's last byte is zeroed before the bounded
 * strncpy, so the copy can never leave it unterminated.  The ovf_irq[]
 * table is initialized for every possible CPU (initializer value
 * elided in this excerpt; presumably -1 meaning "unbound", matching
 * the ovf_irq[i] >= 0 test in unbind_virq() -- confirm).
 */
542 int __init xenoprofile_init(struct oprofile_operations * ops)
544 	struct xenoprof_init init;
548 	ret = HYPERVISOR_xenoprof_op(XENOPROF_init, &init);
550 		xenoprof_arch_init_counter(&init);
551 		xenoprof_is_primary = init.is_primary;
553 		/* cpu_type is detected by Xen */
554 		cpu_type[XENOPROF_CPU_TYPE_SIZE-1] = 0;
555 		strncpy(cpu_type, init.cpu_type, XENOPROF_CPU_TYPE_SIZE - 1);
556 		xenoprof_ops.cpu_type = cpu_type;
562 		for (i=0; i<NR_CPUS; i++)
568 	printk(KERN_INFO "%s: ret %d, events %d, xenoprof_is_primary %d\n",
569 	       __func__, ret, init.num_events, xenoprof_is_primary);
/*
 * Driver exit: unmap the shared sample buffer and, on the primary
 * domain, tear down the passive list and tell Xen to shut profiling
 * down (failure only warned about).
 * NOTE(review): the function continues past this excerpt.
 */
574 void xenoprofile_exit(void)
579 	xenoprof_arch_unmap_shared_buffer(&shared_buffer);
580 	if (xenoprof_is_primary) {
581 		unmap_passive_list();
582 		WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_shutdown, NULL));