4 * @remark Copyright 2002 OProfile authors
5 * @remark Read the file COPYING
7 * @author John Levon <levon@movementarian.org>
9 * Modified by Aravind Menon and Jose Renato Santos for Xen
10 * These modifications are:
11 * Copyright (C) 2005 Hewlett-Packard Co.
13 * Separated out arch-generic part
14 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
15 * VA Linux Systems Japan K.K.
18 #include <linux/init.h>
19 #include <linux/notifier.h>
20 #include <linux/smp.h>
21 #include <linux/oprofile.h>
22 #include <linux/sysdev.h>
23 #include <linux/slab.h>
24 #include <linux/interrupt.h>
25 #include <linux/vmalloc.h>
26 #include <asm/pgtable.h>
27 #include <xen/evtchn.h>
28 #include <xen/xenoprof.h>
29 #include <xen/driver_util.h>
30 #include <xen/interface/xen.h>
31 #include <xen/interface/xenoprof.h>
32 #include "../../../drivers/oprofile/cpu_buffer.h"
33 #include "../../../drivers/oprofile/event_buffer.h"
/* Upper bound on samples requested per shared buffer from Xen. */
#define MAX_XENOPROF_SAMPLES 16

/* sample buffers shared with Xen */
xenoprof_buf_t * xenoprof_buf[MAX_VIRT_CPUS];
/* Shared buffer area */
struct xenoprof_shared_buffer shared_buffer;

/* Passive sample buffers shared with Xen */
xenoprof_buf_t *p_xenoprof_buf[MAX_OPROF_DOMAINS][MAX_VIRT_CPUS];
/* Passive shared buffer area */
struct xenoprof_shared_buffer p_shared_buffer[MAX_OPROF_DOMAINS];

static int xenoprof_start(void);
static void xenoprof_stop(void);

/* Nonzero while profiling is enabled (set in xenoprof_setup). */
static int xenoprof_enabled = 0;
/* Nonzero when this domain coordinates profiling (copied from XENOPROF_init). */
static int xenoprof_is_primary = 0;
/* Nonzero once an active-domain list has been programmed into Xen. */
static int active_defined;

/* Number of buffers in shared area (one per VCPU) */
/* Mappings of VIRQ_XENOPROF to irq number (per cpu) */
/* cpu model type string - copied from Xen memory space on XENOPROF_init command */
char cpu_type[XENOPROF_CPU_TYPE_SIZE];
/*
 * Power-management suspend callback: stop profiling if it is running so
 * no samples are taken across the suspend.
 * NOTE(review): only the guard is visible in this view; presumably the
 * body calls xenoprof_stop() — confirm against the full source.
 */
static int xenoprof_suspend(struct sys_device * dev, pm_message_t state)
	if (xenoprof_enabled == 1)
/*
 * Power-management resume callback: restart profiling if it was enabled
 * when the domain suspended.
 * NOTE(review): only the guard is visible in this view; presumably the
 * body calls xenoprof_start() — confirm against the full source.
 */
static int xenoprof_resume(struct sys_device * dev)
	if (xenoprof_enabled == 1)
/* sysdev class hooking the suspend/resume callbacks into the PM core. */
static struct sysdev_class oprofile_sysclass = {
	set_kset_name("oprofile"),
	.resume = xenoprof_resume,
	.suspend = xenoprof_suspend
/* Single device instance registered under the "oprofile" sysdev class. */
static struct sys_device device_oprofile = {
	.cls = &oprofile_sysclass,
/*
 * Register the sysdev class and device so the PM suspend/resume
 * callbacks above are invoked.  Returns 0 on success or the first
 * failing sysdev registration error.
 */
static int __init init_driverfs(void)
	if (!(error = sysdev_class_register(&oprofile_sysclass)))
		error = sysdev_register(&device_oprofile);
/* Undo init_driverfs(): unregister the device first, then its class. */
static void exit_driverfs(void)
	sysdev_unregister(&device_oprofile);
	sysdev_class_unregister(&oprofile_sysclass);
/* Without CONFIG_PM there is nothing to (un)register — no-op stubs. */
#define init_driverfs() do { } while (0)
#define exit_driverfs() do { } while (0)
#endif /* CONFIG_PM */
/* Running totals of samples consumed from active / passive domains. */
unsigned long long oprofile_samples = 0;
unsigned long long p_oprofile_samples = 0;

/* Number of passive domains currently configured, and their descriptors. */
unsigned int pdomains;
struct xenoprof_passive passive_domains[MAX_OPROF_DOMAINS];
/*
 * Drain one per-VCPU sample ring shared with Xen, feeding each event to
 * oprofile via oprofile_add_pc().
 *
 * The buffer is a ring: the two while loops handle the wrapped case
 * (consume tail..size-1, then from the ring start up to head) and the
 * straight case (tail up to head).  The branch selecting between them
 * and the tail increments are elided in this view.
 * @is_passive presumably selects which global sample counter is bumped
 * (oprofile_samples vs p_oprofile_samples) — confirm; only the passive
 * increment is visible here.
 */
static void xenoprof_add_pc(xenoprof_buf_t *buf, int is_passive)
	int head, tail, size;

	head = buf->event_head;
	tail = buf->event_tail;
	size = buf->event_size;

	/* wrapped case: consume up to the end of the ring first */
	while (tail < size) {
		oprofile_add_pc(buf->event_log[tail].eip,
				buf->event_log[tail].mode,
				buf->event_log[tail].event);
		p_oprofile_samples++;
	/* straight case: consume from tail up to head */
	while (tail < head) {
		oprofile_add_pc(buf->event_log[tail].eip,
				buf->event_log[tail].mode,
				buf->event_log[tail].event);
		p_oprofile_samples++;
	/* publish the new tail back into the shared buffer */
	buf->event_tail = tail;
/*
 * Drain the sample buffers of all passive domains.  A domain-switch
 * record is emitted via oprofile_add_domain_switch() so userland can
 * attribute the samples; afterwards attribution is switched back to
 * COORDINATOR_DOMAIN.
 */
static void xenoprof_handle_passive(void)
	int flag_domain, flag_switch = 0;

	for (i = 0; i < pdomains; i++) {
		for (j = 0; j < passive_domains[i].nbuf; j++) {
			xenoprof_buf_t *buf = p_xenoprof_buf[i][j];
			/* nothing new in this VCPU's ring — skip it */
			if (buf->event_head == buf->event_tail)
			if (!oprofile_add_domain_switch(passive_domains[i].
			/* is_passive = 1: count these as passive samples */
			xenoprof_add_pc(buf, 1);
	oprofile_add_domain_switch(COORDINATOR_DOMAIN);
/*
 * VIRQ_XENOPROF overflow handler: drain this CPU's own sample buffer
 * and, on the primary domain only, drain the passive domains' buffers.
 * The static `flag` bit is a test_and_set_bit lock ensuring only one
 * CPU at a time walks the passive buffers; the matching clear_bit is
 * elided in this view (smp_mb__before_clear_bit precedes it).
 */
xenoprof_ovf_interrupt(int irq, void * dev_id)
	struct xenoprof_buf * buf;
	static unsigned long flag;

	cpu = smp_processor_id();
	buf = xenoprof_buf[cpu];

	/* is_passive = 0: this domain's own samples */
	xenoprof_add_pc(buf, 0);

	if (xenoprof_is_primary && !test_and_set_bit(0, &flag)) {
		xenoprof_handle_passive();
		smp_mb__before_clear_bit();
/*
 * Tear down the per-CPU VIRQ_XENOPROF irq bindings created by
 * bind_virq().  ovf_irq[i] < 0 marks a CPU that was never bound.
 */
static void unbind_virq(void)
	for_each_online_cpu(i) {
		if (ovf_irq[i] >= 0) {
			unbind_from_irqhandler(ovf_irq[i], NULL);
/*
 * Bind VIRQ_XENOPROF to xenoprof_ovf_interrupt() on every online CPU,
 * recording the resulting irq numbers.  The error path (unwinding
 * partial bindings on failure) is elided in this view — presumably it
 * calls unbind_virq(); confirm against the full source.
 */
static int bind_virq(void)
	for_each_online_cpu(i) {
		result = bind_virq_to_irqhandler(VIRQ_XENOPROF,
				xenoprof_ovf_interrupt,
/* Unmap every currently mapped passive-domain shared buffer. */
static void unmap_passive_list(void)
	for (i = 0; i < pdomains; i++)
		xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[i]);
/*
 * Ask Xen for the shared sample area (sized for @max_samples per
 * buffer), map it, and record a pointer to each per-VCPU buffer indexed
 * by the vcpu_id Xen stored in the buffer header.
 * Idempotent: returns early if the area is already mapped.
 */
static int map_xenoprof_buffer(int max_samples)
	struct xenoprof_get_buffer get_buffer;
	struct xenoprof_buf *buf;

	/* already mapped — nothing to do */
	if ( shared_buffer.buffer )

	get_buffer.max_samples = max_samples;
	ret = xenoprof_arch_map_shared_buffer(&get_buffer, &shared_buffer);

	nbuf = get_buffer.nbuf;

	for (i=0; i< nbuf; i++) {
		buf = (struct xenoprof_buf*)
			&shared_buffer.buffer[i * get_buffer.bufsize];
		/* Xen fills in vcpu_id; it must index xenoprof_buf[] safely */
		BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
		xenoprof_buf[buf->vcpu_id] = buf;
/*
 * oprofile "setup" callback.  Maps the shared buffers, binds the VIRQ
 * on every CPU and, on the primary domain, programs a default active
 * list (dom0) if none was set, reserves the hardware counters and sets
 * up the events in Xen.  Finally enables the VIRQ in Xen and marks
 * profiling enabled.  Error-unwind paths are elided in this view.
 */
static int xenoprof_setup(void)
	if ( (ret = map_xenoprof_buffer(MAX_XENOPROF_SAMPLES)) )

	if ( (ret = bind_virq()) )

	if (xenoprof_is_primary) {
		/* Define dom0 as an active domain if not done yet */
		if (!active_defined) {
			ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
			ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);

		ret = HYPERVISOR_xenoprof_op(XENOPROF_reserve_counters, NULL);
		/* program the arch-specific performance counters */
		xenoprof_arch_counter();
		ret = HYPERVISOR_xenoprof_op(XENOPROF_setup_events, NULL);

	ret = HYPERVISOR_xenoprof_op(XENOPROF_enable_virq, NULL);

	xenoprof_enabled = 1;
/*
 * oprofile "shutdown" callback — reverse of xenoprof_setup().
 * Disables the VIRQ in Xen, releases the hardware counters (primary
 * only), unmaps the shared buffer and, on the primary domain, the
 * passive-domain buffers as well.
 */
static void xenoprof_shutdown(void)
	xenoprof_enabled = 0;

	HYPERVISOR_xenoprof_op(XENOPROF_disable_virq, NULL);

	if (xenoprof_is_primary) {
		HYPERVISOR_xenoprof_op(XENOPROF_release_counters, NULL);

	xenoprof_arch_unmap_shared_buffer(&shared_buffer);
	if (xenoprof_is_primary)
		unmap_passive_list();
/*
 * oprofile "start" callback: tell Xen to start sampling (primary domain
 * only) and start the architecture-specific counters locally.
 */
static int xenoprof_start(void)
	if (xenoprof_is_primary)
		ret = HYPERVISOR_xenoprof_op(XENOPROF_start, NULL);
	xenoprof_arch_start();
/* oprofile "stop" callback: mirror of xenoprof_start(). */
static void xenoprof_stop(void)
	if (xenoprof_is_primary)
		HYPERVISOR_xenoprof_op(XENOPROF_stop, NULL);
	xenoprof_arch_stop();
/*
 * Program the list of active (fully profiled) domains into Xen.
 * Primary domain only; at most MAX_OPROF_DOMAINS entries.  dom0 is
 * always added even when absent from the caller's list.  On failure the
 * active list in Xen is reset; active_defined records overall success.
 */
static int xenoprof_set_active(int * active_domains,
		unsigned int adomains)
	if (!xenoprof_is_primary)

	if (adomains > MAX_OPROF_DOMAINS)

	ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);

	for (i=0; i<adomains; i++) {
		domid = active_domains[i];
		/*
		 * Round-trip check: catches truncation when the int id is
		 * narrowed into domid (presumably domid_t — confirm).
		 */
		if (domid != active_domains[i]) {
		ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
		/* remember whether dom0 was already in the caller's list */
		if (active_domains[i] == 0)

	/* dom0 must always be active but may not be in the list */
	ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);

	HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
	active_defined = !ret;
401 static int xenoprof_set_passive(int * p_domains,
406 struct xenoprof_buf *buf;
408 if (!xenoprof_is_primary)
411 if (pdoms > MAX_OPROF_DOMAINS)
414 ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_passive_list, NULL);
417 unmap_passive_list();
419 for (i = 0; i < pdoms; i++) {
420 passive_domains[i].domain_id = p_domains[i];
421 passive_domains[i].max_samples = 2048;
422 ret = xenoprof_arch_set_passive(&passive_domains[i],
423 &p_shared_buffer[i]);
426 for (j = 0; j < passive_domains[i].nbuf; j++) {
427 buf = (struct xenoprof_buf *)
428 &p_shared_buffer[i].buffer[j * passive_domains[i].bufsize];
429 BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
430 p_xenoprof_buf[i][buf->vcpu_id] = buf;
438 for (j = 0; j < i; j++)
439 xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[i]);
/* oprofile_operations vector implemented by this Xen backend. */
struct oprofile_operations xenoprof_ops = {
#ifdef HAVE_XENOPROF_CREATE_FILES
	.create_files = xenoprof_create_files,
	.set_active = xenoprof_set_active,
	.set_passive = xenoprof_set_passive,
	.setup = xenoprof_setup,
	.shutdown = xenoprof_shutdown,
	.start = xenoprof_start,
	.stop = xenoprof_stop
/*
 * in order to get driverfs right
 * (presumably set once xenoprofile_init() succeeds, so the exit path
 * knows whether to tear down the sysdev registration — confirm.)
 */
static int using_xenoprof;
/*
 * Driver entry point.  Issues XENOPROF_init to Xen, records whether
 * this domain is the primary profiler, copies the CPU type string
 * detected by Xen, and publishes it through xenoprof_ops.
 */
int __init xenoprofile_init(struct oprofile_operations * ops)
	struct xenoprof_init init;

	ret = HYPERVISOR_xenoprof_op(XENOPROF_init, &init);
	xenoprof_arch_init_counter(&init);
	xenoprof_is_primary = init.is_primary;

	/* cpu_type is detected by Xen */
	/* pre-place the terminator; the size-1 strncpy cannot overwrite it */
	cpu_type[XENOPROF_CPU_TYPE_SIZE-1] = 0;
	strncpy(cpu_type, init.cpu_type, XENOPROF_CPU_TYPE_SIZE - 1);
	xenoprof_ops.cpu_type = cpu_type;

	/* presumably initializes the per-cpu irq table (ovf_irq[] = -1)
	 * before any VIRQ binding — body elided in this view; confirm. */
	for (i=0; i<NR_CPUS; i++)

	printk(KERN_INFO "%s: ret %d, events %d, xenoprof_is_primary %d\n",
	       __func__, ret, init.num_events, xenoprof_is_primary);
490 void xenoprofile_exit(void)
495 xenoprof_arch_unmap_shared_buffer(&shared_buffer);
496 if (xenoprof_is_primary) {
497 unmap_passive_list();
498 HYPERVISOR_xenoprof_op(XENOPROF_shutdown, NULL);