/**
 * @file xenoprofile.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 *
 * Modified by Aravind Menon and Jose Renato Santos for Xen
 * These modifications are:
 * Copyright (C) 2005 Hewlett-Packard Co.
 *
 * Separated out arch-generic part
 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 */

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/sysdev.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/vmalloc.h>
#include <asm/pgtable.h>
#include <xen/evtchn.h>
#include <xen/xenoprof.h>
#include <xen/driver_util.h>
#include <xen/interface/xen.h>
#include <xen/interface/xenoprof.h>
#include "../../../drivers/oprofile/event_buffer.h"

#define MAX_XENOPROF_SAMPLES 16

/* sample buffers shared with Xen */
static xenoprof_buf_t **__read_mostly xenoprof_buf;
/* Shared buffer area */
static struct xenoprof_shared_buffer shared_buffer;

/* Passive sample buffers shared with Xen */
static xenoprof_buf_t **__read_mostly p_xenoprof_buf[MAX_OPROF_DOMAINS];
/* Passive shared buffer area */
static struct xenoprof_shared_buffer p_shared_buffer[MAX_OPROF_DOMAINS];

static int xenoprof_start(void);
static void xenoprof_stop(void);

static int xenoprof_enabled = 0;
static int xenoprof_is_primary = 0;
static int active_defined;

extern unsigned long oprofile_backtrace_depth;

/* Number of buffers in shared area (one per VCPU) */
static int nbuf;
/* Mappings of VIRQ_XENOPROF to irq number (per cpu) */
static int ovf_irq[NR_CPUS];
/* cpu model type string - copied from Xen on XENOPROF_init command */
static char cpu_type[XENOPROF_CPU_TYPE_SIZE];

#ifdef CONFIG_PM

static int xenoprof_suspend(struct sys_device *dev, pm_message_t state)
{
        if (xenoprof_enabled == 1)
                xenoprof_stop();
        return 0;
}

static int xenoprof_resume(struct sys_device *dev)
{
        if (xenoprof_enabled == 1)
                xenoprof_start();
        return 0;
}

static struct sysdev_class oprofile_sysclass = {
        .name           = "oprofile",
        .resume         = xenoprof_resume,
        .suspend        = xenoprof_suspend
};

static struct sys_device device_oprofile = {
        .id     = 0,
        .cls    = &oprofile_sysclass,
};

static int __init init_driverfs(void)
{
        int error;

        if (!(error = sysdev_class_register(&oprofile_sysclass)))
                error = sysdev_register(&device_oprofile);
        return error;
}

static void exit_driverfs(void)
{
        sysdev_unregister(&device_oprofile);
        sysdev_class_unregister(&oprofile_sysclass);
}

#else
#define init_driverfs() do { } while (0)
#define exit_driverfs() do { } while (0)
#endif /* CONFIG_PM */

static unsigned long long oprofile_samples;
static unsigned long long p_oprofile_samples;

static unsigned int pdomains;
static struct xenoprof_passive passive_domains[MAX_OPROF_DOMAINS];

/* Check whether the given entry is an escape code */
static int xenoprof_is_escape(xenoprof_buf_t *buf, int tail)
{
        return (buf->event_log[tail].eip == XENOPROF_ESCAPE_CODE);
}

/* Get the event at the given entry */
static uint8_t xenoprof_get_event(xenoprof_buf_t *buf, int tail)
{
        return (buf->event_log[tail].event);
}

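/*
 * Drain all samples between event_tail and event_head of one shared
 * buffer into oprofile.  An escape entry carrying XENOPROF_TRACE_BEGIN
 * starts a backtrace record; the PCs that follow it in this drain are
 * not counted as separate samples.
 */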
static void xenoprof_add_pc(xenoprof_buf_t *buf, int is_passive)
{
        int head, tail, size;
        int tracing = 0;

        head = buf->event_head;
        tail = buf->event_tail;
        size = buf->event_size;

        while (tail != head) {
                if (xenoprof_is_escape(buf, tail) &&
                    xenoprof_get_event(buf, tail) == XENOPROF_TRACE_BEGIN) {
                        tracing = 1;
                        oprofile_add_mode(buf->event_log[tail].mode);
                        if (!is_passive)
                                oprofile_samples++;
                        else
                                p_oprofile_samples++;
                } else {
                        oprofile_add_pc(buf->event_log[tail].eip,
                                        buf->event_log[tail].mode,
                                        buf->event_log[tail].event);
                        if (!tracing) {
                                if (!is_passive)
                                        oprofile_samples++;
                                else
                                        p_oprofile_samples++;
                        }
                }
                tail++;
                if (tail == size)
                        tail = 0;
        }
        buf->event_tail = tail;
}

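/*
 * Flush the sample buffers of all passive domains.  Each domain's
 * samples are preceded by a domain-switch record, and a final switch
 * back to COORDINATOR_DOMAIN is emitted once any passive samples have
 * been added.
 */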
static void xenoprof_handle_passive(void)
{
        int i, j;
        int flag_domain, flag_switch = 0;

        for (i = 0; i < pdomains; i++) {
                flag_domain = 0;
                for (j = 0; j < passive_domains[i].nbuf; j++) {
                        xenoprof_buf_t *buf = p_xenoprof_buf[i][j];
                        if (buf->event_head == buf->event_tail)
                                continue;
                        if (!flag_domain) {
                                if (!oprofile_add_domain_switch(
                                        passive_domains[i].domain_id))
                                        goto done;
                                flag_domain = 1;
                        }
                        xenoprof_add_pc(buf, 1);
                        flag_switch = 1;
                }
        }
done:
        if (flag_switch)
                oprofile_add_domain_switch(COORDINATOR_DOMAIN);
}

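/*
 * VIRQ_XENOPROF handler: drain this CPU's own sample buffer, then let
 * exactly one CPU at a time (guarded by the flag bit) drain the
 * passive-domain buffers on behalf of the primary domain.
 */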
static irqreturn_t xenoprof_ovf_interrupt(int irq, void *dev_id)
{
        struct xenoprof_buf *buf;
        static unsigned long flag;

        buf = xenoprof_buf[smp_processor_id()];

        xenoprof_add_pc(buf, 0);

        if (xenoprof_is_primary && !test_and_set_bit(0, &flag)) {
                xenoprof_handle_passive();
                smp_mb__before_clear_bit();
                clear_bit(0, &flag);
        }

        return IRQ_HANDLED;
}

static struct irqaction ovf_action = {
        .handler = xenoprof_ovf_interrupt,
        .flags   = IRQF_DISABLED,
        .name    = "xenoprof"
};

static void unbind_virq(void)
{
        unsigned int i;

        for_each_online_cpu(i) {
                if (ovf_irq[i] >= 0) {
                        unbind_from_per_cpu_irq(ovf_irq[i], i, &ovf_action);
                        ovf_irq[i] = -1;
                }
        }
}

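/*
 * Bind VIRQ_XENOPROF to an irq on every online CPU.  On failure the
 * bindings established so far are torn down again.
 */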
static int bind_virq(void)
{
        unsigned int i;
        int result;

        for_each_online_cpu(i) {
                result = bind_virq_to_irqaction(VIRQ_XENOPROF, i, &ovf_action);

                if (result < 0) {
                        unbind_virq();
                        return result;
                }

                ovf_irq[i] = result;
        }

        return 0;
}

/*
 * Allocate/free the per-VCPU array of buffer pointers.  Small arrays
 * come from kmalloc, larger ones from vmalloc.
 */
static xenoprof_buf_t **get_buffer_array(unsigned int nbuf)
{
        size_t size = nbuf * sizeof(xenoprof_buf_t *);

        if (size <= PAGE_SIZE)
                return kmalloc(size, GFP_KERNEL);
        return vmalloc(size);
}

static void release_buffer_array(xenoprof_buf_t **buf, unsigned int nbuf)
{
        if (nbuf * sizeof(xenoprof_buf_t *) <= PAGE_SIZE)
                kfree(buf);
        else
                vfree(buf);
}

static void unmap_passive_list(void)
{
        int i;

        for (i = 0; i < pdomains; i++) {
                xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[i]);
                release_buffer_array(p_xenoprof_buf[i],
                                     passive_domains[i].nbuf);
        }
        pdomains = 0;
}

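/*
 * Map the sample buffers that Xen shares with this domain (at most
 * max_samples entries per buffer) and index them by VCPU id.  An
 * existing mapping is reused.
 */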
static int map_xenoprof_buffer(int max_samples)
{
        struct xenoprof_get_buffer get_buffer;
        struct xenoprof_buf *buf;
        int ret, i;

        if (shared_buffer.buffer)
                return 0;

        get_buffer.max_samples = max_samples;
        ret = xenoprof_arch_map_shared_buffer(&get_buffer, &shared_buffer);
        if (ret)
                return ret;
        nbuf = get_buffer.nbuf;

        xenoprof_buf = get_buffer_array(nbuf);
        if (!xenoprof_buf) {
                xenoprof_arch_unmap_shared_buffer(&shared_buffer);
                return -ENOMEM;
        }

        for (i = 0; i < nbuf; i++) {
                buf = (struct xenoprof_buf *)
                        &shared_buffer.buffer[i * get_buffer.bufsize];
                BUG_ON(buf->vcpu_id >= nbuf);
                xenoprof_buf[buf->vcpu_id] = buf;
        }

        return 0;
}

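/*
 * Bring the profiler up: map the shared buffers, bind the overflow
 * VIRQ on every CPU and, on the primary domain, program the active
 * domain list, backtrace depth and performance counters in Xen.
 */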
static int xenoprof_setup(void)
{
        int ret;

        if ((ret = map_xenoprof_buffer(MAX_XENOPROF_SAMPLES)))
                return ret;

        if ((ret = bind_virq())) {
                release_buffer_array(xenoprof_buf, nbuf);
                return ret;
        }

        if (xenoprof_is_primary) {
                /* Define dom0 as an active domain if not done yet */
                if (!active_defined) {
                        domid_t domid;
                        ret = HYPERVISOR_xenoprof_op(
                                XENOPROF_reset_active_list, NULL);
                        if (ret)
                                goto err;
                        domid = 0;
                        ret = HYPERVISOR_xenoprof_op(
                                XENOPROF_set_active, &domid);
                        if (ret)
                                goto err;
                        active_defined = 1;
                }

                if (oprofile_backtrace_depth > 0) {
                        ret = HYPERVISOR_xenoprof_op(XENOPROF_set_backtrace,
                                                     &oprofile_backtrace_depth);
                        if (ret)
                                oprofile_backtrace_depth = 0;
                }

                ret = HYPERVISOR_xenoprof_op(XENOPROF_reserve_counters, NULL);
                if (ret)
                        goto err;

                xenoprof_arch_counter();
                ret = HYPERVISOR_xenoprof_op(XENOPROF_setup_events, NULL);
                if (ret)
                        goto err;
        }

        ret = HYPERVISOR_xenoprof_op(XENOPROF_enable_virq, NULL);
        if (ret)
                goto err;

        xenoprof_enabled = 1;
        return 0;
 err:
        unbind_virq();
        release_buffer_array(xenoprof_buf, nbuf);
        return ret;
}

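/*
 * Undo xenoprof_setup(): disable the overflow VIRQ, release the
 * counters on the primary domain, unbind the per-CPU irqs and unmap
 * all shared buffers.
 */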
static void xenoprof_shutdown(void)
{
        xenoprof_enabled = 0;

        WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_disable_virq, NULL));

        if (xenoprof_is_primary) {
                WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_release_counters,
                                               NULL));
                active_defined = 0;
        }

        unbind_virq();

        xenoprof_arch_unmap_shared_buffer(&shared_buffer);
        if (xenoprof_is_primary)
                unmap_passive_list();
        release_buffer_array(xenoprof_buf, nbuf);
}

static int xenoprof_start(void)
{
        int ret = 0;

        if (xenoprof_is_primary)
                ret = HYPERVISOR_xenoprof_op(XENOPROF_start, NULL);
        if (!ret)
                xenoprof_arch_start();
        return ret;
}

static void xenoprof_stop(void)
{
        if (xenoprof_is_primary)
                WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_stop, NULL));
        xenoprof_arch_stop();
}

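/*
 * Tell Xen which domains are actively profiled.  Only the primary
 * domain may change the list; dom0 is always added even if user space
 * did not request it.
 */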
static int xenoprof_set_active(int *active_domains,
                               unsigned int adomains)
{
        int ret = 0;
        int i;
        int set_dom0 = 0;
        domid_t domid;

        if (!xenoprof_is_primary)
                return 0;

        if (adomains > MAX_OPROF_DOMAINS)
                return -E2BIG;

        ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
        if (ret)
                return ret;

        for (i = 0; i < adomains; i++) {
                domid = active_domains[i];
                /* Reject domain ids that do not fit in domid_t */
                if (domid != active_domains[i]) {
                        ret = -EINVAL;
                        goto out;
                }
                ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
                if (ret)
                        goto out;
                if (active_domains[i] == 0)
                        set_dom0 = 1;
        }
        /* dom0 must always be active but may not be in the list */
        if (!set_dom0) {
                domid = 0;
                ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
        }

out:
        if (ret)
                WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list,
                                               NULL));
        active_defined = !ret;
        return ret;
}

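/*
 * Tell Xen which domains are profiled passively and map their sample
 * buffers into this domain, indexed per VCPU.  On failure the buffers
 * mapped so far are released again.
 */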
static int xenoprof_set_passive(int *p_domains,
                                unsigned int pdoms)
{
        int ret;
        unsigned int i, j;
        struct xenoprof_buf *buf;

        if (!xenoprof_is_primary)
                return 0;

        if (pdoms > MAX_OPROF_DOMAINS)
                return -E2BIG;

        ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_passive_list, NULL);
        if (ret)
                return ret;
        unmap_passive_list();

        for (i = 0; i < pdoms; i++) {
                passive_domains[i].domain_id = p_domains[i];
                passive_domains[i].max_samples = 2048;
                ret = xenoprof_arch_set_passive(&passive_domains[i],
                                                &p_shared_buffer[i]);
                if (ret)
                        goto out;

                p_xenoprof_buf[i] = get_buffer_array(passive_domains[i].nbuf);
                if (!p_xenoprof_buf[i]) {
                        ++i;
                        ret = -ENOMEM;
                        goto out;
                }

                for (j = 0; j < passive_domains[i].nbuf; j++) {
                        buf = (struct xenoprof_buf *)
                                &p_shared_buffer[i].buffer[
                                j * passive_domains[i].bufsize];
                        BUG_ON(buf->vcpu_id >= passive_domains[i].nbuf);
                        p_xenoprof_buf[i][buf->vcpu_id] = buf;
                }
        }

        pdomains = pdoms;
        return 0;

out:
        /* Unwind the passive domains set up so far */
        for (j = 0; j < i; j++) {
                xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[j]);
                release_buffer_array(p_xenoprof_buf[j],
                                     passive_domains[j].nbuf);
        }

        return ret;
}

/*
 * The dummy backtrace function to keep oprofile happy.
 * The real backtrace is done in Xen.
 */
static void xenoprof_dummy_backtrace(struct pt_regs *const regs,
                                     unsigned int depth)
{
        /* this should never be called */
        BUG();
        return;
}

static struct oprofile_operations xenoprof_ops = {
#ifdef HAVE_XENOPROF_CREATE_FILES
        .create_files   = xenoprof_create_files,
#endif
        .set_active     = xenoprof_set_active,
        .set_passive    = xenoprof_set_passive,
        .setup          = xenoprof_setup,
        .shutdown       = xenoprof_shutdown,
        .start          = xenoprof_start,
        .stop           = xenoprof_stop,
        .backtrace      = xenoprof_dummy_backtrace
};

/* Set once init_driverfs() has run, so xenoprofile_exit() knows to undo it */
static int using_xenoprof;

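/*
 * Ask Xen whether xenoprof is available.  On success, record whether we
 * are the primary profiling domain, copy the CPU type string detected
 * by Xen and hand the xenoprof operations back to the oprofile core.
 */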
int __init xenoprofile_init(struct oprofile_operations *ops)
{
        struct xenoprof_init init;
        unsigned int i;
        int ret;

        ret = HYPERVISOR_xenoprof_op(XENOPROF_init, &init);
        if (!ret) {
                xenoprof_arch_init_counter(&init);
                xenoprof_is_primary = init.is_primary;

                /* cpu_type is detected by Xen */
                cpu_type[XENOPROF_CPU_TYPE_SIZE - 1] = 0;
                strncpy(cpu_type, init.cpu_type, XENOPROF_CPU_TYPE_SIZE - 1);
                xenoprof_ops.cpu_type = cpu_type;

                init_driverfs();
                using_xenoprof = 1;
                *ops = xenoprof_ops;

                for (i = 0; i < NR_CPUS; i++)
                        ovf_irq[i] = -1;

                active_defined = 0;
        }

        printk(KERN_INFO "%s: ret %d, events %d, xenoprof_is_primary %d\n",
               __func__, ret, init.num_events, xenoprof_is_primary);
        return ret;
}

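/*
 * Driver teardown: remove the sysdev entries if we registered them,
 * unmap the shared buffers and, on the primary domain, tell Xen to
 * shut xenoprof down.
 */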
void xenoprofile_exit(void)
{
        if (using_xenoprof)
                exit_driverfs();

        xenoprof_arch_unmap_shared_buffer(&shared_buffer);
        if (xenoprof_is_primary) {
                unmap_passive_list();
                WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_shutdown, NULL));
        }
}