/*
 * Imported from linux-flexiantxendom0-3.2.10.git:
 * drivers/xen/xenoprof/xenoprofile.c
 * (Xen patches updated to 3.3-rc5 and c/s 1157.)
 */
1 /**
2  * @file xenoprofile.c
3  *
4  * @remark Copyright 2002 OProfile authors
5  * @remark Read the file COPYING
6  *
7  * @author John Levon <levon@movementarian.org>
8  *
9  * Modified by Aravind Menon and Jose Renato Santos for Xen
10  * These modifications are:
11  * Copyright (C) 2005 Hewlett-Packard Co.
12  *
13  * Separated out arch-generic part
14  * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
15  *                    VA Linux Systems Japan K.K.
16  */
17
18 #include <linux/init.h>
19 #include <linux/notifier.h>
20 #include <linux/smp.h>
21 #include <linux/oprofile.h>
22 #include <linux/syscore_ops.h>
23 #include <linux/slab.h>
24 #include <linux/interrupt.h>
25 #include <linux/vmalloc.h>
26 #include <asm/pgtable.h>
27 #include <xen/evtchn.h>
28 #include <xen/xenoprof.h>
29 #include <xen/interface/xen.h>
30 #include <xen/interface/xenoprof.h>
31 #include "../../../drivers/oprofile/event_buffer.h"
32
/* Entries requested per shared sample buffer (XENOPROF_get_buffer). */
#define MAX_XENOPROF_SAMPLES 16

/* sample buffers shared with Xen */
static xenoprof_buf_t **__read_mostly xenoprof_buf;
/* Shared buffer area */
static struct xenoprof_shared_buffer shared_buffer;

/* Passive sample buffers shared with Xen */
static xenoprof_buf_t **__read_mostly p_xenoprof_buf[MAX_OPROF_DOMAINS];
/* Passive shared buffer area */
static struct xenoprof_shared_buffer p_shared_buffer[MAX_OPROF_DOMAINS];

static int xenoprof_start(void);
static void xenoprof_stop(void);

/* Non-zero while profiling is active (set in setup, cleared in shutdown). */
static int xenoprof_enabled = 0;
/* Non-zero when Xen reported this domain as the primary profiler. */
static int xenoprof_is_primary = 0;
/* Non-zero once an active-domain list has been pushed to Xen. */
static int active_defined;

/* Requested backtrace depth; owned by the oprofile core. */
extern unsigned long oprofile_backtrace_depth;

/* Number of buffers in shared area (one per VCPU) */
static int nbuf;
/* Mapping of VIRQ_XENOPROF to irq number */
static int ovf_irq = -1;
/* CPUs whose per-CPU VIRQ binding is currently installed. */
static cpumask_var_t ovf_irq_mapped;
/* cpu model type string - copied from Xen on XENOPROF_init command */
static char cpu_type[XENOPROF_CPU_TYPE_SIZE];
61
#ifdef CONFIG_PM_SLEEP

/* syscore suspend hook: pause sampling across a sleep transition. */
static int xenoprof_suspend(void)
{
        if (xenoprof_enabled == 1)
                xenoprof_stop();
        return 0;
}


/* syscore resume hook: restart sampling if it was active at suspend. */
static void xenoprof_resume(void)
{
        if (xenoprof_enabled == 1)
                xenoprof_start();
}


static struct syscore_ops oprofile_syscore_ops = {
        .resume         = xenoprof_resume,
        .suspend        = xenoprof_suspend
};


/* Register the suspend/resume hooks; called once from xenoprofile_init(). */
static int __init init_driverfs(void)
{
        register_syscore_ops(&oprofile_syscore_ops);
        return 0;
}


/* Unregister the suspend/resume hooks; called from xenoprofile_exit(). */
static void exit_driverfs(void)
{
        unregister_syscore_ops(&oprofile_syscore_ops);
}

#else
/* Without PM sleep support the hooks collapse to no-ops. */
#define init_driverfs() do { } while (0)
#define exit_driverfs() do { } while (0)
#endif /* CONFIG_PM_SLEEP */
101
/* Running totals of samples delivered for the local / passive domains. */
static unsigned long long oprofile_samples;
static unsigned long long p_oprofile_samples;

/* Passive domains registered with Xen; first 'pdomains' entries are valid. */
static unsigned int pdomains;
static struct xenoprof_passive passive_domains[MAX_OPROF_DOMAINS];
107
/* Check whether the given entry is an escape code */
static int xenoprof_is_escape(xenoprof_buf_t * buf, int tail)
{
#if CONFIG_XEN_COMPAT < 0x040200 && !defined(CONFIG_64BIT)
        /* Hypervisors older than 4.2 delivered the escape code truncated
         * to a 32-bit guest's unsigned long; accept that form as well. */
        if (buf->event_log[tail].eip == (unsigned long)XENOPROF_ESCAPE_CODE)
                return 1;
#endif
        return (buf->event_log[tail].eip == XENOPROF_ESCAPE_CODE);
}
117
118 /* Get the event at the given entry  */
119 static uint8_t xenoprof_get_event(xenoprof_buf_t * buf, int tail)
120 {
121         return (buf->event_log[tail].event);
122 }
123
124 static void xenoprof_add_pc(xenoprof_buf_t *buf, int is_passive)
125 {
126         int head, tail, size;
127         int tracing = 0;
128
129         head = buf->event_head;
130         tail = buf->event_tail;
131         size = buf->event_size;
132
133         while (tail != head) {
134                 if (xenoprof_is_escape(buf, tail) &&
135                     xenoprof_get_event(buf, tail) == XENOPROF_TRACE_BEGIN) {
136                         tracing=1;
137                         oprofile_add_mode(buf->event_log[tail].mode);
138                         if (!is_passive)
139                                 oprofile_samples++;
140                         else
141                                 p_oprofile_samples++;
142                         
143                 } else {
144                         oprofile_add_pc(buf->event_log[tail].eip,
145                                         buf->event_log[tail].mode,
146                                         buf->event_log[tail].event);
147                         if (!tracing) {
148                                 if (!is_passive)
149                                         oprofile_samples++;
150                                 else
151                                         p_oprofile_samples++;
152                         }
153        
154                 }
155                 tail++;
156                 if(tail==size)
157                     tail=0;
158         }
159         buf->event_tail = tail;
160 }
161
162 static void xenoprof_handle_passive(void)
163 {
164         int i, j;
165         int flag_domain, flag_switch = 0;
166         
167         for (i = 0; i < pdomains; i++) {
168                 flag_domain = 0;
169                 for (j = 0; j < passive_domains[i].nbuf; j++) {
170                         xenoprof_buf_t *buf = p_xenoprof_buf[i][j];
171                         if (buf->event_head == buf->event_tail)
172                                 continue;
173                         if (!flag_domain) {
174                                 if (!oprofile_add_domain_switch(
175                                         passive_domains[i].domain_id))
176                                         goto done;
177                                 flag_domain = 1;
178                         }
179                         xenoprof_add_pc(buf, 1);
180                         flag_switch = 1;
181                 }
182         }
183 done:
184         if (flag_switch)
185                 oprofile_add_domain_switch(COORDINATOR_DOMAIN);
186 }
187
/*
 * VIRQ_XENOPROF overflow handler: drain this CPU's sample buffer.
 * On the primary domain it additionally drains all passive-domain
 * buffers; the 'flag' bit acts as a trylock so only one CPU processes
 * the passive buffers at a time (others skip rather than spin).
 */
static irqreturn_t xenoprof_ovf_interrupt(int irq, void *dev_id)
{
        struct xenoprof_buf * buf;
        static unsigned long flag;

        buf = xenoprof_buf[smp_processor_id()];

        xenoprof_add_pc(buf, 0);

        if (xenoprof_is_primary && !test_and_set_bit(0, &flag)) {
                xenoprof_handle_passive();
                /* Order passive processing before releasing the bit lock. */
                smp_mb__before_clear_bit();
                clear_bit(0, &flag);
        }

        return IRQ_HANDLED;
}

/* irqaction used for every per-CPU VIRQ_XENOPROF binding. */
static struct irqaction ovf_action = {
        .handler = xenoprof_ovf_interrupt,
        .flags   = IRQF_DISABLED,
        .name    = "xenoprof"
};
211
212 static void unbind_virq(void)
213 {
214         unsigned int i;
215
216         for_each_online_cpu(i) {
217                 if (cpumask_test_and_clear_cpu(i, ovf_irq_mapped))
218                         unbind_from_per_cpu_irq(ovf_irq, i, &ovf_action);
219         }
220         ovf_irq = -1;
221 }
222
223
224 static int bind_virq(void)
225 {
226         unsigned int i;
227         int result;
228
229         for_each_online_cpu(i) {
230                 result = bind_virq_to_irqaction(VIRQ_XENOPROF, i, &ovf_action);
231
232                 if (result < 0) {
233                         unbind_virq();
234                         return result;
235                 }
236
237                 if (ovf_irq < 0)
238                         ovf_irq = result;
239                 else if (result != ovf_irq) {
240                         unbind_virq();
241                         pr_err("IRQ%d unexpected (should be %d)\n",
242                                result, ovf_irq);
243                         return -ESTALE;
244                 }
245                 cpumask_set_cpu(i, ovf_irq_mapped);
246         }
247                 
248         return 0;
249 }
250
251
252 static xenoprof_buf_t **get_buffer_array(unsigned int nbuf)
253 {
254         size_t size = nbuf * sizeof(xenoprof_buf_t);
255
256         if (size <= PAGE_SIZE)
257                 return kmalloc(size, GFP_KERNEL);
258         return vmalloc(size);
259 }
260
261 static void release_buffer_array(xenoprof_buf_t **buf, unsigned int nbuf)
262 {
263         if (nbuf * sizeof(xenoprof_buf_t) <= PAGE_SIZE)
264                 kfree(buf);
265         else
266                 vfree(buf);
267 }
268
269
270 static void unmap_passive_list(void)
271 {
272         int i;
273         for (i = 0; i < pdomains; i++) {
274                 xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[i]);
275                 release_buffer_array(p_xenoprof_buf[i],
276                                      passive_domains[i].nbuf);
277         }
278         pdomains = 0;
279 }
280
281
282 static int map_xenoprof_buffer(int max_samples)
283 {
284         struct xenoprof_get_buffer get_buffer;
285         struct xenoprof_buf *buf;
286         int ret, i;
287
288         if ( shared_buffer.buffer )
289                 return 0;
290
291         get_buffer.max_samples = max_samples;
292         ret = xenoprof_arch_map_shared_buffer(&get_buffer, &shared_buffer);
293         if (ret)
294                 return ret;
295         nbuf = get_buffer.nbuf;
296
297         xenoprof_buf = get_buffer_array(nbuf);
298         if (!xenoprof_buf) {
299                 xenoprof_arch_unmap_shared_buffer(&shared_buffer);
300                 return -ENOMEM;
301         }
302
303         for (i=0; i< nbuf; i++) {
304                 buf = (struct xenoprof_buf*) 
305                         &shared_buffer.buffer[i * get_buffer.bufsize];
306                 BUG_ON(buf->vcpu_id >= nbuf);
307                 xenoprof_buf[buf->vcpu_id] = buf;
308         }
309
310         return 0;
311 }
312
313
314 static int xenoprof_setup(void)
315 {
316         int ret;
317
318         if ( (ret = map_xenoprof_buffer(MAX_XENOPROF_SAMPLES)) )
319                 return ret;
320
321         if ( (ret = bind_virq()) ) {
322                 release_buffer_array(xenoprof_buf, nbuf);
323                 return ret;
324         }
325
326         if (xenoprof_is_primary) {
327                 /* Define dom0 as an active domain if not done yet */
328                 if (!active_defined) {
329                         domid_t domid;
330                         ret = HYPERVISOR_xenoprof_op(
331                                 XENOPROF_reset_active_list, NULL);
332                         if (ret)
333                                 goto err;
334                         domid = 0;
335                         ret = HYPERVISOR_xenoprof_op(
336                                 XENOPROF_set_active, &domid);
337                         if (ret)
338                                 goto err;
339                         active_defined = 1;
340                 }
341
342                 if (oprofile_backtrace_depth > 0) {
343                         ret = HYPERVISOR_xenoprof_op(XENOPROF_set_backtrace, 
344                                                      &oprofile_backtrace_depth);
345                         if (ret)
346                                 oprofile_backtrace_depth = 0;
347                 }
348
349                 ret = HYPERVISOR_xenoprof_op(XENOPROF_reserve_counters, NULL);
350                 if (ret)
351                         goto err;
352                 
353                 xenoprof_arch_counter();
354                 ret = HYPERVISOR_xenoprof_op(XENOPROF_setup_events, NULL);
355                 if (ret)
356                         goto err;
357         }
358
359         ret = HYPERVISOR_xenoprof_op(XENOPROF_enable_virq, NULL);
360         if (ret)
361                 goto err;
362
363         xenoprof_enabled = 1;
364         return 0;
365  err:
366         unbind_virq();
367         release_buffer_array(xenoprof_buf, nbuf);
368         return ret;
369 }
370
371
/*
 * oprofile 'shutdown' hook: disable the VIRQ in Xen, release the
 * hardware counters (primary only), unbind the per-CPU IRQs and
 * unmap/free all sample buffers.  Mirrors xenoprof_setup().
 */
static void xenoprof_shutdown(void)
{
        xenoprof_enabled = 0;

        WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_disable_virq, NULL));

        if (xenoprof_is_primary) {
                WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_release_counters,
                                               NULL));
                /* Force the next xenoprof_setup() to re-register dom0. */
                active_defined = 0;
        }

        unbind_virq();

        xenoprof_arch_unmap_shared_buffer(&shared_buffer);
        if (xenoprof_is_primary)
                unmap_passive_list();
        release_buffer_array(xenoprof_buf, nbuf);
}
391
392
393 static int xenoprof_start(void)
394 {
395         int ret = 0;
396
397         if (xenoprof_is_primary)
398                 ret = HYPERVISOR_xenoprof_op(XENOPROF_start, NULL);
399         if (!ret)
400                 xenoprof_arch_start();
401         return ret;
402 }
403
404
405 static void xenoprof_stop(void)
406 {
407         if (xenoprof_is_primary)
408                 WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_stop, NULL));
409         xenoprof_arch_stop();
410 }
411
412
413 static int xenoprof_set_active(int * active_domains,
414                                unsigned int adomains)
415 {
416         int ret = 0;
417         int i;
418         int set_dom0 = 0;
419         domid_t domid;
420
421         if (!xenoprof_is_primary)
422                 return 0;
423
424         if (adomains > MAX_OPROF_DOMAINS)
425                 return -E2BIG;
426
427         ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
428         if (ret)
429                 return ret;
430
431         for (i=0; i<adomains; i++) {
432                 domid = active_domains[i];
433                 if (domid != active_domains[i]) {
434                         ret = -EINVAL;
435                         goto out;
436                 }
437                 ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
438                 if (ret)
439                         goto out;
440                 if (active_domains[i] == 0)
441                         set_dom0 = 1;
442         }
443         /* dom0 must always be active but may not be in the list */ 
444         if (!set_dom0) {
445                 domid = 0;
446                 ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
447         }
448
449 out:
450         if (ret)
451                 WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list,
452                                                NULL));
453         active_defined = !ret;
454         return ret;
455 }
456
457 static int xenoprof_set_passive(int * p_domains,
458                                 unsigned int pdoms)
459 {
460         int ret;
461         unsigned int i, j;
462         struct xenoprof_buf *buf;
463
464         if (!xenoprof_is_primary)
465                 return 0;
466
467         if (pdoms > MAX_OPROF_DOMAINS)
468                 return -E2BIG;
469
470         ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_passive_list, NULL);
471         if (ret)
472                 return ret;
473         unmap_passive_list();
474
475         for (i = 0; i < pdoms; i++) {
476                 passive_domains[i].domain_id = p_domains[i];
477                 passive_domains[i].max_samples = 2048;
478                 ret = xenoprof_arch_set_passive(&passive_domains[i],
479                                                 &p_shared_buffer[i]);
480                 if (ret)
481                         goto out;
482
483                 p_xenoprof_buf[i] = get_buffer_array(passive_domains[i].nbuf);
484                 if (!p_xenoprof_buf[i]) {
485                         ++i;
486                         ret = -ENOMEM;
487                         goto out;
488                 }
489
490                 for (j = 0; j < passive_domains[i].nbuf; j++) {
491                         buf = (struct xenoprof_buf *)
492                                 &p_shared_buffer[i].buffer[
493                                 j * passive_domains[i].bufsize];
494                         BUG_ON(buf->vcpu_id >= passive_domains[i].nbuf);
495                         p_xenoprof_buf[i][buf->vcpu_id] = buf;
496                 }
497         }
498
499         pdomains = pdoms;
500         return 0;
501
502 out:
503         for (j = 0; j < i; j++) {
504                 xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[i]);
505                 release_buffer_array(p_xenoprof_buf[i],
506                                      passive_domains[i].nbuf);
507         }
508
509         return ret;
510 }
511
512
/*
 * Dummy backtrace callback to satisfy the oprofile core; the real
 * backtraces are produced inside Xen, so this must never execute.
 */
static void xenoprof_dummy_backtrace(struct pt_regs * const regs,
                                     unsigned int depth)
{
        BUG();  /* unreachable by design */
}
523
524
/* Operations handed to the oprofile core by xenoprofile_init(). */
static struct oprofile_operations xenoprof_ops = {
#ifdef HAVE_XENOPROF_CREATE_FILES
        .create_files   = xenoprof_create_files,
#endif
        .set_active     = xenoprof_set_active,
        .set_passive    = xenoprof_set_passive,
        .setup          = xenoprof_setup,
        .shutdown       = xenoprof_shutdown,
        .start          = xenoprof_start,
        .stop           = xenoprof_stop,
        .backtrace      = xenoprof_dummy_backtrace
};


/* in order to get driverfs right */
/* Set once init succeeds; gates exit_driverfs() in xenoprofile_exit(). */
static int using_xenoprof;
541
542 int __init xenoprofile_init(struct oprofile_operations * ops)
543 {
544         struct xenoprof_init init;
545         int ret;
546
547         if (!zalloc_cpumask_var(&ovf_irq_mapped, GFP_KERNEL))
548                 return -ENOMEM;
549
550         ret = HYPERVISOR_xenoprof_op(XENOPROF_init, &init);
551         if (!ret) {
552                 xenoprof_arch_init_counter(&init);
553                 xenoprof_is_primary = init.is_primary;
554
555                 /* cpu_type is detected by Xen */
556                 strlcpy(cpu_type, init.cpu_type, XENOPROF_CPU_TYPE_SIZE);
557                 xenoprof_ops.cpu_type = cpu_type;
558
559                 init_driverfs();
560                 using_xenoprof = 1;
561                 *ops = xenoprof_ops;
562
563                 active_defined = 0;
564         } else
565                 free_cpumask_var(ovf_irq_mapped);
566
567         pr_info("%s: ret %d, events %d, xenoprof_is_primary %d\n",
568                 __func__, ret, init.num_events, xenoprof_is_primary);
569         return ret;
570 }
571
572
/*
 * Teardown path: unregister the PM hooks (only if init succeeded),
 * unmap any remaining shared buffers and tell Xen to shut xenoprof
 * down (primary only).
 */
void xenoprofile_exit(void)
{
        if (using_xenoprof)
                exit_driverfs();

        /* NOTE(review): xenoprof_buf itself is not freed here -- this
         * appears to assume the core already ran xenoprof_shutdown();
         * confirm against the oprofile core's teardown order. */
        xenoprof_arch_unmap_shared_buffer(&shared_buffer);
        if (xenoprof_is_primary) {
                unmap_passive_list();
                WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_shutdown, NULL));
        }

        free_cpumask_var(ovf_irq_mapped);
}