drivers/xen/xenoprof/xenoprofile.c
/**
 * @file xenoprofile.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 *
 * Modified by Aravind Menon and Jose Renato Santos for Xen
 * These modifications are:
 * Copyright (C) 2005 Hewlett-Packard Co.
 *
 * Separated out arch-generic part
 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 */

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/sysdev.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/vmalloc.h>
#include <asm/pgtable.h>
#include <xen/evtchn.h>
#include <xen/xenoprof.h>
#include <xen/driver_util.h>
#include <xen/interface/xen.h>
#include <xen/interface/xenoprof.h>
#include "../../../drivers/oprofile/cpu_buffer.h"
#include "../../../drivers/oprofile/event_buffer.h"

#define MAX_XENOPROF_SAMPLES 16

/* Sample buffers shared with Xen */
xenoprof_buf_t *xenoprof_buf[MAX_VIRT_CPUS];
/* Shared buffer area */
struct xenoprof_shared_buffer shared_buffer;

/* Passive sample buffers shared with Xen */
xenoprof_buf_t *p_xenoprof_buf[MAX_OPROF_DOMAINS][MAX_VIRT_CPUS];
/* Passive shared buffer area */
struct xenoprof_shared_buffer p_shared_buffer[MAX_OPROF_DOMAINS];

static int xenoprof_start(void);
static void xenoprof_stop(void);

static int xenoprof_enabled = 0;
static int xenoprof_is_primary = 0;
static int active_defined;

/* Number of buffers in shared area (one per VCPU) */
int nbuf;
/* Mappings of VIRQ_XENOPROF to irq number (per CPU) */
int ovf_irq[NR_CPUS];
/* CPU model type string - copied from Xen on the XENOPROF_init command */
char cpu_type[XENOPROF_CPU_TYPE_SIZE];

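/*
 * Power management hooks: profiling is stopped before the domain
 * suspends and restarted on resume, but only if it was enabled.
 */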
#ifdef CONFIG_PM

static int xenoprof_suspend(struct sys_device *dev, pm_message_t state)
{
        if (xenoprof_enabled == 1)
                xenoprof_stop();
        return 0;
}


static int xenoprof_resume(struct sys_device *dev)
{
        if (xenoprof_enabled == 1)
                xenoprof_start();
        return 0;
}


static struct sysdev_class oprofile_sysclass = {
        set_kset_name("oprofile"),
        .resume         = xenoprof_resume,
        .suspend        = xenoprof_suspend
};


static struct sys_device device_oprofile = {
        .id     = 0,
        .cls    = &oprofile_sysclass,
};


static int __init init_driverfs(void)
{
        int error;
        if (!(error = sysdev_class_register(&oprofile_sysclass)))
                error = sysdev_register(&device_oprofile);
        return error;
}


static void exit_driverfs(void)
{
        sysdev_unregister(&device_oprofile);
        sysdev_class_unregister(&oprofile_sysclass);
}

#else
#define init_driverfs() do { } while (0)
#define exit_driverfs() do { } while (0)
#endif /* CONFIG_PM */

/* Running totals of samples forwarded for the local and passive domains */
unsigned long long oprofile_samples = 0;
unsigned long long p_oprofile_samples = 0;

/* Number and descriptors of passively profiled domains */
unsigned int pdomains;
struct xenoprof_passive passive_domains[MAX_OPROF_DOMAINS];

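/*
 * Drain one sample buffer shared with Xen: forward every sample between
 * event_tail and event_head to the oprofile core, handling ring
 * wrap-around, then publish the new tail back to the buffer.
 */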
static void xenoprof_add_pc(xenoprof_buf_t *buf, int is_passive)
{
        int head, tail, size;

        head = buf->event_head;
        tail = buf->event_tail;
        size = buf->event_size;

        if (tail > head) {
                while (tail < size) {
                        oprofile_add_pc(buf->event_log[tail].eip,
                                        buf->event_log[tail].mode,
                                        buf->event_log[tail].event);
                        if (!is_passive)
                                oprofile_samples++;
                        else
                                p_oprofile_samples++;
                        tail++;
                }
                tail = 0;
        }
        while (tail < head) {
                oprofile_add_pc(buf->event_log[tail].eip,
                                buf->event_log[tail].mode,
                                buf->event_log[tail].event);
                if (!is_passive)
                        oprofile_samples++;
                else
                        p_oprofile_samples++;
                tail++;
        }

        buf->event_tail = tail;
}

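/*
 * Flush samples that Xen collected on behalf of passive domains.  A
 * domain-switch record is emitted before the first non-empty buffer of
 * each passive domain, and a final switch back to COORDINATOR_DOMAIN is
 * emitted once all passive samples have been forwarded.
 */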
static void xenoprof_handle_passive(void)
{
        int i, j;
        int flag_domain, flag_switch = 0;

        for (i = 0; i < pdomains; i++) {
                flag_domain = 0;
                for (j = 0; j < passive_domains[i].nbuf; j++) {
                        xenoprof_buf_t *buf = p_xenoprof_buf[i][j];
                        if (buf->event_head == buf->event_tail)
                                continue;
                        if (!flag_domain) {
                                if (!oprofile_add_domain_switch(
                                            passive_domains[i].domain_id))
                                        goto done;
                                flag_domain = 1;
                        }
                        xenoprof_add_pc(buf, 1);
                        flag_switch = 1;
                }
        }
done:
        if (flag_switch)
                oprofile_add_domain_switch(COORDINATOR_DOMAIN);
}

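/*
 * VIRQ_XENOPROF handler: Xen raises this virtual interrupt when sample
 * buffers need flushing.  The local VCPU's buffer is always drained; the
 * primary domain additionally flushes the passive buffers, serialised by
 * a simple test-and-set flag.
 */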
static irqreturn_t xenoprof_ovf_interrupt(int irq, void *dev_id)
{
        struct xenoprof_buf *buf;
        int cpu;
        static unsigned long flag;

        cpu = smp_processor_id();
        buf = xenoprof_buf[cpu];

        xenoprof_add_pc(buf, 0);

        if (xenoprof_is_primary && !test_and_set_bit(0, &flag)) {
                xenoprof_handle_passive();
                smp_mb__before_clear_bit();
                clear_bit(0, &flag);
        }

        return IRQ_HANDLED;
}


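/*
 * Bind/unbind VIRQ_XENOPROF to the overflow handler on every online CPU.
 * bind_virq() unwinds all of its bindings if any single one fails.
 */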
static void unbind_virq(void)
{
        int i;

        for_each_online_cpu(i) {
                if (ovf_irq[i] >= 0) {
                        unbind_from_irqhandler(ovf_irq[i], NULL);
                        ovf_irq[i] = -1;
                }
        }
}


static int bind_virq(void)
{
        int i, result;

        for_each_online_cpu(i) {
                result = bind_virq_to_irqhandler(VIRQ_XENOPROF,
                                                 i,
                                                 xenoprof_ovf_interrupt,
                                                 SA_INTERRUPT,
                                                 "xenoprof",
                                                 NULL);

                if (result < 0) {
                        unbind_virq();
                        return result;
                }

                ovf_irq[i] = result;
        }

        return 0;
}


static void unmap_passive_list(void)
{
        int i;
        for (i = 0; i < pdomains; i++)
                xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[i]);
        pdomains = 0;
}


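/*
 * Map the sample buffers shared with Xen (one per VCPU) into this domain
 * and record a pointer to each, indexed by VCPU id.  The mapping is done
 * only once; later calls return immediately.
 */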
static int map_xenoprof_buffer(int max_samples)
{
        struct xenoprof_get_buffer get_buffer;
        struct xenoprof_buf *buf;
        int ret, i;

        if (shared_buffer.buffer)
                return 0;

        get_buffer.max_samples = max_samples;
        ret = xenoprof_arch_map_shared_buffer(&get_buffer, &shared_buffer);
        if (ret)
                return ret;
        nbuf = get_buffer.nbuf;

        for (i = 0; i < nbuf; i++) {
                buf = (struct xenoprof_buf *)
                        &shared_buffer.buffer[i * get_buffer.bufsize];
                BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
                xenoprof_buf[buf->vcpu_id] = buf;
        }

        return 0;
}


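/*
 * oprofile "setup" callback: map the shared buffers, bind the VIRQ on
 * every CPU and, if this is the primary domain, program the active
 * domain list and the performance counters through Xen.
 */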
static int xenoprof_setup(void)
{
        int ret;

        if ((ret = map_xenoprof_buffer(MAX_XENOPROF_SAMPLES)))
                return ret;

        if ((ret = bind_virq()))
                return ret;

        if (xenoprof_is_primary) {
                /* Define dom0 as an active domain if not done yet */
                if (!active_defined) {
                        domid_t domid;
                        ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list,
                                                     NULL);
                        if (ret)
                                goto err;
                        domid = 0;
                        ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
                        if (ret)
                                goto err;
                        active_defined = 1;
                }

                ret = HYPERVISOR_xenoprof_op(XENOPROF_reserve_counters, NULL);
                if (ret)
                        goto err;
                xenoprof_arch_counter();
                ret = HYPERVISOR_xenoprof_op(XENOPROF_setup_events, NULL);
                if (ret)
                        goto err;
        }

        ret = HYPERVISOR_xenoprof_op(XENOPROF_enable_virq, NULL);
        if (ret)
                goto err;

        xenoprof_enabled = 1;
        return 0;
 err:
        unbind_virq();
        return ret;
}


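/*
 * oprofile "shutdown" callback: undo everything xenoprof_setup() did,
 * releasing the counters, the VIRQ bindings and the shared buffers.
 */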
static void xenoprof_shutdown(void)
{
        xenoprof_enabled = 0;

        HYPERVISOR_xenoprof_op(XENOPROF_disable_virq, NULL);

        if (xenoprof_is_primary) {
                HYPERVISOR_xenoprof_op(XENOPROF_release_counters, NULL);
                active_defined = 0;
        }

        unbind_virq();

        xenoprof_arch_unmap_shared_buffer(&shared_buffer);
        if (xenoprof_is_primary)
                unmap_passive_list();
}


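/*
 * Start/stop sampling.  Only the primary domain asks Xen to start or
 * stop the counters; every domain enables or disables its own
 * architecture-specific state.
 */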
static int xenoprof_start(void)
{
        int ret = 0;

        if (xenoprof_is_primary)
                ret = HYPERVISOR_xenoprof_op(XENOPROF_start, NULL);
        if (!ret)
                xenoprof_arch_start();
        return ret;
}


static void xenoprof_stop(void)
{
        if (xenoprof_is_primary)
                HYPERVISOR_xenoprof_op(XENOPROF_stop, NULL);
        xenoprof_arch_stop();
}


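/*
 * Replace Xen's list of actively profiled domains.  Only the primary
 * domain may do this; dom0 is always added even if it is missing from
 * the caller's list, and the list is reset again on failure.
 */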
static int xenoprof_set_active(int *active_domains,
                               unsigned int adomains)
{
        int ret = 0;
        int i;
        int set_dom0 = 0;
        domid_t domid;

        if (!xenoprof_is_primary)
                return 0;

        if (adomains > MAX_OPROF_DOMAINS)
                return -E2BIG;

        ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
        if (ret)
                return ret;

        for (i = 0; i < adomains; i++) {
                domid = active_domains[i];
                /* Reject domain IDs that do not fit in a domid_t. */
                if (domid != active_domains[i]) {
                        ret = -EINVAL;
                        goto out;
                }
                ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
                if (ret)
                        goto out;
                if (active_domains[i] == 0)
                        set_dom0 = 1;
        }
        /* dom0 must always be active but may not be in the list */
        if (!set_dom0) {
                domid = 0;
                ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
        }

out:
        if (ret)
                HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
        active_defined = !ret;
        return ret;
}

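/*
 * Replace the list of passively profiled domains and map their sample
 * buffers.  On failure, any buffers mapped so far are unmapped again.
 */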
static int xenoprof_set_passive(int *p_domains,
                                unsigned int pdoms)
{
        int ret;
        int i, j;
        struct xenoprof_buf *buf;

        if (!xenoprof_is_primary)
                return 0;

        if (pdoms > MAX_OPROF_DOMAINS)
                return -E2BIG;

        ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_passive_list, NULL);
        if (ret)
                return ret;
        unmap_passive_list();

        for (i = 0; i < pdoms; i++) {
                passive_domains[i].domain_id = p_domains[i];
                passive_domains[i].max_samples = 2048;
                ret = xenoprof_arch_set_passive(&passive_domains[i],
                                                &p_shared_buffer[i]);
                if (ret)
                        goto out;
                for (j = 0; j < passive_domains[i].nbuf; j++) {
                        buf = (struct xenoprof_buf *)
                                &p_shared_buffer[i].buffer[j * passive_domains[i].bufsize];
                        BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
                        p_xenoprof_buf[i][buf->vcpu_id] = buf;
                }
        }

        pdomains = pdoms;
        return 0;

out:
        /* Unmap the buffers that were mapped before the failure. */
        for (j = 0; j < i; j++)
                xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[j]);

        return ret;
}

struct oprofile_operations xenoprof_ops = {
#ifdef HAVE_XENOPROF_CREATE_FILES
        .create_files   = xenoprof_create_files,
#endif
        .set_active     = xenoprof_set_active,
        .set_passive    = xenoprof_set_passive,
        .setup          = xenoprof_setup,
        .shutdown       = xenoprof_shutdown,
        .start          = xenoprof_start,
        .stop           = xenoprof_stop
};


/* Set once init_driverfs() has run, so exit_driverfs() is only called then */
static int using_xenoprof;

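/*
 * Entry point called from the architecture oprofile init code: ask Xen
 * whether xenoprof is available, record whether this is the primary
 * profiling domain, and hand the xenoprof operation table back to the
 * oprofile core.
 */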
int __init xenoprofile_init(struct oprofile_operations *ops)
{
        struct xenoprof_init init;
        int ret, i;

        ret = HYPERVISOR_xenoprof_op(XENOPROF_init, &init);
        if (!ret) {
                xenoprof_arch_init_counter(&init);
                xenoprof_is_primary = init.is_primary;

                /* cpu_type is detected by Xen */
                cpu_type[XENOPROF_CPU_TYPE_SIZE - 1] = 0;
                strncpy(cpu_type, init.cpu_type, XENOPROF_CPU_TYPE_SIZE - 1);
                xenoprof_ops.cpu_type = cpu_type;

                init_driverfs();
                using_xenoprof = 1;
                *ops = xenoprof_ops;

                for (i = 0; i < NR_CPUS; i++)
                        ovf_irq[i] = -1;

                active_defined = 0;
        }
        printk(KERN_INFO "%s: ret %d, events %d, xenoprof_is_primary %d\n",
               __func__, ret, init.num_events, xenoprof_is_primary);
        return ret;
}


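/*
 * Tear-down counterpart of xenoprofile_init(): release the shared
 * buffers and, for the primary domain, tell Xen to shut xenoprof down.
 */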
void xenoprofile_exit(void)
{
        if (using_xenoprof)
                exit_driverfs();

        xenoprof_arch_unmap_shared_buffer(&shared_buffer);
        if (xenoprof_is_primary) {
                unmap_passive_list();
                HYPERVISOR_xenoprof_op(XENOPROF_shutdown, NULL);
        }
}