linux-flexiantxendom0-3.2.10.git: arch/x86/oprofile/xenoprof.c
/**
 * @file xenoprof.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 *
 * Modified by Aravind Menon and Jose Renato Santos for Xen
 * These modifications are:
 * Copyright (C) 2005 Hewlett-Packard Co.
 *
 * x86-specific part
 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 */

#include <linux/init.h>
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <asm/pgtable.h>

#include <xen/interface/xen.h>
#include <xen/interface/xenoprof.h>
#include <xen/xenoprof.h>
#include "op_counter.h"

static unsigned int num_events = 0;

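/*
 * Record how many counter events the hypervisor advertises, clamping the
 * value to OP_MAX_COUNTER so that later indexing of counter_config[] can
 * never overrun the array.
 */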
void __init xenoprof_arch_init_counter(struct xenoprof_init *init)
{
        num_events = init->num_events;
        /* just in case - make sure we do not overflow event list
           (i.e. counter_config list) */
        if (num_events > OP_MAX_COUNTER) {
                num_events = OP_MAX_COUNTER;
                init->num_events = num_events;
        }
}

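/*
 * Program every configured counter in the hypervisor: one XENOPROF_counter
 * hypercall per event, carrying the event code, count, unit mask and the
 * kernel/user filter bits taken from counter_config[].
 */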
void xenoprof_arch_counter(void)
{
        int i;
        struct xenoprof_counter counter;

        for (i = 0; i < num_events; i++) {
                counter.ind       = i;
                counter.count     = (uint64_t)counter_config[i].count;
                counter.enabled   = (uint32_t)counter_config[i].enabled;
                counter.event     = (uint32_t)counter_config[i].event;
                counter.kernel    = (uint32_t)counter_config[i].kernel;
                counter.user      = (uint32_t)counter_config[i].user;
                counter.unit_mask = (uint64_t)counter_config[i].unit_mask;
                WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_counter,
                                               &counter));
        }
}

void xenoprof_arch_start(void)
{
        /* nothing */
}

void xenoprof_arch_stop(void)
{
        /* nothing */
}

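/* Tear down the vmalloc mapping of a previously mapped sample buffer. */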
void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer *sbuf)
{
        if (sbuf->buffer) {
                vunmap(sbuf->buffer);
                sbuf->buffer = NULL;
        }
}

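/*
 * Ask the hypervisor where the shared sample buffers live
 * (XENOPROF_get_buffer) and map the frames behind buf_gmaddr into a newly
 * allocated vmalloc area, so the kernel can read the samples directly.
 */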
int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer *get_buffer,
                                    struct xenoprof_shared_buffer *sbuf)
{
        int npages, ret;
        struct vm_struct *area;

        sbuf->buffer = NULL;
        ret = HYPERVISOR_xenoprof_op(XENOPROF_get_buffer, get_buffer);
        if (ret)
                return ret;

        npages = (get_buffer->bufsize * get_buffer->nbuf - 1) / PAGE_SIZE + 1;

        area = alloc_vm_area(npages * PAGE_SIZE, NULL);
        if (area == NULL)
                return -ENOMEM;

        ret = direct_kernel_remap_pfn_range(
                (unsigned long)area->addr,
                get_buffer->buf_gmaddr >> PAGE_SHIFT,
                npages * PAGE_SIZE, __pgprot(_KERNPG_TABLE),
                DOMID_SELF);
        if (ret) {
                vunmap(area->addr);
                return ret;
        }

        sbuf->buffer = area->addr;
        return ret;
}

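/*
 * Register a passively profiled domain with the hypervisor
 * (XENOPROF_set_passive) and map that domain's sample buffer, mirroring
 * the mapping logic of xenoprof_arch_map_shared_buffer() above.
 */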
int xenoprof_arch_set_passive(struct xenoprof_passive *pdomain,
                              struct xenoprof_shared_buffer *sbuf)
{
        int ret;
        int npages;
        struct vm_struct *area;
        pgprot_t prot = __pgprot(_KERNPG_TABLE);

        sbuf->buffer = NULL;
        ret = HYPERVISOR_xenoprof_op(XENOPROF_set_passive, pdomain);
        if (ret)
                goto out;

        npages = (pdomain->bufsize * pdomain->nbuf - 1) / PAGE_SIZE + 1;

        area = alloc_vm_area(npages * PAGE_SIZE, NULL);
        if (area == NULL) {
                ret = -ENOMEM;
                goto out;
        }

        ret = direct_kernel_remap_pfn_range(
                (unsigned long)area->addr,
                pdomain->buf_gmaddr >> PAGE_SHIFT,
                npages * PAGE_SIZE, prot, DOMID_SELF);
        if (ret) {
                vunmap(area->addr);
                goto out;
        }
        sbuf->buffer = area->addr;

out:
        return ret;
}

struct op_counter_config counter_config[OP_MAX_COUNTER];

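/*
 * Expose one numbered oprofilefs directory per event, each containing the
 * usual enabled/event/count/unit_mask/kernel/user files backed by the
 * corresponding counter_config[] entry.
 */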
int xenoprof_create_files(struct super_block *sb, struct dentry *root)
{
        unsigned int i;

        for (i = 0; i < num_events; ++i) {
                struct dentry *dir;
                char buf[2];

                snprintf(buf, 2, "%d", i);
                dir = oprofilefs_mkdir(sb, root, buf);
                oprofilefs_create_ulong(sb, dir, "enabled",
                                        &counter_config[i].enabled);
                oprofilefs_create_ulong(sb, dir, "event",
                                        &counter_config[i].event);
                oprofilefs_create_ulong(sb, dir, "count",
                                        &counter_config[i].count);
                oprofilefs_create_ulong(sb, dir, "unit_mask",
                                        &counter_config[i].unit_mask);
                oprofilefs_create_ulong(sb, dir, "kernel",
                                        &counter_config[i].kernel);
                oprofilefs_create_ulong(sb, dir, "user",
                                        &counter_config[i].user);
        }

        return 0;
}

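/*
 * On Xen the generic oprofile init/exit entry points simply hand off to the
 * common xenoprof code, which is expected to drive the xenoprof_arch_*
 * hooks defined above.
 */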
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
        return xenoprofile_init(ops);
}

void oprofile_arch_exit(void)
{
        xenoprofile_exit();
}