arch/x86/kernel/cpu/perf_event.c
1 /*
2  * Performance events x86 architecture code
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2009 Jaswinder Singh Rajput
7  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
9  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
10  *  Copyright (C) 2009 Google, Inc., Stephane Eranian
11  *
12  *  For licensing details see kernel-base/COPYING
13  */
14
15 #include <linux/perf_event.h>
16 #include <linux/capability.h>
17 #include <linux/notifier.h>
18 #include <linux/hardirq.h>
19 #include <linux/kprobes.h>
20 #include <linux/module.h>
21 #include <linux/kdebug.h>
22 #include <linux/sched.h>
23 #include <linux/uaccess.h>
24 #include <linux/slab.h>
25 #include <linux/cpu.h>
26 #include <linux/bitops.h>
27
28 #include <asm/apic.h>
29 #include <asm/stacktrace.h>
30 #include <asm/nmi.h>
31 #include <asm/compat.h>
32 #include <asm/smp.h>
33 #include <asm/alternative.h>
34
35 #include "perf_event.h"
36
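/*
 * Debug aid: turning the "#if 0" below into "#if 1" replaces wrmsrl()
 * with a variant that trace_printk()s every counter MSR write before
 * performing it via native_write_msr().
 */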
37 #if 0
38 #undef wrmsrl
39 #define wrmsrl(msr, val)                                        \
40 do {                                                            \
41         trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
42                         (unsigned long)(val));                  \
43         native_write_msr((msr), (u32)((u64)(val)),              \
44                         (u32)((u64)(val) >> 32));               \
45 } while (0)
46 #endif
47
48 struct x86_pmu x86_pmu __read_mostly;
49
50 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
51         .enabled = 1,
52 };
53
54 u64 __read_mostly hw_cache_event_ids
55                                 [PERF_COUNT_HW_CACHE_MAX]
56                                 [PERF_COUNT_HW_CACHE_OP_MAX]
57                                 [PERF_COUNT_HW_CACHE_RESULT_MAX];
58 u64 __read_mostly hw_cache_extra_regs
59                                 [PERF_COUNT_HW_CACHE_MAX]
60                                 [PERF_COUNT_HW_CACHE_OP_MAX]
61                                 [PERF_COUNT_HW_CACHE_RESULT_MAX];
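
/*
 * In the tables above a value of 0 means the cache event is not
 * supported and -1 means the combination is invalid; see
 * set_ext_hw_attr().
 */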
62
63 /*
64  * Propagate event elapsed time into the generic event.
65  * Can only be executed on the CPU where the event is active.
66  * Returns the delta events processed.
67  */
68 u64 x86_perf_event_update(struct perf_event *event)
69 {
70         struct hw_perf_event *hwc = &event->hw;
71         int shift = 64 - x86_pmu.cntval_bits;
72         u64 prev_raw_count, new_raw_count;
73         int idx = hwc->idx;
74         s64 delta;
75
76         if (idx == X86_PMC_IDX_FIXED_BTS)
77                 return 0;
78
79         /*
80          * Careful: an NMI might modify the previous event value.
81          *
82          * Our tactic to handle this is to first atomically read and
83          * exchange a new raw count - then add that new-prev delta
84          * count to the generic event atomically:
85          */
86 again:
87         prev_raw_count = local64_read(&hwc->prev_count);
88         rdmsrl(hwc->event_base, new_raw_count);
89
90         if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
91                                         new_raw_count) != prev_raw_count)
92                 goto again;
93
94         /*
95          * Now we have the new raw value and have updated the prev
96          * timestamp already. We can now calculate the elapsed delta
97          * (event-)time and add that to the generic event.
98          *
99          * Careful, not all hw sign-extends above the physical width
100          * of the count.
101          */
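        /*
         * Example: with 48-bit counters, shift is 16.  Shifting both
         * raw values up by 16 throws away whatever the hardware put in
         * the top bits, and the arithmetic shift back down sign-extends
         * the 48-bit difference, so the delta is correct even if the
         * counter wrapped between the two reads.
         */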
102         delta = (new_raw_count << shift) - (prev_raw_count << shift);
103         delta >>= shift;
104
105         local64_add(delta, &event->count);
106         local64_sub(delta, &hwc->period_left);
107
108         return new_raw_count;
109 }
110
111 /*
112  * Find and validate any extra registers to set up.
113  */
114 static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
115 {
116         struct hw_perf_event_extra *reg;
117         struct extra_reg *er;
118
119         reg = &event->hw.extra_reg;
120
121         if (!x86_pmu.extra_regs)
122                 return 0;
123
124         for (er = x86_pmu.extra_regs; er->msr; er++) {
125                 if (er->event != (config & er->config_mask))
126                         continue;
127                 if (event->attr.config1 & ~er->valid_mask)
128                         return -EINVAL;
129
130                 reg->idx = er->idx;
131                 reg->config = event->attr.config1;
132                 reg->reg = er->msr;
133                 break;
134         }
135         return 0;
136 }
137
138 static atomic_t active_events;
139 static DEFINE_MUTEX(pmc_reserve_mutex);
140
141 #ifdef CONFIG_X86_LOCAL_APIC
142
143 static bool reserve_pmc_hardware(void)
144 {
145         int i;
146
147         for (i = 0; i < x86_pmu.num_counters; i++) {
148                 if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
149                         goto perfctr_fail;
150         }
151
152         for (i = 0; i < x86_pmu.num_counters; i++) {
153                 if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
154                         goto eventsel_fail;
155         }
156
157         return true;
158
159 eventsel_fail:
160         for (i--; i >= 0; i--)
161                 release_evntsel_nmi(x86_pmu_config_addr(i));
162
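        /*
         * Reset i so that falling through into perfctr_fail releases
         * every perfctr that was reserved by the first loop.
         */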
163         i = x86_pmu.num_counters;
164
165 perfctr_fail:
166         for (i--; i >= 0; i--)
167                 release_perfctr_nmi(x86_pmu_event_addr(i));
168
169         return false;
170 }
171
172 static void release_pmc_hardware(void)
173 {
174         int i;
175
176         for (i = 0; i < x86_pmu.num_counters; i++) {
177                 release_perfctr_nmi(x86_pmu_event_addr(i));
178                 release_evntsel_nmi(x86_pmu_config_addr(i));
179         }
180 }
181
182 #else
183
184 static bool reserve_pmc_hardware(void) { return true; }
185 static void release_pmc_hardware(void) {}
186
187 #endif
188
189 static bool check_hw_exists(void)
190 {
191         u64 val, val_new = 0;
192         int i, reg, ret = 0;
193
194         /*
195          * Check to see if the BIOS enabled any of the counters, if so
196          * complain and bail.
197          */
198         for (i = 0; i < x86_pmu.num_counters; i++) {
199                 reg = x86_pmu_config_addr(i);
200                 ret = rdmsrl_safe(reg, &val);
201                 if (ret)
202                         goto msr_fail;
203                 if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
204                         goto bios_fail;
205         }
206
207         if (x86_pmu.num_counters_fixed) {
208                 reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
209                 ret = rdmsrl_safe(reg, &val);
210                 if (ret)
211                         goto msr_fail;
212                 for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
213                         if (val & (0x03 << i*4))
214                                 goto bios_fail;
215                 }
216         }
217
218         /*
219          * Now write a value and read it back to see if it matches;
220          * this is needed to detect certain hardware emulators (qemu/kvm)
221          * that don't trap on the MSR access and always return 0s.
222          */
223         val = 0xabcdUL;
224         ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
225         ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
226         if (ret || val != val_new)
227                 goto msr_fail;
228
229         return true;
230
231 bios_fail:
232         /*
233          * We still allow the PMU driver to operate:
234          */
235         printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.\n");
236         printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg, val);
237
238         return true;
239
240 msr_fail:
241         printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
242
243         return false;
244 }
245
246 static void hw_perf_event_destroy(struct perf_event *event)
247 {
248         if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
249                 release_pmc_hardware();
250                 release_ds_buffers();
251                 mutex_unlock(&pmc_reserve_mutex);
252         }
253 }
254
255 static inline int x86_pmu_initialized(void)
256 {
257         return x86_pmu.handle_irq != NULL;
258 }
259
260 static inline int
261 set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
262 {
263         struct perf_event_attr *attr = &event->attr;
264         unsigned int cache_type, cache_op, cache_result;
265         u64 config, val;
266
267         config = attr->config;
268
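        /*
         * attr->config packs the cache event into three bytes:
         * bits 0-7 select the cache, bits 8-15 the operation and
         * bits 16-23 the result (hit/miss).
         */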
269         cache_type = (config >>  0) & 0xff;
270         if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
271                 return -EINVAL;
272
273         cache_op = (config >>  8) & 0xff;
274         if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
275                 return -EINVAL;
276
277         cache_result = (config >> 16) & 0xff;
278         if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
279                 return -EINVAL;
280
281         val = hw_cache_event_ids[cache_type][cache_op][cache_result];
282
283         if (val == 0)
284                 return -ENOENT;
285
286         if (val == -1)
287                 return -EINVAL;
288
289         hwc->config |= val;
290         attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
291         return x86_pmu_extra_regs(val, event);
292 }
293
294 int x86_setup_perfctr(struct perf_event *event)
295 {
296         struct perf_event_attr *attr = &event->attr;
297         struct hw_perf_event *hwc = &event->hw;
298         u64 config;
299
300         if (!is_sampling_event(event)) {
301                 hwc->sample_period = x86_pmu.max_period;
302                 hwc->last_period = hwc->sample_period;
303                 local64_set(&hwc->period_left, hwc->sample_period);
304         } else {
305                 /*
306                  * If we have a PMU initialized but no APIC
307                  * interrupts, we cannot sample hardware
308                  * events (user-space has to fall back and
309                  * sample via a hrtimer based software event):
310                  */
311                 if (!x86_pmu.apic)
312                         return -EOPNOTSUPP;
313         }
314
315         if (attr->type == PERF_TYPE_RAW)
316                 return x86_pmu_extra_regs(event->attr.config, event);
317
318         if (attr->type == PERF_TYPE_HW_CACHE)
319                 return set_ext_hw_attr(hwc, event);
320
321         if (attr->config >= x86_pmu.max_events)
322                 return -EINVAL;
323
324         /*
325          * The generic map:
326          */
327         config = x86_pmu.event_map(attr->config);
328
329         if (config == 0)
330                 return -ENOENT;
331
332         if (config == -1LL)
333                 return -EINVAL;
334
335         /*
336          * Branch tracing:
337          */
338         if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
339             !attr->freq && hwc->sample_period == 1) {
340                 /* BTS is not supported by this architecture. */
341                 if (!x86_pmu.bts_active)
342                         return -EOPNOTSUPP;
343
344                 /* BTS is currently only allowed for user-mode. */
345                 if (!attr->exclude_kernel)
346                         return -EOPNOTSUPP;
347         }
348
349         hwc->config |= config;
350
351         return 0;
352 }
353
354 int x86_pmu_hw_config(struct perf_event *event)
355 {
356         if (event->attr.precise_ip) {
357                 int precise = 0;
358
359                 /* Support for constant skid */
360                 if (x86_pmu.pebs_active) {
361                         precise++;
362
363                         /* Support for IP fixup */
364                         if (x86_pmu.lbr_nr)
365                                 precise++;
366                 }
367
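                /*
                 * precise_ip == 1 requires PEBS ("constant skid");
                 * precise_ip == 2 additionally requires the LBR-based
                 * IP fixup.  Reject anything the hardware cannot do.
                 */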
368                 if (event->attr.precise_ip > precise)
369                         return -EOPNOTSUPP;
370         }
371
372         /*
373          * Generate PMC IRQs:
374          * (keep 'enabled' bit clear for now)
375          */
376         event->hw.config = ARCH_PERFMON_EVENTSEL_INT;
377
378         /*
379          * Count user and OS events unless requested not to
380          */
381         if (!event->attr.exclude_user)
382                 event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
383         if (!event->attr.exclude_kernel)
384                 event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;
385
386         if (event->attr.type == PERF_TYPE_RAW)
387                 event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
388
389         return x86_setup_perfctr(event);
390 }
391
392 /*
393  * Setup the hardware configuration for a given attr_type
394  */
395 static int __x86_pmu_event_init(struct perf_event *event)
396 {
397         int err;
398
399         if (!x86_pmu_initialized())
400                 return -ENODEV;
401
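        /*
         * active_events counts live events: the first event reserves
         * the counter MSRs and the DS buffers, and hw_perf_event_destroy()
         * releases them again when the last event goes away.
         */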
402         err = 0;
403         if (!atomic_inc_not_zero(&active_events)) {
404                 mutex_lock(&pmc_reserve_mutex);
405                 if (atomic_read(&active_events) == 0) {
406                         if (!reserve_pmc_hardware())
407                                 err = -EBUSY;
408                         else
409                                 reserve_ds_buffers();
410                 }
411                 if (!err)
412                         atomic_inc(&active_events);
413                 mutex_unlock(&pmc_reserve_mutex);
414         }
415         if (err)
416                 return err;
417
418         event->destroy = hw_perf_event_destroy;
419
420         event->hw.idx = -1;
421         event->hw.last_cpu = -1;
422         event->hw.last_tag = ~0ULL;
423
424         /* mark unused */
425         event->hw.extra_reg.idx = EXTRA_REG_NONE;
426
427         return x86_pmu.hw_config(event);
428 }
429
430 void x86_pmu_disable_all(void)
431 {
432         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
433         int idx;
434
435         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
436                 u64 val;
437
438                 if (!test_bit(idx, cpuc->active_mask))
439                         continue;
440                 rdmsrl(x86_pmu_config_addr(idx), val);
441                 if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
442                         continue;
443                 val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
444                 wrmsrl(x86_pmu_config_addr(idx), val);
445         }
446 }
447
448 static void x86_pmu_disable(struct pmu *pmu)
449 {
450         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
451
452         if (!x86_pmu_initialized())
453                 return;
454
455         if (!cpuc->enabled)
456                 return;
457
458         cpuc->n_added = 0;
459         cpuc->enabled = 0;
460         barrier();
461
462         x86_pmu.disable_all();
463 }
464
465 void x86_pmu_enable_all(int added)
466 {
467         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
468         int idx;
469
470         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
471                 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
472
473                 if (!test_bit(idx, cpuc->active_mask))
474                         continue;
475
476                 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
477         }
478 }
479
480 static struct pmu pmu;
481
482 static inline int is_x86_event(struct perf_event *event)
483 {
484         return event->pmu == &pmu;
485 }
486
487 /*
488  * Event scheduler state:
489  *
490  * Assign events by iterating over all events and counters, beginning
491  * with the events of least weight. Keep the current iterator
492  * state in struct sched_state.
493  */
494 struct sched_state {
495         int     weight;
496         int     event;          /* event index */
497         int     counter;        /* counter index */
498         int     unassigned;     /* number of events to be assigned left */
499         unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
500 };
501
502 /* Total max is X86_PMC_IDX_MAX, but we are O(n!) limited */
503 #define SCHED_STATES_MAX        2
504
505 struct perf_sched {
506         int                     max_weight;
507         int                     max_events;
508         struct event_constraint **constraints;
509         struct sched_state      state;
510         int                     saved_states;
511         struct sched_state      saved[SCHED_STATES_MAX];
512 };
513
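/*
 * Counters are handed out greedily, starting with the events of least
 * weight (fewest usable counters).  Whenever an event with an
 * overlapping constraint grabs a counter, the scheduler state is saved
 * so that the choice can be undone and retried with the next counter
 * if a later event cannot be placed.
 */
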
514 /*
515  * Initialize the iterator that runs through all events and counters.
516  */
517 static void perf_sched_init(struct perf_sched *sched, struct event_constraint **c,
518                             int num, int wmin, int wmax)
519 {
520         int idx;
521
522         memset(sched, 0, sizeof(*sched));
523         sched->max_events       = num;
524         sched->max_weight       = wmax;
525         sched->constraints      = c;
526
527         for (idx = 0; idx < num; idx++) {
528                 if (c[idx]->weight == wmin)
529                         break;
530         }
531
532         sched->state.event      = idx;          /* start with min weight */
533         sched->state.weight     = wmin;
534         sched->state.unassigned = num;
535 }
536
537 static void perf_sched_save_state(struct perf_sched *sched)
538 {
539         if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX))
540                 return;
541
542         sched->saved[sched->saved_states] = sched->state;
543         sched->saved_states++;
544 }
545
546 static bool perf_sched_restore_state(struct perf_sched *sched)
547 {
548         if (!sched->saved_states)
549                 return false;
550
551         sched->saved_states--;
552         sched->state = sched->saved[sched->saved_states];
553
554         /* continue with next counter: */
555         clear_bit(sched->state.counter++, sched->state.used);
556
557         return true;
558 }
559
560 /*
561  * Select a counter for the current event to schedule. Return true on
562  * success.
563  */
564 static bool __perf_sched_find_counter(struct perf_sched *sched)
565 {
566         struct event_constraint *c;
567         int idx;
568
569         if (!sched->state.unassigned)
570                 return false;
571
572         if (sched->state.event >= sched->max_events)
573                 return false;
574
575         c = sched->constraints[sched->state.event];
576
577         /* Prefer fixed purpose counters */
578         if (x86_pmu.num_counters_fixed) {
579                 idx = X86_PMC_IDX_FIXED;
580                 for_each_set_bit_cont(idx, c->idxmsk, X86_PMC_IDX_MAX) {
581                         if (!__test_and_set_bit(idx, sched->state.used))
582                                 goto done;
583                 }
584         }
585         /* Grab the first unused counter starting with idx */
586         idx = sched->state.counter;
587         for_each_set_bit_cont(idx, c->idxmsk, X86_PMC_IDX_FIXED) {
588                 if (!__test_and_set_bit(idx, sched->state.used))
589                         goto done;
590         }
591
592         return false;
593
594 done:
595         sched->state.counter = idx;
596
597         if (c->overlap)
598                 perf_sched_save_state(sched);
599
600         return true;
601 }
602
603 static bool perf_sched_find_counter(struct perf_sched *sched)
604 {
605         while (!__perf_sched_find_counter(sched)) {
606                 if (!perf_sched_restore_state(sched))
607                         return false;
608         }
609
610         return true;
611 }
612
613 /*
614  * Go through all unassigned events and find the next one to schedule.
615  * Take events with the least weight first. Return true on success.
616  */
617 static bool perf_sched_next_event(struct perf_sched *sched)
618 {
619         struct event_constraint *c;
620
621         if (!sched->state.unassigned || !--sched->state.unassigned)
622                 return false;
623
624         do {
625                 /* next event */
626                 sched->state.event++;
627                 if (sched->state.event >= sched->max_events) {
628                         /* next weight */
629                         sched->state.event = 0;
630                         sched->state.weight++;
631                         if (sched->state.weight > sched->max_weight)
632                                 return false;
633                 }
634                 c = sched->constraints[sched->state.event];
635         } while (c->weight != sched->state.weight);
636
637         sched->state.counter = 0;       /* start with first counter */
638
639         return true;
640 }
641
642 /*
643  * Assign a counter for each event.
644  */
645 static int perf_assign_events(struct event_constraint **constraints, int n,
646                               int wmin, int wmax, int *assign)
647 {
648         struct perf_sched sched;
649
650         perf_sched_init(&sched, constraints, n, wmin, wmax);
651
652         do {
653                 if (!perf_sched_find_counter(&sched))
654                         break;  /* failed */
655                 if (assign)
656                         assign[sched.state.event] = sched.state.counter;
657         } while (perf_sched_next_event(&sched));
658
659         return sched.state.unassigned;
660 }
661
662 int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
663 {
664         struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
665         unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
666         int i, wmin, wmax, num = 0;
667         struct hw_perf_event *hwc;
668
669         bitmap_zero(used_mask, X86_PMC_IDX_MAX);
670
671         for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
672                 c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
673                 constraints[i] = c;
674                 wmin = min(wmin, c->weight);
675                 wmax = max(wmax, c->weight);
676         }
677
678         /*
679          * fastpath, try to reuse previous register
680          */
681         for (i = 0; i < n; i++) {
682                 hwc = &cpuc->event_list[i]->hw;
683                 c = constraints[i];
684
685                 /* never assigned */
686                 if (hwc->idx == -1)
687                         break;
688
689                 /* constraint still honored */
690                 if (!test_bit(hwc->idx, c->idxmsk))
691                         break;
692
693                 /* not already used */
694                 if (test_bit(hwc->idx, used_mask))
695                         break;
696
697                 __set_bit(hwc->idx, used_mask);
698                 if (assign)
699                         assign[i] = hwc->idx;
700         }
701
702         /* slow path */
703         if (i != n)
704                 num = perf_assign_events(constraints, n, wmin, wmax, assign);
705
706         /*
707          * scheduling failed or is just a simulation,
708          * free resources if necessary
709          */
710         if (!assign || num) {
711                 for (i = 0; i < n; i++) {
712                         if (x86_pmu.put_event_constraints)
713                                 x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
714                 }
715         }
716         return num ? -EINVAL : 0;
717 }
718
719 /*
720  * dogrp: true if we must collect sibling events (group)
721  * returns total number of events and error code
722  */
723 static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
724 {
725         struct perf_event *event;
726         int n, max_count;
727
728         max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;
729
730         /* current number of events already accepted */
731         n = cpuc->n_events;
732
733         if (is_x86_event(leader)) {
734                 if (n >= max_count)
735                         return -EINVAL;
736                 cpuc->event_list[n] = leader;
737                 n++;
738         }
739         if (!dogrp)
740                 return n;
741
742         list_for_each_entry(event, &leader->sibling_list, group_entry) {
743                 if (!is_x86_event(event) ||
744                     event->state <= PERF_EVENT_STATE_OFF)
745                         continue;
746
747                 if (n >= max_count)
748                         return -EINVAL;
749
750                 cpuc->event_list[n] = event;
751                 n++;
752         }
753         return n;
754 }
755
756 static inline void x86_assign_hw_event(struct perf_event *event,
757                                 struct cpu_hw_events *cpuc, int i)
758 {
759         struct hw_perf_event *hwc = &event->hw;
760
761         hwc->idx = cpuc->assign[i];
762         hwc->last_cpu = smp_processor_id();
763         hwc->last_tag = ++cpuc->tags[i];
764
765         if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
766                 hwc->config_base = 0;
767                 hwc->event_base = 0;
768         } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
769                 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
770                 hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - X86_PMC_IDX_FIXED);
771         } else {
772                 hwc->config_base = x86_pmu_config_addr(hwc->idx);
773                 hwc->event_base  = x86_pmu_event_addr(hwc->idx);
774         }
775 }
776
777 static inline int match_prev_assignment(struct hw_perf_event *hwc,
778                                         struct cpu_hw_events *cpuc,
779                                         int i)
780 {
781         return hwc->idx == cpuc->assign[i] &&
782                 hwc->last_cpu == smp_processor_id() &&
783                 hwc->last_tag == cpuc->tags[i];
784 }
785
786 static void x86_pmu_start(struct perf_event *event, int flags);
787
788 static void x86_pmu_enable(struct pmu *pmu)
789 {
790         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
791         struct perf_event *event;
792         struct hw_perf_event *hwc;
793         int i, added = cpuc->n_added;
794
795         if (!x86_pmu_initialized())
796                 return;
797
798         if (cpuc->enabled)
799                 return;
800
801         if (cpuc->n_added) {
802                 int n_running = cpuc->n_events - cpuc->n_added;
803                 /*
804                  * apply assignment obtained either from
805                  * hw_perf_group_sched_in() or x86_pmu_enable()
806                  *
807                  * step1: save events moving to new counters
808                  * step2: reprogram moved events into new counters
809                  */
810                 for (i = 0; i < n_running; i++) {
811                         event = cpuc->event_list[i];
812                         hwc = &event->hw;
813
814                         /*
815                          * we can avoid reprogramming counter if:
816                          * - assigned same counter as last time
817                          * - running on same CPU as last time
818                          * - no other event has used the counter since
819                          */
820                         if (hwc->idx == -1 ||
821                             match_prev_assignment(hwc, cpuc, i))
822                                 continue;
823
824                         /*
825                          * Ensure we don't accidentally enable a stopped
826                          * counter simply because we rescheduled.
827                          */
828                         if (hwc->state & PERF_HES_STOPPED)
829                                 hwc->state |= PERF_HES_ARCH;
830
831                         x86_pmu_stop(event, PERF_EF_UPDATE);
832                 }
833
834                 for (i = 0; i < cpuc->n_events; i++) {
835                         event = cpuc->event_list[i];
836                         hwc = &event->hw;
837
838                         if (!match_prev_assignment(hwc, cpuc, i))
839                                 x86_assign_hw_event(event, cpuc, i);
840                         else if (i < n_running)
841                                 continue;
842
843                         if (hwc->state & PERF_HES_ARCH)
844                                 continue;
845
846                         x86_pmu_start(event, PERF_EF_RELOAD);
847                 }
848                 cpuc->n_added = 0;
849                 perf_events_lapic_init();
850         }
851
852         cpuc->enabled = 1;
853         barrier();
854
855         x86_pmu.enable_all(added);
856 }
857
858 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
859
860 /*
861  * Set the next IRQ period, based on the hwc->period_left value.
862  * To be called with the event disabled in hw:
863  */
864 int x86_perf_event_set_period(struct perf_event *event)
865 {
866         struct hw_perf_event *hwc = &event->hw;
867         s64 left = local64_read(&hwc->period_left);
868         s64 period = hwc->sample_period;
869         int ret = 0, idx = hwc->idx;
870
871         if (idx == X86_PMC_IDX_FIXED_BTS)
872                 return 0;
873
874         /*
875          * If we are way outside a reasonable range then just skip forward:
876          */
877         if (unlikely(left <= -period)) {
878                 left = period;
879                 local64_set(&hwc->period_left, left);
880                 hwc->last_period = period;
881                 ret = 1;
882         }
883
884         if (unlikely(left <= 0)) {
885                 left += period;
886                 local64_set(&hwc->period_left, left);
887                 hwc->last_period = period;
888                 ret = 1;
889         }
890         /*
891          * Quirk: certain CPUs don't like it if just 1 hw_event is left:
892          */
893         if (unlikely(left < 2))
894                 left = 2;
895
896         if (left > x86_pmu.max_period)
897                 left = x86_pmu.max_period;
898
899         per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
900
901         /*
902          * The hw event starts counting from this event offset,
903          * mark it to be able to extract future deltas:
904          */
905         local64_set(&hwc->prev_count, (u64)-left);
906
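        /*
         * The counter is programmed with -left (masked to the counter
         * width), so it counts upwards and overflows after another
         * 'left' events, raising the PMI.
         */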
907         wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
908
909         /*
910          * Due to an erratum on certain CPUs we need
911          * a second write to be sure the register
912          * is updated properly
913          */
914         if (x86_pmu.perfctr_second_write) {
915                 wrmsrl(hwc->event_base,
916                         (u64)(-left) & x86_pmu.cntval_mask);
917         }
918
919         perf_event_update_userpage(event);
920
921         return ret;
922 }
923
924 void x86_pmu_enable_event(struct perf_event *event)
925 {
926         if (__this_cpu_read(cpu_hw_events.enabled))
927                 __x86_pmu_enable_event(&event->hw,
928                                        ARCH_PERFMON_EVENTSEL_ENABLE);
929 }
930
931 /*
932  * Add a single event to the PMU.
933  *
934  * The event is added to the group of enabled events
935  * but only if it can be scheduled with existing events.
936  */
937 static int x86_pmu_add(struct perf_event *event, int flags)
938 {
939         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
940         struct hw_perf_event *hwc;
941         int assign[X86_PMC_IDX_MAX];
942         int n, n0, ret;
943
944         hwc = &event->hw;
945
946         perf_pmu_disable(event->pmu);
947         n0 = cpuc->n_events;
948         ret = n = collect_events(cpuc, event, false);
949         if (ret < 0)
950                 goto out;
951
952         hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
953         if (!(flags & PERF_EF_START))
954                 hwc->state |= PERF_HES_ARCH;
955
956         /*
957          * If group events scheduling transaction was started,
958          * skip the schedulability test here; it will be performed
959          * at commit time (->commit_txn) as a whole
960          */
961         if (cpuc->group_flag & PERF_EVENT_TXN)
962                 goto done_collect;
963
964         ret = x86_pmu.schedule_events(cpuc, n, assign);
965         if (ret)
966                 goto out;
967         /*
968          * copy the new assignment now that we know it is possible;
969          * it will be used by hw_perf_enable()
970          */
971         memcpy(cpuc->assign, assign, n*sizeof(int));
972
973 done_collect:
974         cpuc->n_events = n;
975         cpuc->n_added += n - n0;
976         cpuc->n_txn += n - n0;
977
978         ret = 0;
979 out:
980         perf_pmu_enable(event->pmu);
981         return ret;
982 }
983
984 static void x86_pmu_start(struct perf_event *event, int flags)
985 {
986         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
987         int idx = event->hw.idx;
988
989         if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
990                 return;
991
992         if (WARN_ON_ONCE(idx == -1))
993                 return;
994
995         if (flags & PERF_EF_RELOAD) {
996                 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
997                 x86_perf_event_set_period(event);
998         }
999
1000         event->hw.state = 0;
1001
1002         cpuc->events[idx] = event;
1003         __set_bit(idx, cpuc->active_mask);
1004         __set_bit(idx, cpuc->running);
1005         x86_pmu.enable(event);
1006         perf_event_update_userpage(event);
1007 }
1008
1009 void perf_event_print_debug(void)
1010 {
1011         u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
1012         u64 pebs;
1013         struct cpu_hw_events *cpuc;
1014         unsigned long flags;
1015         int cpu, idx;
1016
1017         if (!x86_pmu.num_counters)
1018                 return;
1019
1020         local_irq_save(flags);
1021
1022         cpu = smp_processor_id();
1023         cpuc = &per_cpu(cpu_hw_events, cpu);
1024
1025         if (x86_pmu.version >= 2) {
1026                 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1027                 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1028                 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1029                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
1030                 rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
1031
1032                 pr_info("\n");
1033                 pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
1034                 pr_info("CPU#%d: status:     %016llx\n", cpu, status);
1035                 pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
1036                 pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
1037                 pr_info("CPU#%d: pebs:       %016llx\n", cpu, pebs);
1038         }
1039         pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);
1040
1041         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1042                 rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
1043                 rdmsrl(x86_pmu_event_addr(idx), pmc_count);
1044
1045                 prev_left = per_cpu(pmc_prev_left[idx], cpu);
1046
1047                 pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
1048                         cpu, idx, pmc_ctrl);
1049                 pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
1050                         cpu, idx, pmc_count);
1051                 pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
1052                         cpu, idx, prev_left);
1053         }
1054         for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
1055                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1056
1057                 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
1058                         cpu, idx, pmc_count);
1059         }
1060         local_irq_restore(flags);
1061 }
1062
1063 void x86_pmu_stop(struct perf_event *event, int flags)
1064 {
1065         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1066         struct hw_perf_event *hwc = &event->hw;
1067
1068         if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
1069                 x86_pmu.disable(event);
1070                 cpuc->events[hwc->idx] = NULL;
1071                 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
1072                 hwc->state |= PERF_HES_STOPPED;
1073         }
1074
1075         if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
1076                 /*
1077                  * Drain the remaining delta count out of an event
1078                  * that we are disabling:
1079                  */
1080                 x86_perf_event_update(event);
1081                 hwc->state |= PERF_HES_UPTODATE;
1082         }
1083 }
1084
1085 static void x86_pmu_del(struct perf_event *event, int flags)
1086 {
1087         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1088         int i;
1089
1090         /*
1091          * If we're called during a txn, we don't need to do anything.
1092          * The events never got scheduled and ->cancel_txn will truncate
1093          * the event_list.
1094          */
1095         if (cpuc->group_flag & PERF_EVENT_TXN)
1096                 return;
1097
1098         x86_pmu_stop(event, PERF_EF_UPDATE);
1099
1100         for (i = 0; i < cpuc->n_events; i++) {
1101                 if (event == cpuc->event_list[i]) {
1102
1103                         if (x86_pmu.put_event_constraints)
1104                                 x86_pmu.put_event_constraints(cpuc, event);
1105
1106                         while (++i < cpuc->n_events)
1107                                 cpuc->event_list[i-1] = cpuc->event_list[i];
1108
1109                         --cpuc->n_events;
1110                         break;
1111                 }
1112         }
1113         perf_event_update_userpage(event);
1114 }
1115
1116 int x86_pmu_handle_irq(struct pt_regs *regs)
1117 {
1118         struct perf_sample_data data;
1119         struct cpu_hw_events *cpuc;
1120         struct perf_event *event;
1121         int idx, handled = 0;
1122         u64 val;
1123
1124         perf_sample_data_init(&data, 0);
1125
1126         cpuc = &__get_cpu_var(cpu_hw_events);
1127
1128         /*
1129          * Some chipsets need to unmask the LVTPC in a particular spot
1130          * inside the nmi handler.  As a result, the unmasking was pushed
1131          * into all the nmi handlers.
1132          *
1133          * This generic handler doesn't seem to have any issues where the
1134          * unmasking occurs so it was left at the top.
1135          */
1136         apic_write(APIC_LVTPC, APIC_DM_NMI);
1137
1138         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1139                 if (!test_bit(idx, cpuc->active_mask)) {
1140                         /*
1141                          * Though we deactivated the counter, some CPUs
1142                          * might still deliver spurious interrupts that are
1143                          * still in flight. Catch them:
1144                          */
1145                         if (__test_and_clear_bit(idx, cpuc->running))
1146                                 handled++;
1147                         continue;
1148                 }
1149
1150                 event = cpuc->events[idx];
1151
1152                 val = x86_perf_event_update(event);
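                /*
                 * The counter was programmed with a negative value; as
                 * long as its top bit is still set it has not wrapped
                 * past zero yet, i.e. this counter did not overflow.
                 */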
1153                 if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
1154                         continue;
1155
1156                 /*
1157                  * event overflow
1158                  */
1159                 handled++;
1160                 data.period     = event->hw.last_period;
1161
1162                 if (!x86_perf_event_set_period(event))
1163                         continue;
1164
1165                 if (perf_event_overflow(event, &data, regs))
1166                         x86_pmu_stop(event, 0);
1167         }
1168
1169         if (handled)
1170                 inc_irq_stat(apic_perf_irqs);
1171
1172         return handled;
1173 }
1174
1175 void perf_events_lapic_init(void)
1176 {
1177         if (!x86_pmu.apic || !x86_pmu_initialized())
1178                 return;
1179
1180         /*
1181          * Always use NMI for PMU
1182          */
1183         apic_write(APIC_LVTPC, APIC_DM_NMI);
1184 }
1185
1186 static int __kprobes
1187 perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
1188 {
1189         if (!atomic_read(&active_events))
1190                 return NMI_DONE;
1191
1192         return x86_pmu.handle_irq(regs);
1193 }
1194
1195 struct event_constraint emptyconstraint;
1196 struct event_constraint unconstrained;
1197
1198 static int __cpuinit
1199 x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
1200 {
1201         unsigned int cpu = (long)hcpu;
1202         struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1203         int ret = NOTIFY_OK;
1204
1205         switch (action & ~CPU_TASKS_FROZEN) {
1206         case CPU_UP_PREPARE:
1207                 cpuc->kfree_on_online = NULL;
1208                 if (x86_pmu.cpu_prepare)
1209                         ret = x86_pmu.cpu_prepare(cpu);
1210                 break;
1211
1212         case CPU_STARTING:
1213                 if (x86_pmu.cpu_starting)
1214                         x86_pmu.cpu_starting(cpu);
1215                 break;
1216
1217         case CPU_ONLINE:
1218                 kfree(cpuc->kfree_on_online);
1219                 break;
1220
1221         case CPU_DYING:
1222                 if (x86_pmu.cpu_dying)
1223                         x86_pmu.cpu_dying(cpu);
1224                 break;
1225
1226         case CPU_UP_CANCELED:
1227         case CPU_DEAD:
1228                 if (x86_pmu.cpu_dead)
1229                         x86_pmu.cpu_dead(cpu);
1230                 break;
1231
1232         default:
1233                 break;
1234         }
1235
1236         return ret;
1237 }
1238
1239 static void __init pmu_check_apic(void)
1240 {
1241         if (cpu_has_apic)
1242                 return;
1243
1244         x86_pmu.apic = 0;
1245         pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
1246         pr_info("no hardware sampling interrupt available.\n");
1247 }
1248
1249 static int __init init_hw_perf_events(void)
1250 {
1251         struct x86_pmu_quirk *quirk;
1252         struct event_constraint *c;
1253         int err;
1254
1255         pr_info("Performance Events: ");
1256
1257         switch (boot_cpu_data.x86_vendor) {
1258         case X86_VENDOR_INTEL:
1259                 err = intel_pmu_init();
1260                 break;
1261         case X86_VENDOR_AMD:
1262                 err = amd_pmu_init();
1263                 break;
1264         default:
1265                 return 0;
1266         }
1267         if (err != 0) {
1268                 pr_cont("no PMU driver, software events only.\n");
1269                 return 0;
1270         }
1271
1272         pmu_check_apic();
1273
1274         /* sanity check that the hardware exists or is emulated */
1275         if (!check_hw_exists())
1276                 return 0;
1277
1278         pr_cont("%s PMU driver.\n", x86_pmu.name);
1279
1280         for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
1281                 quirk->func();
1282
1283         if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
1284                 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
1285                      x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
1286                 x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
1287         }
1288         x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
1289
1290         if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
1291                 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
1292                      x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
1293                 x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
1294         }
1295
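        /*
         * Add the fixed counters: intel_ctrl follows the layout of
         * MSR_CORE_PERF_GLOBAL_CTRL, with one enable bit per generic
         * counter in the low bits and one bit per fixed counter
         * starting at X86_PMC_IDX_FIXED.
         */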
1296         x86_pmu.intel_ctrl |=
1297                 ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
1298
1299         perf_events_lapic_init();
1300         register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
1301
1302         unconstrained = (struct event_constraint)
1303                 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
1304                                    0, x86_pmu.num_counters, 0);
1305
1306         if (x86_pmu.event_constraints) {
1307                 /*
1308                  * event on fixed counter2 (REF_CYCLES) only works on this
1309                  * counter, so do not extend mask to generic counters
1310                  */
1311                 for_each_event_constraint(c, x86_pmu.event_constraints) {
1312                         if (c->cmask != X86_RAW_EVENT_MASK
1313                             || c->idxmsk64 == X86_PMC_MSK_FIXED_REF_CYCLES) {
1314                                 continue;
1315                         }
1316
1317                         c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
1318                         c->weight += x86_pmu.num_counters;
1319                 }
1320         }
1321
1322         pr_info("... version:                %d\n",     x86_pmu.version);
1323         pr_info("... bit width:              %d\n",     x86_pmu.cntval_bits);
1324         pr_info("... generic registers:      %d\n",     x86_pmu.num_counters);
1325         pr_info("... value mask:             %016Lx\n", x86_pmu.cntval_mask);
1326         pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
1327         pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_counters_fixed);
1328         pr_info("... event mask:             %016Lx\n", x86_pmu.intel_ctrl);
1329
1330         perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
1331         perf_cpu_notifier(x86_pmu_notifier);
1332
1333         return 0;
1334 }
1335 early_initcall(init_hw_perf_events);
1336
1337 static inline void x86_pmu_read(struct perf_event *event)
1338 {
1339         x86_perf_event_update(event);
1340 }
1341
1342 /*
1343  * Start group events scheduling transaction
1344  * Set the flag to make pmu::enable() not perform the
1345  * schedulability test; it will be performed at commit time
1346  */
1347 static void x86_pmu_start_txn(struct pmu *pmu)
1348 {
1349         perf_pmu_disable(pmu);
1350         __this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
1351         __this_cpu_write(cpu_hw_events.n_txn, 0);
1352 }
1353
1354 /*
1355  * Stop group events scheduling transaction
1356  * Clear the flag and pmu::enable() will perform the
1357  * schedulability test.
1358  */
1359 static void x86_pmu_cancel_txn(struct pmu *pmu)
1360 {
1361         __this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
1362         /*
1363          * Truncate the collected events.
1364          */
1365         __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
1366         __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
1367         perf_pmu_enable(pmu);
1368 }
1369
1370 /*
1371  * Commit group events scheduling transaction
1372  * Perform the group schedulability test as a whole
1373  * Return 0 on success
1374  */
1375 static int x86_pmu_commit_txn(struct pmu *pmu)
1376 {
1377         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1378         int assign[X86_PMC_IDX_MAX];
1379         int n, ret;
1380
1381         n = cpuc->n_events;
1382
1383         if (!x86_pmu_initialized())
1384                 return -EAGAIN;
1385
1386         ret = x86_pmu.schedule_events(cpuc, n, assign);
1387         if (ret)
1388                 return ret;
1389
1390         /*
1391          * copy the new assignment now that we know it is possible;
1392          * it will be used by hw_perf_enable()
1393          */
1394         memcpy(cpuc->assign, assign, n*sizeof(int));
1395
1396         cpuc->group_flag &= ~PERF_EVENT_TXN;
1397         perf_pmu_enable(pmu);
1398         return 0;
1399 }
1400 /*
1401  * a fake_cpuc is used to validate event groups. Due to
1402  * the extra reg logic, we need to also allocate a fake
1403  * per_core and per_cpu structure. Otherwise, group events
1404  * using extra reg may conflict without the kernel being
1405  * able to catch this when the last event gets added to
1406  * the group.
1407  */
1408 static void free_fake_cpuc(struct cpu_hw_events *cpuc)
1409 {
1410         kfree(cpuc->shared_regs);
1411         kfree(cpuc);
1412 }
1413
1414 static struct cpu_hw_events *allocate_fake_cpuc(void)
1415 {
1416         struct cpu_hw_events *cpuc;
1417         int cpu = raw_smp_processor_id();
1418
1419         cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
1420         if (!cpuc)
1421                 return ERR_PTR(-ENOMEM);
1422
1423         /* only needed if we have extra_regs */
1424         if (x86_pmu.extra_regs) {
1425                 cpuc->shared_regs = allocate_shared_regs(cpu);
1426                 if (!cpuc->shared_regs)
1427                         goto error;
1428         }
1429         return cpuc;
1430 error:
1431         free_fake_cpuc(cpuc);
1432         return ERR_PTR(-ENOMEM);
1433 }
1434
1435 /*
1436  * validate that we can schedule this event
1437  */
1438 static int validate_event(struct perf_event *event)
1439 {
1440         struct cpu_hw_events *fake_cpuc;
1441         struct event_constraint *c;
1442         int ret = 0;
1443
1444         fake_cpuc = allocate_fake_cpuc();
1445         if (IS_ERR(fake_cpuc))
1446                 return PTR_ERR(fake_cpuc);
1447
1448         c = x86_pmu.get_event_constraints(fake_cpuc, event);
1449
1450         if (!c || !c->weight)
1451                 ret = -EINVAL;
1452
1453         if (x86_pmu.put_event_constraints)
1454                 x86_pmu.put_event_constraints(fake_cpuc, event);
1455
1456         free_fake_cpuc(fake_cpuc);
1457
1458         return ret;
1459 }
1460
1461 /*
1462  * validate a single event group
1463  *
1464  * validation includes:
1465  *      - check that events are compatible with each other
1466  *      - events do not compete for the same counter
1467  *      - number of events <= number of counters
1468  *
1469  * validation ensures the group can be loaded onto the
1470  * PMU if it was the only group available.
1471  */
1472 static int validate_group(struct perf_event *event)
1473 {
1474         struct perf_event *leader = event->group_leader;
1475         struct cpu_hw_events *fake_cpuc;
1476         int ret = -EINVAL, n;
1477
1478         fake_cpuc = allocate_fake_cpuc();
1479         if (IS_ERR(fake_cpuc))
1480                 return PTR_ERR(fake_cpuc);
1481         /*
1482          * the event is not yet connected with its
1483          * siblings, therefore we must first collect
1484          * existing siblings, then add the new event
1485          * before we can simulate the scheduling
1486          */
1487         n = collect_events(fake_cpuc, leader, true);
1488         if (n < 0)
1489                 goto out;
1490
1491         fake_cpuc->n_events = n;
1492         n = collect_events(fake_cpuc, event, false);
1493         if (n < 0)
1494                 goto out;
1495
1496         fake_cpuc->n_events = n;
1497
1498         ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
1499
1500 out:
1501         free_fake_cpuc(fake_cpuc);
1502         return ret;
1503 }
1504
1505 static int x86_pmu_event_init(struct perf_event *event)
1506 {
1507         struct pmu *tmp;
1508         int err;
1509
1510         switch (event->attr.type) {
1511         case PERF_TYPE_RAW:
1512         case PERF_TYPE_HARDWARE:
1513         case PERF_TYPE_HW_CACHE:
1514                 break;
1515
1516         default:
1517                 return -ENOENT;
1518         }
1519
1520         err = __x86_pmu_event_init(event);
1521         if (!err) {
1522                 /*
1523                  * we temporarily connect the event to its pmu
1524                  * such that validate_group() can classify
1525                  * it as an x86 event using is_x86_event()
1526                  */
1527                 tmp = event->pmu;
1528                 event->pmu = &pmu;
1529
1530                 if (event->group_leader != event)
1531                         err = validate_group(event);
1532                 else
1533                         err = validate_event(event);
1534
1535                 event->pmu = tmp;
1536         }
1537         if (err) {
1538                 if (event->destroy)
1539                         event->destroy(event);
1540         }
1541
1542         return err;
1543 }
1544
1545 static struct pmu pmu = {
1546         .pmu_enable     = x86_pmu_enable,
1547         .pmu_disable    = x86_pmu_disable,
1548
1549         .event_init     = x86_pmu_event_init,
1550
1551         .add            = x86_pmu_add,
1552         .del            = x86_pmu_del,
1553         .start          = x86_pmu_start,
1554         .stop           = x86_pmu_stop,
1555         .read           = x86_pmu_read,
1556
1557         .start_txn      = x86_pmu_start_txn,
1558         .cancel_txn     = x86_pmu_cancel_txn,
1559         .commit_txn     = x86_pmu_commit_txn,
1560 };
1561
1562 /*
1563  * callchain support
1564  */
1565
1566 static void
1567 backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
1568 {
1569         /* Ignore warnings */
1570 }
1571
1572 static void backtrace_warning(void *data, char *msg)
1573 {
1574         /* Ignore warnings */
1575 }
1576
1577 static int backtrace_stack(void *data, char *name)
1578 {
1579         return 0;
1580 }
1581
1582 static void backtrace_address(void *data, unsigned long addr, int reliable)
1583 {
1584         struct perf_callchain_entry *entry = data;
1585
1586         perf_callchain_store(entry, addr);
1587 }
1588
1589 static const struct stacktrace_ops backtrace_ops = {
1590         .warning                = backtrace_warning,
1591         .warning_symbol         = backtrace_warning_symbol,
1592         .stack                  = backtrace_stack,
1593         .address                = backtrace_address,
1594         .walk_stack             = print_context_stack_bp,
1595 };
1596
1597 void
1598 perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
1599 {
1600         if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
1601                 /* TODO: We don't support guest os callchain now */
1602                 return;
1603         }
1604
1605         perf_callchain_store(entry, regs->ip);
1606
1607         dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
1608 }
1609
1610 #ifdef CONFIG_COMPAT
1611 static inline int
1612 perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
1613 {
1614         /* 32-bit process in 64-bit kernel. */
1615         struct stack_frame_ia32 frame;
1616         const void __user *fp;
1617
1618         if (!test_thread_flag(TIF_IA32))
1619                 return 0;
1620
1621         fp = compat_ptr(regs->bp);
1622         while (entry->nr < PERF_MAX_STACK_DEPTH) {
1623                 unsigned long bytes;
1624                 frame.next_frame     = 0;
1625                 frame.return_address = 0;
1626
1627                 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
1628                 if (bytes != sizeof(frame))
1629                         break;
1630
1631                 if (fp < compat_ptr(regs->sp))
1632                         break;
1633
1634                 perf_callchain_store(entry, frame.return_address);
1635                 fp = compat_ptr(frame.next_frame);
1636         }
1637         return 1;
1638 }
1639 #else
1640 static inline int
1641 perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
1642 {
1643         return 0;
1644 }
1645 #endif
1646
1647 void
1648 perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
1649 {
1650         struct stack_frame frame;
1651         const void __user *fp;
1652
1653         if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
1654                 /* TODO: We don't support guest os callchain now */
1655                 return;
1656         }
1657
1658         fp = (void __user *)regs->bp;
1659
1660         perf_callchain_store(entry, regs->ip);
1661
1662         if (!current->mm)
1663                 return;
1664
1665         if (perf_callchain_user32(regs, entry))
1666                 return;
1667
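        /*
         * Walk the user stack by following saved frame pointers: each
         * struct stack_frame holds the caller's frame pointer and the
         * return address.  Stop when the copy faults or when the frame
         * pointer points below the current stack pointer.
         */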
1668         while (entry->nr < PERF_MAX_STACK_DEPTH) {
1669                 unsigned long bytes;
1670                 frame.next_frame     = NULL;
1671                 frame.return_address = 0;
1672
1673                 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
1674                 if (bytes != sizeof(frame))
1675                         break;
1676
1677                 if ((unsigned long)fp < regs->sp)
1678                         break;
1679
1680                 perf_callchain_store(entry, frame.return_address);
1681                 fp = frame.next_frame;
1682         }
1683 }
1684
1685 unsigned long perf_instruction_pointer(struct pt_regs *regs)
1686 {
1687         unsigned long ip;
1688
1689         if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
1690                 ip = perf_guest_cbs->get_guest_ip();
1691         else
1692                 ip = instruction_pointer(regs);
1693
1694         return ip;
1695 }
1696
1697 unsigned long perf_misc_flags(struct pt_regs *regs)
1698 {
1699         int misc = 0;
1700
1701         if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
1702                 if (perf_guest_cbs->is_user_mode())
1703                         misc |= PERF_RECORD_MISC_GUEST_USER;
1704                 else
1705                         misc |= PERF_RECORD_MISC_GUEST_KERNEL;
1706         } else {
1707                 if (user_mode(regs))
1708                         misc |= PERF_RECORD_MISC_USER;
1709                 else
1710                         misc |= PERF_RECORD_MISC_KERNEL;
1711         }
1712
1713         if (regs->flags & PERF_EFLAGS_EXACT)
1714                 misc |= PERF_RECORD_MISC_EXACT_IP;
1715
1716         return misc;
1717 }
1718
1719 void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
1720 {
1721         cap->version            = x86_pmu.version;
1722         cap->num_counters_gp    = x86_pmu.num_counters;
1723         cap->num_counters_fixed = x86_pmu.num_counters_fixed;
1724         cap->bit_width_gp       = x86_pmu.cntval_bits;
1725         cap->bit_width_fixed    = x86_pmu.cntval_bits;
1726         cap->events_mask        = (unsigned int)x86_pmu.events_maskl;
1727         cap->events_mask_len    = x86_pmu.events_mask_len;
1728 }
1729 EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability);