/*
 * ARMv5 [xscale] Performance counter handling code.
 *
 * Copyright (C) 2010, ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * Based on the previous xscale OProfile code.
 *
 * There are two variants of the xscale PMU that we support:
 *	- xscale1pmu: 2 event counters and a cycle counter
 *	- xscale2pmu: 4 event counters and a cycle counter
 * The two variants share event definitions, but have different
 * PMU structures.
 */
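/*
 * Illustrative usage (not part of this driver): once one of these PMUs
 * is registered, the events defined below are reachable from userspace
 * through the perf tool, either via the generic mappings or as raw
 * 8-bit event numbers, e.g.:
 *
 *	perf stat -e cycles,branch-misses <cmd>
 *	perf stat -e r0b <cmd>	(raw XSCALE_PERFCTR_DCACHE_MISS)
 */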
#ifdef CONFIG_CPU_XSCALE
enum xscale_perf_types {
	XSCALE_PERFCTR_ICACHE_MISS		= 0x00,
	XSCALE_PERFCTR_ICACHE_NO_DELIVER	= 0x01,
	XSCALE_PERFCTR_DATA_STALL		= 0x02,
	XSCALE_PERFCTR_ITLB_MISS		= 0x03,
	XSCALE_PERFCTR_DTLB_MISS		= 0x04,
	XSCALE_PERFCTR_BRANCH			= 0x05,
	XSCALE_PERFCTR_BRANCH_MISS		= 0x06,
	XSCALE_PERFCTR_INSTRUCTION		= 0x07,
	XSCALE_PERFCTR_DCACHE_FULL_STALL	= 0x08,
	XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG	= 0x09,
	XSCALE_PERFCTR_DCACHE_ACCESS		= 0x0A,
	XSCALE_PERFCTR_DCACHE_MISS		= 0x0B,
	XSCALE_PERFCTR_DCACHE_WRITE_BACK	= 0x0C,
	XSCALE_PERFCTR_PC_CHANGED		= 0x0D,
	XSCALE_PERFCTR_BCU_REQUEST		= 0x10,
	XSCALE_PERFCTR_BCU_FULL			= 0x11,
	XSCALE_PERFCTR_BCU_DRAIN		= 0x12,
	XSCALE_PERFCTR_BCU_ECC_NO_ELOG		= 0x14,
	XSCALE_PERFCTR_BCU_1_BIT_ERR		= 0x15,
	XSCALE_PERFCTR_RMW			= 0x16,
	/* XSCALE_PERFCTR_CCNT is not hardware defined */
	XSCALE_PERFCTR_CCNT			= 0xFE,
	XSCALE_PERFCTR_UNUSED			= 0xFF,
};
enum xscale_counters {
	XSCALE_CYCLE_COUNTER	= 0,
	XSCALE_COUNTER0,
	XSCALE_COUNTER1,
	XSCALE_COUNTER2,
	XSCALE_COUNTER3,
};
static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= XSCALE_PERFCTR_CCNT,
	[PERF_COUNT_HW_INSTRUCTIONS]		= XSCALE_PERFCTR_INSTRUCTION,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]		= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= XSCALE_PERFCTR_BRANCH,
	[PERF_COUNT_HW_BRANCH_MISSES]		= XSCALE_PERFCTR_BRANCH_MISS,
	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= XSCALE_PERFCTR_ICACHE_NO_DELIVER,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
};
static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					   [PERF_COUNT_HW_CACHE_OP_MAX]
					   [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ICACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ICACHE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};
#define XSCALE_PMU_ENABLE	0x001
#define XSCALE_PMN_RESET	0x002
#define XSCALE_CCNT_RESET	0x004
#define XSCALE_PMU_RESET	(XSCALE_CCNT_RESET | XSCALE_PMN_RESET)
#define XSCALE_PMU_CNT64	0x008

#define XSCALE1_OVERFLOWED_MASK	0x700
#define XSCALE1_CCOUNT_OVERFLOW	0x400
#define XSCALE1_COUNT0_OVERFLOW	0x100
#define XSCALE1_COUNT1_OVERFLOW	0x200
#define XSCALE1_CCOUNT_INT_EN	0x040
#define XSCALE1_COUNT0_INT_EN	0x010
#define XSCALE1_COUNT1_INT_EN	0x020
#define XSCALE1_COUNT0_EVT_SHFT	12
#define XSCALE1_COUNT0_EVT_MASK	(0xff << XSCALE1_COUNT0_EVT_SHFT)
#define XSCALE1_COUNT1_EVT_SHFT	20
#define XSCALE1_COUNT1_EVT_MASK	(0xff << XSCALE1_COUNT1_EVT_SHFT)
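
/*
 * Summary of the xscale1 PMNC bit layout implied by the masks above
 * (the Intel XScale developer's manual is the authoritative reference):
 *
 *	[31:28]	write-as-0
 *	[27:20]	event type for counter 1	(XSCALE1_COUNT1_EVT_*)
 *	[19:12]	event type for counter 0	(XSCALE1_COUNT0_EVT_*)
 *	[10:8]	overflow flags: CCNT, counter 1, counter 0
 *	[6:4]	interrupt enables: CCNT, counter 1, counter 0
 *	[3]	64-bit cycle counting		(XSCALE_PMU_CNT64)
 *	[2]	cycle counter reset		(XSCALE_CCNT_RESET)
 *	[1]	event counter reset		(XSCALE_PMN_RESET)
 *	[0]	enable				(XSCALE_PMU_ENABLE)
 */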

static inline u32
xscale1pmu_read_pmnc(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
	return val;
}

static inline void
xscale1pmu_write_pmnc(u32 val)
{
	/* upper 4bits and 7, 11 are write-as-0 */
	val &= 0xffff77f;
	asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
}

static inline int
xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
				    enum xscale_counters counter)
{
	int ret = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		ret = pmnc & XSCALE1_CCOUNT_OVERFLOW;
		break;
	case XSCALE_COUNTER0:
		ret = pmnc & XSCALE1_COUNT0_OVERFLOW;
		break;
	case XSCALE_COUNTER1:
		ret = pmnc & XSCALE1_COUNT1_OVERFLOW;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
	}

	return ret;
}

static irqreturn_t
xscale1pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long pmnc;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	/*
	 * NOTE: there's an A stepping erratum that states if an overflow
	 *       bit already exists and another occurs, the previous
	 *       Overflow bit gets cleared. There's no workaround.
	 *       Fixed in B stepping or later.
	 */
	pmnc = xscale1pmu_read_pmnc();

	/*
	 * Write the value back to clear the overflow flags. Overflow
	 * flags remain in pmnc for use below. We also disable the PMU
	 * while we process the interrupt.
	 */
	xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

	if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
		return IRQ_NONE;

	regs = get_irq_regs();

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx, 1);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(hwc, idx);
	}

	irq_work_run();

	/*
	 * Re-enable the PMU.
	 */
	pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(pmnc);

	return IRQ_HANDLED;
}

static void
xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long val, mask, evt, flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		mask = 0;
		evt = XSCALE1_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		mask = XSCALE1_COUNT0_EVT_MASK;
		evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) |
			XSCALE1_COUNT0_INT_EN;
		break;
	case XSCALE_COUNTER1:
		mask = XSCALE1_COUNT1_EVT_MASK;
		evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) |
			XSCALE1_COUNT1_INT_EN;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~mask;
	val |= evt;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void
xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long val, mask, evt, flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		mask = XSCALE1_CCOUNT_INT_EN;
		evt = 0;
		break;
	case XSCALE_COUNTER0:
		mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK;
		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK;
		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~mask;
	val |= evt;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int
xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
			 struct hw_perf_event *event)
{
	if (XSCALE_PERFCTR_CCNT == event->config_base) {
		if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return XSCALE_CYCLE_COUNTER;
	}

	if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask))
		return XSCALE_COUNTER1;

	if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask))
		return XSCALE_COUNTER0;

	return -EAGAIN;
}
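
/*
 * Note on the allocation above: the cycle counter is reserved for CCNT
 * events only, and the event counters are tried counter 1 first, then
 * counter 0. -EAGAIN tells the core perf code that no suitable counter
 * is currently free.
 */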

static void
xscale1pmu_start(void)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val |= XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void
xscale1pmu_stop(void)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static inline u32
xscale1pmu_read_counter(int counter)
{
	u32 val = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
		break;
	}

	return val;
}

static inline void
xscale1pmu_write_counter(int counter, u32 val)
{
	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
		break;
	}
}

static int xscale_map_event(struct perf_event *event)
{
	return map_cpu_event(event, &xscale_perf_map,
			     &xscale_perf_cache_map, 0xFF);
}
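
/*
 * Both PMU variants share this mapping. The final 0xFF argument is the
 * raw event mask: XScale event numbers are 8 bits wide (matching the
 * *_EVT_MASK definitions above), so raw events requested from userspace
 * are masked to 8 bits before being programmed.
 */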

static struct arm_pmu xscale1pmu = {
	.id		= ARM_PERF_PMU_ID_XSCALE1,
	.name		= "xscale1",
	.handle_irq	= xscale1pmu_handle_irq,
	.enable		= xscale1pmu_enable_event,
	.disable	= xscale1pmu_disable_event,
	.read_counter	= xscale1pmu_read_counter,
	.write_counter	= xscale1pmu_write_counter,
	.get_event_idx	= xscale1pmu_get_event_idx,
	.start		= xscale1pmu_start,
	.stop		= xscale1pmu_stop,
	.map_event	= xscale_map_event,
	.num_events	= 3,
	.max_period	= (1LLU << 32) - 1,
};

static struct arm_pmu *__init xscale1pmu_init(void)
{
	return &xscale1pmu;
}

#define XSCALE2_OVERFLOWED_MASK	0x01f
#define XSCALE2_CCOUNT_OVERFLOW	0x001
#define XSCALE2_COUNT0_OVERFLOW	0x002
#define XSCALE2_COUNT1_OVERFLOW	0x004
#define XSCALE2_COUNT2_OVERFLOW	0x008
#define XSCALE2_COUNT3_OVERFLOW	0x010
#define XSCALE2_CCOUNT_INT_EN	0x001
#define XSCALE2_COUNT0_INT_EN	0x002
#define XSCALE2_COUNT1_INT_EN	0x004
#define XSCALE2_COUNT2_INT_EN	0x008
#define XSCALE2_COUNT3_INT_EN	0x010
#define XSCALE2_COUNT0_EVT_SHFT	0
#define XSCALE2_COUNT0_EVT_MASK	(0xff << XSCALE2_COUNT0_EVT_SHFT)
#define XSCALE2_COUNT1_EVT_SHFT	8
#define XSCALE2_COUNT1_EVT_MASK	(0xff << XSCALE2_COUNT1_EVT_SHFT)
#define XSCALE2_COUNT2_EVT_SHFT	16
#define XSCALE2_COUNT2_EVT_MASK	(0xff << XSCALE2_COUNT2_EVT_SHFT)
#define XSCALE2_COUNT3_EVT_SHFT	24
#define XSCALE2_COUNT3_EVT_MASK	(0xff << XSCALE2_COUNT3_EVT_SHFT)
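
/*
 * Where xscale1 packs everything into PMNC, xscale2 spreads the same
 * controls across separate CP14 registers (see the accessors below):
 * EVTSEL holds four 8-bit event fields at the shifts above, while
 * INTEN (interrupt enable) and FLAG (overflow status) use bit 0 for
 * the cycle counter and bits 1-4 for counters 0-3.
 */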

static inline u32
xscale2pmu_read_pmnc(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
	/* bits 1-2 and 4-23 are read-unpredictable */
	return val & 0xff000009;
}

static inline void
xscale2pmu_write_pmnc(u32 val)
{
	/* bits 4-23 are write-as-0, 24-31 are write ignored */
	val &= 0xf000000f;
	asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
}

static inline u32
xscale2pmu_read_overflow_flags(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val));
	return val;
}

static inline void
xscale2pmu_write_overflow_flags(u32 val)
{
	asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val));
}

static inline u32
xscale2pmu_read_event_select(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val));
	return val;
}

static inline void
xscale2pmu_write_event_select(u32 val)
{
	asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r"(val));
}

static inline u32
xscale2pmu_read_int_enable(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val));
	return val;
}

static void
xscale2pmu_write_int_enable(u32 val)
{
	asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val));
}
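
/*
 * Summary of the xscale2 CP14 register encodings used by the accessors
 * above and the counter read/write routines further down:
 *
 *	PMNC	c0, c1, 0	INTEN	c4, c1, 0
 *	CCNT	c1, c1, 0	FLAG	c5, c1, 0
 *	PMN0-3	c0-c3, c2, 0	EVTSEL	c8, c1, 0
 */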

static inline int
xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
				    enum xscale_counters counter)
{
	int ret = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		ret = of_flags & XSCALE2_CCOUNT_OVERFLOW;
		break;
	case XSCALE_COUNTER0:
		ret = of_flags & XSCALE2_COUNT0_OVERFLOW;
		break;
	case XSCALE_COUNTER1:
		ret = of_flags & XSCALE2_COUNT1_OVERFLOW;
		break;
	case XSCALE_COUNTER2:
		ret = of_flags & XSCALE2_COUNT2_OVERFLOW;
		break;
	case XSCALE_COUNTER3:
		ret = of_flags & XSCALE2_COUNT3_OVERFLOW;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
	}

	return ret;
}

static irqreturn_t
xscale2pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long pmnc, of_flags;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	/* Disable the PMU. */
	pmnc = xscale2pmu_read_pmnc();
	xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

	/* Check the overflow flag register. */
	of_flags = xscale2pmu_read_overflow_flags();
	if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
		return IRQ_NONE;

	/* Clear the overflow bits. */
	xscale2pmu_write_overflow_flags(of_flags);

	regs = get_irq_regs();

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx, 1);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(hwc, idx);
	}

	irq_work_run();

	/*
	 * Re-enable the PMU.
	 */
	pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(pmnc);

	return IRQ_HANDLED;
}

static void
xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags, ien, evtsel;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	ien = xscale2pmu_read_int_enable();
	evtsel = xscale2pmu_read_event_select();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		ien |= XSCALE2_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		ien |= XSCALE2_COUNT0_INT_EN;
		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		ien |= XSCALE2_COUNT1_INT_EN;
		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
		break;
	case XSCALE_COUNTER2:
		ien |= XSCALE2_COUNT2_INT_EN;
		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
		break;
	case XSCALE_COUNTER3:
		ien |= XSCALE2_COUNT3_INT_EN;
		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	xscale2pmu_write_event_select(evtsel);
	xscale2pmu_write_int_enable(ien);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void
xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags, ien, evtsel;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	ien = xscale2pmu_read_int_enable();
	evtsel = xscale2pmu_read_event_select();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		ien &= ~XSCALE2_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		ien &= ~XSCALE2_COUNT0_INT_EN;
		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		ien &= ~XSCALE2_COUNT1_INT_EN;
		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
		break;
	case XSCALE_COUNTER2:
		ien &= ~XSCALE2_COUNT2_INT_EN;
		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
		break;
	case XSCALE_COUNTER3:
		ien &= ~XSCALE2_COUNT3_INT_EN;
		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	xscale2pmu_write_event_select(evtsel);
	xscale2pmu_write_int_enable(ien);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int
xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
			 struct hw_perf_event *event)
{
	int idx = xscale1pmu_get_event_idx(cpuc, event);
	if (idx >= 0)
		goto out;

	if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask))
		idx = XSCALE_COUNTER3;
	else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask))
		idx = XSCALE_COUNTER2;
out:
	return idx;
}

static void
xscale2pmu_start(void)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
	val |= XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void
xscale2pmu_stop(void)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale2pmu_read_pmnc();
	val &= ~XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static inline u32
xscale2pmu_read_counter(int counter)
{
	u32 val = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER2:
		asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER3:
		asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
		break;
	}

	return val;
}

static inline void
xscale2pmu_write_counter(int counter, u32 val)
{
	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER2:
		asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER3:
		asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
		break;
	}
}

static struct arm_pmu xscale2pmu = {
	.id		= ARM_PERF_PMU_ID_XSCALE2,
	.name		= "xscale2",
	.handle_irq	= xscale2pmu_handle_irq,
	.enable		= xscale2pmu_enable_event,
	.disable	= xscale2pmu_disable_event,
	.read_counter	= xscale2pmu_read_counter,
	.write_counter	= xscale2pmu_write_counter,
	.get_event_idx	= xscale2pmu_get_event_idx,
	.start		= xscale2pmu_start,
	.stop		= xscale2pmu_stop,
	.map_event	= xscale_map_event,
	.num_events	= 5,
	.max_period	= (1LLU << 32) - 1,
};

static struct arm_pmu *__init xscale2pmu_init(void)
{
	return &xscale2pmu;
}
#else
static struct arm_pmu *__init xscale1pmu_init(void)
{
	return NULL;
}

static struct arm_pmu *__init xscale2pmu_init(void)
{
	return NULL;
}
#endif	/* CONFIG_CPU_XSCALE */