/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>

/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned);

/*
 * Number of pinned task breakpoints in a cpu:
 * task_bp_pinned[n] is the number of tasks having n+1 breakpoints pinned
 */
static DEFINE_PER_CPU(unsigned int, task_bp_pinned[HBP_NUM]);

/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible);

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

/*
 * Report the maximum number of pinned breakpoints a task
 * has in this cpu
 */
static unsigned int max_task_bp_pinned(int cpu)
{
	int i;
	unsigned int *tsk_pinned = per_cpu(task_bp_pinned, cpu);

	for (i = HBP_NUM - 1; i >= 0; i--) {
		if (tsk_pinned[i] > 0)
			return i + 1;
	}

	return 0;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
{
	if (cpu >= 0) {
		slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu);
		slots->pinned += max_task_bp_pinned(cpu);
		slots->flexible = per_cpu(nr_bp_flexible, cpu);

		return;
	}

	for_each_online_cpu(cpu) {
		unsigned int nr;

		nr = per_cpu(nr_cpu_bp_pinned, cpu);
		nr += max_task_bp_pinned(cpu);

		if (nr > slots->pinned)
			slots->pinned = nr;

		nr = per_cpu(nr_bp_flexible, cpu);

		if (nr > slots->flexible)
			slots->flexible = nr;
	}
}

/*
 * Add/remove a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
{
	int count = 0;
	struct perf_event *bp;
	struct perf_event_context *ctx = tsk->perf_event_ctxp;
	unsigned int *task_bp_pinned;
	struct list_head *list;
	unsigned long flags;

	if (WARN_ONCE(!ctx, "No perf context for this task"))
		return;

	list = &ctx->event_list;

	spin_lock_irqsave(&ctx->lock, flags);

	/*
	 * The current breakpoint counter is not included in the list
	 * at the open() callback time
	 */
	list_for_each_entry(bp, list, event_entry) {
		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
			count++;
	}

	spin_unlock_irqrestore(&ctx->lock, flags);

	if (WARN_ONCE(count < 0, "No breakpoint counter found in the counter list"))
		return;

	task_bp_pinned = per_cpu(task_bp_pinned, cpu);
	if (enable) {
		task_bp_pinned[count]++;
		if (count > 0)
			task_bp_pinned[count-1]--;
	} else {
		task_bp_pinned[count]--;
		if (count > 0)
			task_bp_pinned[count-1]++;
	}
}

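/*
 * For instance (illustrative numbers only): if a task already owns two
 * breakpoint counters when a third one is opened, the list walk above
 * finds count == 2 (the new counter is not listed yet). Enabling then
 * does task_bp_pinned[2]++ (one task now has three breakpoints) and
 * task_bp_pinned[1]-- (it no longer counts as a two-breakpoints task).
 */
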
/*
 * Add/remove the given breakpoint in our constraint table
 */
static void toggle_bp_slot(struct perf_event *bp, bool enable)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->ctx->task;

	/* Pinned counter task profiling */
	if (tsk) {
		if (cpu >= 0) {
			toggle_bp_task_slot(tsk, cpu, enable);
			return;
		}

		for_each_online_cpu(cpu)
			toggle_bp_task_slot(tsk, cpu, enable);
		return;
	}

	/* Pinned counter cpu profiling */
	if (enable)
		per_cpu(nr_cpu_bp_pinned, bp->cpu)++;
	else
		per_cpu(nr_cpu_bp_pinned, bp->cpu)--;
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(task_bp_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't cover all the registers.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(task_bp_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per task
 *          breakpoints.
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(task_bp_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the nr_bp_flexible, if any, must keep
 *          at least one register (or they will never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(task_bp_pinned, *))) < HBP_NUM
 */

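/*
 * A worked example of the check implemented in reserve_bp_slot() below
 * (illustrative numbers only, assuming HBP_NUM == 4 as on x86): if cpu 0
 * has one pinned cpu-wide counter (nr_cpu_bp_pinned == 1) and its busiest
 * task pins two more (max_task_bp_pinned() == 2), then slots.pinned == 3.
 * With one flexible counter around (slots.flexible == 1), we get
 * 3 + !!1 == 4 == HBP_NUM and the reservation is refused, so that the
 * flexible counters keep one debug register. Without any flexible
 * counter, 3 + 0 < 4 and the reservation succeeds.
 */
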
int reserve_bp_slot(struct perf_event *bp)
{
	struct bp_busy_slots slots = {0};
	int ret = 0;

	mutex_lock(&nr_bp_mutex);

	fetch_bp_busy_slots(&slots, bp->cpu);

	/* Flexible counters need to keep at least one slot */
	if (slots.pinned + (!!slots.flexible) == HBP_NUM) {
		ret = -ENOSPC;
		goto end;
	}

	toggle_bp_slot(bp, true);

end:
	mutex_unlock(&nr_bp_mutex);

	return ret;
}

void release_bp_slot(struct perf_event *bp)
{
	mutex_lock(&nr_bp_mutex);

	toggle_bp_slot(bp, false);

	mutex_unlock(&nr_bp_mutex);
}

int __register_perf_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = reserve_bp_slot(bp);
	if (ret)
		return ret;

	if (!bp->attr.disabled)
		ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task);

	return ret;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
	bp->callback = perf_bp_event;

	return __register_perf_hw_breakpoint(bp);
}

/*
 * Register a breakpoint bound to a task and a given cpu.
 * If cpu is -1, the breakpoint is active for the task on every cpu.
 * If the task is -1, the breakpoint is active for every task on the
 * given cpu.
 */
static struct perf_event *
register_user_hw_breakpoint_cpu(unsigned long addr,
				int len,
				int type,
				perf_callback_t triggered,
				pid_t pid,
				int cpu,
				bool active)
{
	struct perf_event_attr *attr;
	struct perf_event *bp;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return ERR_PTR(-ENOMEM);

	attr->type = PERF_TYPE_BREAKPOINT;
	attr->size = sizeof(*attr);
	attr->bp_addr = addr;
	attr->bp_len = len;
	attr->bp_type = type;
	/*
	 * Such breakpoints are used by debuggers to trigger signals when
	 * we hit the expected memory op. We can't miss such events, they
	 * must be pinned.
	 */
	attr->pinned = 1;

	if (!active)
		attr->disabled = 1;

	bp = perf_event_create_kernel_counter(attr, cpu, pid, triggered);
	kfree(attr);

	return bp;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @addr: the memory address that triggers the breakpoint
 * @len: the length of the access to the memory (1 byte, 2 bytes etc...)
 * @type: the type of the access to the memory (read/write/exec)
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 * @active: should we activate it while registering it
 */
struct perf_event *
register_user_hw_breakpoint(unsigned long addr,
			    int len,
			    int type,
			    perf_callback_t triggered,
			    struct task_struct *tsk,
			    bool active)
{
	return register_user_hw_breakpoint_cpu(addr, len, type, triggered,
					       tsk->pid, -1, active);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);

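/*
 * Typical usage (a minimal sketch, not part of this file; "my_triggered",
 * "tsk" and "addr" are hypothetical names at the caller's site):
 *
 *	static void my_triggered(struct perf_event *bp, void *data)
 *	{
 *		printk(KERN_INFO "breakpoint fired at 0x%llx\n",
 *		       bp->attr.bp_addr);
 *	}
 *
 *	struct perf_event *bp;
 *
 *	bp = register_user_hw_breakpoint(addr, HW_BREAKPOINT_LEN_1,
 *					 HW_BREAKPOINT_W, my_triggered,
 *					 tsk, true);
 *	if (IS_ERR(bp))
 *		return PTR_ERR(bp);
 */
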
/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @addr: the memory address that triggers the breakpoint
 * @len: the length of the access to the memory (1 byte, 2 bytes etc...)
 * @type: the type of the access to the memory (read/write/exec)
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 * @active: should we activate it while registering it
 */
struct perf_event *
modify_user_hw_breakpoint(struct perf_event *bp,
			  unsigned long addr,
			  int len,
			  int type,
			  perf_callback_t triggered,
			  struct task_struct *tsk,
			  bool active)
{
	/*
	 * FIXME: do it without unregistering
	 * - We don't want to lose our slot
	 * - If the new bp is incorrect, don't lose the older one
	 */
	unregister_hw_breakpoint(bp);

	return register_user_hw_breakpoint(addr, len, type, triggered,
					   tsk, active);
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);

/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (!bp)
		return;
	perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

static struct perf_event *
register_kernel_hw_breakpoint_cpu(unsigned long addr,
				  int len,
				  int type,
				  perf_callback_t triggered,
				  int cpu,
				  bool active)
{
	return register_user_hw_breakpoint_cpu(addr, len, type, triggered,
					       -1, cpu, active);
}

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @addr: the memory address that triggers the breakpoint
 * @len: the length of the access to the memory (1 byte, 2 bytes etc...)
 * @type: the type of the access to the memory (read/write/exec)
 * @triggered: callback to trigger when we hit the breakpoint
 * @active: should we activate it while registering it
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event **
register_wide_hw_breakpoint(unsigned long addr,
			    int len,
			    int type,
			    perf_callback_t triggered,
			    bool active)
{
	struct perf_event **cpu_events, **pevent, *bp;
	long err;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return ERR_PTR(-ENOMEM);

	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		bp = register_kernel_hw_breakpoint_cpu(addr, len, type,
					triggered, cpu, active);

		*pevent = bp;

		if (IS_ERR(bp) || !bp) {
			err = PTR_ERR(bp);
			goto fail;
		}
	}

	return cpu_events;

fail:
	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		if (IS_ERR(*pevent) || !*pevent)
			break;
		unregister_hw_breakpoint(*pevent);
	}
	free_percpu(cpu_events);
	/* return the error if any */
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);

/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event **cpu_events)
{
	int cpu;
	struct perf_event **pevent;

	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		unregister_hw_breakpoint(*pevent);
	}
	free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

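/*
 * Example of kernel-wide usage (a minimal sketch in the spirit of
 * samples/hw_breakpoint/data_breakpoint.c; "wide_handler" and
 * "watched_sym" are hypothetical names at the caller's site):
 *
 *	static struct perf_event **wide_bp;
 *
 *	static void wide_handler(struct perf_event *bp, void *data)
 *	{
 *		printk(KERN_INFO "watched_sym was written to\n");
 *		dump_stack();
 *	}
 *
 *	wide_bp = register_wide_hw_breakpoint(
 *				kallsyms_lookup_name("watched_sym"),
 *				HW_BREAKPOINT_LEN_4, HW_BREAKPOINT_W,
 *				wide_handler, true);
 *	if (IS_ERR(wide_bp))
 *		return PTR_ERR(wide_bp);
 *	...
 *	unregister_wide_hw_breakpoint(wide_bp);
 */
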
static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};

static int __init init_hw_breakpoint(void)
{
	return register_die_notifier(&hw_breakpoint_exceptions_nb);
}
core_initcall(init_hw_breakpoint);

struct pmu perf_ops_bp = {
	.enable		= arch_install_hw_breakpoint,
	.disable	= arch_uninstall_hw_breakpoint,
	.read		= hw_breakpoint_pmu_read,
	.unthrottle	= hw_breakpoint_pmu_unthrottle
};