kprobes: Prohibit probing native_get_debugreg
/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct     Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *              Probes initial implementation (includes suggestions from
 *              Rusty Russell).
 * 2004-Aug     Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *              hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July    Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *              interface to access function arguments.
 * 2004-Sep     Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *              exceptions notifier to be first on the priority list.
 * 2005-May     Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *              <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *              <prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>
#include <linux/memory.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
        addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif

static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

static DEFINE_MUTEX(kprobe_mutex);      /* Protects kprobe_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
        spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
        return &(kretprobe_table_locks[hash].lock);
}

/*
 * Normally, functions that we'd want to prohibit kprobes from probing
 * are marked __kprobes. But there are cases where such functions already
 * belong to a different section (__sched for preempt_schedule).
 *
 * For such cases, we now have a blacklist.
 */
static struct kprobe_blackpoint kprobe_blacklist[] = {
        {"preempt_schedule",},
        {"native_get_debugreg",},
        {NULL}    /* Terminator */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
#define INSNS_PER_PAGE  (PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe_insn_page {
        struct list_head list;
        kprobe_opcode_t *insns;         /* Page of instruction slots */
        char slot_used[INSNS_PER_PAGE];
        int nused;
        int ngarbage;
};

enum kprobe_slot_state {
        SLOT_CLEAN = 0,
        SLOT_DIRTY = 1,
        SLOT_USED = 2,
};

static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_pages */
static LIST_HEAD(kprobe_insn_pages);
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

static int __kprobes check_safety(void)
{
        int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_FREEZER)
        ret = freeze_processes();
        if (ret == 0) {
                struct task_struct *p, *q;
                do_each_thread(p, q) {
                        if (p != current && p->state == TASK_RUNNING &&
                            p->pid != 0) {
                                printk("Check failed: %s is running\n", p->comm);
                                ret = -1;
                                goto loop_end;
                        }
                } while_each_thread(p, q);
        }
loop_end:
        thaw_processes();
#else
        synchronize_sched();
#endif
        return ret;
}

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
static kprobe_opcode_t __kprobes *__get_insn_slot(void)
{
        struct kprobe_insn_page *kip;

 retry:
        list_for_each_entry(kip, &kprobe_insn_pages, list) {
                if (kip->nused < INSNS_PER_PAGE) {
                        int i;
                        for (i = 0; i < INSNS_PER_PAGE; i++) {
                                if (kip->slot_used[i] == SLOT_CLEAN) {
                                        kip->slot_used[i] = SLOT_USED;
                                        kip->nused++;
                                        return kip->insns + (i * MAX_INSN_SIZE);
                                }
                        }
                        /* Surprise!  No unused slots.  Fix kip->nused. */
                        kip->nused = INSNS_PER_PAGE;
                }
        }

        /* If there are any garbage slots, collect them and try again. */
        if (kprobe_garbage_slots && collect_garbage_slots() == 0)
                goto retry;

        /* All out of space.  Need to allocate a new page. Use slot 0. */
        kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
        if (!kip)
                return NULL;

        /*
         * Use module_alloc so this page is within +/- 2GB of where the
         * kernel image and loaded module images reside. This is required
         * so x86_64 can correctly handle the %rip-relative fixups.
         */
        kip->insns = module_alloc(PAGE_SIZE);
        if (!kip->insns) {
                kfree(kip);
                return NULL;
        }
        INIT_LIST_HEAD(&kip->list);
        list_add(&kip->list, &kprobe_insn_pages);
        memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
        kip->slot_used[0] = SLOT_USED;
        kip->nused = 1;
        kip->ngarbage = 0;
        return kip->insns;
}

kprobe_opcode_t __kprobes *get_insn_slot(void)
{
        kprobe_opcode_t *ret;
        mutex_lock(&kprobe_insn_mutex);
        ret = __get_insn_slot();
        mutex_unlock(&kprobe_insn_mutex);
        return ret;
}

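/*
 * The slots above are consumed by the architecture code. A rough sketch
 * of the expected pairing (illustrative only; the real callers live in
 * arch/<arch>/kernel/kprobes.c, typically in arch_prepare_kprobe() and
 * arch_remove_kprobe()):
 *
 *      p->ainsn.insn = get_insn_slot();
 *      if (!p->ainsn.insn)
 *              return -ENOMEM;
 *      ...
 *      free_insn_slot(p->ainsn.insn, 0);
 */
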
/* Return 1 if the slot's page has become empty, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
        kip->slot_used[idx] = SLOT_CLEAN;
        kip->nused--;
        if (kip->nused == 0) {
                /*
                 * Page is no longer in use.  Free it unless
                 * it's the last one.  We keep the last one
                 * so as not to have to set it up again the
                 * next time somebody inserts a probe.
                 */
                if (!list_is_singular(&kprobe_insn_pages)) {
                        list_del(&kip->list);
                        module_free(NULL, kip->insns);
                        kfree(kip);
                }
                return 1;
        }
        return 0;
}

static int __kprobes collect_garbage_slots(void)
{
        struct kprobe_insn_page *kip, *next;

        /* Ensure no one is preempted inside a garbage slot */
        if (check_safety())
                return -EAGAIN;

        list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) {
                int i;
                if (kip->ngarbage == 0)
                        continue;
                kip->ngarbage = 0;      /* we will collect all garbage slots */
                for (i = 0; i < INSNS_PER_PAGE; i++) {
                        if (kip->slot_used[i] == SLOT_DIRTY &&
                            collect_one_slot(kip, i))
                                break;
                }
        }
        kprobe_garbage_slots = 0;
        return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
        struct kprobe_insn_page *kip;

        mutex_lock(&kprobe_insn_mutex);
        list_for_each_entry(kip, &kprobe_insn_pages, list) {
                if (kip->insns <= slot &&
                    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
                        int i = (slot - kip->insns) / MAX_INSN_SIZE;
                        if (dirty) {
                                kip->slot_used[i] = SLOT_DIRTY;
                                kip->ngarbage++;
                        } else
                                collect_one_slot(kip, i);
                        break;
                }
        }

        if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
                collect_garbage_slots();

        mutex_unlock(&kprobe_insn_mutex);
}
#endif

/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
        __get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
        __get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *      - under the kprobe_mutex - during kprobe_[un]register()
 *                              OR
 *      - with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p;

        head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
        hlist_for_each_entry_rcu(p, node, head, hlist) {
                if (p->addr == addr)
                        return p;
        }
        return NULL;
}

/* Arm a kprobe with text_mutex held */
static void __kprobes arm_kprobe(struct kprobe *kp)
{
        mutex_lock(&text_mutex);
        arch_arm_kprobe(kp);
        mutex_unlock(&text_mutex);
}

/* Disarm a kprobe with text_mutex held */
static void __kprobes disarm_kprobe(struct kprobe *kp)
{
        mutex_lock(&text_mutex);
        arch_disarm_kprobe(kp);
        mutex_unlock(&text_mutex);
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe *kp;

        list_for_each_entry_rcu(kp, &p->list, list) {
                if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
                        set_kprobe_instance(kp);
                        if (kp->pre_handler(kp, regs))
                                return 1;
                }
                reset_kprobe_instance();
        }
        return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
                                        unsigned long flags)
{
        struct kprobe *kp;

        list_for_each_entry_rcu(kp, &p->list, list) {
                if (kp->post_handler && likely(!kprobe_disabled(kp))) {
                        set_kprobe_instance(kp);
                        kp->post_handler(kp, regs, flags);
                        reset_kprobe_instance();
                }
        }
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
                                        int trapnr)
{
        struct kprobe *cur = __get_cpu_var(kprobe_instance);

        /*
         * If we faulted "during" the execution of a user-specified
         * probe handler, invoke just that probe's fault handler
         */
        if (cur && cur->fault_handler) {
                if (cur->fault_handler(cur, regs, trapnr))
                        return 1;
        }
        return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe *cur = __get_cpu_var(kprobe_instance);
        int ret = 0;

        if (cur && cur->break_handler) {
                if (cur->break_handler(cur, regs))
                        ret = 1;
        }
        reset_kprobe_instance();
        return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
        struct kprobe *kp;
        if (p->pre_handler != aggr_pre_handler) {
                p->nmissed++;
        } else {
                list_for_each_entry_rcu(kp, &p->list, list)
                        kp->nmissed++;
        }
}

void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
                                struct hlist_head *head)
{
        struct kretprobe *rp = ri->rp;

        /* remove rp inst off the kretprobe_inst_table */
        hlist_del(&ri->hlist);
        INIT_HLIST_NODE(&ri->hlist);
        if (likely(rp)) {
                spin_lock(&rp->lock);
                hlist_add_head(&ri->hlist, &rp->free_instances);
                spin_unlock(&rp->lock);
        } else
                /* Unregistering */
                hlist_add_head(&ri->hlist, head);
}

void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
                         struct hlist_head **head, unsigned long *flags)
{
        unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
        spinlock_t *hlist_lock;

        *head = &kretprobe_inst_table[hash];
        hlist_lock = kretprobe_table_lock_ptr(hash);
        spin_lock_irqsave(hlist_lock, *flags);
}

static void __kprobes kretprobe_table_lock(unsigned long hash,
        unsigned long *flags)
{
        spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
        spin_lock_irqsave(hlist_lock, *flags);
}

void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
        unsigned long *flags)
{
        unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
        spinlock_t *hlist_lock;

        hlist_lock = kretprobe_table_lock_ptr(hash);
        spin_unlock_irqrestore(hlist_lock, *flags);
}

void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
{
        spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
        spin_unlock_irqrestore(hlist_lock, *flags);
}

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left-over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
        struct kretprobe_instance *ri;
        struct hlist_head *head, empty_rp;
        struct hlist_node *node, *tmp;
        unsigned long hash, flags = 0;

        if (unlikely(!kprobes_initialized))
                /* Early boot.  kretprobe_table_locks not yet initialized. */
                return;

        /* Initialize empty_rp before recycle_rp_inst() can add to it. */
        INIT_HLIST_HEAD(&empty_rp);
        hash = hash_ptr(tk, KPROBE_HASH_BITS);
        head = &kretprobe_inst_table[hash];
        kretprobe_table_lock(hash, &flags);
        hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (ri->task == tk)
                        recycle_rp_inst(ri, &empty_rp);
        }
        kretprobe_table_unlock(hash, &flags);
        hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
}

static inline void free_rp_inst(struct kretprobe *rp)
{
        struct kretprobe_instance *ri;
        struct hlist_node *pos, *next;

        hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
}

static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
        unsigned long flags, hash;
        struct kretprobe_instance *ri;
        struct hlist_node *pos, *next;
        struct hlist_head *head;

        /* No race here */
        for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
                kretprobe_table_lock(hash, &flags);
                head = &kretprobe_inst_table[hash];
                hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
                        if (ri->rp == rp)
                                ri->rp = NULL;
                }
                kretprobe_table_unlock(hash, &flags);
        }
        free_rp_inst(rp);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
        memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
        memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
        BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
        if (p->break_handler) {
                if (ap->break_handler)
                        return -EEXIST;
                list_add_tail_rcu(&p->list, &ap->list);
                ap->break_handler = aggr_break_handler;
        } else
                list_add_rcu(&p->list, &ap->list);
        if (p->post_handler && !ap->post_handler)
                ap->post_handler = aggr_post_handler;

        if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
                ap->flags &= ~KPROBE_FLAG_DISABLED;
                if (!kprobes_all_disarmed)
                        /* Arm the breakpoint again. */
                        arm_kprobe(ap);
        }
        return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
        copy_kprobe(p, ap);
        flush_insn_slot(ap);
        ap->addr = p->addr;
        ap->flags = p->flags;
        ap->pre_handler = aggr_pre_handler;
        ap->fault_handler = aggr_fault_handler;
        /* We don't care about kprobes which have gone. */
        if (p->post_handler && !kprobe_gone(p))
                ap->post_handler = aggr_post_handler;
        if (p->break_handler && !kprobe_gone(p))
                ap->break_handler = aggr_break_handler;

        INIT_LIST_HEAD(&ap->list);
        list_add_rcu(&p->list, &ap->list);

        hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
                                          struct kprobe *p)
{
        int ret = 0;
        struct kprobe *ap = old_p;

        if (old_p->pre_handler != aggr_pre_handler) {
                /* If old_p is not an aggr_kprobe, create a new aggr_kprobe. */
                ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
                if (!ap)
                        return -ENOMEM;
                add_aggr_kprobe(ap, old_p);
        }

        if (kprobe_gone(ap)) {
                /*
                 * We are attempting to insert a new probe at the same
                 * location as a probe in a module vaddr area that has
                 * already been freed. The instruction slot has already
                 * been released, so we need a new slot for the new probe.
                 */
                ret = arch_prepare_kprobe(ap);
                if (ret)
                        /*
                         * Even if we fail to allocate a new slot, we don't
                         * need to free the aggr_kprobe. It will be used next
                         * time, or freed by unregister_kprobe.
                         */
                        return ret;

                /*
                 * Clear the gone flag to prevent allocating a new slot again,
                 * and set the disabled flag because it is not armed yet.
                 */
                ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
                            | KPROBE_FLAG_DISABLED;
        }

        copy_kprobe(ap, p);
        return add_new_kprobe(ap, p);
}

/* Try to disable an aggr_kprobe; return 1 on success. */
static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p)
{
        struct kprobe *kp;

        list_for_each_entry_rcu(kp, &p->list, list) {
                if (!kprobe_disabled(kp))
                        /*
                         * There is an active probe on the list.
                         * We can't disable this aggr_kprobe.
                         */
                        return 0;
        }
        p->flags |= KPROBE_FLAG_DISABLED;
        return 1;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
        struct kprobe_blackpoint *kb;

        if (addr >= (unsigned long)__kprobes_text_start &&
            addr < (unsigned long)__kprobes_text_end)
                return -EINVAL;
        /*
         * If there exists a kprobe_blacklist, verify and
         * fail any probe registration in the prohibited area
         */
        for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
                if (kb->start_addr) {
                        if (addr >= kb->start_addr &&
                            addr < (kb->start_addr + kb->range))
                                return -EINVAL;
                }
        }
        return 0;
}

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
        kprobe_opcode_t *addr = p->addr;
        if (p->symbol_name) {
                if (addr)
                        return NULL;
                kprobe_lookup_name(p->symbol_name, addr);
        }

        if (!addr)
                return NULL;
        return (kprobe_opcode_t *)(((char *)addr) + p->offset);
}

int __kprobes register_kprobe(struct kprobe *p)
{
        int ret = 0;
        struct kprobe *old_p;
        struct module *probed_mod;
        kprobe_opcode_t *addr;

        addr = kprobe_addr(p);
        if (!addr)
                return -EINVAL;
        p->addr = addr;

        preempt_disable();
        if (!kernel_text_address((unsigned long) p->addr) ||
            in_kprobes_functions((unsigned long) p->addr)) {
                preempt_enable();
                return -EINVAL;
        }

        /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
        p->flags &= KPROBE_FLAG_DISABLED;

        /* Check whether we are probing a module. */
        probed_mod = __module_text_address((unsigned long) p->addr);
        if (probed_mod) {
                /*
                 * We must hold a refcount of the probed module while updating
                 * its code to prohibit unexpected unloading.
                 */
                if (unlikely(!try_module_get(probed_mod))) {
                        preempt_enable();
                        return -EINVAL;
                }
                /*
                 * If the module has already freed .init.text, we can't
                 * insert a kprobe there.
                 */
                if (within_module_init((unsigned long)p->addr, probed_mod) &&
                    probed_mod->state != MODULE_STATE_COMING) {
                        module_put(probed_mod);
                        preempt_enable();
                        return -EINVAL;
                }
        }
        preempt_enable();

        p->nmissed = 0;
        INIT_LIST_HEAD(&p->list);
        mutex_lock(&kprobe_mutex);
        old_p = get_kprobe(p->addr);
        if (old_p) {
                ret = register_aggr_kprobe(old_p, p);
                goto out;
        }

        mutex_lock(&text_mutex);
        ret = arch_prepare_kprobe(p);
        if (ret)
                goto out_unlock_text;

        INIT_HLIST_NODE(&p->hlist);
        hlist_add_head_rcu(&p->hlist,
                       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

        if (!kprobes_all_disarmed && !kprobe_disabled(p))
                arch_arm_kprobe(p);

out_unlock_text:
        mutex_unlock(&text_mutex);
out:
        mutex_unlock(&kprobe_mutex);

        if (probed_mod)
                module_put(probed_mod);

        return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);
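
/*
 * Example (a minimal sketch, not part of this file): registering a probe
 * from a module. The probed symbol "do_fork" and the handler name are
 * illustrative assumptions; any kernel text symbol outside the blacklist
 * and the __kprobes sections would do.
 *
 *      static int handler_pre(struct kprobe *kp, struct pt_regs *regs)
 *      {
 *              printk(KERN_INFO "pre_handler: addr = %p\n", kp->addr);
 *              return 0;
 *      }
 *
 *      static struct kprobe my_kprobe = {
 *              .symbol_name    = "do_fork",
 *              .pre_handler    = handler_pre,
 *      };
 *
 *      static int __init my_init(void)
 *      {
 *              return register_kprobe(&my_kprobe);
 *      }
 *
 *      static void __exit my_exit(void)
 *      {
 *              unregister_kprobe(&my_kprobe);
 *      }
 */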

/* Check passed kprobe is valid and return kprobe in kprobe_table. */
static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
{
        struct kprobe *old_p, *list_p;

        old_p = get_kprobe(p->addr);
        if (unlikely(!old_p))
                return NULL;

        if (p != old_p) {
                list_for_each_entry_rcu(list_p, &old_p->list, list)
                        if (list_p == p)
                                /* kprobe p is a valid probe */
                                goto valid;
                return NULL;
        }
valid:
        return old_p;
}

/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
        struct kprobe *old_p, *list_p;

        old_p = __get_valid_kprobe(p);
        if (old_p == NULL)
                return -EINVAL;

        if (old_p == p ||
            (old_p->pre_handler == aggr_pre_handler &&
             list_is_singular(&old_p->list))) {
                /*
                 * This is the only probe on the hash list. Disarm it only
                 * if kprobes are enabled and it is not gone - otherwise,
                 * the breakpoint has already been removed and we save on
                 * flushing the icache.
                 */
                if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
                        disarm_kprobe(p);
                hlist_del_rcu(&old_p->hlist);
        } else {
                if (p->break_handler && !kprobe_gone(p))
                        old_p->break_handler = NULL;
                if (p->post_handler && !kprobe_gone(p)) {
                        list_for_each_entry_rcu(list_p, &old_p->list, list) {
                                if ((list_p != p) && (list_p->post_handler))
                                        goto noclean;
                        }
                        old_p->post_handler = NULL;
                }
noclean:
                list_del_rcu(&p->list);
                if (!kprobe_disabled(old_p)) {
                        try_to_disable_aggr_kprobe(old_p);
                        if (!kprobes_all_disarmed && kprobe_disabled(old_p))
                                disarm_kprobe(old_p);
                }
        }
        return 0;
}

static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
        struct kprobe *old_p;

        if (list_empty(&p->list))
                arch_remove_kprobe(p);
        else if (list_is_singular(&p->list)) {
                /* "p" is the last child of an aggr_kprobe */
                old_p = list_entry(p->list.next, struct kprobe, list);
                list_del(&p->list);
                arch_remove_kprobe(old_p);
                kfree(old_p);
        }
}

int __kprobes register_kprobes(struct kprobe **kps, int num)
{
        int i, ret = 0;

        if (num <= 0)
                return -EINVAL;
        for (i = 0; i < num; i++) {
                ret = register_kprobe(kps[i]);
                if (ret < 0) {
                        if (i > 0)
                                unregister_kprobes(kps, i);
                        break;
                }
        }
        return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);

void __kprobes unregister_kprobe(struct kprobe *p)
{
        unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
        int i;

        if (num <= 0)
                return;
        mutex_lock(&kprobe_mutex);
        for (i = 0; i < num; i++)
                if (__unregister_kprobe_top(kps[i]) < 0)
                        kps[i]->addr = NULL;
        mutex_unlock(&kprobe_mutex);

        synchronize_sched();
        for (i = 0; i < num; i++)
                if (kps[i]->addr)
                        __unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);

static struct notifier_block kprobe_exceptions_nb = {
        .notifier_call = kprobe_exceptions_notify,
        .priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
        return (unsigned long)entry;
}

int __kprobes register_jprobes(struct jprobe **jps, int num)
{
        struct jprobe *jp;
        int ret = 0, i;

        if (num <= 0)
                return -EINVAL;
        for (i = 0; i < num; i++) {
                unsigned long addr;
                jp = jps[i];
                addr = arch_deref_entry_point(jp->entry);

                if (!kernel_text_address(addr))
                        ret = -EINVAL;
                else {
                        /* TODO: verify that the probe point is a function entry point */
                        jp->kp.pre_handler = setjmp_pre_handler;
                        jp->kp.break_handler = longjmp_break_handler;
                        ret = register_kprobe(&jp->kp);
                }
                if (ret < 0) {
                        if (i > 0)
                                unregister_jprobes(jps, i);
                        break;
                }
        }
        return ret;
}
EXPORT_SYMBOL_GPL(register_jprobes);

int __kprobes register_jprobe(struct jprobe *jp)
{
        return register_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(register_jprobe);
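
/*
 * Example (a minimal sketch, not part of this file): a jprobe handler must
 * mirror the signature of the probed function and end in jprobe_return().
 * The do_fork() signature below is an assumption matching kernels of this
 * vintage; check the target kernel before reusing it.
 *
 *      static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
 *                           struct pt_regs *regs, unsigned long stack_size,
 *                           int __user *parent_tidptr, int __user *child_tidptr)
 *      {
 *              printk(KERN_INFO "jprobe: clone_flags = 0x%lx\n", clone_flags);
 *              jprobe_return();
 *              return 0;
 *      }
 *
 *      static struct jprobe my_jprobe = {
 *              .entry          = jdo_fork,
 *              .kp.symbol_name = "do_fork",
 *      };
 *
 *      register_jprobe(&my_jprobe);
 */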

void __kprobes unregister_jprobe(struct jprobe *jp)
{
        unregister_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(unregister_jprobe);

void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
        int i;

        if (num <= 0)
                return;
        mutex_lock(&kprobe_mutex);
        for (i = 0; i < num; i++)
                if (__unregister_kprobe_top(&jps[i]->kp) < 0)
                        jps[i]->kp.addr = NULL;
        mutex_unlock(&kprobe_mutex);

        synchronize_sched();
        for (i = 0; i < num; i++) {
                if (jps[i]->kp.addr)
                        __unregister_kprobe_bottom(&jps[i]->kp);
        }
}
EXPORT_SYMBOL_GPL(unregister_jprobes);

#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
                                           struct pt_regs *regs)
{
        struct kretprobe *rp = container_of(p, struct kretprobe, kp);
        unsigned long hash, flags = 0;
        struct kretprobe_instance *ri;

        /* TODO: consider swapping the RA only after the last pre_handler fired */
        hash = hash_ptr(current, KPROBE_HASH_BITS);
        spin_lock_irqsave(&rp->lock, flags);
        if (!hlist_empty(&rp->free_instances)) {
                ri = hlist_entry(rp->free_instances.first,
                                struct kretprobe_instance, hlist);
                hlist_del(&ri->hlist);
                spin_unlock_irqrestore(&rp->lock, flags);

                ri->rp = rp;
                ri->task = current;

                if (rp->entry_handler && rp->entry_handler(ri, regs))
                        return 0;

                arch_prepare_kretprobe(ri, regs);

                /* XXX(hch): why is there no hlist_move_head? */
                INIT_HLIST_NODE(&ri->hlist);
                kretprobe_table_lock(hash, &flags);
                hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
                kretprobe_table_unlock(hash, &flags);
        } else {
                rp->nmissed++;
                spin_unlock_irqrestore(&rp->lock, flags);
        }
        return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
        int ret = 0;
        struct kretprobe_instance *inst;
        int i;
        void *addr;

        if (kretprobe_blacklist_size) {
                addr = kprobe_addr(&rp->kp);
                if (!addr)
                        return -EINVAL;

                for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
                        if (kretprobe_blacklist[i].addr == addr)
                                return -EINVAL;
                }
        }

        rp->kp.pre_handler = pre_handler_kretprobe;
        rp->kp.post_handler = NULL;
        rp->kp.fault_handler = NULL;
        rp->kp.break_handler = NULL;

        /* Pre-allocate memory for max kretprobe instances */
        if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
                rp->maxactive = max(10, 2 * NR_CPUS);
#else
                rp->maxactive = NR_CPUS;
#endif
        }
        spin_lock_init(&rp->lock);
        INIT_HLIST_HEAD(&rp->free_instances);
        for (i = 0; i < rp->maxactive; i++) {
                inst = kmalloc(sizeof(struct kretprobe_instance) +
                               rp->data_size, GFP_KERNEL);
                if (inst == NULL) {
                        free_rp_inst(rp);
                        return -ENOMEM;
                }
                INIT_HLIST_NODE(&inst->hlist);
                hlist_add_head(&inst->hlist, &rp->free_instances);
        }

        rp->nmissed = 0;
        /* Establish function entry probe point */
        ret = register_kprobe(&rp->kp);
        if (ret != 0)
                free_rp_inst(rp);
        return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);
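
/*
 * Example (a minimal sketch, not part of this file): a return probe that
 * reports the return value of a probed function. The probed symbol
 * "do_fork" is an illustrative assumption; regs_return_value() is the
 * arch-provided accessor for the return register.
 *
 *      static int ret_handler(struct kretprobe_instance *ri,
 *                             struct pt_regs *regs)
 *      {
 *              printk(KERN_INFO "return value: %lu\n",
 *                     regs_return_value(regs));
 *              return 0;
 *      }
 *
 *      static struct kretprobe my_kretprobe = {
 *              .handler        = ret_handler,
 *              .kp.symbol_name = "do_fork",
 *              .maxactive      = 20,
 *      };
 *
 *      register_kretprobe(&my_kretprobe);
 */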

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
        int ret = 0, i;

        if (num <= 0)
                return -EINVAL;
        for (i = 0; i < num; i++) {
                ret = register_kretprobe(rps[i]);
                if (ret < 0) {
                        if (i > 0)
                                unregister_kretprobes(rps, i);
                        break;
                }
        }
        return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
        unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
        int i;

        if (num <= 0)
                return;
        mutex_lock(&kprobe_mutex);
        for (i = 0; i < num; i++)
                if (__unregister_kprobe_top(&rps[i]->kp) < 0)
                        rps[i]->kp.addr = NULL;
        mutex_unlock(&kprobe_mutex);

        synchronize_sched();
        for (i = 0; i < num; i++) {
                if (rps[i]->kp.addr) {
                        __unregister_kprobe_bottom(&rps[i]->kp);
                        cleanup_rp_inst(rps[i]);
                }
        }
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
        return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
        return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
                                           struct pt_regs *regs)
{
        return 0;
}

#endif /* CONFIG_KRETPROBES */

/* Set the kprobe gone and remove its instruction buffer. */
static void __kprobes kill_kprobe(struct kprobe *p)
{
        struct kprobe *kp;

        p->flags |= KPROBE_FLAG_GONE;
        if (p->pre_handler == aggr_pre_handler) {
                /*
                 * If this is an aggr_kprobe, we have to walk all the
                 * chained probes and mark them GONE.
                 */
                list_for_each_entry_rcu(kp, &p->list, list)
                        kp->flags |= KPROBE_FLAG_GONE;
                p->post_handler = NULL;
                p->break_handler = NULL;
        }
        /*
         * Here, we can remove the insn_slot safely, because no thread calls
         * the original probed function (which will be freed soon) any more.
         */
        arch_remove_kprobe(p);
}

void __kprobes dump_kprobe(struct kprobe *kp)
{
        printk(KERN_WARNING "Dumping kprobe:\n");
        printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
               kp->symbol_name, kp->addr, kp->offset);
}

/* Module notifier call back, checking kprobes on the module */
static int __kprobes kprobes_module_callback(struct notifier_block *nb,
                                             unsigned long val, void *data)
{
        struct module *mod = data;
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p;
        unsigned int i;
        int checkcore = (val == MODULE_STATE_GOING);

        if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
                return NOTIFY_DONE;

        /*
         * When MODULE_STATE_GOING is notified, both the module's .text and
         * .init.text sections will be freed. When MODULE_STATE_LIVE is
         * notified, only the .init.text section will be freed. We need to
         * disable the kprobes which have been inserted in those sections.
         */
        mutex_lock(&kprobe_mutex);
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
                hlist_for_each_entry_rcu(p, node, head, hlist)
                        if (within_module_init((unsigned long)p->addr, mod) ||
                            (checkcore &&
                             within_module_core((unsigned long)p->addr, mod))) {
                                /*
                                 * The vaddr this probe is installed at will
                                 * soon be vfreed but not synced to disk.
                                 * Hence, disarming the breakpoint isn't
                                 * needed.
                                 */
                                kill_kprobe(p);
                        }
        }
        mutex_unlock(&kprobe_mutex);
        return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
        .notifier_call = kprobes_module_callback,
        .priority = 0
};

static int __init init_kprobes(void)
{
        int i, err = 0;
        unsigned long offset = 0, size = 0;
        char *modname, namebuf[128];
        const char *symbol_name;
        void *addr;
        struct kprobe_blackpoint *kb;

        /* FIXME allocate the probe table, currently defined statically */
        /* initialize all list heads */
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                INIT_HLIST_HEAD(&kprobe_table[i]);
                INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
                spin_lock_init(&(kretprobe_table_locks[i].lock));
        }

        /*
         * Lookup and populate the kprobe_blacklist.
         *
         * Unlike the kretprobe blacklist, we'll need to determine
         * the range of addresses that belong to the said functions,
         * since a kprobe need not necessarily be at the beginning
         * of a function.
         */
        for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
                kprobe_lookup_name(kb->name, addr);
                if (!addr)
                        continue;

                kb->start_addr = (unsigned long)addr;
                symbol_name = kallsyms_lookup(kb->start_addr,
                                &size, &offset, &modname, namebuf);
                if (!symbol_name)
                        kb->range = 0;
                else
                        kb->range = size;
        }

        if (kretprobe_blacklist_size) {
                /* lookup the function address from its name */
                for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
                        kprobe_lookup_name(kretprobe_blacklist[i].name,
                                           kretprobe_blacklist[i].addr);
                        if (!kretprobe_blacklist[i].addr)
                                printk("kretprobe: lookup failed: %s\n",
                                       kretprobe_blacklist[i].name);
                }
        }

        /* By default, kprobes are armed */
        kprobes_all_disarmed = false;

        err = arch_init_kprobes();
        if (!err)
                err = register_die_notifier(&kprobe_exceptions_nb);
        if (!err)
                err = register_module_notifier(&kprobe_module_nb);

        kprobes_initialized = (err == 0);

        if (!err)
                init_test_probes();
        return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
                const char *sym, int offset, char *modname)
{
        char *kprobe_type;

        if (p->pre_handler == pre_handler_kretprobe)
                kprobe_type = "r";
        else if (p->pre_handler == setjmp_pre_handler)
                kprobe_type = "j";
        else
                kprobe_type = "k";
        if (sym)
                seq_printf(pi, "%p  %s  %s+0x%x  %s %s%s\n",
                        p->addr, kprobe_type, sym, offset,
                        (modname ? modname : " "),
                        (kprobe_gone(p) ? "[GONE]" : ""),
                        ((kprobe_disabled(p) && !kprobe_gone(p)) ?
                         "[DISABLED]" : ""));
        else
                seq_printf(pi, "%p  %s  %p %s%s\n",
                        p->addr, kprobe_type, p->addr,
                        (kprobe_gone(p) ? "[GONE]" : ""),
                        ((kprobe_disabled(p) && !kprobe_gone(p)) ?
                         "[DISABLED]" : ""));
}

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
        return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
        (*pos)++;
        if (*pos >= KPROBE_TABLE_SIZE)
                return NULL;
        return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
        /* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p, *kp;
        const char *sym = NULL;
        unsigned int i = *(loff_t *) v;
        unsigned long offset = 0;
        char *modname, namebuf[128];

        head = &kprobe_table[i];
        preempt_disable();
        hlist_for_each_entry_rcu(p, node, head, hlist) {
                sym = kallsyms_lookup((unsigned long)p->addr, NULL,
                                        &offset, &modname, namebuf);
                if (p->pre_handler == aggr_pre_handler) {
                        list_for_each_entry_rcu(kp, &p->list, list)
                                report_probe(pi, kp, sym, offset, modname);
                } else
                        report_probe(pi, p, sym, offset, modname);
        }
        preempt_enable();
        return 0;
}

static struct seq_operations kprobes_seq_ops = {
        .start = kprobe_seq_start,
        .next  = kprobe_seq_next,
        .stop  = kprobe_seq_stop,
        .show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
        return seq_open(filp, &kprobes_seq_ops);
}

static struct file_operations debugfs_kprobes_operations = {
        .open           = kprobes_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

/* Disable one kprobe */
int __kprobes disable_kprobe(struct kprobe *kp)
{
        int ret = 0;
        struct kprobe *p;

        mutex_lock(&kprobe_mutex);

        /* Check whether specified probe is valid. */
        p = __get_valid_kprobe(kp);
        if (unlikely(p == NULL)) {
                ret = -EINVAL;
                goto out;
        }

        /* If the probe is already disabled (or gone), just return */
        if (kprobe_disabled(kp))
                goto out;

        kp->flags |= KPROBE_FLAG_DISABLED;
        if (p != kp)
                /* When kp != p, p is always enabled. */
                try_to_disable_aggr_kprobe(p);

        if (!kprobes_all_disarmed && kprobe_disabled(p))
                disarm_kprobe(p);
out:
        mutex_unlock(&kprobe_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int __kprobes enable_kprobe(struct kprobe *kp)
{
        int ret = 0;
        struct kprobe *p;

        mutex_lock(&kprobe_mutex);

        /* Check whether specified probe is valid. */
        p = __get_valid_kprobe(kp);
        if (unlikely(p == NULL)) {
                ret = -EINVAL;
                goto out;
        }

        if (kprobe_gone(kp)) {
                /* This kprobe has gone; we can't enable it. */
                ret = -EINVAL;
                goto out;
        }

        if (!kprobes_all_disarmed && kprobe_disabled(p))
                arm_kprobe(p);

        p->flags &= ~KPROBE_FLAG_DISABLED;
        if (p != kp)
                kp->flags &= ~KPROBE_FLAG_DISABLED;
out:
        mutex_unlock(&kprobe_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);
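
/*
 * Example (a minimal sketch): toggling a probe that was registered earlier
 * with register_kprobe(), e.g. "my_kprobe" from the sketch above:
 *
 *      disable_kprobe(&my_kprobe);
 *      ...
 *      enable_kprobe(&my_kprobe);
 *
 * Disabling keeps the probe registered but removes its breakpoint, so the
 * probed path runs at full speed until the probe is enabled again.
 */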

static void __kprobes arm_all_kprobes(void)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p;
        unsigned int i;

        mutex_lock(&kprobe_mutex);

        /* If kprobes are armed, just return */
        if (!kprobes_all_disarmed)
                goto already_enabled;

        mutex_lock(&text_mutex);
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
                hlist_for_each_entry_rcu(p, node, head, hlist)
                        if (!kprobe_disabled(p))
                                arch_arm_kprobe(p);
        }
        mutex_unlock(&text_mutex);

        kprobes_all_disarmed = false;
        printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
        mutex_unlock(&kprobe_mutex);
}

static void __kprobes disarm_all_kprobes(void)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p;
        unsigned int i;

        mutex_lock(&kprobe_mutex);

        /* If kprobes are already disarmed, just return */
        if (kprobes_all_disarmed)
                goto already_disabled;

        kprobes_all_disarmed = true;
        printk(KERN_INFO "Kprobes globally disabled\n");
        mutex_lock(&text_mutex);
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
                hlist_for_each_entry_rcu(p, node, head, hlist) {
                        if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
                                arch_disarm_kprobe(p);
                }
        }

        mutex_unlock(&text_mutex);
        mutex_unlock(&kprobe_mutex);
        /* Allow all currently running kprobes to complete */
        synchronize_sched();
        return;

already_disabled:
        mutex_unlock(&kprobe_mutex);
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * it becomes available.
 */
static ssize_t read_enabled_file_bool(struct file *file,
               char __user *user_buf, size_t count, loff_t *ppos)
{
        char buf[3];

        if (!kprobes_all_disarmed)
                buf[0] = '1';
        else
                buf[0] = '0';
        buf[1] = '\n';
        buf[2] = 0x00;
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
               const char __user *user_buf, size_t count, loff_t *ppos)
{
        char buf[32];
        int buf_size;

        buf_size = min(count, (sizeof(buf)-1));
        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        switch (buf[0]) {
        case 'y':
        case 'Y':
        case '1':
                arm_all_kprobes();
                break;
        case 'n':
        case 'N':
        case '0':
                disarm_all_kprobes();
                break;
        }

        return count;
}

static struct file_operations fops_kp = {
        .read =         read_enabled_file_bool,
        .write =        write_enabled_file_bool,
};
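
/*
 * Usage note (illustrative, assuming debugfs is mounted at the usual
 * /sys/kernel/debug): all kprobes can be disarmed and re-armed globally
 * from user space via the "enabled" file created below:
 *
 *      echo 0 > /sys/kernel/debug/kprobes/enabled
 *      echo 1 > /sys/kernel/debug/kprobes/enabled
 */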

static int __kprobes debugfs_kprobe_init(void)
{
        struct dentry *dir, *file;
        unsigned int value = 1;

        dir = debugfs_create_dir("kprobes", NULL);
        if (!dir)
                return -ENOMEM;

        file = debugfs_create_file("list", 0444, dir, NULL,
                                &debugfs_kprobes_operations);
        if (!file) {
                debugfs_remove(dir);
                return -ENOMEM;
        }

        file = debugfs_create_file("enabled", 0600, dir,
                                        &value, &fops_kp);
        if (!file) {
                debugfs_remove(dir);
                return -ENOMEM;
        }

        return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);