/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

DEFINE_MUTEX(event_storage_mutex);
EXPORT_SYMBOL_GPL(event_storage_mutex);

char event_storage[EVENT_STORAGE_SIZE];
EXPORT_SYMBOL_GPL(event_storage);

LIST_HEAD(ftrace_events);
LIST_HEAD(ftrace_common_fields);

struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;

err:
	if (field)
		kfree(field->name);
	kfree(field);

	return -ENOMEM;
}

int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

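/*
 * For illustration (not part of the original file): with the macro above,
 * __common_field(int, pid) expands to roughly
 *
 *	ret = __trace_define_field(&ftrace_common_fields, "int",
 *				   "common_pid",
 *				   offsetof(typeof(ent), pid),
 *				   sizeof(ent.pid),
 *				   is_signed_type(int), FILTER_OTHER);
 *	if (ret)
 *		return ret;
 *
 * which is why the fields defined below show up as "common_*" in every
 * event's format file.
 */
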
static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);
	__common_field(int, padding);

	return ret;
}

void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

int trace_event_raw_init(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(&call->event);
	if (!id)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->probe,
						 call);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->probe,
					    call);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->perf_probe,
					    call);
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ftrace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!(call->flags & TRACE_EVENT_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
		} else {
			tracing_stop_cmdline_record();
			call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
		}
	}
	mutex_unlock(&event_mutex);
}

static int ftrace_event_enable_disable(struct ftrace_event_call *call,
				       int enable)
{
	int ret = 0;

	switch (enable) {
	case 0:
		if (call->flags & TRACE_EVENT_FL_ENABLED) {
			call->flags &= ~TRACE_EVENT_FL_ENABLED;
			if (call->flags & TRACE_EVENT_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
			}
			call->class->reg(call, TRACE_REG_UNREGISTER);
		}
		break;
	case 1:
		if (!(call->flags & TRACE_EVENT_FL_ENABLED)) {
			if (trace_flags & TRACE_ITER_RECORD_CMD) {
				tracing_start_cmdline_record();
				call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
			}
			ret = call->class->reg(call, TRACE_REG_REGISTER);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event "
					"%s\n", call->name);
				break;
			}
			call->flags |= TRACE_EVENT_FL_ENABLED;
		}
		break;
	}

	return ret;
}

static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	}
	mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system->ref_count == 0);
	if (--system->ref_count)
		return;

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree(system->name);
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system->ref_count == 0);
	system->ref_count++;
}

static void put_system(struct event_subsystem *system)
{
	mutex_lock(&event_mutex);
	__put_system(system);
	mutex_unlock(&event_mutex);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);

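/*
 * Usage sketch (hypothetical, not part of this file): another part of the
 * kernel could toggle a whole subsystem around a workload like this. The
 * "sched" subsystem name is only an example.
 */
static __maybe_unused int example_trace_sched_workload(void (*workload)(void))
{
	int ret;

	/* A NULL event name means every event in the "sched" subsystem */
	ret = trace_set_clr_event("sched", NULL, 1);
	if (ret)
		return ret;	/* -EINVAL: nothing matched */

	workload();

	return trace_set_clr_event("sched", NULL, 0);
}
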
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded(&parser)) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

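/*
 * Example (user space, hypothetical event names): the write handler above
 * backs the set_event file, and a leading '!' clears an event instead of
 * setting it:
 *
 *	# echo 'sched:sched_switch' > /sys/kernel/debug/tracing/set_event
 *	# echo '!sched:sched_switch' >> /sys/kernel/debug/tracing/set_event
 */
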
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg)
			return call;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = t_next(m, call, &l);
		if (!call)
			break;
	}
	return call;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		if (call->flags & TRACE_EVENT_FL_ENABLED)
			return call;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = s_next(m, call, &l);
		if (!call)
			break;
	}
	return call;
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->flags & TRACE_EVENT_FL_ENABLED)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	unsigned long val;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ret = ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system) != 0)
			continue;

		/*
		 * We need to find out if all the events are set,
		 * if all the events are cleared, or if we have
		 * a mixture. Bit 1 is set for an enabled event,
		 * bit 0 for a disabled one, so set ends up as
		 * 1 (all off), 2 (all on), or 3 (mixed -> 'X').
		 */
		set |= (1 << !!(call->flags & TRACE_EVENT_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	const char *system = filp->private_data;
	unsigned long val;
	char buf[64];
	ssize_t ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	ret = __ftrace_set_clr_event(NULL, system, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}

enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};

static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		if (unlikely(list_empty(common_head)))
			return NULL;

		field = list_entry(common_head->prev,
				   struct ftrace_event_field, link);
		return field;

	case FORMAT_FIELD_SEPERATOR:
		if (unlikely(list_empty(head)))
			return NULL;

		field = list_entry(head->prev, struct ftrace_event_field, link);
		return field;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	field = v;
	if (field->link.prev == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (field->link.prev == head)
		return (void *)FORMAT_PRINTFMT;

	field = list_entry(field->link.prev, struct ftrace_event_field, link);

	return field;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
	loff_t l = 0;
	void *p;

	/* Start by showing the header */
	if (!*pos)
		return (void *)FORMAT_HEADER;

	p = (void *)FORMAT_HEADER;
	do {
		p = f_next(m, p, &l);
	} while (p && l < *pos);

	return p;
}

static int f_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", call->name);
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_printf(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = v;

	/*
	 * Smartly shows the array type (except dynamic arrays).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (!strncmp(field->type, "__data_loc", 10))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}

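/*
 * For illustration (hypothetical event and print fmt; the common_type line
 * follows from trace_define_common_fields() above): the seq_printf() calls
 * in f_show() produce an event format file of this shape:
 *
 *	name: sched_switch
 *	ID: 51
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *		...
 *
 *	print fmt: "prev_comm=%s ...", ...
 */
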
static void f_stop(struct seq_file *m, void *p)
{
}

static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
	struct ftrace_event_call *call = inode->i_private;
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = call;

	return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->event.type);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static LIST_HEAD(event_subsystems);

static int subsystem_open(struct inode *inode, struct file *filp)
{
	struct event_subsystem *system = NULL;
	int ret;

	/* Make sure the system still exists */
	mutex_lock(&event_mutex);
	list_for_each_entry(system, &event_subsystems, list) {
		if (system == inode->i_private) {
			/* Don't open systems with no events */
			if (!system->nr_events) {
				system = NULL;
				break;
			}
			__get_system(system);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	if (system != inode->i_private)
		return -ENODEV;

	ret = tracing_open_generic(inode, filp);
	if (ret < 0)
		put_system(system);

	return ret;
}

static int subsystem_release(struct inode *inode, struct file *file)
{
	struct event_subsystem *system = inode->i_private;

	if (system)
		put_system(system);

	return 0;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = trace_format_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = subsystem_open,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
	.llseek = default_llseek,
};

static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			__get_system(system);
			system->nr_events++;
			return system->entry;
		}
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->nr_events = 1;
	system->ref_count = 1;
	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, system->entry,
			  (void *)system->name,
			  &ftrace_system_enable_fops);

	return system->entry;
}

static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct list_head *head;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->class->system, d_events);

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->class->reg)
		trace_create_file("enable", 0644, call->dir, call,
				  enable);

#ifdef CONFIG_PERF_EVENTS
	if (call->event.type && call->class->reg)
		trace_create_file("id", 0444, call->dir, call,
				  id);
#endif

	/*
	 * Other events may have the same class. Only update
	 * the fields if they are not already defined.
	 */
	head = trace_get_fields(call);
	if (list_empty(head)) {
		ret = call->class->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
	}
	trace_create_file("filter", 0644, call->dir, call,
			  filter);

	trace_create_file("format", 0444, call->dir, call,
			  format);

	return 0;
}

static int
__trace_add_event_call(struct ftrace_event_call *call, struct module *mod,
		       const struct file_operations *id,
		       const struct file_operations *enable,
		       const struct file_operations *filter,
		       const struct file_operations *format)
{
	struct dentry *d_events;
	int ret;

	/* The linker may leave blanks */
	if (!call->name)
		return -EINVAL;

	if (call->class->raw_init) {
		ret = call->class->raw_init(call);
		if (ret < 0) {
			if (ret != -ENOSYS)
				pr_warning("Could not initialize trace events/%s\n",
					   call->name);
			return ret;
		}
	}

	d_events = event_trace_events_dir();
	if (!d_events)
		return -ENOENT;

	ret = event_create_dir(call, d_events, id, enable, filter, format);
	if (!ret)
		list_add(&call->list, &ftrace_events);
	call->mod = mod;

	return ret;
}

/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __trace_add_event_call(call, NULL, &ftrace_event_id_fops,
				     &ftrace_enable_fops,
				     &ftrace_event_filter_fops,
				     &ftrace_event_format_fops);
	mutex_unlock(&event_mutex);
	return ret;
}

static void remove_subsystem_dir(const char *name)
{
	struct event_subsystem *system;

	if (strcmp(name, TRACE_SYSTEM) == 0)
		return;

	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			if (!--system->nr_events) {
				debugfs_remove_recursive(system->entry);
				list_del(&system->list);
				__put_system(system);
			}
			break;
		}
	}
}

/*
 * Must be called with both event_mutex and trace_event_mutex held.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
	ftrace_event_enable_disable(call, 0);
	if (call->event.funcs)
		__unregister_ftrace_event(&call->event);
	debugfs_remove_recursive(call->dir);
	list_del(&call->list);
	trace_destroy_fields(call);
	destroy_preds(call);
	remove_subsystem_dir(call->class->system);
}

/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
	mutex_lock(&event_mutex);
	down_write(&trace_event_mutex);
	__trace_remove_event_call(call);
	up_write(&trace_event_mutex);
	mutex_unlock(&event_mutex);
}

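/*
 * Usage sketch (hypothetical, not part of this file): dynamic callers such
 * as kprobe-based events pair the two entry points above around the life
 * of an ftrace_event_call they built themselves.
 */
static __maybe_unused void example_dynamic_event(struct ftrace_event_call *call)
{
	if (trace_add_event_call(call))
		return;		/* directory/init failed, nothing registered */

	/* ... event is now live under debugfs events/<system>/<name> ... */

	trace_remove_event_call(call);
}
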
#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call **call, **start, **end;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	file_ops = trace_create_file_ops(mod);
	if (!file_ops)
		return;

	for_each_event(call, start, end) {
		__trace_add_event_call(*call, mod,
				       &file_ops->id, &file_ops->enable,
				       &file_ops->filter, &file_ops->format);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			__trace_remove_event_call(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}

#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call *__start_ftrace_events[];
extern struct ftrace_event_call *__stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

	return 1;
}
__setup("trace_event=", setup_trace_event);

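/*
 * Example (hypothetical event names): enabling events from the boot
 * command line; the comma-separated list is split with strsep() in
 * event_trace_init() below:
 *
 *	trace_event=sched:sched_switch,irq
 */
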
static __init int event_trace_init(void)
{
	struct ftrace_event_call **call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;
	char *buf = bootup_event_buf;
	char *token;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	if (trace_define_common_fields())
		pr_warning("tracing: Failed to allocate common fields");

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		__trace_add_event_call(*call, NULL, &ftrace_event_id_fops,
				       &ftrace_enable_fops,
				       &ftrace_event_filter_fops,
				       &ftrace_event_format_fops);
	}

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;
		if (!*token)
			continue;

		ret = ftrace_set_clr_event(token, 1);
		if (ret)
			pr_warning("Failed to enable trace event: %s\n", token);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a probe */
		if (!call->class || !call->class->probe)
			continue;

/*
 * Testing syscall events here is pretty useless, but
 * we still do it if configured. But this is time consuming.
 * What we really need is a user thread to perform the
 * syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->class->system &&
		    strcmp(call->class->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->flags & TRACE_EVENT_FL_ENABLED) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(call, 1);
		event_test_stuff();
		ftrace_event_enable_disable(call, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);

 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	int ret;

	ret = register_ftrace_function(&trace_ops);
	if (WARN_ON(ret < 0)) {
		pr_info("Failed to enable function tracer for event tests\n");
		return;
	}
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}

#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif /* CONFIG_FTRACE_STARTUP_TEST */