/**
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf
 * @author Robert Richter <robert.richter@amd.com>
 *
 * Modified by Aravind Menon for Xen
 * These modifications are:
 * Copyright (C) 2005 Hewlett-Packard Co.
 *
 * This is the core of the buffer management. Each
 * CPU buffer is processed and entered into the
 * global event buffer. Such processing is necessary
 * in several circumstances, mentioned below.
 *
 * The processing does the job of converting the
 * transitory EIP value into a persistent dentry/offset
 * value that the profiler can record at its leisure.
 *
 * See fs/dcookies.c for a description of the dentry/offset
 * objects.
 */
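
/* The notifiers registered below trigger that processing: when a task
 * exits, when an executable mapping is about to be torn down by munmap,
 * and when a kernel module is loaded.
 */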
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/dcookies.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/gfp.h>

#include "oprofile_stats.h"
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
static LIST_HEAD(dying_tasks);
static LIST_HEAD(dead_tasks);
static cpumask_var_t marked_cpus;
static DEFINE_SPINLOCK(task_mortuary);
static void process_task_mortuary(void);
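
/* Xen: per-CPU record of which guest domain the samples currently being
 * processed belong to. COORDINATOR_DOMAIN marks samples belonging to the
 * profiling (coordinator) domain itself, for which no DOMAIN_SWITCH
 * record is needed.
 */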
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, current_domain) = COORDINATOR_DOMAIN;
/* Take ownership of the task struct and place it on the
 * list for processing. Only after two full buffer syncs
 * does the task eventually get freed, because by then
 * we are sure we will not reference it again.
 * Can be invoked from softirq via RCU callback due to
 * call_rcu() of the task struct, hence the _irqsave.
 */
static int
task_free_notify(struct notifier_block *self, unsigned long val, void *data)
{
	unsigned long flags;
	struct task_struct *task = data;

	spin_lock_irqsave(&task_mortuary, flags);
	list_add(&task->tasks, &dying_tasks);
	spin_unlock_irqrestore(&task_mortuary, flags);

	return NOTIFY_OK;
}
/* The task is on its way out. A sync of the buffer means we can catch
 * any remaining samples for this task.
 */
static int
task_exit_notify(struct notifier_block *self, unsigned long val, void *data)
{
	/* To avoid latency problems, we only process the current CPU,
	 * hoping that most samples for the task are on this CPU.
	 */
	sync_buffer(raw_smp_processor_id());
	return 0;
}
/* The task is about to try a do_munmap(). We peek at what it's going to
 * do, and if it's an executable region, process the samples first, so
 * we don't lose any. This does not have to be exact; it is a
 * quality-of-implementation (QoI) issue only.
 */
static int
munmap_notify(struct notifier_block *self, unsigned long val, void *data)
{
	unsigned long addr = (unsigned long)data;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *mpnt;

	down_read(&mm->mmap_sem);

	mpnt = find_vma(mm, addr);
	if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
		up_read(&mm->mmap_sem);
		/* To avoid latency problems, we only process the current CPU,
		 * hoping that most samples for the task are on this CPU.
		 */
		sync_buffer(raw_smp_processor_id());
		return 0;
	}

	up_read(&mm->mmap_sem);
	return 0;
}
/* We need to be told about new modules so we don't attribute samples to a
 * previously loaded module, or drop them on the floor.
 */
static int
module_load_notify(struct notifier_block *self, unsigned long val, void *data)
{
#ifdef CONFIG_MODULES
	if (val != MODULE_STATE_COMING)
		return 0;

	/* FIXME: should we process all CPU buffers ? */
	mutex_lock(&buffer_mutex);
	add_event_entry(ESCAPE_CODE);
	add_event_entry(MODULE_LOADED_CODE);
	mutex_unlock(&buffer_mutex);
#endif
	return 0;
}
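
/* Notifier blocks hooking the events described above; they are registered
 * in sync_start() and removed again in sync_stop().
 */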
static struct notifier_block task_free_nb = {
	.notifier_call = task_free_notify,
};

static struct notifier_block task_exit_nb = {
	.notifier_call = task_exit_notify,
};

static struct notifier_block munmap_nb = {
	.notifier_call = munmap_notify,
};

static struct notifier_block module_load_nb = {
	.notifier_call = module_load_notify,
};
static void free_all_tasks(void)
{
	/* make sure we don't leak task structs */
	process_task_mortuary();
	process_task_mortuary();
}
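
/* Set up the notifier hooks and the cpumask used to track which CPU
 * buffers have been synced. Returns 0 on success or a negative errno,
 * unwinding any registrations already made on failure.
 */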
int sync_start(void)
{
	int err;
	int cpu;

	for_each_online_cpu(cpu)
		per_cpu(current_domain, cpu) = COORDINATOR_DOMAIN;

	if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
		return -ENOMEM;

	err = task_handoff_register(&task_free_nb);
	if (err)
		goto out1;
	err = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb);
	if (err)
		goto out2;
	err = profile_event_register(PROFILE_MUNMAP, &munmap_nb);
	if (err)
		goto out3;
	err = register_module_notifier(&module_load_nb);
	if (err)
		goto out4;
out:
	return err;
out4:
	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
out3:
	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
out2:
	task_handoff_unregister(&task_free_nb);
	free_all_tasks();
out1:
	free_cpumask_var(marked_cpus);
	goto out;
}
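
/* Undo everything sync_start() set up, making sure the notifiers are gone
 * before the remaining task structs and the cpumask are released.
 */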
void sync_stop(void)
{
	unregister_module_notifier(&module_load_nb);
	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
	task_handoff_unregister(&task_free_nb);
	barrier();			/* do all of the above first */

	free_all_tasks();
	free_cpumask_var(marked_cpus);
}
/* Optimisation. We can manage without taking the dcookie sem
 * because we cannot reach this code without at least one
 * dcookie user still being registered (namely, the reader
 * of the event buffer). */
static inline unsigned long fast_get_dcookie(struct path *path)
{
	unsigned long cookie = NO_COOKIE;

	if (path->dentry->d_flags & DCACHE_COOKIE)
		return (unsigned long)path->dentry;

	get_dcookie(path, &cookie);
	return cookie;
}
/* Look up the dcookie for the task's first VM_EXECUTABLE mapping,
 * which corresponds loosely to "application name". This is
 * not strictly necessary but allows oprofile to associate
 * shared-library samples with particular applications.
 */
static unsigned long get_exec_dcookie(struct mm_struct *mm)
{
	unsigned long cookie = NO_COOKIE;
	struct vm_area_struct *vma;

	if (!mm)
		return cookie;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!vma->vm_file)
			continue;
		if (!(vma->vm_flags & VM_EXECUTABLE))
			continue;
		cookie = fast_get_dcookie(&vma->vm_file->f_path);
		break;
	}

	return cookie;
}
/* Convert the EIP value of a sample into a persistent dentry/offset
 * pair that can then be added to the global event buffer. We make
 * sure to do this lookup before a mm->mmap modification happens so
 * we don't lose track.
 */
static unsigned long
lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
{
	unsigned long cookie = NO_COOKIE;
	struct vm_area_struct *vma;

	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
		if (addr < vma->vm_start || addr >= vma->vm_end)
			continue;

		if (vma->vm_file) {
			cookie = fast_get_dcookie(&vma->vm_file->f_path);
			*offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
				vma->vm_start;
		} else {
			/* must be an anonymous map */
			*offset = addr;
		}

		break;
	}

	if (!vma)
		cookie = INVALID_COOKIE;

	return cookie;
}
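
/* Records in the event buffer are escape sequences: ESCAPE_CODE followed
 * by a code word (CPU_SWITCH_CODE, COOKIE_SWITCH_CODE, ...) and its
 * payload. last_cookie caches the most recently emitted cookie so that
 * consecutive samples from the same mapping need only one COOKIE_SWITCH
 * record.
 */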
static unsigned long last_cookie = INVALID_COOKIE;
static void add_cpu_switch(int i)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CPU_SWITCH_CODE);
	add_event_entry(i);
	last_cookie = INVALID_COOKIE;
}
static void add_cpu_mode_switch(unsigned int cpu_mode)
{
	add_event_entry(ESCAPE_CODE);
	switch (cpu_mode) {
	case CPU_MODE_USER:
		add_event_entry(USER_ENTER_SWITCH_CODE);
		break;
	case CPU_MODE_KERNEL:
		add_event_entry(KERNEL_ENTER_SWITCH_CODE);
		break;
	case CPU_MODE_XEN:
		add_event_entry(XEN_ENTER_SWITCH_CODE);
		break;
	default:
		break;
	}
}
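
/* Xen: samples from other domains are bracketed by DOMAIN_SWITCH records
 * so the daemon can attribute them to the right domain.
 */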
static void add_domain_switch(unsigned long domain_id)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(DOMAIN_SWITCH_CODE);
	add_event_entry(domain_id);
}
static void
add_user_ctx_switch(struct task_struct const *task, unsigned long cookie)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CTX_SWITCH_CODE);
	add_event_entry(task->pid);
	add_event_entry(cookie);
	/* Another code for daemon back-compat */
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CTX_TGID_CODE);
	add_event_entry(task->tgid);
}
static void add_cookie_switch(unsigned long cookie)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(COOKIE_SWITCH_CODE);
	add_event_entry(cookie);
}
static void add_trace_begin(void)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(TRACE_BEGIN_CODE);
}
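
/* Forward the extra data words attached to an escape entry to the event
 * buffer, converting the PC to a dcookie/offset when a user mm is
 * available, and falling back to the raw PC otherwise.
 */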
static void add_data(struct op_entry *entry, struct mm_struct *mm)
{
	unsigned long code, pc, val;
	unsigned long cookie;
	off_t offset;

	if (!op_cpu_buffer_get_data(entry, &code))
		return;
	if (!op_cpu_buffer_get_data(entry, &pc))
		return;
	if (!op_cpu_buffer_get_size(entry))
		return;

	if (mm) {
		cookie = lookup_dcookie(mm, pc, &offset);

		if (cookie == NO_COOKIE)
			offset = pc;
		if (cookie == INVALID_COOKIE) {
			atomic_inc(&oprofile_stats.sample_lost_no_mapping);
			offset = pc;
		}
		if (cookie != last_cookie) {
			add_cookie_switch(cookie);
			last_cookie = cookie;
		}
	} else
		offset = pc;

	add_event_entry(ESCAPE_CODE);
	add_event_entry(code);
	add_event_entry(offset);	/* Offset from Dcookie */

	while (op_cpu_buffer_get_data(entry, &val))
		add_event_entry(val);
}
static inline void add_sample_entry(unsigned long offset, unsigned long event)
{
	add_event_entry(offset);
	add_event_entry(event);
}
/*
 * Add a sample to the global event buffer. If possible the
 * sample is converted into a persistent dentry/offset pair
 * for later lookup from userspace. Return 0 on failure.
 */
static int
add_sample(struct mm_struct *mm, struct op_sample *s, int cpu_mode)
{
	unsigned long cookie;
	off_t offset;

	if (cpu_mode >= CPU_MODE_KERNEL) {
		add_sample_entry(s->eip, s->event);
		return 1;
	}

	/* add userspace sample */
	if (!mm) {
		atomic_inc(&oprofile_stats.sample_lost_no_mm);
		return 0;
	}

	cookie = lookup_dcookie(mm, s->eip, &offset);

	if (cookie == INVALID_COOKIE) {
		atomic_inc(&oprofile_stats.sample_lost_no_mapping);
		return 0;
	}

	if (cookie != last_cookie) {
		add_cookie_switch(cookie);
		last_cookie = cookie;
	}

	add_sample_entry(offset, s->event);

	return 1;
}
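
/* Helpers pairing get_task_mm()/down_read() with the matching
 * up_read()/mmput(), so sync_buffer() can walk task->mm->mmap safely.
 */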
static void release_mm(struct mm_struct *mm)
{
	if (!mm)
		return;
	up_read(&mm->mmap_sem);
	mmput(mm);
}
static struct mm_struct *take_tasks_mm(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);
	if (mm)
		down_read(&mm->mmap_sem);
	return mm;
}
static inline int is_code(unsigned long val)
{
	return val == ESCAPE_CODE;
}
/* Move tasks along towards death. Any tasks on dead_tasks
 * will definitely have no remaining references in any
 * CPU buffers at this point, because we use two lists,
 * and a task must already have gone through one full sync
 * to have reached dead_tasks.
 */
static void process_task_mortuary(void)
{
	unsigned long flags;
	LIST_HEAD(local_dead_tasks);
	struct task_struct *task;
	struct task_struct *ttask;

	spin_lock_irqsave(&task_mortuary, flags);

	list_splice_init(&dead_tasks, &local_dead_tasks);
	list_splice_init(&dying_tasks, &dead_tasks);

	spin_unlock_irqrestore(&task_mortuary, flags);

	list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
		list_del(&task->tasks);
		free_task(task);
	}
}
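
/* Mark this CPU's buffer as synced. Once every online CPU has been marked,
 * one full pass is complete and the task mortuary can be advanced.
 */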
static void mark_done(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, marked_cpus);

	for_each_online_cpu(i) {
		if (!cpumask_test_cpu(i, marked_cpus))
			return;
	}

	/* All CPUs have been processed at least once;
	 * we can process the mortuary once.
	 */
	process_task_mortuary();

	cpumask_clear(marked_cpus);
}
/* FIXME: this is not sufficient if we implement syscall barrier backtrace
 * traversal; the code switches to sb_sample_start at the first kernel
 * enter/exit switch, so we need a fifth state and some special handling
 * in sync_buffer().
 */
typedef enum {
	sb_bt_ignore = -2,
	sb_buffer_start,
	sb_bt_start,
	sb_sample_start,
} sync_buffer_state;
/* Sync one of the CPU's buffers into the global event buffer.
 * Here we need to go through each batch of samples punctuated
 * by context switch notes, taking the task's mmap_sem and doing
 * lookup in task->mm->mmap to convert EIP into dcookie/offset
 * values.
 */
void sync_buffer(int cpu)
{
	struct mm_struct *mm = NULL;
	struct mm_struct *oldmm;
	unsigned long val;
	struct task_struct *new;
	unsigned long cookie = 0;
	int cpu_mode = CPU_MODE_KERNEL;
	sync_buffer_state state = sb_buffer_start;
	unsigned int i;
	unsigned long available;
	unsigned long flags;
	struct op_entry entry;
	struct op_sample *sample;

	mutex_lock(&buffer_mutex);

	add_cpu_switch(cpu);

	/* We need to assign the first samples in this CPU buffer to the
	 * same domain that we were processing at the last sync_buffer.
	 */
	if (per_cpu(current_domain, cpu) != COORDINATOR_DOMAIN)
		add_domain_switch(per_cpu(current_domain, cpu));

	op_cpu_buffer_reset(cpu);
	available = op_cpu_buffer_entries(cpu);

	for (i = 0; i < available; ++i) {
		sample = op_cpu_buffer_read_entry(&entry, cpu);
		if (!sample)
			break;

		if (is_code(sample->eip)) {
			flags = sample->event;
			if (flags & TRACE_BEGIN) {
				state = sb_bt_start;
				add_trace_begin();
			}
			if (flags & KERNEL_CTX_SWITCH) {
				/* kernel/userspace switch */
				cpu_mode = flags & CPU_MODE_MASK;
				if (state == sb_buffer_start)
					state = sb_sample_start;
				add_cpu_mode_switch(cpu_mode);
			}
			if (flags & USER_CTX_SWITCH
			    && op_cpu_buffer_get_data(&entry, &val)) {
				/* userspace context switch */
				new = (struct task_struct *)val;
				oldmm = mm;
				release_mm(oldmm);
				mm = take_tasks_mm(new);
				if (mm != oldmm)
					cookie = get_exec_dcookie(mm);
				add_user_ctx_switch(new, cookie);
			}
			if ((flags & DOMAIN_SWITCH)
			    && op_cpu_buffer_get_data(&entry, &val)) {
				per_cpu(current_domain, cpu) = val;
				add_domain_switch(val);
			}
			if (op_cpu_buffer_get_size(&entry))
				add_data(&entry, mm);
			continue;
		}

		if (per_cpu(current_domain, cpu) != COORDINATOR_DOMAIN) {
			add_sample_entry(sample->eip, sample->event);
			continue;
		}

		if (state < sb_bt_start)
			/* ignore sample */
			continue;

		if (add_sample(mm, sample, cpu_mode))
			continue;

		/* ignore backtraces if failed to add a sample */
		if (state == sb_bt_start) {
			state = sb_bt_ignore;
			atomic_inc(&oprofile_stats.bt_lost_no_mapping);
		}
	}
	release_mm(mm);

	/* We reset the domain to COORDINATOR_DOMAIN at each CPU switch */
	if (per_cpu(current_domain, cpu) != COORDINATOR_DOMAIN)
		add_domain_switch(COORDINATOR_DOMAIN);

	mark_done(cpu);

	mutex_unlock(&buffer_mutex);
}
/* This function can be used to add a buffer's worth of data directly to
 * the event buffer. The buffer is assumed to be circular: entries are
 * taken starting at index start, ending at index stop, and wrapping at
 * index max.
 */
void oprofile_put_buff(unsigned long *buf, unsigned int start,
		       unsigned int stop, unsigned int max)
{
	int i;

	i = start;

	mutex_lock(&buffer_mutex);
	while (i != stop) {
		add_event_entry(buf[i++]);

		if (i >= max)
			i = 0;
	}
	mutex_unlock(&buffer_mutex);
}