#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
+#include <trace/fs.h>
#include <linux/precache.h>
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
+DEFINE_TRACE(fs_buffer_wait_start);
+DEFINE_TRACE(fs_buffer_wait_end);
+
inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
*/
void __wait_on_buffer(struct buffer_head * bh)
{
+ trace_fs_buffer_wait_start(bh);
wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
+ trace_fs_buffer_wait_end(bh);
}
EXPORT_SYMBOL(__wait_on_buffer);
#include <linux/mm.h>
#include <linux/eventpoll.h>
#include <linux/fs_struct.h>
+#include <trace/fs.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
current->stack_start = current->mm->start_stack;
+ trace_fs_exec(filename);
/* execve succeeded */
current->fs->in_exec = 0;
current->in_execve = 0;
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
+#include <trace/fs.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
static LIST_HEAD(formats);
static DEFINE_RWLOCK(binfmt_lock);
+/*
+ * Also used in compat.c.
+ */
+DEFINE_TRACE(fs_exec);
+
int __register_binfmt(struct linux_binfmt * fmt, int insert)
{
if (!fmt)
current->stack_start = current->mm->start_stack;
+ trace_fs_exec(filename);
/* execve succeeded */
current->fs->in_exec = 0;
current->in_execve = 0;
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/falloc.h>
+#include <trace/fs.h>
#include <asm/ioctls.h>
+DEFINE_TRACE(fs_ioctl);
+
/* So that the fiemap access checks can't overflow on 32 bit machines. */
#define FIEMAP_MAX_EXTENTS (UINT_MAX / sizeof(struct fiemap_extent))
if (!filp)
goto out;
+ trace_fs_ioctl(fd, cmd, arg);
+
error = security_file_ioctl(filp, cmd, arg);
if (error)
goto out_fput;
#include <linux/audit.h>
#include <linux/falloc.h>
#include <linux/fs_struct.h>
+#include <trace/fs.h>
+
+DEFINE_TRACE(fs_open);
+DEFINE_TRACE(fs_close);
int vfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
fsnotify_open(f->f_path.dentry);
fd_install(fd, f);
}
+ trace_fs_open(fd, tmp);
}
putname(tmp);
}
filp = fdt->fd[fd];
if (!filp)
goto out_unlock;
+ trace_fs_close(fd);
rcu_assign_pointer(fdt->fd[fd], NULL);
FD_CLR(fd, fdt->close_on_exec);
__put_unused_fd(files, fd);
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/splice.h>
+#include <trace/fs.h>
#include "read_write.h"
#include <asm/uaccess.h>
EXPORT_SYMBOL(generic_ro_fops);
+DEFINE_TRACE(fs_lseek);
+DEFINE_TRACE(fs_llseek);
+DEFINE_TRACE(fs_read);
+DEFINE_TRACE(fs_write);
+DEFINE_TRACE(fs_pread64);
+DEFINE_TRACE(fs_pwrite64);
+DEFINE_TRACE(fs_readv);
+DEFINE_TRACE(fs_writev);
+
/**
* generic_file_llseek_unlocked - lockless generic llseek implementation
* @file: file structure to seek on
if (res != (loff_t)retval)
retval = -EOVERFLOW; /* LFS: should only happen on 32 bit platforms */
}
+
+ trace_fs_lseek(fd, offset, origin);
+
fput_light(file, fput_needed);
bad:
return retval;
offset = vfs_llseek(file, ((loff_t) offset_high << 32) | offset_low,
origin);
+ trace_fs_llseek(fd, offset, origin);
+
retval = (int)offset;
if (offset >= 0) {
retval = -EFAULT;
if (file) {
loff_t pos = file_pos_read(file);
ret = vfs_read(file, buf, count, &pos);
+ trace_fs_read(fd, buf, count, ret);
file_pos_write(file, pos);
fput_light(file, fput_needed);
}
if (file) {
loff_t pos = file_pos_read(file);
ret = vfs_write(file, buf, count, &pos);
+ trace_fs_write(fd, buf, count, ret);
file_pos_write(file, pos);
fput_light(file, fput_needed);
}
file = fget_light(fd, &fput_needed);
if (file) {
ret = -ESPIPE;
- if (file->f_mode & FMODE_PREAD)
+ if (file->f_mode & FMODE_PREAD) {
ret = vfs_read(file, buf, count, &pos);
+ trace_fs_pread64(fd, buf, count, pos, ret);
+ }
+
fput_light(file, fput_needed);
}
file = fget_light(fd, &fput_needed);
if (file) {
ret = -ESPIPE;
- if (file->f_mode & FMODE_PWRITE)
+ if (file->f_mode & FMODE_PWRITE) {
ret = vfs_write(file, buf, count, &pos);
+ trace_fs_pwrite64(fd, buf, count, pos, ret);
+ }
fput_light(file, fput_needed);
}
if (file) {
loff_t pos = file_pos_read(file);
ret = vfs_readv(file, vec, vlen, &pos);
+ trace_fs_readv(fd, vec, vlen, ret);
file_pos_write(file, pos);
fput_light(file, fput_needed);
}
if (file) {
loff_t pos = file_pos_read(file);
ret = vfs_writev(file, vec, vlen, &pos);
+ trace_fs_writev(fd, vec, vlen, ret);
file_pos_write(file, pos);
fput_light(file, fput_needed);
}
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
+#include <trace/fs.h>
#include <asm/uaccess.h>
#define POLL_TABLE_FULL(table) \
((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))
+DEFINE_TRACE(fs_select);
+DEFINE_TRACE(fs_poll);
+
/*
* Ok, Peter made a complicated, but straightforward multiple_wait() function.
* I have rewritten this, taking some shortcuts: This code may not be easy to
file = fget_light(i, &fput_needed);
if (file) {
f_op = file->f_op;
+ trace_fs_select(i, end_time);
mask = DEFAULT_POLLMASK;
if (f_op && f_op->poll) {
wait_key_set(wait, in, out, bit);
file = fget_light(fd, &fput_needed);
mask = POLLNVAL;
if (file != NULL) {
+ trace_fs_poll(fd);
mask = DEFAULT_POLLMASK;
if (file->f_op && file->f_op->poll) {
if (pwait)
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
+#include <trace/net.h>
#include <linux/ethtool.h>
#include <net/net_namespace.h>
--- /dev/null
+#ifndef _TRACE_FILEMAP_H
+#define _TRACE_FILEMAP_H
+
+#include <linux/tracepoint.h>
+
+/*
+ * Tracepoints bracketing the sleep in wait_on_page_bit() (mm/filemap.c).
+ * A tracer can pair start/end events to measure how long a task blocks
+ * waiting for page flag bit_nr (e.g. PG_locked) to clear.
+ */
+DECLARE_TRACE(wait_on_page_start,
+ TP_PROTO(struct page *page, int bit_nr),
+ TP_ARGS(page, bit_nr));
+DECLARE_TRACE(wait_on_page_end,
+ TP_PROTO(struct page *page, int bit_nr),
+ TP_ARGS(page, bit_nr));
+
+#endif
--- /dev/null
+#ifndef _TRACE_FS_H
+#define _TRACE_FS_H
+
+#include <linux/buffer_head.h>
+#include <linux/time.h>
+#include <linux/tracepoint.h>
+
+/* Bracket the sleep on BH_Lock in __wait_on_buffer() (fs/buffer.c). */
+DECLARE_TRACE(fs_buffer_wait_start,
+ TP_PROTO(struct buffer_head *bh),
+ TP_ARGS(bh));
+DECLARE_TRACE(fs_buffer_wait_end,
+ TP_PROTO(struct buffer_head *bh),
+ TP_ARGS(bh));
+/* Fired on the execve() success path; filename is the new image path. */
+DECLARE_TRACE(fs_exec,
+ TP_PROTO(char *filename),
+ TP_ARGS(filename));
+/* Fired in sys_ioctl() before the security check and ioctl dispatch. */
+DECLARE_TRACE(fs_ioctl,
+ TP_PROTO(unsigned int fd, unsigned int cmd, unsigned long arg),
+ TP_ARGS(fd, cmd, arg));
+/* Fired once the new descriptor has been installed in the fd table. */
+DECLARE_TRACE(fs_open,
+ TP_PROTO(int fd, char *filename),
+ TP_ARGS(fd, filename));
+/* Fired in sys_close() just before the fd table entry is cleared. */
+DECLARE_TRACE(fs_close,
+ TP_PROTO(unsigned int fd),
+ TP_ARGS(fd));
+/* offset is the requested offset passed to sys_lseek(). */
+DECLARE_TRACE(fs_lseek,
+ TP_PROTO(unsigned int fd, long offset, unsigned int origin),
+ TP_ARGS(fd, offset, origin));
+/* offset is the resulting file position returned by vfs_llseek(). */
+DECLARE_TRACE(fs_llseek,
+ TP_PROTO(unsigned int fd, loff_t offset, unsigned int origin),
+ TP_ARGS(fd, offset, origin));
+
+/*
+ * Probes must be aware that __user * may be modified by concurrent userspace
+ * or kernel threads.
+ *
+ * For all of the following, ret is the value returned by the underlying
+ * vfs_* call (byte count on success, negative error code on failure).
+ */
+DECLARE_TRACE(fs_read,
+ TP_PROTO(unsigned int fd, char __user *buf, size_t count, ssize_t ret),
+ TP_ARGS(fd, buf, count, ret));
+DECLARE_TRACE(fs_write,
+ TP_PROTO(unsigned int fd, const char __user *buf, size_t count,
+ ssize_t ret),
+ TP_ARGS(fd, buf, count, ret));
+DECLARE_TRACE(fs_pread64,
+ TP_PROTO(unsigned int fd, char __user *buf, size_t count, loff_t pos,
+ ssize_t ret),
+ TP_ARGS(fd, buf, count, pos, ret));
+DECLARE_TRACE(fs_pwrite64,
+ TP_PROTO(unsigned int fd, const char __user *buf, size_t count,
+ loff_t pos, ssize_t ret),
+ TP_ARGS(fd, buf, count, pos, ret));
+DECLARE_TRACE(fs_readv,
+ TP_PROTO(unsigned long fd, const struct iovec __user *vec,
+ unsigned long vlen, ssize_t ret),
+ TP_ARGS(fd, vec, vlen, ret));
+DECLARE_TRACE(fs_writev,
+ TP_PROTO(unsigned long fd, const struct iovec __user *vec,
+ unsigned long vlen, ssize_t ret),
+ TP_ARGS(fd, vec, vlen, ret));
+/* Fired per descriptor examined by do_select(); end_time may be NULL. */
+DECLARE_TRACE(fs_select,
+ TP_PROTO(int fd, struct timespec *end_time),
+ TP_ARGS(fd, end_time));
+/* Fired per descriptor examined by do_pollfd(). */
+DECLARE_TRACE(fs_poll,
+ TP_PROTO(int fd),
+ TP_ARGS(fd));
+#endif
--- /dev/null
+#ifndef _TRACE_HUGETLB_H
+#define _TRACE_HUGETLB_H
+
+#include <linux/tracepoint.h>
+
+/*
+ * Tracepoints for the hugetlb page lifecycle (mm/hugetlb.c).
+ *
+ * hugetlb_page_grab and hugetlb_buddy_pgalloc may fire with page == NULL
+ * when the allocation fails; probes must check for NULL.
+ */
+DECLARE_TRACE(hugetlb_page_release,
+ TP_PROTO(struct page *page),
+ TP_ARGS(page));
+DECLARE_TRACE(hugetlb_page_grab,
+ TP_PROTO(struct page *page),
+ TP_ARGS(page));
+DECLARE_TRACE(hugetlb_buddy_pgalloc,
+ TP_PROTO(struct page *page),
+ TP_ARGS(page));
+DECLARE_TRACE(hugetlb_page_alloc,
+ TP_PROTO(struct page *page),
+ TP_ARGS(page));
+DECLARE_TRACE(hugetlb_page_free,
+ TP_PROTO(struct page *page),
+ TP_ARGS(page));
+/* ret is 0 on success or a negative error code. */
+DECLARE_TRACE(hugetlb_pages_reserve,
+ TP_PROTO(struct inode *inode, long from, long to, int ret),
+ TP_ARGS(inode, from, to, ret));
+DECLARE_TRACE(hugetlb_pages_unreserve,
+ TP_PROTO(struct inode *inode, long offset, long freed),
+ TP_ARGS(inode, offset, freed));
+
+#endif
--- /dev/null
+#ifndef _TRACE_IPC_H
+#define _TRACE_IPC_H
+
+#include <linux/tracepoint.h>
+
+/*
+ * Tracepoints for System V IPC object creation and dispatch.
+ *
+ * For the *_create events, id is the value returned by ipcget(): the new
+ * IPC identifier on success, or a negative error code on failure.  flags
+ * is the caller-supplied msgflg/semflg/shmflg.
+ */
+DECLARE_TRACE(ipc_msg_create,
+ TP_PROTO(long id, int flags),
+ TP_ARGS(id, flags));
+DECLARE_TRACE(ipc_sem_create,
+ TP_PROTO(long id, int flags),
+ TP_ARGS(id, flags));
+DECLARE_TRACE(ipc_shm_create,
+ TP_PROTO(long id, int flags),
+ TP_ARGS(id, flags));
+/* Fired on entry to sys_ipc() with the multiplexed call number. */
+DECLARE_TRACE(ipc_call,
+ TP_PROTO(unsigned int call, unsigned int first),
+ TP_ARGS(call, first));
+#endif
--- /dev/null
+#ifndef _TRACE_IPV4_H
+#define _TRACE_IPV4_H
+
+#include <linux/inetdevice.h>
+#include <linux/tracepoint.h>
+
+/* Fired when an IPv4 interface address is added to / removed from a device. */
+DECLARE_TRACE(ipv4_addr_add,
+ TP_PROTO(struct in_ifaddr *ifa),
+ TP_ARGS(ifa));
+DECLARE_TRACE(ipv4_addr_del,
+ TP_PROTO(struct in_ifaddr *ifa),
+ TP_ARGS(ifa));
+
+#endif
--- /dev/null
+#ifndef _TRACE_IPV6_H
+#define _TRACE_IPV6_H
+
+#include <net/if_inet6.h>
+#include <linux/tracepoint.h>
+
+/* Fired when an IPv6 interface address is added to / removed from a device. */
+DECLARE_TRACE(ipv6_addr_add,
+ TP_PROTO(struct inet6_ifaddr *ifa),
+ TP_ARGS(ifa));
+DECLARE_TRACE(ipv6_addr_del,
+ TP_PROTO(struct inet6_ifaddr *ifa),
+ TP_ARGS(ifa));
+
+#endif
--- /dev/null
+#ifndef _LTTNG_TRACE_IRQ_H
+#define _LTTNG_TRACE_IRQ_H
+
+#include <linux/kdebug.h>
+#include <linux/irqreturn.h>
+#include <linux/tracepoint.h>
+
+/* Used only through pointers here; avoid pulling in <linux/interrupt.h>. */
+struct irqaction;
+struct pt_regs;
+
+/*
+ * Fired around the irq action chain in handle_IRQ_event().
+ * action can be NULL if not available.
+ */
+DECLARE_TRACE(irq_entry,
+ TP_PROTO(unsigned int id, struct pt_regs *regs,
+ struct irqaction *action),
+ TP_ARGS(id, regs, action));
+/* retval is the combined IRQ_NONE/IRQ_HANDLED result of the chain. */
+DECLARE_TRACE(irq_exit,
+ TP_PROTO(irqreturn_t retval),
+ TP_ARGS(retval));
+
+/* Fired when softirq number nr is raised (marked pending). */
+DECLARE_TRACE(irq_softirq_raise,
+ TP_PROTO(unsigned int nr),
+ TP_ARGS(nr));
+
+#endif
--- /dev/null
+#ifndef _TRACE_KERNEL_H
+#define _TRACE_KERNEL_H
+
+#include <linux/tracepoint.h>
+
+/*
+ * Tracepoints in the printk path (kernel/printk.c).  retaddr is the
+ * caller's return address (_RET_IP_), identifying the printk call site.
+ * For kernel_vprintk, buf/len describe the formatted message buffer.
+ */
+DECLARE_TRACE(kernel_printk,
+ TP_PROTO(unsigned long retaddr),
+ TP_ARGS(retaddr));
+DECLARE_TRACE(kernel_vprintk,
+ TP_PROTO(unsigned long retaddr, char *buf, int len),
+ TP_ARGS(retaddr, buf, len));
+
+#endif
--- /dev/null
+#ifndef _TRACE_NET_H
+#define _TRACE_NET_H
+
+#include <linux/tracepoint.h>
+
+/* Fired on packet transmit (dev_queue_xmit path) and receive (netif_rx). */
+struct sk_buff;
+DECLARE_TRACE(net_dev_xmit,
+ TP_PROTO(struct sk_buff *skb),
+ TP_ARGS(skb));
+DECLARE_TRACE(net_dev_receive,
+ TP_PROTO(struct sk_buff *skb),
+ TP_ARGS(skb));
+
+/*
+ * Note these first 2 traces are actually in __napi_schedule and net_rx_action
+ * respectively. The former is in __napi_schedule because it uses at-most-once
+ * logic and placing it in the calling routine (napi_schedule) would produce
+ * countless trace events that were effectively no-ops. napi_poll is
+ * implemented in net_rx_action, because that's where we do our polling on
+ * devices. The last trace point is in napi_complete, right where you would
+ * think it would be.
+ */
+struct napi_struct;
+DECLARE_TRACE(net_napi_schedule,
+ TP_PROTO(struct napi_struct *n),
+ TP_ARGS(n));
+DECLARE_TRACE(net_napi_poll,
+ TP_PROTO(struct napi_struct *n),
+ TP_ARGS(n));
+/* Also exported (EXPORT_TRACEPOINT_SYMBOL_GPL) for use by modules. */
+DECLARE_TRACE(net_napi_complete,
+ TP_PROTO(struct napi_struct *n),
+ TP_ARGS(n));
+
+#endif
--- /dev/null
+#ifndef _TRACE_PAGE_ALLOC_H
+#define _TRACE_PAGE_ALLOC_H
+
+#include <linux/tracepoint.h>
+
+/*
+ * page_alloc : page can be NULL.
+ * order is the buddy-allocator order of the (de)allocation.
+ */
+DECLARE_TRACE(page_alloc,
+ TP_PROTO(struct page *page, unsigned int order),
+ TP_ARGS(page, order));
+DECLARE_TRACE(page_free,
+ TP_PROTO(struct page *page, unsigned int order),
+ TP_ARGS(page, order));
+
+#endif
--- /dev/null
+#ifndef _TRACE_SOCKET_H
+#define _TRACE_SOCKET_H
+
+#include <net/sock.h>
+#include <linux/tracepoint.h>
+
+/*
+ * ret is the byte count (or negative error) returned by the underlying
+ * __sock_sendmsg/__sock_recvmsg call, after any sync-kiocb wait.
+ */
+DECLARE_TRACE(socket_sendmsg,
+ TP_PROTO(struct socket *sock, struct msghdr *msg, size_t size, int ret),
+ TP_ARGS(sock, msg, size, ret));
+DECLARE_TRACE(socket_recvmsg,
+ TP_PROTO(struct socket *sock, struct msghdr *msg, size_t size, int flags,
+ int ret),
+ TP_ARGS(sock, msg, size, flags, ret));
+/* Fired in sys_socket() once the new socket has a file descriptor. */
+DECLARE_TRACE(socket_create,
+ TP_PROTO(struct socket *sock, int fd),
+ TP_ARGS(sock, fd));
+/*
+ * socket_call
+ *
+ * Fired on entry to sys_socketcall() with the multiplexed call number.
+ *
+ * TODO : This tracepoint should be expanded to cover each element of the
+ * switch in sys_socketcall().
+ */
+DECLARE_TRACE(socket_call,
+ TP_PROTO(int call, unsigned long a0),
+ TP_ARGS(call, a0));
+#endif
--- /dev/null
+#ifndef _TRACE_SWAP_H
+#define _TRACE_SWAP_H
+
+#include <linux/swap.h>
+#include <linux/tracepoint.h>
+
+/* Fired on major-fault swap-in; entry is the swap entry being read. */
+DECLARE_TRACE(swap_in,
+ TP_PROTO(struct page *page, swp_entry_t entry),
+ TP_ARGS(page, entry));
+/* Fired when a page is submitted for write-out to swap. */
+DECLARE_TRACE(swap_out,
+ TP_PROTO(struct page *page),
+ TP_ARGS(page));
+/* Fired when a swap area is activated (swapon) / deactivated (swapoff). */
+DECLARE_TRACE(swap_file_open,
+ TP_PROTO(struct file *file, char *filename),
+ TP_ARGS(file, filename));
+DECLARE_TRACE(swap_file_close,
+ TP_PROTO(struct file *file),
+ TP_ARGS(file));
+
+#endif
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>
+#include <trace/ipc.h>
#include <asm/current.h>
#include <asm/uaccess.h>
#define msg_unlock(msq) ipc_unlock(&(msq)->q_perm)
+DEFINE_TRACE(ipc_msg_create);
+
static void freeque(struct ipc_namespace *, struct kern_ipc_perm *);
static int newque(struct ipc_namespace *, struct ipc_params *);
#ifdef CONFIG_PROC_FS
struct ipc_namespace *ns;
struct ipc_ops msg_ops;
struct ipc_params msg_params;
+ long ret;
ns = current->nsproxy->ipc_ns;
msg_params.key = key;
msg_params.flg = msgflg;
- return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params);
+ ret = ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params);
+ trace_ipc_msg_create(ret, msgflg);
+ return ret;
}
static inline unsigned long
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>
+#include <trace/ipc.h>
#include <asm/uaccess.h>
#include "util.h"
#define sc_semopm sem_ctls[2]
#define sc_semmni sem_ctls[3]
+DEFINE_TRACE(ipc_sem_create);
+
void sem_init_ns(struct ipc_namespace *ns)
{
ns->sc_semmsl = SEMMSL;
struct ipc_namespace *ns;
struct ipc_ops sem_ops;
struct ipc_params sem_params;
+ long err;
ns = current->nsproxy->ipc_ns;
sem_params.flg = semflg;
sem_params.u.nsems = nsems;
- return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
+ err = ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
+ trace_ipc_sem_create(err, semflg);
+ return err;
}
/*
#include <linux/mount.h>
#include <linux/ipc_namespace.h>
#include <linux/ima.h>
+#include <trace/ipc.h>
#include <asm/uaccess.h>
static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;
+DEFINE_TRACE(ipc_shm_create);
+
#define shm_ids(ns) ((ns)->ids[IPC_SHM_IDS])
#define shm_unlock(shp) \
struct ipc_namespace *ns;
struct ipc_ops shm_ops;
struct ipc_params shm_params;
+ long err;
ns = current->nsproxy->ipc_ns;
shm_params.flg = shmflg;
shm_params.u.size = size;
- return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
+ err = ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
+ trace_ipc_shm_create(err, shmflg);
+ return err;
}
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
#include <linux/hash.h>
#include <linux/bootmem.h>
#include <trace/events/irq.h>
+#include <trace/irq.h>
#include "internals.h"
"but no thread function available.", irq, action->name);
}
+DEFINE_TRACE(irq_entry);
+DEFINE_TRACE(irq_exit);
+
/**
* handle_IRQ_event - irq action chain handler
* @irq: the interrupt number
irqreturn_t ret, retval = IRQ_NONE;
unsigned int status = 0;
+ trace_irq_entry(irq, NULL, action);
+
if (!(action->flags & IRQF_DISABLED))
local_irq_enable_in_hardirq();
add_interrupt_randomness(irq);
local_irq_disable();
+ trace_irq_exit(retval);
+
return retval;
}
#include <linux/kexec.h>
#include <linux/jhash.h>
#include <linux/device.h>
+#include <trace/kernel.h>
#include <asm/uaccess.h>
MINIMUM_CONSOLE_LOGLEVEL, /* minimum_console_loglevel */
DEFAULT_CONSOLE_LOGLEVEL, /* default_console_loglevel */
};
+EXPORT_SYMBOL_GPL(console_printk);
static int saved_console_loglevel = -1;
/* Flag: console code may call schedule() */
static int console_may_schedule;
+DEFINE_TRACE(kernel_printk);
+DEFINE_TRACE(kernel_vprintk);
+
#ifdef CONFIG_PRINTK
static char __log_buf[__LOG_BUF_LEN];
int r;
va_start(args, fmt);
+ trace_kernel_printk(_RET_IP_);
r = vprintk(fmt, args);
va_end(args);
printed_len += vscnprintf(printk_buf + printed_len,
sizeof(printk_buf) - printed_len, fmt, args);
+ trace_kernel_vprintk(_RET_IP_, printk_buf, printed_len);
p = printk_buf;
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/tick.h>
+#include <trace/irq.h>
#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
#include <linux/memcontrol.h>
#include <linux/precache.h>
#include <linux/mm_inline.h> /* for page_is_file_cache() */
+#include <trace/filemap.h>
#include "internal.h"
/*
#include <asm/mman.h>
+DEFINE_TRACE(wait_on_page_start);
+DEFINE_TRACE(wait_on_page_end);
+
/*
* Shared mappings implemented 30.11.1994. It's not fully working yet,
* though.
{
DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
+ trace_wait_on_page_start(page, bit_nr);
if (test_bit(bit_nr, &page->flags))
__wait_on_bit(page_waitqueue(page), &wait, sync_page,
TASK_UNINTERRUPTIBLE);
+ trace_wait_on_page_end(page, bit_nr);
}
EXPORT_SYMBOL(wait_on_page_bit);
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>
+#include <trace/hugetlb.h>
#include <asm/page.h>
#include <asm/pgtable.h>
*/
static DEFINE_SPINLOCK(hugetlb_lock);
+DEFINE_TRACE(hugetlb_page_release);
+DEFINE_TRACE(hugetlb_page_grab);
+DEFINE_TRACE(hugetlb_buddy_pgalloc);
+DEFINE_TRACE(hugetlb_page_alloc);
+DEFINE_TRACE(hugetlb_page_free);
+DEFINE_TRACE(hugetlb_pages_reserve);
+DEFINE_TRACE(hugetlb_pages_unreserve);
+
/*
* Region tracking -- allows tracking of reservations and instantiated pages
* across the pages in a mapping.
VM_BUG_ON(h->order >= MAX_ORDER);
+ trace_hugetlb_page_release(page);
h->nr_huge_pages--;
h->nr_huge_pages_node[page_to_nid(page)]--;
for (i = 0; i < pages_per_huge_page(h); i++) {
int nid = page_to_nid(page);
struct address_space *mapping;
+ trace_hugetlb_page_free(page);
mapping = (struct address_space *) page_private(page);
set_page_private(page, 0);
BUG_ON(page_count(page));
{
struct page *page;
- if (h->order >= MAX_ORDER)
- return NULL;
+ if (h->order >= MAX_ORDER) {
+ page = NULL;
+ goto end;
+ }
page = alloc_pages_exact_node(nid,
htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
if (page) {
if (arch_prepare_hugepage(page)) {
__free_pages(page, huge_page_order(h));
- return NULL;
+ page = NULL;
+ goto end;
}
prep_new_huge_page(h, page, nid);
}
-
+end:
+ trace_hugetlb_page_grab(page);
return page;
}
spin_lock(&hugetlb_lock);
if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
spin_unlock(&hugetlb_lock);
- return NULL;
+ page = NULL;
+ goto end;
} else {
h->nr_huge_pages++;
h->surplus_huge_pages++;
__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
}
spin_unlock(&hugetlb_lock);
-
+end:
+ trace_hugetlb_buddy_pgalloc(page);
return page;
}
vma_commit_reservation(h, vma, addr);
+ trace_hugetlb_page_alloc(page);
return page;
}
struct vm_area_struct *vma,
int acctflag)
{
- long ret, chg;
+ int ret = 0;
+ long chg;
struct hstate *h = hstate_inode(inode);
/*
* and filesystem quota without using reserves
*/
if (acctflag & VM_NORESERVE)
- return 0;
+ goto end;
/*
* Shared mappings base their reservation on the number of pages that
chg = region_chg(&inode->i_mapping->private_list, from, to);
else {
struct resv_map *resv_map = resv_map_alloc();
- if (!resv_map)
- return -ENOMEM;
+ if (!resv_map) {
+ ret = -ENOMEM;
+ goto end;
+ }
chg = to - from;
set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
}
- if (chg < 0)
- return chg;
+ if (chg < 0) {
+ ret = chg;
+ goto end;
+ }
/* There must be enough filesystem quota for the mapping */
- if (hugetlb_get_quota(inode->i_mapping, chg))
- return -ENOSPC;
+ if (hugetlb_get_quota(inode->i_mapping, chg)) {
+ ret = -ENOSPC;
+ goto end;
+ }
/*
* Check enough hugepages are available for the reservation.
ret = hugetlb_acct_memory(h, chg);
if (ret < 0) {
hugetlb_put_quota(inode->i_mapping, chg);
- return ret;
+ goto end;
}
/*
*/
if (!vma || vma->vm_flags & VM_MAYSHARE)
region_add(&inode->i_mapping->private_list, from, to);
- return 0;
+end:
+ trace_hugetlb_pages_reserve(inode, from, to, ret);
+ return ret;
}
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
struct hstate *h = hstate_inode(inode);
- long chg = region_truncate(&inode->i_mapping->private_list, offset);
+ long chg;
+ trace_hugetlb_pages_unreserve(inode, offset, freed);
+ chg = region_truncate(&inode->i_mapping->private_list, offset);
spin_lock(&inode->i_lock);
inode->i_blocks -= (blocks_per_huge_page(h) * freed);
spin_unlock(&inode->i_lock);
#include <linux/kallsyms.h>
#include <linux/swapops.h>
#include <linux/elf.h>
+#include <trace/swap.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include "internal.h"
+DEFINE_TRACE(swap_in);
+
#ifndef CONFIG_NEED_MULTIPLE_NODES
/* use the per-pgdat data instead for discontigmem - mbligh */
unsigned long max_mapnr;
/* Had to read the page from swap area: Major fault */
ret = VM_FAULT_MAJOR;
count_vm_event(PGMAJFAULT);
+ trace_swap_in(page, entry);
} else if (PageHWPoison(page)) {
ret = VM_FAULT_HWPOISON;
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <trace/events/kmem.h>
+#include <trace/page_alloc.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
int pageblock_order __read_mostly;
#endif
+DEFINE_TRACE(page_alloc);
+DEFINE_TRACE(page_free);
+
static void __free_pages_ok(struct page *page, unsigned int order);
/*
kmemcheck_free_shadow(page, order);
+ trace_page_free(page, order);
+
for (i = 0 ; i < (1 << order) ; ++i)
bad += free_pages_check(page + i);
if (bad)
kmemcheck_free_shadow(page, 0);
+ trace_page_free(page, 0);
+
if (PageAnon(page))
page->mapping = NULL;
if (free_pages_check(page))
}
return page;
got_pg:
+ trace_page_alloc(page, order);
if (kmemcheck_enabled)
kmemcheck_pagealloc_alloc(page, order, gfp_mask);
return page;
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
+#include <trace/swap.h>
#include <asm/pgtable.h>
+DEFINE_TRACE(swap_out);
+
static struct bio *get_swap_bio(gfp_t gfp_flags, pgoff_t index,
struct page *page, bio_end_io_t end_io)
{
#ifndef CONFIG_PRECACHE
set_page_writeback(page);
#endif
+ trace_swap_out(page);
unlock_page(page);
submit_bio(rw, bio);
out:
#include <linux/capability.h>
#include <linux/syscalls.h>
#include <linux/memcontrol.h>
+#include <trace/swap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <linux/swapops.h>
#include <linux/page_cgroup.h>
+DEFINE_TRACE(swap_file_open);
+DEFINE_TRACE(swap_file_close);
+
static DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
long nr_swap_pages;
swap_map = p->swap_map;
p->swap_map = NULL;
p->flags = 0;
+ trace_swap_file_close(swap_file);
preswap_flush_area(p - swap_info);
spin_unlock(&swap_lock);
mutex_unlock(&swapon_mutex);
} else {
swap_info[prev].next = p - swap_info;
}
+ trace_swap_file_open(swap_file, name);
preswap_init(p - swap_info);
spin_unlock(&swap_lock);
mutex_unlock(&swapon_mutex);
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
+#include <trace/net.h>
#include "net-sysfs.h"
#define NETDEV_HASHBITS 8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
+DEFINE_TRACE(net_dev_xmit);
+DEFINE_TRACE(net_dev_receive);
+DEFINE_TRACE(net_napi_schedule);
+DEFINE_TRACE(net_napi_poll);
+DEFINE_TRACE(net_napi_complete);
+EXPORT_TRACEPOINT_SYMBOL_GPL(net_napi_complete);
+
static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
}
gso:
+ trace_net_dev_xmit(skb);
/* Disable soft irqs for various locks below. Also
* stops preemption for RCU.
*/
__get_cpu_var(netdev_rx_stat).total++;
+ trace_net_dev_receive(skb);
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
skb->mac_len = skb->network_header - skb->mac_header;
{
unsigned long flags;
+ trace_net_napi_schedule(n);
+
local_irq_save(flags);
list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
__raise_softirq_irqoff(NET_RX_SOFTIRQ);
list_del(&n->poll_list);
smp_mb__before_clear_bit();
clear_bit(NAPI_STATE_SCHED, &n->state);
+ trace_net_napi_complete(n);
}
EXPORT_SYMBOL(__napi_complete);
*/
work = 0;
if (test_bit(NAPI_STATE_SCHED, &n->state)) {
+ trace_net_napi_poll(n);
work = n->poll(n, weight);
trace_napi_poll(n);
}
#include <net/ip_fib.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
+#include <trace/ipv4.h>
static struct ipv4_devconf ipv4_devconf = {
.data = {
[IFA_LABEL] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
};
+DEFINE_TRACE(ipv4_addr_add);
+DEFINE_TRACE(ipv4_addr_del);
+
static void rtmsg_ifa(int event, struct in_ifaddr *, struct nlmsghdr *, u32);
static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);
struct in_ifaddr **ifap1 = &ifa1->ifa_next;
while ((ifa = *ifap1) != NULL) {
+ trace_ipv4_addr_del(ifa);
if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
ifa1->ifa_scope <= ifa->ifa_scope)
last_prim = ifa;
}
ifa->ifa_flags |= IFA_F_SECONDARY;
}
+ trace_ipv4_addr_add(ifa);
}
if (!(ifa->ifa_flags & IFA_F_SECONDARY)) {
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <trace/ipv6.h>
/* Set to 3 to get tracing... */
#define ACONF_DEBUG 2
#define INFINITY_LIFE_TIME 0xFFFFFFFF
#define TIME_DELTA(a,b) ((unsigned long)((long)(a) - (long)(b)))
+DEFINE_TRACE(ipv6_addr_add);
+DEFINE_TRACE(ipv6_addr_del);
+
#ifdef CONFIG_SYSCTL
static void addrconf_sysctl_register(struct inet6_dev *idev);
static void addrconf_sysctl_unregister(struct inet6_dev *idev);
/* For caller */
in6_ifa_hold(ifa);
+ trace_ipv6_addr_add(ifa);
+
/* Add to big hash table */
hash = ipv6_addr_hash(addr);
in6_ifa_hold(ifp);
read_unlock_bh(&idev->lock);
+ trace_ipv6_addr_del(ifp);
ipv6_del_addr(ifp);
/* If the last address is deleted administratively,
#include <net/sock.h>
#include <linux/netfilter.h>
+#include <trace/socket.h>
static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
static DEFINE_PER_CPU(int, sockets_in_use) = 0;
+DEFINE_TRACE(socket_sendmsg);
+DEFINE_TRACE(socket_recvmsg);
+DEFINE_TRACE(socket_create);
+DEFINE_TRACE(socket_call);
+
/*
* Support routines.
* Move socket addresses back and forth across the kernel/user
ret = __sock_sendmsg(&iocb, sock, msg, size);
if (-EIOCBQUEUED == ret)
ret = wait_on_sync_kiocb(&iocb);
+ trace_socket_sendmsg(sock, msg, size, ret);
return ret;
}
int ret;
init_sync_kiocb(&iocb, NULL);
+
iocb.private = &siocb;
ret = __sock_recvmsg(&iocb, sock, msg, size, flags);
if (-EIOCBQUEUED == ret)
ret = wait_on_sync_kiocb(&iocb);
+ trace_socket_recvmsg(sock, msg, size, flags, ret);
return ret;
}
if (retval < 0)
goto out_release;
+ trace_socket_create(sock, retval);
out:
/* It may be already another descriptor 8) Not kernel problem. */
return retval;
a0 = a[0];
a1 = a[1];
+ trace_socket_call(call, a0);
+
switch (call) {
case SYS_SOCKET:
err = sys_socket(a0, a1, a[2]);