perf: Fix software event overflow, CVE-2011-2918
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 357ee8d..478ec5d 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -13,6 +13,7 @@
 #include <linux/mm.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
+#include <linux/idr.h>
 #include <linux/file.h>
 #include <linux/poll.h>
 #include <linux/slab.h>
@@ -21,7 +22,9 @@
 #include <linux/dcache.h>
 #include <linux/percpu.h>
 #include <linux/ptrace.h>
+#include <linux/reboot.h>
 #include <linux/vmstat.h>
+#include <linux/device.h>
 #include <linux/vmalloc.h>
 #include <linux/hardirq.h>
 #include <linux/rculist.h>
 #include <linux/kernel_stat.h>
 #include <linux/perf_event.h>
 #include <linux/ftrace_event.h>
+#include <linux/hw_breakpoint.h>
 
 #include <asm/irq_regs.h>
 
-static atomic_t nr_events __read_mostly;
+enum event_type_t {
+       EVENT_FLEXIBLE = 0x1,
+       EVENT_PINNED = 0x2,
+       EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
+};
+
+atomic_t perf_task_events __read_mostly;
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
 static atomic_t nr_task_events __read_mostly;
@@ -52,7 +62,8 @@ static struct srcu_struct pmus_srcu;
  */
 int sysctl_perf_event_paranoid __read_mostly = 1;
 
-int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
+/* Minimum for 512 kiB + 1 user control page */
+int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
 
 /*
  * max perf event sample rate
@@ -61,8 +72,24 @@ int sysctl_perf_event_sample_rate __read_mostly = 100000;
 
 static atomic64_t perf_event_id;
 
+static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
+                             enum event_type_t event_type);
+
+static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
+                            enum event_type_t event_type);
+
 void __weak perf_event_print_debug(void)       { }
 
+extern __weak const char *perf_pmu_name(void)
+{
+       return "pmu";
+}
+
+static inline u64 perf_clock(void)
+{
+       return local_clock();
+}
+
 void perf_pmu_disable(struct pmu *pmu)
 {
        int *count = this_cpu_ptr(pmu->pmu_disable_count);
@@ -77,23 +104,22 @@ void perf_pmu_enable(struct pmu *pmu)
                pmu->pmu_enable(pmu);
 }
 
+static DEFINE_PER_CPU(struct list_head, rotation_list);
+
+/*
+ * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
+ * because they're strictly cpu affine and rotate_start is called with IRQs
+ * disabled, while rotate_context is called from IRQ context.
+ */
 static void perf_pmu_rotate_start(struct pmu *pmu)
 {
        struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+       struct list_head *head = &__get_cpu_var(rotation_list);
 
-       if (hrtimer_active(&cpuctx->timer))
-               return;
-
-       __hrtimer_start_range_ns(&cpuctx->timer,
-                       ns_to_ktime(cpuctx->timer_interval), 0,
-                       HRTIMER_MODE_REL_PINNED, 0);
-}
-
-static void perf_pmu_rotate_stop(struct pmu *pmu)
-{
-       struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+       WARN_ON(!irqs_disabled());
 
-       hrtimer_cancel(&cpuctx->timer);
+       if (list_empty(&cpuctx->rotation_list))
+               list_add(&cpuctx->rotation_list, head);
 }
 
 static void get_ctx(struct perf_event_context *ctx)
@@ -128,6 +154,28 @@ static void unclone_ctx(struct perf_event_context *ctx)
        }
 }
 
+static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
+{
+       /*
+        * only top level events have the pid namespace they were created in
+        */
+       if (event->parent)
+               event = event->parent;
+
+       return task_tgid_nr_ns(p, event->ns);
+}
+
+static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
+{
+       /*
+        * only top level events have the pid namespace they were created in
+        */
+       if (event->parent)
+               event = event->parent;
+
+       return task_pid_nr_ns(p, event->ns);
+}
+
 /*
  * If we inherit events we want to return the parent event id
  * to userspace.
@@ -210,11 +258,6 @@ static void perf_unpin_context(struct perf_event_context *ctx)
        put_ctx(ctx);
 }
 
-static inline u64 perf_clock(void)
-{
-       return local_clock();
-}
-
 /*
  * Update the record of the current time in a context.
  */
@@ -226,6 +269,12 @@ static void update_context_time(struct perf_event_context *ctx)
        ctx->timestamp = now;
 }
 
+static u64 perf_event_time(struct perf_event *event)
+{
+       struct perf_event_context *ctx = event->ctx;
+       return ctx ? ctx->time : 0;
+}
+
 /*
  * Update the total_time_enabled and total_time_running fields for a event.
  */
@@ -239,7 +288,7 @@ static void update_event_times(struct perf_event *event)
                return;
 
        if (ctx->is_active)
-               run_end = ctx->time;
+               run_end = perf_event_time(event);
        else
                run_end = event->tstamp_stopped;
 
@@ -248,7 +297,7 @@ static void update_event_times(struct perf_event *event)
        if (event->state == PERF_EVENT_STATE_INACTIVE)
                run_end = event->tstamp_stopped;
        else
-               run_end = ctx->time;
+               run_end = perf_event_time(event);
 
        event->total_time_running = run_end - event->tstamp_running;
 }
@@ -307,11 +356,91 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
                ctx->nr_stat++;
 }
 
+/*
+ * Called at perf_event creation and when events are attached/detached from a
+ * group.
+ */
+static void perf_event__read_size(struct perf_event *event)
+{
+       int entry = sizeof(u64); /* value */
+       int size = 0;
+       int nr = 1;
+
+       if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+               size += sizeof(u64);
+
+       if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+               size += sizeof(u64);
+
+       if (event->attr.read_format & PERF_FORMAT_ID)
+               entry += sizeof(u64);
+
+       if (event->attr.read_format & PERF_FORMAT_GROUP) {
+               nr += event->group_leader->nr_siblings;
+               size += sizeof(u64);
+       }
+
+       size += entry * nr;
+       event->read_size = size;
+}
+
+static void perf_event__header_size(struct perf_event *event)
+{
+       struct perf_sample_data *data;
+       u64 sample_type = event->attr.sample_type;
+       u16 size = 0;
+
+       perf_event__read_size(event);
+
+       if (sample_type & PERF_SAMPLE_IP)
+               size += sizeof(data->ip);
+
+       if (sample_type & PERF_SAMPLE_ADDR)
+               size += sizeof(data->addr);
+
+       if (sample_type & PERF_SAMPLE_PERIOD)
+               size += sizeof(data->period);
+
+       if (sample_type & PERF_SAMPLE_READ)
+               size += event->read_size;
+
+       event->header_size = size;
+}
+
+static void perf_event__id_header_size(struct perf_event *event)
+{
+       struct perf_sample_data *data;
+       u64 sample_type = event->attr.sample_type;
+       u16 size = 0;
+
+       if (sample_type & PERF_SAMPLE_TID)
+               size += sizeof(data->tid_entry);
+
+       if (sample_type & PERF_SAMPLE_TIME)
+               size += sizeof(data->time);
+
+       if (sample_type & PERF_SAMPLE_ID)
+               size += sizeof(data->id);
+
+       if (sample_type & PERF_SAMPLE_STREAM_ID)
+               size += sizeof(data->stream_id);
+
+       if (sample_type & PERF_SAMPLE_CPU)
+               size += sizeof(data->cpu_entry);
+
+       event->id_header_size = size;
+}
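Worked example for the sizing helpers above: with read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID | PERF_FORMAT_GROUP on a leader with two siblings, perf_event__read_size() ends up with entry = 16 (value + id), nr = 3, and size = 8 (time_enabled) + 8 (the nr field) + 3 * 16 = 64 bytes. Caching the results in event->read_size, event->header_size and event->id_header_size lets perf_read_hw(), perf_prepare_sample() and the sideband outputs below use a precomputed size instead of rederiving the record layout every time.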
+
 static void perf_group_attach(struct perf_event *event)
 {
-       struct perf_event *group_leader = event->group_leader;
+       struct perf_event *group_leader = event->group_leader, *pos;
+
+       /*
+        * We can have double attach due to group movement in perf_event_open.
+        */
+       if (event->attach_state & PERF_ATTACH_GROUP)
+               return;
 
-       WARN_ON_ONCE(event->attach_state & PERF_ATTACH_GROUP);
        event->attach_state |= PERF_ATTACH_GROUP;
 
        if (group_leader == event)
@@ -323,6 +452,11 @@ static void perf_group_attach(struct perf_event *event)
 
        list_add_tail(&event->group_entry, &group_leader->sibling_list);
        group_leader->nr_siblings++;
+
+       perf_event__header_size(group_leader);
+
+       list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
+               perf_event__header_size(pos);
 }
 
 /*
@@ -381,7 +515,7 @@ static void perf_group_detach(struct perf_event *event)
        if (event->group_leader != event) {
                list_del_init(&event->group_entry);
                event->group_leader->nr_siblings--;
-               return;
+               goto out;
        }
 
        if (!list_empty(&event->group_entry))
@@ -400,6 +534,12 @@ static void perf_group_detach(struct perf_event *event)
                /* Inherit group flags from the previous leader */
                sibling->group_flags = event->group_flags;
        }
+
+out:
+       perf_event__header_size(event->group_leader);
+
+       list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
+               perf_event__header_size(tmp);
 }
 
 static inline int
@@ -413,6 +553,7 @@ event_sched_out(struct perf_event *event,
                  struct perf_cpu_context *cpuctx,
                  struct perf_event_context *ctx)
 {
+       u64 tstamp = perf_event_time(event);
        u64 delta;
        /*
         * An event which could not be activated because of
@@ -424,7 +565,7 @@ event_sched_out(struct perf_event *event,
            && !event_filter_match(event)) {
                delta = ctx->time - event->tstamp_stopped;
                event->tstamp_running += delta;
-               event->tstamp_stopped = ctx->time;
+               event->tstamp_stopped = tstamp;
        }
 
        if (event->state != PERF_EVENT_STATE_ACTIVE)
@@ -435,7 +576,7 @@ event_sched_out(struct perf_event *event,
                event->pending_disable = 0;
                event->state = PERF_EVENT_STATE_OFF;
        }
-       event->tstamp_stopped = ctx->time;
+       event->tstamp_stopped = tstamp;
        event->pmu->del(event, 0);
        event->oncpu = -1;
 
@@ -642,16 +783,33 @@ retry:
        raw_spin_unlock_irq(&ctx->lock);
 }
 
+#define MAX_INTERRUPTS (~0ULL)
+
+static void perf_log_throttle(struct perf_event *event, int enable);
+
 static int
 event_sched_in(struct perf_event *event,
                 struct perf_cpu_context *cpuctx,
                 struct perf_event_context *ctx)
 {
+       u64 tstamp = perf_event_time(event);
+
        if (event->state <= PERF_EVENT_STATE_OFF)
                return 0;
 
        event->state = PERF_EVENT_STATE_ACTIVE;
        event->oncpu = smp_processor_id();
+
+       /*
+        * Unthrottle events, since we scheduled we might have missed several
+        * ticks already, also for a heavily scheduling task there is little
+        * guarantee it'll get a tick in a timely manner.
+        */
+       if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
+               perf_log_throttle(event, 1);
+               event->hw.interrupts = 0;
+       }
+
        /*
         * The new state must be visible before we turn it on in the hardware:
         */
@@ -663,7 +821,9 @@ event_sched_in(struct perf_event *event,
                return -EAGAIN;
        }
 
-       event->tstamp_running += ctx->time - event->tstamp_stopped;
+       event->tstamp_running += tstamp - event->tstamp_stopped;
+
+       event->shadow_ctx_time = tstamp - ctx->timestamp;
 
        if (!is_software_event(event))
                cpuctx->active_oncpu++;
@@ -682,6 +842,8 @@ group_sched_in(struct perf_event *group_event,
 {
        struct perf_event *event, *partial_group = NULL;
        struct pmu *pmu = group_event->pmu;
+       u64 now = ctx->time;
+       bool simulate = false;
 
        if (group_event->state == PERF_EVENT_STATE_OFF)
                return 0;
@@ -710,11 +872,27 @@ group_error:
        /*
         * Groups can be scheduled in as one unit only, so undo any
         * partial group before returning:
+        * The events up to the failed event are scheduled out normally,
+        * tstamp_stopped will be updated.
+        *
+        * The failed events and the remaining siblings need to have
+        * their timings updated as if they had gone thru event_sched_in()
+        * and event_sched_out(). This is required to get consistent timings
+        * across the group. This also takes care of the case where the group
+        * could never be scheduled by ensuring tstamp_stopped is set to mark
+        * the time the event was actually stopped, such that time delta
+        * calculation in update_event_times() is correct.
         */
        list_for_each_entry(event, &group_event->sibling_list, group_entry) {
                if (event == partial_group)
-                       break;
-               event_sched_out(event, cpuctx, ctx);
+                       simulate = true;
+
+               if (simulate) {
+                       event->tstamp_running += now - event->tstamp_stopped;
+                       event->tstamp_stopped = now;
+               } else {
+                       event_sched_out(event, cpuctx, ctx);
+               }
        }
        event_sched_out(group_event, cpuctx, ctx);
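Note on the simulate branch above: for an event that never made it onto the PMU, update_event_times() later computes total_time_running as tstamp_stopped - tstamp_running. Applying tstamp_running += now - tstamp_stopped and then tstamp_stopped = now leaves that difference unchanged (the failed attempt contributes no running time), while advancing tstamp_stopped to 'now' so that the next event_sched_in()/event_sched_out() pair accounts only for its own interval.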
 
@@ -757,11 +935,13 @@ static int group_can_go_on(struct perf_event *event,
 static void add_event_to_ctx(struct perf_event *event,
                               struct perf_event_context *ctx)
 {
+       u64 tstamp = perf_event_time(event);
+
        list_add_event(event, ctx);
        perf_group_attach(event);
-       event->tstamp_enabled = ctx->time;
-       event->tstamp_running = ctx->time;
-       event->tstamp_stopped = ctx->time;
+       event->tstamp_enabled = tstamp;
+       event->tstamp_running = tstamp;
+       event->tstamp_stopped = tstamp;
 }
 
 /*
@@ -796,7 +976,7 @@ static void __perf_install_in_context(void *info)
 
        add_event_to_ctx(event, ctx);
 
-       if (event->cpu != -1 && event->cpu != smp_processor_id())
+       if (!event_filter_match(event))
                goto unlock;
 
        /*
@@ -901,14 +1081,13 @@ static void __perf_event_mark_enabled(struct perf_event *event,
                                        struct perf_event_context *ctx)
 {
        struct perf_event *sub;
+       u64 tstamp = perf_event_time(event);
 
        event->state = PERF_EVENT_STATE_INACTIVE;
-       event->tstamp_enabled = ctx->time - event->total_time_enabled;
+       event->tstamp_enabled = tstamp - event->total_time_enabled;
        list_for_each_entry(sub, &event->sibling_list, group_entry) {
-               if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
-                       sub->tstamp_enabled =
-                               ctx->time - sub->total_time_enabled;
-               }
+               if (sub->state >= PERF_EVENT_STATE_INACTIVE)
+                       sub->tstamp_enabled = tstamp - sub->total_time_enabled;
        }
 }
 
@@ -941,7 +1120,7 @@ static void __perf_event_enable(void *info)
                goto unlock;
        __perf_event_mark_enabled(event, ctx);
 
-       if (event->cpu != -1 && event->cpu != smp_processor_id())
+       if (!event_filter_match(event))
                goto unlock;
 
        /*
@@ -1043,7 +1222,7 @@ static int perf_event_refresh(struct perf_event *event, int refresh)
        /*
         * not supported on inherited events
         */
-       if (event->attr.inherit)
+       if (event->attr.inherit || !is_sampling_event(event))
                return -EINVAL;
 
        atomic_add(refresh, &event->event_limit);
@@ -1052,12 +1231,6 @@ static int perf_event_refresh(struct perf_event *event, int refresh)
        return 0;
 }
 
-enum event_type_t {
-       EVENT_FLEXIBLE = 0x1,
-       EVENT_PINNED = 0x2,
-       EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
-};
-
 static void ctx_sched_out(struct perf_event_context *ctx,
                          struct perf_cpu_context *cpuctx,
                          enum event_type_t event_type)
@@ -1065,6 +1238,7 @@ static void ctx_sched_out(struct perf_event_context *ctx,
        struct perf_event *event;
 
        raw_spin_lock(&ctx->lock);
+       perf_pmu_disable(ctx->pmu);
        ctx->is_active = 0;
        if (likely(!ctx->nr_events))
                goto out;
@@ -1083,6 +1257,7 @@ static void ctx_sched_out(struct perf_event_context *ctx,
                        group_sched_out(event, cpuctx, ctx);
        }
 out:
+       perf_pmu_enable(ctx->pmu);
        raw_spin_unlock(&ctx->lock);
 }
 
@@ -1250,13 +1425,11 @@ void perf_event_context_sched_out(struct task_struct *task, int ctxn,
  * accessing the event control register. If a NMI hits, then it will
  * not restart the event.
  */
-void perf_event_task_sched_out(struct task_struct *task,
-                              struct task_struct *next)
+void __perf_event_task_sched_out(struct task_struct *task,
+                                struct task_struct *next)
 {
        int ctxn;
 
-       perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
-
        for_each_task_context_nr(ctxn)
                perf_event_context_sched_out(task, ctxn, next);
 }
@@ -1279,14 +1452,6 @@ static void task_ctx_sched_out(struct perf_event_context *ctx,
 /*
  * Called with IRQs disabled
  */
-static void __perf_event_task_sched_out(struct perf_event_context *ctx)
-{
-       task_ctx_sched_out(ctx, EVENT_ALL);
-}
-
-/*
- * Called with IRQs disabled
- */
 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
                              enum event_type_t event_type)
 {
@@ -1302,7 +1467,7 @@ ctx_pinned_sched_in(struct perf_event_context *ctx,
        list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
                if (event->state <= PERF_EVENT_STATE_OFF)
                        continue;
-               if (event->cpu != -1 && event->cpu != smp_processor_id())
+               if (!event_filter_match(event))
                        continue;
 
                if (group_can_go_on(event, cpuctx, 1))
@@ -1334,7 +1499,7 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
                 * Listen to the 'cpu' scheduling filter constraint
                 * of events:
                 */
-               if (event->cpu != -1 && event->cpu != smp_processor_id())
+               if (!event_filter_match(event))
                        continue;
 
                if (group_can_go_on(event, cpuctx, can_add_hw)) {
@@ -1400,6 +1565,7 @@ void perf_event_context_sched_in(struct perf_event_context *ctx)
        if (cpuctx->task_ctx == ctx)
                return;
 
+       perf_pmu_disable(ctx->pmu);
        /*
         * We want to keep the following priority order:
         * cpu pinned (that don't need to move), task pinned,
@@ -1418,6 +1584,7 @@ void perf_event_context_sched_in(struct perf_event_context *ctx)
         * cpu-context we got scheduled on is actually rotating.
         */
        perf_pmu_rotate_start(ctx->pmu);
+       perf_pmu_enable(ctx->pmu);
 }
 
 /*
@@ -1431,7 +1598,7 @@ void perf_event_context_sched_in(struct perf_event_context *ctx)
  * accessing the event control register. If a NMI hits, then it will
  * keep the event running.
  */
-void perf_event_task_sched_in(struct task_struct *task)
+void __perf_event_task_sched_in(struct task_struct *task)
 {
        struct perf_event_context *ctx;
        int ctxn;
@@ -1445,10 +1612,6 @@ void perf_event_task_sched_in(struct task_struct *task)
        }
 }
 
-#define MAX_INTERRUPTS (~0ULL)
-
-static void perf_log_throttle(struct perf_event *event, int enable);
-
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
 {
        u64 frequency = event->attr.sample_freq;
@@ -1559,7 +1722,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
                if (event->state != PERF_EVENT_STATE_ACTIVE)
                        continue;
 
-               if (event->cpu != -1 && event->cpu != smp_processor_id())
+               if (!event_filter_match(event))
                        continue;
 
                hwc = &event->hw;
@@ -1596,42 +1759,44 @@ static void rotate_ctx(struct perf_event_context *ctx)
 {
        raw_spin_lock(&ctx->lock);
 
-       /* Rotate the first entry last of non-pinned groups */
-       list_rotate_left(&ctx->flexible_groups);
+       /*
+        * Rotate the first entry last of non-pinned groups. Rotation might be
+        * disabled by the inheritance code.
+        */
+       if (!ctx->rotate_disable)
+               list_rotate_left(&ctx->flexible_groups);
 
        raw_spin_unlock(&ctx->lock);
 }
 
 /*
- * Cannot race with ->pmu_rotate_start() because this is ran from hardirq
- * context, and ->pmu_rotate_start() is called with irqs disabled (both are
- * cpu affine, so there are no SMP races).
+ * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
+ * because they're strictly cpu affine and rotate_start is called with IRQs
+ * disabled, while rotate_context is called from IRQ context.
  */
-static enum hrtimer_restart perf_event_context_tick(struct hrtimer *timer)
+static void perf_rotate_context(struct perf_cpu_context *cpuctx)
 {
-       enum hrtimer_restart restart = HRTIMER_NORESTART;
-       struct perf_cpu_context *cpuctx;
+       u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
        struct perf_event_context *ctx = NULL;
-       int rotate = 0;
-
-       cpuctx = container_of(timer, struct perf_cpu_context, timer);
+       int rotate = 0, remove = 1;
 
        if (cpuctx->ctx.nr_events) {
-               restart = HRTIMER_RESTART;
+               remove = 0;
                if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
                        rotate = 1;
        }
 
        ctx = cpuctx->task_ctx;
        if (ctx && ctx->nr_events) {
-               restart = HRTIMER_RESTART;
+               remove = 0;
                if (ctx->nr_events != ctx->nr_active)
                        rotate = 1;
        }
 
-       perf_ctx_adjust_freq(&cpuctx->ctx, cpuctx->timer_interval);
+       perf_pmu_disable(cpuctx->ctx.pmu);
+       perf_ctx_adjust_freq(&cpuctx->ctx, interval);
        if (ctx)
-               perf_ctx_adjust_freq(ctx, cpuctx->timer_interval);
+               perf_ctx_adjust_freq(ctx, interval);
 
        if (!rotate)
                goto done;
@@ -1649,9 +1814,24 @@ static enum hrtimer_restart perf_event_context_tick(struct hrtimer *timer)
                task_ctx_sched_in(ctx, EVENT_FLEXIBLE);
 
 done:
-       hrtimer_forward_now(timer, ns_to_ktime(cpuctx->timer_interval));
+       if (remove)
+               list_del_init(&cpuctx->rotation_list);
 
-       return restart;
+       perf_pmu_enable(cpuctx->ctx.pmu);
+}
+
+void perf_event_task_tick(void)
+{
+       struct list_head *head = &__get_cpu_var(rotation_list);
+       struct perf_cpu_context *cpuctx, *tmp;
+
+       WARN_ON(!irqs_disabled());
+
+       list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
+               if (cpuctx->jiffies_interval == 1 ||
+                               !(jiffies % cpuctx->jiffies_interval))
+                       perf_rotate_context(cpuctx);
+       }
 }
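Note: rotation is now driven from the scheduler tick rather than a per-context hrtimer. perf_pmu_rotate_start() queues the cpu context on the per-cpu rotation_list and perf_event_task_tick() walks it on every tick; with jiffies_interval == 1 (the usual value) a busy context rotates every tick, while for example jiffies_interval == 4 rotates on every fourth jiffy, i.e. every 4 ms at HZ=1000. Contexts that end up with no events set 'remove' and take themselves off the list, so idle PMUs add no per-tick cost.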
 
 static int event_enable_on_exec(struct perf_event *event,
@@ -1733,11 +1913,12 @@ static void __perf_event_read(void *info)
                return;
 
        raw_spin_lock(&ctx->lock);
-       update_context_time(ctx);
+       if (ctx->is_active)
+               update_context_time(ctx);
        update_event_times(event);
+       if (event->state == PERF_EVENT_STATE_ACTIVE)
+               event->pmu->read(event);
        raw_spin_unlock(&ctx->lock);
-
-       event->pmu->read(event);
 }
 
 static inline u64 perf_event_count(struct perf_event *event)
@@ -1759,7 +1940,13 @@ static u64 perf_event_read(struct perf_event *event)
                unsigned long flags;
 
                raw_spin_lock_irqsave(&ctx->lock, flags);
-               update_context_time(ctx);
+               /*
+                * may read while context is not active
+                * (e.g., thread is blocked), in that case
+                * we cannot update context time
+                */
+               if (ctx->is_active)
+                       update_context_time(ctx);
                update_event_times(event);
                raw_spin_unlock_irqrestore(&ctx->lock, flags);
        }
@@ -1825,8 +2012,7 @@ static int alloc_callchain_buffers(void)
         * accessed from NMI. Use a temporary manual per cpu allocation
         * until that gets sorted out.
         */
-       size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
-               num_possible_cpus();
+       size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
 
        entries = kzalloc(size, GFP_KERNEL);
        if (!entries)
@@ -2009,23 +2195,49 @@ alloc_perf_context(struct pmu *pmu, struct task_struct *task)
        return ctx;
 }
 
+static struct task_struct *
+find_lively_task_by_vpid(pid_t vpid)
+{
+       struct task_struct *task;
+       int err;
+
+       rcu_read_lock();
+       if (!vpid)
+               task = current;
+       else
+               task = find_task_by_vpid(vpid);
+       if (task)
+               get_task_struct(task);
+       rcu_read_unlock();
+
+       if (!task)
+               return ERR_PTR(-ESRCH);
+
+       /* Reuse ptrace permission checks for now. */
+       err = -EACCES;
+       if (!ptrace_may_access(task, PTRACE_MODE_READ))
+               goto errout;
+
+       return task;
+errout:
+       put_task_struct(task);
+       return ERR_PTR(err);
+
+}
+
 static struct perf_event_context *
-find_get_context(struct pmu *pmu, pid_t pid, int cpu)
+find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
 {
        struct perf_event_context *ctx;
        struct perf_cpu_context *cpuctx;
-       struct task_struct *task;
        unsigned long flags;
        int ctxn, err;
 
-       if (pid == -1 && cpu != -1) {
+       if (!task) {
                /* Must be root to operate on a CPU event: */
                if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
                        return ERR_PTR(-EACCES);
 
-               if (cpu < 0 || cpu >= nr_cpumask_bits)
-                       return ERR_PTR(-EINVAL);
-
                /*
                 * We could be clever and allow to attach a event to an
                 * offline CPU and activate it when the CPU comes up, but
@@ -2041,30 +2253,6 @@ find_get_context(struct pmu *pmu, pid_t pid, int cpu)
                return ctx;
        }
 
-       rcu_read_lock();
-       if (!pid)
-               task = current;
-       else
-               task = find_task_by_vpid(pid);
-       if (task)
-               get_task_struct(task);
-       rcu_read_unlock();
-
-       if (!task)
-               return ERR_PTR(-ESRCH);
-
-       /*
-        * Can't attach events to a dying task.
-        */
-       err = -ESRCH;
-       if (task->flags & PF_EXITING)
-               goto errout;
-
-       /* Reuse ptrace permission checks for now. */
-       err = -EACCES;
-       if (!ptrace_may_access(task, PTRACE_MODE_READ))
-               goto errout;
-
        err = -EINVAL;
        ctxn = pmu->task_ctx_nr;
        if (ctxn < 0)
@@ -2085,22 +2273,33 @@ retry:
 
                get_ctx(ctx);
 
-               if (cmpxchg(&task->perf_event_ctxp[ctxn], NULL, ctx)) {
-                       /*
-                        * We raced with some other task; use
-                        * the context they set.
-                        */
+               err = 0;
+               mutex_lock(&task->perf_event_mutex);
+               /*
+                * If it has already passed perf_event_exit_task(),
+                * we must see PF_EXITING; it takes this mutex too.
+                */
+               if (task->flags & PF_EXITING)
+                       err = -ESRCH;
+               else if (task->perf_event_ctxp[ctxn])
+                       err = -EAGAIN;
+               else
+                       rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
+               mutex_unlock(&task->perf_event_mutex);
+
+               if (unlikely(err)) {
                        put_task_struct(task);
                        kfree(ctx);
-                       goto retry;
+
+                       if (err == -EAGAIN)
+                               goto retry;
+                       goto errout;
                }
        }
 
-       put_task_struct(task);
        return ctx;
 
 errout:
-       put_task_struct(task);
        return ERR_PTR(err);
 }
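Note: the old cmpxchg() retry loop is replaced by installing the new context under task->perf_event_mutex. perf_event_exit_task() runs after PF_EXITING is set and takes the same mutex, so by the time the mutex is acquired here a task that has already gone through its perf exit is guaranteed to show PF_EXITING and the install is refused with -ESRCH, while a racing installer of the same context slot is detected as -EAGAIN and retried.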
 
@@ -2117,15 +2316,15 @@ static void free_event_rcu(struct rcu_head *head)
        kfree(event);
 }
 
-static void perf_pending_sync(struct perf_event *event);
 static void perf_buffer_put(struct perf_buffer *buffer);
 
 static void free_event(struct perf_event *event)
 {
-       perf_pending_sync(event);
+       irq_work_sync(&event->pending);
 
        if (!event->parent) {
-               atomic_dec(&nr_events);
+               if (event->attach_state & PERF_ATTACH_TASK)
+                       jump_label_dec(&perf_task_events);
                if (event->attr.mmap || event->attr.mmap_data)
                        atomic_dec(&nr_mmap_events);
                if (event->attr.comm)
@@ -2144,7 +2343,9 @@ static void free_event(struct perf_event *event)
        if (event->destroy)
                event->destroy(event);
 
-       put_ctx(event->ctx);
+       if (event->ctx)
+               put_ctx(event->ctx);
+
        call_rcu(&event->rcu_head, free_event_rcu);
 }
 
@@ -2178,11 +2379,6 @@ int perf_event_release_kernel(struct perf_event *event)
        raw_spin_unlock_irq(&ctx->lock);
        mutex_unlock(&ctx->mutex);
 
-       mutex_lock(&event->owner->perf_event_mutex);
-       list_del_init(&event->owner_entry);
-       mutex_unlock(&event->owner->perf_event_mutex);
-       put_task_struct(event->owner);
-
        free_event(event);
 
        return 0;
@@ -2195,35 +2391,44 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
 static int perf_release(struct inode *inode, struct file *file)
 {
        struct perf_event *event = file->private_data;
+       struct task_struct *owner;
 
        file->private_data = NULL;
 
-       return perf_event_release_kernel(event);
-}
-
-static int perf_event_read_size(struct perf_event *event)
-{
-       int entry = sizeof(u64); /* value */
-       int size = 0;
-       int nr = 1;
-
-       if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
-               size += sizeof(u64);
-
-       if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
-               size += sizeof(u64);
-
-       if (event->attr.read_format & PERF_FORMAT_ID)
-               entry += sizeof(u64);
-
-       if (event->attr.read_format & PERF_FORMAT_GROUP) {
-               nr += event->group_leader->nr_siblings;
-               size += sizeof(u64);
+       rcu_read_lock();
+       owner = ACCESS_ONCE(event->owner);
+       /*
+        * Matches the smp_wmb() in perf_event_exit_task(). If we observe
+        * !owner it means the list deletion is complete and we can indeed
+        * free this event, otherwise we need to serialize on
+        * owner->perf_event_mutex.
+        */
+       smp_read_barrier_depends();
+       if (owner) {
+               /*
+                * Since delayed_put_task_struct() also drops the last
+                * task reference we can safely take a new reference
+                * while holding the rcu_read_lock().
+                */
+               get_task_struct(owner);
        }
+       rcu_read_unlock();
 
-       size += entry * nr;
+       if (owner) {
+               mutex_lock(&owner->perf_event_mutex);
+               /*
+                * We have to re-check the event->owner field, if it is cleared
+                * we raced with perf_event_exit_task(), acquiring the mutex
+                * ensured they're done, and we can proceed with freeing the
+                * event.
+                */
+               if (event->owner)
+                       list_del_init(&event->owner_entry);
+               mutex_unlock(&owner->perf_event_mutex);
+               put_task_struct(owner);
+       }
 
-       return size;
+       return perf_event_release_kernel(event);
 }
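Race walk-through for the block above: perf_event_exit_task() (outside this excerpt) removes the event from the owner's list, issues the matching smp_wmb(), and only then clears event->owner, while holding owner->perf_event_mutex. Observing owner == NULL here therefore means the list removal is already complete; observing a non-NULL owner under rcu_read_lock() means the task_struct has not been freed yet, a reference can safely be taken, and acquiring owner->perf_event_mutex plus re-checking event->owner serialises against a concurrent exit.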
 
 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
@@ -2340,7 +2545,7 @@ perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
        if (event->state == PERF_EVENT_STATE_ERROR)
                return 0;
 
-       if (count < perf_event_read_size(event))
+       if (count < event->read_size)
                return -ENOSPC;
 
        WARN_ON_ONCE(event->ctx->parent_ctx);
@@ -2423,15 +2628,13 @@ static void perf_event_for_each(struct perf_event *event,
 static int perf_event_period(struct perf_event *event, u64 __user *arg)
 {
        struct perf_event_context *ctx = event->ctx;
-       unsigned long size;
        int ret = 0;
        u64 value;
 
-       if (!event->attr.sample_period)
+       if (!is_sampling_event(event))
                return -EINVAL;
 
-       size = copy_from_user(&value, arg, sizeof(value));
-       if (size != sizeof(value))
+       if (copy_from_user(&value, arg, sizeof(value)))
                return -EFAULT;
 
        if (!value)
@@ -3071,16 +3274,7 @@ void perf_event_wakeup(struct perf_event *event)
        }
 }
 
-/*
- * Pending wakeups
- *
- * Handle the case where we need to wakeup up from NMI (or rq->lock) context.
- *
- * The NMI bit means we cannot possibly take locks. Therefore, maintain a
- * single linked list and use cmpxchg() to add entries lockless.
- */
-
-static void perf_pending_event(struct perf_pending_entry *entry)
+static void perf_pending_event(struct irq_work *entry)
 {
        struct perf_event *event = container_of(entry,
                        struct perf_event, pending);
@@ -3096,89 +3290,6 @@ static void perf_pending_event(struct perf_pending_entry *entry)
        }
 }
 
-#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
-
-static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
-       PENDING_TAIL,
-};
-
-static void perf_pending_queue(struct perf_pending_entry *entry,
-                              void (*func)(struct perf_pending_entry *))
-{
-       struct perf_pending_entry **head;
-
-       if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
-               return;
-
-       entry->func = func;
-
-       head = &get_cpu_var(perf_pending_head);
-
-       do {
-               entry->next = *head;
-       } while (cmpxchg(head, entry->next, entry) != entry->next);
-
-       set_perf_event_pending();
-
-       put_cpu_var(perf_pending_head);
-}
-
-static int __perf_pending_run(void)
-{
-       struct perf_pending_entry *list;
-       int nr = 0;
-
-       list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
-       while (list != PENDING_TAIL) {
-               void (*func)(struct perf_pending_entry *);
-               struct perf_pending_entry *entry = list;
-
-               list = list->next;
-
-               func = entry->func;
-               entry->next = NULL;
-               /*
-                * Ensure we observe the unqueue before we issue the wakeup,
-                * so that we won't be waiting forever.
-                * -- see perf_not_pending().
-                */
-               smp_wmb();
-
-               func(entry);
-               nr++;
-       }
-
-       return nr;
-}
-
-static inline int perf_not_pending(struct perf_event *event)
-{
-       /*
-        * If we flush on whatever cpu we run, there is a chance we don't
-        * need to wait.
-        */
-       get_cpu();
-       __perf_pending_run();
-       put_cpu();
-
-       /*
-        * Ensure we see the proper queue state before going to sleep
-        * so that we do not miss the wakeup. -- see perf_pending_handle()
-        */
-       smp_rmb();
-       return event->pending.next == NULL;
-}
-
-static void perf_pending_sync(struct perf_event *event)
-{
-       wait_event(event->waitq, perf_not_pending(event));
-}
-
-void perf_event_do_pending(void)
-{
-       __perf_pending_run();
-}
-
 /*
  * We assume there is only KVM supporting the callbacks.
  * Later on, we might change it to a list if there is
@@ -3228,8 +3339,7 @@ static void perf_output_wakeup(struct perf_output_handle *handle)
 
        if (handle->nmi) {
                handle->event->pending_wakeup = 1;
-               perf_pending_queue(&handle->event->pending,
-                                  perf_pending_event);
+               irq_work_queue(&handle->event->pending);
        } else
                perf_event_wakeup(handle->event);
 }
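Note: the hand-rolled perf_pending_queue()/__perf_pending_run() machinery deleted above is replaced by the generic irq_work facility. event->pending is now a struct irq_work (initialised elsewhere in the patch, presumably via init_irq_work(&event->pending, perf_pending_event)); NMI-safe callers queue it with irq_work_queue(), and free_event() waits with irq_work_sync() instead of perf_pending_sync().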
@@ -3312,6 +3422,73 @@ __always_inline void perf_output_copy(struct perf_output_handle *handle,
        } while (len);
 }
 
+static void __perf_event_header__init_id(struct perf_event_header *header,
+                                        struct perf_sample_data *data,
+                                        struct perf_event *event)
+{
+       u64 sample_type = event->attr.sample_type;
+
+       data->type = sample_type;
+       header->size += event->id_header_size;
+
+       if (sample_type & PERF_SAMPLE_TID) {
+               /* namespace issues */
+               data->tid_entry.pid = perf_event_pid(event, current);
+               data->tid_entry.tid = perf_event_tid(event, current);
+       }
+
+       if (sample_type & PERF_SAMPLE_TIME)
+               data->time = perf_clock();
+
+       if (sample_type & PERF_SAMPLE_ID)
+               data->id = primary_event_id(event);
+
+       if (sample_type & PERF_SAMPLE_STREAM_ID)
+               data->stream_id = event->id;
+
+       if (sample_type & PERF_SAMPLE_CPU) {
+               data->cpu_entry.cpu      = raw_smp_processor_id();
+               data->cpu_entry.reserved = 0;
+       }
+}
+
+static void perf_event_header__init_id(struct perf_event_header *header,
+                                      struct perf_sample_data *data,
+                                      struct perf_event *event)
+{
+       if (event->attr.sample_id_all)
+               __perf_event_header__init_id(header, data, event);
+}
+
+static void __perf_event__output_id_sample(struct perf_output_handle *handle,
+                                          struct perf_sample_data *data)
+{
+       u64 sample_type = data->type;
+
+       if (sample_type & PERF_SAMPLE_TID)
+               perf_output_put(handle, data->tid_entry);
+
+       if (sample_type & PERF_SAMPLE_TIME)
+               perf_output_put(handle, data->time);
+
+       if (sample_type & PERF_SAMPLE_ID)
+               perf_output_put(handle, data->id);
+
+       if (sample_type & PERF_SAMPLE_STREAM_ID)
+               perf_output_put(handle, data->stream_id);
+
+       if (sample_type & PERF_SAMPLE_CPU)
+               perf_output_put(handle, data->cpu_entry);
+}
+
+static void perf_event__output_id_sample(struct perf_event *event,
+                                        struct perf_output_handle *handle,
+                                        struct perf_sample_data *sample)
+{
+       if (event->attr.sample_id_all)
+               __perf_event__output_id_sample(handle, sample);
+}
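These helpers implement attr.sample_id_all: when set, every non-SAMPLE record (LOST, READ, TASK, COMM, MMAP and THROTTLE below) gets the selected identifier fields appended after its fixed-size body. Worked example: with sample_type containing PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID | PERF_SAMPLE_CPU, perf_event__id_header_size() computes 8 + 8 + 8 + 8 = 32 bytes, which perf_event_header__init_id() adds to header->size and __perf_event__output_id_sample() writes out in that same order.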
+
 int perf_output_begin(struct perf_output_handle *handle,
                      struct perf_event *event, unsigned int size,
                      int nmi, int sample)
@@ -3319,6 +3496,7 @@ int perf_output_begin(struct perf_output_handle *handle,
        struct perf_buffer *buffer;
        unsigned long tail, offset, head;
        int have_lost;
+       struct perf_sample_data sample_data;
        struct {
                struct perf_event_header header;
                u64                      id;
@@ -3345,8 +3523,12 @@ int perf_output_begin(struct perf_output_handle *handle,
                goto out;
 
        have_lost = local_read(&buffer->lost);
-       if (have_lost)
-               size += sizeof(lost_event);
+       if (have_lost) {
+               lost_event.header.size = sizeof(lost_event);
+               perf_event_header__init_id(&lost_event.header, &sample_data,
+                                          event);
+               size += lost_event.header.size;
+       }
 
        perf_output_get_handle(handle);
 
@@ -3377,11 +3559,11 @@ int perf_output_begin(struct perf_output_handle *handle,
        if (have_lost) {
                lost_event.header.type = PERF_RECORD_LOST;
                lost_event.header.misc = 0;
-               lost_event.header.size = sizeof(lost_event);
                lost_event.id          = event->id;
                lost_event.lost        = local_xchg(&buffer->lost, 0);
 
                perf_output_put(handle, lost_event);
+               perf_event__output_id_sample(event, handle, &sample_data);
        }
 
        return 0;
@@ -3414,30 +3596,9 @@ void perf_output_end(struct perf_output_handle *handle)
        rcu_read_unlock();
 }
 
-static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
-{
-       /*
-        * only top level events have the pid namespace they were created in
-        */
-       if (event->parent)
-               event = event->parent;
-
-       return task_tgid_nr_ns(p, event->ns);
-}
-
-static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
-{
-       /*
-        * only top level events have the pid namespace they were created in
-        */
-       if (event->parent)
-               event = event->parent;
-
-       return task_pid_nr_ns(p, event->ns);
-}
-
 static void perf_output_read_one(struct perf_output_handle *handle,
-                                struct perf_event *event)
+                                struct perf_event *event,
+                                u64 enabled, u64 running)
 {
        u64 read_format = event->attr.read_format;
        u64 values[4];
@@ -3445,11 +3606,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
 
        values[n++] = perf_event_count(event);
        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
-               values[n++] = event->total_time_enabled +
+               values[n++] = enabled +
                        atomic64_read(&event->child_total_time_enabled);
        }
        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
-               values[n++] = event->total_time_running +
+               values[n++] = running +
                        atomic64_read(&event->child_total_time_running);
        }
        if (read_format & PERF_FORMAT_ID)
@@ -3462,7 +3623,8 @@ static void perf_output_read_one(struct perf_output_handle *handle,
  * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
  */
 static void perf_output_read_group(struct perf_output_handle *handle,
-                           struct perf_event *event)
+                           struct perf_event *event,
+                           u64 enabled, u64 running)
 {
        struct perf_event *leader = event->group_leader, *sub;
        u64 read_format = event->attr.read_format;
@@ -3472,10 +3634,10 @@ static void perf_output_read_group(struct perf_output_handle *handle,
        values[n++] = 1 + leader->nr_siblings;
 
        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
-               values[n++] = leader->total_time_enabled;
+               values[n++] = enabled;
 
        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
-               values[n++] = leader->total_time_running;
+               values[n++] = running;
 
        if (leader != event)
                leader->pmu->read(leader);
@@ -3500,13 +3662,35 @@ static void perf_output_read_group(struct perf_output_handle *handle,
        }
 }
 
+#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
+                                PERF_FORMAT_TOTAL_TIME_RUNNING)
+
 static void perf_output_read(struct perf_output_handle *handle,
                             struct perf_event *event)
 {
+       u64 enabled = 0, running = 0, now, ctx_time;
+       u64 read_format = event->attr.read_format;
+
+       /*
+        * compute total_time_enabled, total_time_running
+        * based on snapshot values taken when the event
+        * was last scheduled in.
+        *
+        * we cannot simply call update_context_time()
+        * because of locking issue as we are called in
+        * NMI context
+        */
+       if (read_format & PERF_FORMAT_TOTAL_TIMES) {
+               now = perf_clock();
+               ctx_time = event->shadow_ctx_time + now;
+               enabled = ctx_time - event->tstamp_enabled;
+               running = ctx_time - event->tstamp_running;
+       }
+
        if (event->attr.read_format & PERF_FORMAT_GROUP)
-               perf_output_read_group(handle, event);
+               perf_output_read_group(handle, event, enabled, running);
        else
-               perf_output_read_one(handle, event);
+               perf_output_read_one(handle, event, enabled, running);
 }
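The shadow_ctx_time arithmetic: event_sched_in() stores shadow_ctx_time = ctx->time - ctx->timestamp, taken while both are fresh from update_context_time(). While the context remains scheduled in, its time advances at the same rate as perf_clock(), so ctx_time = shadow_ctx_time + perf_clock() reconstructs the current context time without taking ctx->lock, and enabled/running fall out as ctx_time - tstamp_enabled and ctx_time - tstamp_running. That is what makes PERF_FORMAT_TOTAL_TIME_* safe to emit from NMI context.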
 
 void perf_output_sample(struct perf_output_handle *handle,
@@ -3586,61 +3770,16 @@ void perf_prepare_sample(struct perf_event_header *header,
 {
        u64 sample_type = event->attr.sample_type;
 
-       data->type = sample_type;
-
        header->type = PERF_RECORD_SAMPLE;
-       header->size = sizeof(*header);
+       header->size = sizeof(*header) + event->header_size;
 
        header->misc = 0;
-       header->misc |= perf_misc_flags(regs);
-
-       if (sample_type & PERF_SAMPLE_IP) {
-               data->ip = perf_instruction_pointer(regs);
-
-               header->size += sizeof(data->ip);
-       }
-
-       if (sample_type & PERF_SAMPLE_TID) {
-               /* namespace issues */
-               data->tid_entry.pid = perf_event_pid(event, current);
-               data->tid_entry.tid = perf_event_tid(event, current);
-
-               header->size += sizeof(data->tid_entry);
-       }
-
-       if (sample_type & PERF_SAMPLE_TIME) {
-               data->time = perf_clock();
-
-               header->size += sizeof(data->time);
-       }
-
-       if (sample_type & PERF_SAMPLE_ADDR)
-               header->size += sizeof(data->addr);
-
-       if (sample_type & PERF_SAMPLE_ID) {
-               data->id = primary_event_id(event);
-
-               header->size += sizeof(data->id);
-       }
-
-       if (sample_type & PERF_SAMPLE_STREAM_ID) {
-               data->stream_id = event->id;
-
-               header->size += sizeof(data->stream_id);
-       }
-
-       if (sample_type & PERF_SAMPLE_CPU) {
-               data->cpu_entry.cpu             = raw_smp_processor_id();
-               data->cpu_entry.reserved        = 0;
-
-               header->size += sizeof(data->cpu_entry);
-       }
+       header->misc |= perf_misc_flags(regs);
 
-       if (sample_type & PERF_SAMPLE_PERIOD)
-               header->size += sizeof(data->period);
+       __perf_event_header__init_id(header, data, event);
 
-       if (sample_type & PERF_SAMPLE_READ)
-               header->size += perf_event_read_size(event);
+       if (sample_type & PERF_SAMPLE_IP)
+               data->ip = perf_instruction_pointer(regs);
 
        if (sample_type & PERF_SAMPLE_CALLCHAIN) {
                int size = 1;
@@ -3705,23 +3844,26 @@ perf_event_read_event(struct perf_event *event,
                        struct task_struct *task)
 {
        struct perf_output_handle handle;
+       struct perf_sample_data sample;
        struct perf_read_event read_event = {
                .header = {
                        .type = PERF_RECORD_READ,
                        .misc = 0,
-                       .size = sizeof(read_event) + perf_event_read_size(event),
+                       .size = sizeof(read_event) + event->read_size,
                },
                .pid = perf_event_pid(event, task),
                .tid = perf_event_tid(event, task),
        };
        int ret;
 
+       perf_event_header__init_id(&read_event.header, &sample, event);
        ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
        if (ret)
                return;
 
        perf_output_put(&handle, read_event);
        perf_output_read(&handle, event);
+       perf_event__output_id_sample(event, &handle, &sample);
 
        perf_output_end(&handle);
 }
@@ -3751,14 +3893,16 @@ static void perf_event_task_output(struct perf_event *event,
                                     struct perf_task_event *task_event)
 {
        struct perf_output_handle handle;
+       struct perf_sample_data sample;
        struct task_struct *task = task_event->task;
-       int size, ret;
+       int ret, size = task_event->event_id.header.size;
 
-       size  = task_event->event_id.header.size;
-       ret = perf_output_begin(&handle, event, size, 0, 0);
+       perf_event_header__init_id(&task_event->event_id.header, &sample, event);
 
+       ret = perf_output_begin(&handle, event,
+                               task_event->event_id.header.size, 0, 0);
        if (ret)
-               return;
+               goto out;
 
        task_event->event_id.pid = perf_event_pid(event, task);
        task_event->event_id.ppid = perf_event_pid(event, current);
@@ -3768,7 +3912,11 @@ static void perf_event_task_output(struct perf_event *event,
 
        perf_output_put(&handle, task_event->event_id);
 
+       perf_event__output_id_sample(event, &handle, &sample);
+
        perf_output_end(&handle);
+out:
+       task_event->event_id.header.size = size;
 }
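The save/restore of event_id.header.size around this output is needed because the same perf_task_event is handed to every matching event: perf_event_header__init_id() grows header.size by the current event's id_header_size, and different events may have different sample_id_all settings, so the size has to be reset to its base value before the record is emitted to the next event. The comm and mmap outputs below follow the same pattern.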
 
 static int perf_event_task_match(struct perf_event *event)
@@ -3776,7 +3924,7 @@ static int perf_event_task_match(struct perf_event *event)
        if (event->state < PERF_EVENT_STATE_INACTIVE)
                return 0;
 
-       if (event->cpu != -1 && event->cpu != smp_processor_id())
+       if (!event_filter_match(event))
                return 0;
 
        if (event->attr.comm || event->attr.mmap ||
@@ -3804,22 +3952,26 @@ static void perf_event_task_event(struct perf_task_event *task_event)
        struct pmu *pmu;
        int ctxn;
 
-       rcu_read_lock_sched();
+       rcu_read_lock();
        list_for_each_entry_rcu(pmu, &pmus, entry) {
-               cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+               cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+               if (cpuctx->active_pmu != pmu)
+                       goto next;
                perf_event_task_ctx(&cpuctx->ctx, task_event);
 
                ctx = task_event->task_ctx;
                if (!ctx) {
                        ctxn = pmu->task_ctx_nr;
                        if (ctxn < 0)
-                               continue;
+                               goto next;
                        ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
                }
                if (ctx)
                        perf_event_task_ctx(ctx, task_event);
+next:
+               put_cpu_ptr(pmu->pmu_cpu_context);
        }
-       rcu_read_unlock_sched();
+       rcu_read_unlock();
 }
 
 static void perf_event_task(struct task_struct *task,
@@ -3879,11 +4031,16 @@ static void perf_event_comm_output(struct perf_event *event,
                                     struct perf_comm_event *comm_event)
 {
        struct perf_output_handle handle;
+       struct perf_sample_data sample;
        int size = comm_event->event_id.header.size;
-       int ret = perf_output_begin(&handle, event, size, 0, 0);
+       int ret;
+
+       perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
+       ret = perf_output_begin(&handle, event,
+                               comm_event->event_id.header.size, 0, 0);
 
        if (ret)
-               return;
+               goto out;
 
        comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
        comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
@@ -3891,7 +4048,12 @@ static void perf_event_comm_output(struct perf_event *event,
        perf_output_put(&handle, comm_event->event_id);
        perf_output_copy(&handle, comm_event->comm,
                                   comm_event->comm_size);
+
+       perf_event__output_id_sample(event, &handle, &sample);
+
        perf_output_end(&handle);
+out:
+       comm_event->event_id.header.size = size;
 }
 
 static int perf_event_comm_match(struct perf_event *event)
@@ -3899,7 +4061,7 @@ static int perf_event_comm_match(struct perf_event *event)
        if (event->state < PERF_EVENT_STATE_INACTIVE)
                return 0;
 
-       if (event->cpu != -1 && event->cpu != smp_processor_id())
+       if (!event_filter_match(event))
                return 0;
 
        if (event->attr.comm)
@@ -3936,21 +4098,24 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
        comm_event->comm_size = size;
 
        comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
-
-       rcu_read_lock_sched();
+       rcu_read_lock();
        list_for_each_entry_rcu(pmu, &pmus, entry) {
-               cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+               cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+               if (cpuctx->active_pmu != pmu)
+                       goto next;
                perf_event_comm_ctx(&cpuctx->ctx, comm_event);
 
                ctxn = pmu->task_ctx_nr;
                if (ctxn < 0)
-                       continue;
+                       goto next;
 
                ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
                if (ctx)
                        perf_event_comm_ctx(ctx, comm_event);
+next:
+               put_cpu_ptr(pmu->pmu_cpu_context);
        }
-       rcu_read_unlock_sched();
+       rcu_read_unlock();
 }
 
 void perf_event_comm(struct task_struct *task)
@@ -4013,11 +4178,15 @@ static void perf_event_mmap_output(struct perf_event *event,
                                     struct perf_mmap_event *mmap_event)
 {
        struct perf_output_handle handle;
+       struct perf_sample_data sample;
        int size = mmap_event->event_id.header.size;
-       int ret = perf_output_begin(&handle, event, size, 0, 0);
+       int ret;
 
+       perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
+       ret = perf_output_begin(&handle, event,
+                               mmap_event->event_id.header.size, 0, 0);
        if (ret)
-               return;
+               goto out;
 
        mmap_event->event_id.pid = perf_event_pid(event, current);
        mmap_event->event_id.tid = perf_event_tid(event, current);
@@ -4025,7 +4194,12 @@ static void perf_event_mmap_output(struct perf_event *event,
        perf_output_put(&handle, mmap_event->event_id);
        perf_output_copy(&handle, mmap_event->file_name,
                                   mmap_event->file_size);
+
+       perf_event__output_id_sample(event, &handle, &sample);
+
        perf_output_end(&handle);
+out:
+       mmap_event->event_id.header.size = size;
 }
 
 static int perf_event_mmap_match(struct perf_event *event,
@@ -4035,7 +4209,7 @@ static int perf_event_mmap_match(struct perf_event *event,
        if (event->state < PERF_EVENT_STATE_INACTIVE)
                return 0;
 
-       if (event->cpu != -1 && event->cpu != smp_processor_id())
+       if (!event_filter_match(event))
                return 0;
 
        if ((!executable && event->attr.mmap_data) ||
@@ -4120,23 +4294,27 @@ got_name:
 
        mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
 
-       rcu_read_lock_sched();
+       rcu_read_lock();
        list_for_each_entry_rcu(pmu, &pmus, entry) {
-               cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+               cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+               if (cpuctx->active_pmu != pmu)
+                       goto next;
                perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
                                        vma->vm_flags & VM_EXEC);
 
                ctxn = pmu->task_ctx_nr;
                if (ctxn < 0)
-                       continue;
+                       goto next;
 
                ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
                if (ctx) {
                        perf_event_mmap_ctx(ctx, mmap_event,
                                        vma->vm_flags & VM_EXEC);
                }
+next:
+               put_cpu_ptr(pmu->pmu_cpu_context);
        }
-       rcu_read_unlock_sched();
+       rcu_read_unlock();
 
        kfree(buf);
 }
@@ -4176,6 +4354,7 @@ void perf_event_mmap(struct vm_area_struct *vma)
 static void perf_log_throttle(struct perf_event *event, int enable)
 {
        struct perf_output_handle handle;
+       struct perf_sample_data sample;
        int ret;
 
        struct {
@@ -4197,11 +4376,15 @@ static void perf_log_throttle(struct perf_event *event, int enable)
        if (enable)
                throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
 
-       ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0);
+       perf_event_header__init_id(&throttle_event.header, &sample, event);
+
+       ret = perf_output_begin(&handle, event,
+                               throttle_event.header.size, 1, 0);
        if (ret)
                return;
 
        perf_output_put(&handle, throttle_event);
+       perf_event__output_id_sample(event, &handle, &sample);
        perf_output_end(&handle);
 }
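
perf_event_mmap_output() and perf_log_throttle() now follow the same sample-id pattern: grow the header with perf_event_header__init_id(), emit the record, append the id fields, and (where the record is reused, as in the mmap path) restore the original header size. A minimal sketch of that pattern for a hypothetical fixed-size record; the helpers are the ones introduced by this series, the record layout is made up:

	static void perf_log_example(struct perf_event *event, u64 value)
	{
		struct perf_output_handle handle;
		struct perf_sample_data sample;
		struct {
			struct perf_event_header header;
			u64 value;
		} rec = {
			.header = {
				.type = PERF_RECORD_THROTTLE,	/* stand-in record type */
				.misc = 0,
				.size = sizeof(rec),
			},
			.value = value,
		};

		/* grows header.size when the event asked for PERF_SAMPLE_* id fields */
		perf_event_header__init_id(&rec.header, &sample, event);

		if (perf_output_begin(&handle, event, rec.header.size, 0, 0))
			return;

		perf_output_put(&handle, rec);
		perf_event__output_id_sample(event, &handle, &sample);
		perf_output_end(&handle);
	}
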
 
@@ -4217,6 +4400,13 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
        struct hw_perf_event *hwc = &event->hw;
        int ret = 0;
 
+       /*
+        * Non-sampling counters might still use the PMI to fold short
+        * hardware counters; ignore those.
+        */
+       if (unlikely(!is_sampling_event(event)))
+               return 0;
+
        if (!throttle) {
                hwc->interrupts++;
        } else {
@@ -4257,12 +4447,8 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
        if (events && atomic_dec_and_test(&event->event_limit)) {
                ret = 1;
                event->pending_kill = POLL_HUP;
-               if (nmi) {
-                       event->pending_disable = 1;
-                       perf_pending_queue(&event->pending,
-                                          perf_pending_event);
-               } else
-                       perf_event_disable(event);
+               event->pending_disable = 1;
+               irq_work_queue(&event->pending);
        }
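
The nmi special case disappears because irq_work_queue() is safe from any context, including NMI; the callback (perf_pending_event(), hooked up via init_irq_work() further down in perf_event_alloc()) then performs the actual disable from IRQ context. A minimal usage sketch of the irq_work facility, independent of perf:

	#include <linux/irq_work.h>

	static void demo_irq_work_fn(struct irq_work *work)
	{
		/* runs in hard-IRQ context shortly after being queued;
		 * a safe place for work we must not do from NMI context */
	}

	static struct irq_work demo_work;

	static void demo_setup(void)
	{
		init_irq_work(&demo_work, demo_irq_work_fn);
	}

	static void demo_from_nmi(void)
	{
		irq_work_queue(&demo_work);	/* lock-less, NMI-safe enqueue */
	}
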
 
        if (event->overflow_handler)
@@ -4363,7 +4549,7 @@ static void perf_swevent_event(struct perf_event *event, u64 nr,
        if (!regs)
                return;
 
-       if (!hwc->sample_period)
+       if (!is_sampling_event(event))
                return;
 
        if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
@@ -4379,7 +4565,7 @@ static int perf_exclude_event(struct perf_event *event,
                              struct pt_regs *regs)
 {
        if (event->hw.state & PERF_HES_STOPPED)
-               return 0;
+               return 1;
 
        if (regs) {
                if (event->attr.exclude_user && user_mode(regs))
@@ -4490,7 +4676,7 @@ int perf_swevent_get_recursion_context(void)
 }
 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
 
-void inline perf_swevent_put_recursion_context(int rctx)
+inline void perf_swevent_put_recursion_context(int rctx)
 {
        struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
 
@@ -4526,7 +4712,7 @@ static int perf_swevent_add(struct perf_event *event, int flags)
        struct hw_perf_event *hwc = &event->hw;
        struct hlist_head *head;
 
-       if (hwc->sample_period) {
+       if (is_sampling_event(event)) {
                hwc->last_period = hwc->sample_period;
                perf_swevent_set_period(event);
        }
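
Several sites (here, __perf_event_overflow(), perf_swevent_event(), the hrtimer helpers) switch from testing hwc->sample_period directly to is_sampling_event(). A sketch of the helper as it appears in the header at this point:

	static inline bool is_sampling_event(struct perf_event *event)
	{
		return event->attr.sample_period != 0;
	}

This is what makes the new early return in __perf_event_overflow() safe: pure counting events never reach the throttling and period-adjustment code that follows it.
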
@@ -4671,7 +4857,7 @@ static void sw_perf_event_destroy(struct perf_event *event)
 
        WARN_ON(event->parent);
 
-       atomic_dec(&perf_swevent_enabled[event_id]);
+       jump_label_dec(&perf_swevent_enabled[event_id]);
        swevent_hlist_put(event);
 }
 
@@ -4691,7 +4877,7 @@ static int perf_swevent_init(struct perf_event *event)
                break;
        }
 
-       if (event_id > PERF_COUNT_SW_MAX)
+       if (event_id >= PERF_COUNT_SW_MAX)
                return -ENOENT;
 
        if (!event->parent) {
@@ -4701,7 +4887,7 @@ static int perf_swevent_init(struct perf_event *event)
                if (err)
                        return err;
 
-               atomic_inc(&perf_swevent_enabled[event_id]);
+               jump_label_inc(&perf_swevent_enabled[event_id]);
                event->destroy = sw_perf_event_destroy;
        }
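
perf_swevent_enabled[] has PERF_COUNT_SW_MAX slots, hence the >= fix above: valid ids are 0 .. PERF_COUNT_SW_MAX-1. The atomic_inc/dec pair becomes jump_label_inc()/jump_label_dec() so the perf_sw_event() fast path can be patched out entirely while no software event of that id exists. A simplified reader-side sketch; the real header uses this kernel's jump-label primitives rather than a plain atomic_read():

	#include <linux/perf_event.h>

	extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];

	static inline void demo_sw_event(u32 event_id, u64 nr,
					 struct pt_regs *regs, u64 addr)
	{
		/* with jump labels this test is a patched-out branch while the
		 * count maintained by jump_label_inc()/dec() is zero */
		if (atomic_read(&perf_swevent_enabled[event_id]))
			__perf_sw_event(event_id, nr, 0, regs, addr);
	}
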
 
@@ -4735,6 +4921,8 @@ static int perf_tp_event_match(struct perf_event *event,
                                struct perf_sample_data *data,
                                struct pt_regs *regs)
 {
+       if (event->hw.state & PERF_HES_STOPPED)
+               return 0;
        /*
         * All tracepoints are from kernel-space.
         */
@@ -4783,15 +4971,6 @@ static int perf_tp_event_init(struct perf_event *event)
        if (event->attr.type != PERF_TYPE_TRACEPOINT)
                return -ENOENT;
 
-       /*
-        * Raw tracepoint data is a severe data leak, only allow root to
-        * have these.
-        */
-       if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
-                       perf_paranoid_tracepoint_raw() &&
-                       !capable(CAP_SYS_ADMIN))
-               return -EPERM;
-
        err = perf_trace_init(event);
        if (err)
                return err;
@@ -4814,7 +4993,7 @@ static struct pmu perf_tracepoint = {
 
 static inline void perf_tp_register(void)
 {
-       perf_pmu_register(&perf_tracepoint);
+       perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
 }
 
 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
@@ -4904,31 +5083,33 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
 static void perf_swevent_start_hrtimer(struct perf_event *event)
 {
        struct hw_perf_event *hwc = &event->hw;
+       s64 period;
+
+       if (!is_sampling_event(event))
+               return;
 
        hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hwc->hrtimer.function = perf_swevent_hrtimer;
-       if (hwc->sample_period) {
-               s64 period = local64_read(&hwc->period_left);
 
-               if (period) {
-                       if (period < 0)
-                               period = 10000;
+       period = local64_read(&hwc->period_left);
+       if (period) {
+               if (period < 0)
+                       period = 10000;
 
-                       local64_set(&hwc->period_left, 0);
-               } else {
-                       period = max_t(u64, 10000, hwc->sample_period);
-               }
-               __hrtimer_start_range_ns(&hwc->hrtimer,
+               local64_set(&hwc->period_left, 0);
+       } else {
+               period = max_t(u64, 10000, hwc->sample_period);
+       }
+       __hrtimer_start_range_ns(&hwc->hrtimer,
                                ns_to_ktime(period), 0,
                                HRTIMER_MODE_REL_PINNED, 0);
-       }
 }
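
With the is_sampling_event() guard hoisted to the top, the body no longer nests inside an if; the period logic itself is unchanged (reuse any leftover period, floor of 10000 ns). For completeness, the timer side, a simplified sketch of what perf_swevent_hrtimer() does on each expiry rather than a verbatim copy:

	static enum hrtimer_restart demo_swevent_hrtimer(struct hrtimer *hrtimer)
	{
		struct perf_event *event =
			container_of(hrtimer, struct perf_event, hw.hrtimer);
		u64 period = max_t(u64, 10000, event->hw.sample_period);

		/* ... read the counter and emit one sample here ... */

		hrtimer_forward_now(hrtimer, ns_to_ktime(period));
		return HRTIMER_RESTART;	/* keep firing until cancelled */
	}
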
 
 static void perf_swevent_cancel_hrtimer(struct perf_event *event)
 {
        struct hw_perf_event *hwc = &event->hw;
 
-       if (hwc->sample_period) {
+       if (is_sampling_event(event)) {
                ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
                local64_set(&hwc->period_left, ktime_to_ns(remaining));
 
@@ -5123,25 +5304,96 @@ static void *find_pmu_context(int ctxn)
        return NULL;
 }
 
-static void free_pmu_context(void * __percpu cpu_context)
+static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
 {
-       struct pmu *pmu;
+       int cpu;
+
+       for_each_possible_cpu(cpu) {
+               struct perf_cpu_context *cpuctx;
+
+               cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+
+               if (cpuctx->active_pmu == old_pmu)
+                       cpuctx->active_pmu = pmu;
+       }
+}
+
+static void free_pmu_context(struct pmu *pmu)
+{
+       struct pmu *i;
 
        mutex_lock(&pmus_lock);
        /*
         * Like a real lame refcount.
         */
-       list_for_each_entry(pmu, &pmus, entry) {
-               if (pmu->pmu_cpu_context == cpu_context)
+       list_for_each_entry(i, &pmus, entry) {
+               if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
+                       update_pmu_context(i, pmu);
                        goto out;
+               }
        }
 
-       free_percpu(cpu_context);
+       free_percpu(pmu->pmu_cpu_context);
 out:
        mutex_unlock(&pmus_lock);
 }
+static struct idr pmu_idr;
+
+static ssize_t
+type_show(struct device *dev, struct device_attribute *attr, char *page)
+{
+       struct pmu *pmu = dev_get_drvdata(dev);
+
+       return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
+}
+
+static struct device_attribute pmu_dev_attrs[] = {
+       __ATTR_RO(type),
+       __ATTR_NULL,
+};
+
+static int pmu_bus_running;
+static struct bus_type pmu_bus = {
+       .name           = "event_source",
+       .dev_attrs      = pmu_dev_attrs,
+};
+
+static void pmu_dev_release(struct device *dev)
+{
+       kfree(dev);
+}
+
+static int pmu_dev_alloc(struct pmu *pmu)
+{
+       int ret = -ENOMEM;
+
+       pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+       if (!pmu->dev)
+               goto out;
+
+       device_initialize(pmu->dev);
+       ret = dev_set_name(pmu->dev, "%s", pmu->name);
+       if (ret)
+               goto free_dev;
+
+       dev_set_drvdata(pmu->dev, pmu);
+       pmu->dev->bus = &pmu_bus;
+       pmu->dev->release = pmu_dev_release;
+       ret = device_add(pmu->dev);
+       if (ret)
+               goto free_dev;
+
+out:
+       return ret;
+
+free_dev:
+       put_device(pmu->dev);
+       goto out;
+}
+
+static struct lock_class_key cpuctx_mutex;
 
-int perf_pmu_register(struct pmu *pmu)
+int perf_pmu_register(struct pmu *pmu, char *name, int type)
 {
        int cpu, ret;
 
@@ -5151,23 +5403,50 @@ int perf_pmu_register(struct pmu *pmu)
        if (!pmu->pmu_disable_count)
                goto unlock;
 
+       pmu->type = -1;
+       if (!name)
+               goto skip_type;
+       pmu->name = name;
+
+       if (type < 0) {
+               int err = idr_pre_get(&pmu_idr, GFP_KERNEL);
+               if (!err)
+                       goto free_pdc;
+
+               err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type);
+               if (err) {
+                       ret = err;
+                       goto free_pdc;
+               }
+       }
+       pmu->type = type;
+
+       if (pmu_bus_running) {
+               ret = pmu_dev_alloc(pmu);
+               if (ret)
+                       goto free_idr;
+       }
+
+skip_type:
        pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
        if (pmu->pmu_cpu_context)
                goto got_cpu_context;
 
        pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
        if (!pmu->pmu_cpu_context)
-               goto free_pdc;
+               goto free_dev;
 
        for_each_possible_cpu(cpu) {
                struct perf_cpu_context *cpuctx;
 
                cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
                __perf_event_init_context(&cpuctx->ctx);
+               lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
+               cpuctx->ctx.type = cpu_context;
                cpuctx->ctx.pmu = pmu;
-               cpuctx->timer_interval = TICK_NSEC;
-               hrtimer_init(&cpuctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-               cpuctx->timer.function = perf_event_context_tick;
+               cpuctx->jiffies_interval = 1;
+               INIT_LIST_HEAD(&cpuctx->rotation_list);
+               cpuctx->active_pmu = pmu;
        }
 
 got_cpu_context:
@@ -5200,6 +5479,14 @@ unlock:
 
        return ret;
 
+free_dev:
+       device_del(pmu->dev);
+       put_device(pmu->dev);
+
+free_idr:
+       if (pmu->type >= PERF_TYPE_MAX)
+               idr_remove(&pmu_idr, pmu->type);
+
 free_pdc:
        free_percpu(pmu->pmu_disable_count);
        goto unlock;
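
Named PMUs registered without a fixed type get one allocated from pmu_idr, starting above PERF_TYPE_MAX so the static types stay reserved. The idr API of this era is the two-step pre_get/get_new_above pair; the canonical usage, with the -EAGAIN retry that callers usually add, looks roughly like this sketch:

	static int demo_alloc_type(struct idr *idr, void *ptr)
	{
		int id, err;

		do {
			if (!idr_pre_get(idr, GFP_KERNEL))	/* preload tree nodes */
				return -ENOMEM;
			/* allocate the lowest free id >= PERF_TYPE_MAX for ptr */
			err = idr_get_new_above(idr, ptr, PERF_TYPE_MAX, &id);
		} while (err == -EAGAIN);

		return err ? err : id;
	}
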
@@ -5212,13 +5499,18 @@ void perf_pmu_unregister(struct pmu *pmu)
        mutex_unlock(&pmus_lock);
 
        /*
-        * We use the pmu list either under SRCU or preempt_disable,
-        * synchronize_srcu() implies synchronize_sched() so we're good.
+        * We dereference the pmu list under both SRCU and regular RCU, so
+        * synchronize against both of those.
         */
        synchronize_srcu(&pmus_srcu);
+       synchronize_rcu();
 
        free_percpu(pmu->pmu_disable_count);
-       free_pmu_context(pmu->pmu_cpu_context);
+       if (pmu->type >= PERF_TYPE_MAX)
+               idr_remove(&pmu_idr, pmu->type);
+       device_del(pmu->dev);
+       put_device(pmu->dev);
+       free_pmu_context(pmu);
 }
 
 struct pmu *perf_init_event(struct perf_event *event)
@@ -5227,15 +5519,25 @@ struct pmu *perf_init_event(struct perf_event *event)
        int idx;
 
        idx = srcu_read_lock(&pmus_srcu);
+
+       rcu_read_lock();
+       pmu = idr_find(&pmu_idr, event->attr.type);
+       rcu_read_unlock();
+       if (pmu)
+               goto unlock;
+
        list_for_each_entry_rcu(pmu, &pmus, entry) {
                int ret = pmu->event_init(event);
                if (!ret)
-                       break;
+                       goto unlock;
+
                if (ret != -ENOENT) {
                        pmu = ERR_PTR(ret);
-                       break;
+                       goto unlock;
                }
        }
+       pmu = ERR_PTR(-ENOENT);
+unlock:
        srcu_read_unlock(&pmus_srcu, idx);
 
        return pmu;
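
perf_init_event() now resolves attr.type through pmu_idr first, so a dynamically registered PMU is reachable by whatever type number it was assigned. Userspace discovers that number through the event_source bus added above. A hedged userspace sketch, assuming the sysfs layout created by pmu_dev_alloc(); the helper name is made up:

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	/* open an event on a dynamically numbered PMU, e.g. pmu = "tracepoint" */
	static int open_dynamic_pmu_event(const char *pmu, unsigned long long config)
	{
		char path[128];
		struct perf_event_attr attr;
		FILE *f;
		int type;

		snprintf(path, sizeof(path),
			 "/sys/bus/event_source/devices/%s/type", pmu);
		f = fopen(path, "r");
		if (!f)
			return -1;
		if (fscanf(f, "%d", &type) != 1) {
			fclose(f);
			return -1;
		}
		fclose(f);

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = type;	/* the idr-allocated (or fixed) value */
		attr.config = config;

		/* current task, any cpu, no group */
		return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	}
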
@@ -5246,15 +5548,21 @@ struct pmu *perf_init_event(struct perf_event *event)
  */
 static struct perf_event *
 perf_event_alloc(struct perf_event_attr *attr, int cpu,
-                  struct perf_event *group_leader,
-                  struct perf_event *parent_event,
-                  perf_overflow_handler_t overflow_handler)
+                struct task_struct *task,
+                struct perf_event *group_leader,
+                struct perf_event *parent_event,
+                perf_overflow_handler_t overflow_handler)
 {
        struct pmu *pmu;
        struct perf_event *event;
        struct hw_perf_event *hwc;
        long err;
 
+       if ((unsigned)cpu >= nr_cpu_ids) {
+               if (!task || cpu != -1)
+                       return ERR_PTR(-EINVAL);
+       }
+
        event = kzalloc(sizeof(*event), GFP_KERNEL);
        if (!event)
                return ERR_PTR(-ENOMEM);
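
The new check uses the usual unsigned-compare trick: casting cpu to unsigned makes any negative value huge, so a single comparison rejects both out-of-range and negative CPUs, and cpu == -1 is then only allowed for task-bound events. An equivalent standalone form, for illustration only:

	static bool demo_cpu_arg_ok(int cpu, bool bound_to_task)
	{
		if ((unsigned int)cpu >= nr_cpu_ids) {
			/* -1 means "wherever the task runs"; anything else is bogus */
			return bound_to_task && cpu == -1;
		}
		return true;
	}
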
@@ -5273,6 +5581,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
        INIT_LIST_HEAD(&event->event_entry);
        INIT_LIST_HEAD(&event->sibling_list);
        init_waitqueue_head(&event->waitq);
+       init_irq_work(&event->pending, perf_pending_event);
 
        mutex_init(&event->mmap_mutex);
 
@@ -5289,9 +5598,20 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
        event->state            = PERF_EVENT_STATE_INACTIVE;
 
+       if (task) {
+               event->attach_state = PERF_ATTACH_TASK;
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+               /*
+                * hw_breakpoint is a bit difficult here..
+                */
+               if (attr->type == PERF_TYPE_BREAKPOINT)
+                       event->hw.bp_target = task;
+#endif
+       }
+
        if (!overflow_handler && parent_event)
                overflow_handler = parent_event->overflow_handler;
-       
+
        event->overflow_handler = overflow_handler;
 
        if (attr->disabled)
@@ -5332,7 +5652,8 @@ done:
        event->pmu = pmu;
 
        if (!event->parent) {
-               atomic_inc(&nr_events);
+               if (event->attach_state & PERF_ATTACH_TASK)
+                       jump_label_inc(&perf_task_events);
                if (event->attr.mmap || event->attr.mmap_data)
                        atomic_inc(&nr_mmap_events);
                if (event->attr.comm)
@@ -5493,13 +5814,16 @@ SYSCALL_DEFINE5(perf_event_open,
                struct perf_event_attr __user *, attr_uptr,
                pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
 {
-       struct perf_event *event, *group_leader = NULL, *output_event = NULL;
+       struct perf_event *group_leader = NULL, *output_event = NULL;
+       struct perf_event *event, *sibling;
        struct perf_event_attr attr;
        struct perf_event_context *ctx;
        struct file *event_file = NULL;
        struct file *group_file = NULL;
+       struct task_struct *task = NULL;
        struct pmu *pmu;
        int event_fd;
+       int move_group = 0;
        int fput_needed = 0;
        int err;
 
@@ -5525,17 +5849,11 @@ SYSCALL_DEFINE5(perf_event_open,
        if (event_fd < 0)
                return event_fd;
 
-       event = perf_event_alloc(&attr, cpu, group_leader, NULL, NULL);
-       if (IS_ERR(event)) {
-               err = PTR_ERR(event);
-               goto err_fd;
-       }
-
        if (group_fd != -1) {
                group_leader = perf_fget_light(group_fd, &fput_needed);
                if (IS_ERR(group_leader)) {
                        err = PTR_ERR(group_leader);
-                       goto err_alloc;
+                       goto err_fd;
                }
                group_file = group_leader->filp;
                if (flags & PERF_FLAG_FD_OUTPUT)
@@ -5544,21 +5862,61 @@ SYSCALL_DEFINE5(perf_event_open,
                        group_leader = NULL;
        }
 
+       if (pid != -1) {
+               task = find_lively_task_by_vpid(pid);
+               if (IS_ERR(task)) {
+                       err = PTR_ERR(task);
+                       goto err_group_fd;
+               }
+       }
+
+       event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, NULL);
+       if (IS_ERR(event)) {
+               err = PTR_ERR(event);
+               goto err_task;
+       }
+
        /*
         * Special case software events and allow them to be part of
         * any hardware group.
         */
        pmu = event->pmu;
-       if ((pmu->task_ctx_nr == perf_sw_context) && group_leader)
-               pmu = group_leader->pmu;
+
+       if (group_leader &&
+           (is_software_event(event) != is_software_event(group_leader))) {
+               if (is_software_event(event)) {
+                       /*
+                        * event and group_leader differ in kind, and event is
+                        * the software one, so the group leader must be a
+                        * hardware event.
+                        *
+                        * Allow adding software events to a !software group;
+                        * this is safe because software events never
+                        * fail to schedule.
+                        */
+                       pmu = group_leader->pmu;
+               } else if (is_software_event(group_leader) &&
+                          (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
+                       /*
+                        * In case the group is a pure software group, and we
+                        * try to add a hardware event, move the whole group to
+                        * the hardware context.
+                        */
+                       move_group = 1;
+               }
+       }
 
        /*
         * Get the target context (task or percpu):
         */
-       ctx = find_get_context(pmu, pid, cpu);
+       ctx = find_get_context(pmu, task, cpu);
        if (IS_ERR(ctx)) {
                err = PTR_ERR(ctx);
-               goto err_group_fd;
+               goto err_alloc;
+       }
+
+       if (task) {
+               put_task_struct(task);
+               task = NULL;
        }
 
        /*
@@ -5577,8 +5935,14 @@ SYSCALL_DEFINE5(perf_event_open,
                 * Do not allow to attach to a group in a different
                 * task or CPU context:
                 */
-               if (group_leader->ctx != ctx)
-                       goto err_context;
+               if (move_group) {
+                       if (group_leader->ctx->type != ctx->type)
+                               goto err_context;
+               } else {
+                       if (group_leader->ctx != ctx)
+                               goto err_context;
+               }
+
                /*
                 * Only a group leader can be exclusive or pinned
                 */
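
Concretely, the move_group path lets a group whose leader is a software event later gain a hardware sibling: the whole group is migrated into the hardware context instead of the open failing. A userspace sketch of such a group, using standard perf_event_open() calls; the helper is hypothetical:

	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	static int open_mixed_group(void)
	{
		struct perf_event_attr sw, hw;
		int leader, sibling;

		memset(&sw, 0, sizeof(sw));
		sw.size = sizeof(sw);
		sw.type = PERF_TYPE_SOFTWARE;
		sw.config = PERF_COUNT_SW_TASK_CLOCK;

		memset(&hw, 0, sizeof(hw));
		hw.size = sizeof(hw);
		hw.type = PERF_TYPE_HARDWARE;
		hw.config = PERF_COUNT_HW_CPU_CYCLES;

		/* software group leader on the current task */
		leader = syscall(__NR_perf_event_open, &sw, 0, -1, -1, 0);
		if (leader < 0)
			return -1;

		/* hardware sibling: previously rejected, now triggers move_group */
		sibling = syscall(__NR_perf_event_open, &hw, 0, -1, leader, 0);
		return sibling < 0 ? -1 : leader;
	}
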
@@ -5598,20 +5962,51 @@ SYSCALL_DEFINE5(perf_event_open,
                goto err_context;
        }
 
+       if (move_group) {
+               struct perf_event_context *gctx = group_leader->ctx;
+
+               mutex_lock(&gctx->mutex);
+               perf_event_remove_from_context(group_leader);
+               list_for_each_entry(sibling, &group_leader->sibling_list,
+                                   group_entry) {
+                       perf_event_remove_from_context(sibling);
+                       put_ctx(gctx);
+               }
+               mutex_unlock(&gctx->mutex);
+               put_ctx(gctx);
+       }
+
        event->filp = event_file;
        WARN_ON_ONCE(ctx->parent_ctx);
        mutex_lock(&ctx->mutex);
+
+       if (move_group) {
+               perf_install_in_context(ctx, group_leader, cpu);
+               get_ctx(ctx);
+               list_for_each_entry(sibling, &group_leader->sibling_list,
+                                   group_entry) {
+                       perf_install_in_context(ctx, sibling, cpu);
+                       get_ctx(ctx);
+               }
+       }
+
        perf_install_in_context(ctx, event, cpu);
        ++ctx->generation;
        mutex_unlock(&ctx->mutex);
 
        event->owner = current;
-       get_task_struct(current);
+
        mutex_lock(&current->perf_event_mutex);
        list_add_tail(&event->owner_entry, &current->perf_event_list);
        mutex_unlock(&current->perf_event_mutex);
 
        /*
+        * Precalculate sample_data sizes
+        */
+       perf_event__header_size(event);
+       perf_event__id_header_size(event);
+
+       /*
         * Drop the reference on the group_event after placing the
         * new event on the sibling_list. This ensures destruction
         * of the group leader will find the pointer to itself in
@@ -5623,10 +6018,13 @@ SYSCALL_DEFINE5(perf_event_open,
 
 err_context:
        put_ctx(ctx);
-err_group_fd:
-       fput_light(group_file, fput_needed);
 err_alloc:
        free_event(event);
+err_task:
+       if (task)
+               put_task_struct(task);
+err_group_fd:
+       fput_light(group_file, fput_needed);
 err_fd:
        put_unused_fd(event_fd);
        return err;
@@ -5637,11 +6035,11 @@ err_fd:
  *
  * @attr: attributes of the counter to create
  * @cpu: cpu in which the counter is bound
- * @pid: task to profile
+ * @task: task to profile (NULL for percpu)
  */
 struct perf_event *
 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
-                                pid_t pid,
+                                struct task_struct *task,
                                 perf_overflow_handler_t overflow_handler)
 {
        struct perf_event_context *ctx;
@@ -5652,13 +6050,13 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
         * Get the target context (task or percpu):
         */
 
-       event = perf_event_alloc(attr, cpu, NULL, NULL, overflow_handler);
+       event = perf_event_alloc(attr, cpu, task, NULL, NULL, overflow_handler);
        if (IS_ERR(event)) {
                err = PTR_ERR(event);
                goto err;
        }
 
-       ctx = find_get_context(event->pmu, pid, cpu);
+       ctx = find_get_context(event->pmu, task, cpu);
        if (IS_ERR(ctx)) {
                err = PTR_ERR(ctx);
                goto err_free;
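
In-kernel users now pass a task_struct instead of a pid, and the counter follows the task when cpu is -1. A hedged sketch of a caller; the names are invented, but the signature is the one declared above:

	static struct perf_event *demo_task_counter(struct task_struct *tsk,
						    perf_overflow_handler_t handler)
	{
		struct perf_event_attr attr = {
			.type		= PERF_TYPE_SOFTWARE,
			.config		= PERF_COUNT_SW_TASK_CLOCK,
			.size		= sizeof(attr),
			.sample_period	= 1000000,	/* 1 ms, arbitrary */
		};

		/* cpu == -1: count wherever @tsk runs */
		return perf_event_create_kernel_counter(&attr, -1, tsk, handler);
	}
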
@@ -5671,12 +6069,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
        ++ctx->generation;
        mutex_unlock(&ctx->mutex);
 
-       event->owner = current;
-       get_task_struct(current);
-       mutex_lock(&current->perf_event_mutex);
-       list_add_tail(&event->owner_entry, &current->perf_event_list);
-       mutex_unlock(&current->perf_event_mutex);
-
        return event;
 
 err_free:
@@ -5726,17 +6118,20 @@ __perf_event_exit_task(struct perf_event *child_event,
                         struct perf_event_context *child_ctx,
                         struct task_struct *child)
 {
-       struct perf_event *parent_event;
+       if (child_event->parent) {
+               raw_spin_lock_irq(&child_ctx->lock);
+               perf_group_detach(child_event);
+               raw_spin_unlock_irq(&child_ctx->lock);
+       }
 
        perf_event_remove_from_context(child_event);
 
-       parent_event = child_event->parent;
        /*
-        * It can happen that parent exits first, and has events
+        * It can happen that the parent exits first, and has events
         * that are still around due to the child reference. These
-        * events need to be zapped - but otherwise linger.
+        * events need to be zapped.
         */
-       if (parent_event) {
+       if (child_event->parent) {
                sync_child_event(child_event, child);
                free_event(child_event);
        }
@@ -5760,8 +6155,8 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
         * scheduled, so we are now safe from rescheduling changing
         * our context.
         */
-       child_ctx = child->perf_event_ctxp[ctxn];
-       __perf_event_task_sched_out(child_ctx);
+       child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
+       task_ctx_sched_out(child_ctx, EVENT_ALL);
 
        /*
         * Take the context lock here so that if find_get_context is
@@ -5827,8 +6222,24 @@ again:
  */
 void perf_event_exit_task(struct task_struct *child)
 {
+       struct perf_event *event, *tmp;
        int ctxn;
 
+       mutex_lock(&child->perf_event_mutex);
+       list_for_each_entry_safe(event, tmp, &child->perf_event_list,
+                                owner_entry) {
+               list_del_init(&event->owner_entry);
+
+               /*
+                * Ensure the list deletion is visible before we clear
+                * the owner; this closes a race against perf_release(), where
+                * we need to serialize on the owner->perf_event_mutex.
+                */
+               smp_wmb();
+               event->owner = NULL;
+       }
+       mutex_unlock(&child->perf_event_mutex);
+
        for_each_task_context_nr(ctxn)
                perf_event_exit_task_context(child, ctxn);
 }
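
The smp_wmb() orders the list removal against clearing ->owner; the consumer on the file-release side reads ->owner and takes owner->perf_event_mutex before touching owner_entry. A schematic of that pairing, not the exact perf_release() code, and it omits the task reference counting the real path needs:

	/* exit side (as above) */
	static void demo_owner_clear(struct perf_event *event)
	{
		list_del_init(&event->owner_entry);
		smp_wmb();			/* publish the deletion first */
		event->owner = NULL;
	}

	/* release side, schematic */
	static void demo_owner_detach(struct perf_event *event)
	{
		struct task_struct *owner = ACCESS_ONCE(event->owner);

		if (!owner)
			return;
		mutex_lock(&owner->perf_event_mutex);
		/* may already be empty if the exit side ran first */
		if (!list_empty(&event->owner_entry))
			list_del_init(&event->owner_entry);
		mutex_unlock(&owner->perf_event_mutex);
	}
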
@@ -5887,6 +6298,14 @@ again:
        }
 }
 
+void perf_event_delayed_put(struct task_struct *task)
+{
+       int ctxn;
+
+       for_each_task_context_nr(ctxn)
+               WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
+}
+
 /*
  * inherit an event from parent task to child task:
  */
@@ -5899,6 +6318,7 @@ inherit_event(struct perf_event *parent_event,
              struct perf_event_context *child_ctx)
 {
        struct perf_event *child_event;
+       unsigned long flags;
 
        /*
         * Instead of creating recursive hierarchies of events,
@@ -5911,6 +6331,7 @@ inherit_event(struct perf_event *parent_event,
 
        child_event = perf_event_alloc(&parent_event->attr,
                                           parent_event->cpu,
+                                          child,
                                           group_leader, parent_event,
                                           NULL);
        if (IS_ERR(child_event))
@@ -5941,9 +6362,17 @@ inherit_event(struct perf_event *parent_event,
        child_event->overflow_handler = parent_event->overflow_handler;
 
        /*
+        * Precalculate sample_data sizes
+        */
+       perf_event__header_size(child_event);
+       perf_event__id_header_size(child_event);
+
+       /*
         * Link it up in the child's context:
         */
+       raw_spin_lock_irqsave(&child_ctx->lock, flags);
        add_event_to_ctx(child_event, child_ctx);
+       raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
 
        /*
         * Get a reference to the parent filp - we will fput it
@@ -6036,13 +6465,9 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
        struct perf_event *event;
        struct task_struct *parent = current;
        int inherited_all = 1;
+       unsigned long flags;
        int ret = 0;
 
-       child->perf_event_ctxp[ctxn] = NULL;
-
-       mutex_init(&child->perf_event_mutex);
-       INIT_LIST_HEAD(&child->perf_event_list);
-
        if (likely(!parent->perf_event_ctxp[ctxn]))
                return 0;
 
@@ -6076,6 +6501,15 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
                        break;
        }
 
+       /*
+        * We can't hold ctx->lock when iterating the ->flexible_groups list due
+        * to allocations, but we need to prevent rotation because
+        * rotate_ctx() will change the list from interrupt context.
+        */
+       raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+       parent_ctx->rotate_disable = 1;
+       raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+
        list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
                ret = inherit_task_group(event, parent, parent_ctx,
                                         child, ctxn, &inherited_all);
@@ -6083,18 +6517,20 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
                        break;
        }
 
+       raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+       parent_ctx->rotate_disable = 0;
+
        child_ctx = child->perf_event_ctxp[ctxn];
 
        if (child_ctx && inherited_all) {
                /*
                 * Mark the child context as a clone of the parent
                 * context, or of whatever the parent is a clone of.
-                * Note that if the parent is a clone, it could get
-                * uncloned at any point, but that doesn't matter
-                * because the list of events and the generation
-                * count can't have changed since we took the mutex.
+                *
+                * Note that if the parent is a clone, the holding of
+                * parent_ctx->lock avoids it from being uncloned.
                 */
-               cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
+               cloned_ctx = parent_ctx->parent_ctx;
                if (cloned_ctx) {
                        child_ctx->parent_ctx = cloned_ctx;
                        child_ctx->parent_gen = parent_ctx->parent_gen;
@@ -6105,6 +6541,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
                get_ctx(child_ctx->parent_ctx);
        }
 
+       raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
        mutex_unlock(&parent_ctx->mutex);
 
        perf_unpin_context(parent_ctx);
@@ -6119,6 +6556,10 @@ int perf_event_init_task(struct task_struct *child)
 {
        int ctxn, ret;
 
+       memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
+       mutex_init(&child->perf_event_mutex);
+       INIT_LIST_HEAD(&child->perf_event_list);
+
        for_each_task_context_nr(ctxn) {
                ret = perf_event_init_context(child, ctxn);
                if (ret)
@@ -6136,6 +6577,7 @@ static void __init perf_event_init_all_cpus(void)
        for_each_possible_cpu(cpu) {
                swhash = &per_cpu(swevent_htable, cpu);
                mutex_init(&swhash->hlist_mutex);
+               INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
        }
 }
 
@@ -6154,7 +6596,16 @@ static void __cpuinit perf_event_init_cpu(int cpu)
        mutex_unlock(&swhash->hlist_mutex);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
+#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
+static void perf_pmu_rotate_stop(struct pmu *pmu)
+{
+       struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+
+       WARN_ON(!irqs_disabled());
+
+       list_del_init(&cpuctx->rotation_list);
+}
+
 static void __perf_event_exit_context(void *__info)
 {
        struct perf_event_context *ctx = __info;
@@ -6176,14 +6627,13 @@ static void perf_event_exit_cpu_context(int cpu)
 
        idx = srcu_read_lock(&pmus_srcu);
        list_for_each_entry_rcu(pmu, &pmus, entry) {
-               ctx = &this_cpu_ptr(pmu->pmu_cpu_context)->ctx;
+               ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
 
                mutex_lock(&ctx->mutex);
                smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
                mutex_unlock(&ctx->mutex);
        }
        srcu_read_unlock(&pmus_srcu, idx);
-
 }
 
 static void perf_event_exit_cpu(int cpu)
@@ -6200,6 +6650,26 @@ static void perf_event_exit_cpu(int cpu)
 static inline void perf_event_exit_cpu(int cpu) { }
 #endif
 
+static int
+perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
+{
+       int cpu;
+
+       for_each_online_cpu(cpu)
+               perf_event_exit_cpu(cpu);
+
+       return NOTIFY_OK;
+}
+
+/*
+ * Run the perf reboot notifier at the very last possible moment so that
+ * the generic watchdog code runs as long as possible.
+ */
+static struct notifier_block perf_reboot_notifier = {
+       .notifier_call = perf_reboot,
+       .priority = INT_MIN,
+};
+
 static int __cpuinit
 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 {
@@ -6226,11 +6696,47 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 
 void __init perf_event_init(void)
 {
+       int ret;
+
+       idr_init(&pmu_idr);
+
        perf_event_init_all_cpus();
        init_srcu_struct(&pmus_srcu);
-       perf_pmu_register(&perf_swevent);
-       perf_pmu_register(&perf_cpu_clock);
-       perf_pmu_register(&perf_task_clock);
+       perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
+       perf_pmu_register(&perf_cpu_clock, NULL, -1);
+       perf_pmu_register(&perf_task_clock, NULL, -1);
        perf_tp_register();
        perf_cpu_notifier(perf_cpu_notify);
+       register_reboot_notifier(&perf_reboot_notifier);
+
+       ret = init_hw_breakpoint();
+       WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
+}
+
+static int __init perf_event_sysfs_init(void)
+{
+       struct pmu *pmu;
+       int ret;
+
+       mutex_lock(&pmus_lock);
+
+       ret = bus_register(&pmu_bus);
+       if (ret)
+               goto unlock;
+
+       list_for_each_entry(pmu, &pmus, entry) {
+               if (!pmu->name || pmu->type < 0)
+                       continue;
+
+               ret = pmu_dev_alloc(pmu);
+               WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
+       }
+       pmu_bus_running = 1;
+       ret = 0;
+
+unlock:
+       mutex_unlock(&pmus_lock);
+
+       return ret;
 }
+device_initcall(perf_event_sysfs_init);