perf: Fix software event overflow, CVE-2011-2918
[linux-flexiantxendom0-natty.git] kernel/perf_event.c
index 2c14e3a..478ec5d 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -62,7 +62,8 @@ static struct srcu_struct pmus_srcu;
  */
 int sysctl_perf_event_paranoid __read_mostly = 1;
 
-int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
+/* Minimum for 512 kiB + 1 user control page */
+int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
 
 /*
  * max perf event sample rate
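
Note: with the common 4 KiB page size the new default works out to 512 + 4096/1024 = 516 kiB per user, i.e. the extra page keeps the full 512 kiB available for the ring buffer once the user control page is charged against the same mlock budget.
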
@@ -268,6 +269,12 @@ static void update_context_time(struct perf_event_context *ctx)
        ctx->timestamp = now;
 }
 
+static u64 perf_event_time(struct perf_event *event)
+{
+       struct perf_event_context *ctx = event->ctx;
+       return ctx ? ctx->time : 0;
+}
+
 /*
  * Update the total_time_enabled and total_time_running fields for a event.
  */
@@ -281,7 +288,7 @@ static void update_event_times(struct perf_event *event)
                return;
 
        if (ctx->is_active)
-               run_end = ctx->time;
+               run_end = perf_event_time(event);
        else
                run_end = event->tstamp_stopped;
 
@@ -290,7 +297,7 @@ static void update_event_times(struct perf_event *event)
        if (event->state == PERF_EVENT_STATE_INACTIVE)
                run_end = event->tstamp_stopped;
        else
-               run_end = ctx->time;
+               run_end = perf_event_time(event);
 
        event->total_time_running = run_end - event->tstamp_running;
 }
@@ -546,6 +553,7 @@ event_sched_out(struct perf_event *event,
                  struct perf_cpu_context *cpuctx,
                  struct perf_event_context *ctx)
 {
+       u64 tstamp = perf_event_time(event);
        u64 delta;
        /*
         * An event which could not be activated because of
@@ -557,7 +565,7 @@ event_sched_out(struct perf_event *event,
            && !event_filter_match(event)) {
                delta = ctx->time - event->tstamp_stopped;
                event->tstamp_running += delta;
-               event->tstamp_stopped = ctx->time;
+               event->tstamp_stopped = tstamp;
        }
 
        if (event->state != PERF_EVENT_STATE_ACTIVE)
@@ -568,7 +576,7 @@ event_sched_out(struct perf_event *event,
                event->pending_disable = 0;
                event->state = PERF_EVENT_STATE_OFF;
        }
-       event->tstamp_stopped = ctx->time;
+       event->tstamp_stopped = tstamp;
        event->pmu->del(event, 0);
        event->oncpu = -1;
 
@@ -775,16 +783,33 @@ retry:
        raw_spin_unlock_irq(&ctx->lock);
 }
 
+#define MAX_INTERRUPTS (~0ULL)
+
+static void perf_log_throttle(struct perf_event *event, int enable);
+
 static int
 event_sched_in(struct perf_event *event,
                 struct perf_cpu_context *cpuctx,
                 struct perf_event_context *ctx)
 {
+       u64 tstamp = perf_event_time(event);
+
        if (event->state <= PERF_EVENT_STATE_OFF)
                return 0;
 
        event->state = PERF_EVENT_STATE_ACTIVE;
        event->oncpu = smp_processor_id();
+
+       /*
+        * Unthrottle events, since we scheduled we might have missed several
+        * ticks already, also for a heavily scheduling task there is little
+        * guarantee it'll get a tick in a timely manner.
+        */
+       if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
+               perf_log_throttle(event, 1);
+               event->hw.interrupts = 0;
+       }
+
        /*
         * The new state must be visible before we turn it on in the hardware:
         */
@@ -796,9 +821,9 @@ event_sched_in(struct perf_event *event,
                return -EAGAIN;
        }
 
-       event->tstamp_running += ctx->time - event->tstamp_stopped;
+       event->tstamp_running += tstamp - event->tstamp_stopped;
 
-       event->shadow_ctx_time = ctx->time - ctx->timestamp;
+       event->shadow_ctx_time = tstamp - ctx->timestamp;
 
        if (!is_software_event(event))
                cpuctx->active_oncpu++;
@@ -910,11 +935,13 @@ static int group_can_go_on(struct perf_event *event,
 static void add_event_to_ctx(struct perf_event *event,
                               struct perf_event_context *ctx)
 {
+       u64 tstamp = perf_event_time(event);
+
        list_add_event(event, ctx);
        perf_group_attach(event);
-       event->tstamp_enabled = ctx->time;
-       event->tstamp_running = ctx->time;
-       event->tstamp_stopped = ctx->time;
+       event->tstamp_enabled = tstamp;
+       event->tstamp_running = tstamp;
+       event->tstamp_stopped = tstamp;
 }
 
 /*
@@ -949,7 +976,7 @@ static void __perf_install_in_context(void *info)
 
        add_event_to_ctx(event, ctx);
 
-       if (event->cpu != -1 && event->cpu != smp_processor_id())
+       if (!event_filter_match(event))
                goto unlock;
 
        /*
@@ -1054,14 +1081,13 @@ static void __perf_event_mark_enabled(struct perf_event *event,
                                        struct perf_event_context *ctx)
 {
        struct perf_event *sub;
+       u64 tstamp = perf_event_time(event);
 
        event->state = PERF_EVENT_STATE_INACTIVE;
-       event->tstamp_enabled = ctx->time - event->total_time_enabled;
+       event->tstamp_enabled = tstamp - event->total_time_enabled;
        list_for_each_entry(sub, &event->sibling_list, group_entry) {
-               if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
-                       sub->tstamp_enabled =
-                               ctx->time - sub->total_time_enabled;
-               }
+               if (sub->state >= PERF_EVENT_STATE_INACTIVE)
+                       sub->tstamp_enabled = tstamp - sub->total_time_enabled;
        }
 }
 
@@ -1094,7 +1120,7 @@ static void __perf_event_enable(void *info)
                goto unlock;
        __perf_event_mark_enabled(event, ctx);
 
-       if (event->cpu != -1 && event->cpu != smp_processor_id())
+       if (!event_filter_match(event))
                goto unlock;
 
        /*
@@ -1441,7 +1467,7 @@ ctx_pinned_sched_in(struct perf_event_context *ctx,
        list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
                if (event->state <= PERF_EVENT_STATE_OFF)
                        continue;
-               if (event->cpu != -1 && event->cpu != smp_processor_id())
+               if (!event_filter_match(event))
                        continue;
 
                if (group_can_go_on(event, cpuctx, 1))
@@ -1473,7 +1499,7 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
                 * Listen to the 'cpu' scheduling filter constraint
                 * of events:
                 */
-               if (event->cpu != -1 && event->cpu != smp_processor_id())
+               if (!event_filter_match(event))
                        continue;
 
                if (group_can_go_on(event, cpuctx, can_add_hw)) {
@@ -1586,10 +1612,6 @@ void __perf_event_task_sched_in(struct task_struct *task)
        }
 }
 
-#define MAX_INTERRUPTS (~0ULL)
-
-static void perf_log_throttle(struct perf_event *event, int enable);
-
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
 {
        u64 frequency = event->attr.sample_freq;
@@ -1700,7 +1722,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
                if (event->state != PERF_EVENT_STATE_ACTIVE)
                        continue;
 
-               if (event->cpu != -1 && event->cpu != smp_processor_id())
+               if (!event_filter_match(event))
                        continue;
 
                hwc = &event->hw;
@@ -1891,11 +1913,12 @@ static void __perf_event_read(void *info)
                return;
 
        raw_spin_lock(&ctx->lock);
-       update_context_time(ctx);
+       if (ctx->is_active)
+               update_context_time(ctx);
        update_event_times(event);
+       if (event->state == PERF_EVENT_STATE_ACTIVE)
+               event->pmu->read(event);
        raw_spin_unlock(&ctx->lock);
-
-       event->pmu->read(event);
 }
 
 static inline u64 perf_event_count(struct perf_event *event)
@@ -1989,8 +2012,7 @@ static int alloc_callchain_buffers(void)
         * accessed from NMI. Use a temporary manual per cpu allocation
         * until that gets sorted out.
         */
-       size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
-               num_possible_cpus();
+       size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
 
        entries = kzalloc(size, GFP_KERNEL);
        if (!entries)
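
The callchain hunk above sizes the allocation with offsetof() over the trailing array and bounds it by nr_cpu_ids rather than num_possible_cpus(): cpu_entries[] is indexed by CPU number, and since possible CPU numbers can be sparse, num_possible_cpus() may be smaller than the highest possible id and under-allocate, while nr_cpu_ids (one past the highest possible id) is the safe slot count. A minimal user-space sketch of the sizing idiom, using illustrative stand-in names rather than the real kernel structures:

    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative stand-in for struct callchain_cpus_entries. */
    struct cpus_entries {
            long    rcu_head[2];            /* header fields */
            void    *cpu_entries[];         /* one slot per possible CPU id */
    };

    int main(void)
    {
            /* Assumed value; the kernel passes the run-time nr_cpu_ids here. */
            enum { nr_slots = 8 };

            /* Header plus exactly nr_slots trailing pointer slots. */
            size_t size = offsetof(struct cpus_entries, cpu_entries[nr_slots]);

            printf("%zu bytes for %d slots\n", size, (int)nr_slots);
            return 0;
    }

The expression evaluates to the header size plus exactly n pointer-sized slots, which is what the kzalloc() above needs.
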
@@ -2191,13 +2213,6 @@ find_lively_task_by_vpid(pid_t vpid)
        if (!task)
                return ERR_PTR(-ESRCH);
 
-       /*
-        * Can't attach events to a dying task.
-        */
-       err = -ESRCH;
-       if (task->flags & PF_EXITING)
-               goto errout;
-
        /* Reuse ptrace permission checks for now. */
        err = -EACCES;
        if (!ptrace_may_access(task, PTRACE_MODE_READ))
@@ -2218,14 +2233,11 @@ find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
        unsigned long flags;
        int ctxn, err;
 
-       if (!task && cpu != -1) {
+       if (!task) {
                /* Must be root to operate on a CPU event: */
                if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
                        return ERR_PTR(-EACCES);
 
-               if (cpu < 0 || cpu >= nr_cpumask_bits)
-                       return ERR_PTR(-EINVAL);
-
                /*
                 * We could be clever and allow to attach a event to an
                 * offline CPU and activate it when the CPU comes up, but
@@ -2261,14 +2273,27 @@ retry:
 
                get_ctx(ctx);
 
-               if (cmpxchg(&task->perf_event_ctxp[ctxn], NULL, ctx)) {
-                       /*
-                        * We raced with some other task; use
-                        * the context they set.
-                        */
+               err = 0;
+               mutex_lock(&task->perf_event_mutex);
+               /*
+                * If it has already passed perf_event_exit_task().
+                * we must see PF_EXITING, it takes this mutex too.
+                */
+               if (task->flags & PF_EXITING)
+                       err = -ESRCH;
+               else if (task->perf_event_ctxp[ctxn])
+                       err = -EAGAIN;
+               else
+                       rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
+               mutex_unlock(&task->perf_event_mutex);
+
+               if (unlikely(err)) {
                        put_task_struct(task);
                        kfree(ctx);
-                       goto retry;
+
+                       if (err == -EAGAIN)
+                               goto retry;
+                       goto errout;
                }
        }
 
@@ -3899,7 +3924,7 @@ static int perf_event_task_match(struct perf_event *event)
        if (event->state < PERF_EVENT_STATE_INACTIVE)
                return 0;
 
-       if (event->cpu != -1 && event->cpu != smp_processor_id())
+       if (!event_filter_match(event))
                return 0;
 
        if (event->attr.comm || event->attr.mmap ||
@@ -4036,7 +4061,7 @@ static int perf_event_comm_match(struct perf_event *event)
        if (event->state < PERF_EVENT_STATE_INACTIVE)
                return 0;
 
-       if (event->cpu != -1 && event->cpu != smp_processor_id())
+       if (!event_filter_match(event))
                return 0;
 
        if (event->attr.comm)
@@ -4184,7 +4209,7 @@ static int perf_event_mmap_match(struct perf_event *event,
        if (event->state < PERF_EVENT_STATE_INACTIVE)
                return 0;
 
-       if (event->cpu != -1 && event->cpu != smp_processor_id())
+       if (!event_filter_match(event))
                return 0;
 
        if ((!executable && event->attr.mmap_data) ||
@@ -4422,11 +4447,8 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
        if (events && atomic_dec_and_test(&event->event_limit)) {
                ret = 1;
                event->pending_kill = POLL_HUP;
-               if (nmi) {
-                       event->pending_disable = 1;
-                       irq_work_queue(&event->pending);
-               } else
-                       perf_event_disable(event);
+               event->pending_disable = 1;
+               irq_work_queue(&event->pending);
        }
 
        if (event->overflow_handler)
@@ -4543,7 +4565,7 @@ static int perf_exclude_event(struct perf_event *event,
                              struct pt_regs *regs)
 {
        if (event->hw.state & PERF_HES_STOPPED)
-               return 0;
+               return 1;
 
        if (regs) {
                if (event->attr.exclude_user && user_mode(regs))
@@ -4654,7 +4676,7 @@ int perf_swevent_get_recursion_context(void)
 }
 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
 
-void inline perf_swevent_put_recursion_context(int rctx)
+inline void perf_swevent_put_recursion_context(int rctx)
 {
        struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
 
@@ -4899,6 +4921,8 @@ static int perf_tp_event_match(struct perf_event *event,
                                struct perf_sample_data *data,
                                struct pt_regs *regs)
 {
+       if (event->hw.state & PERF_HES_STOPPED)
+               return 0;
        /*
         * All tracepoints are from kernel-space.
         */
@@ -5367,6 +5391,8 @@ free_dev:
        goto out;
 }
 
+static struct lock_class_key cpuctx_mutex;
+
 int perf_pmu_register(struct pmu *pmu, char *name, int type)
 {
        int cpu, ret;
@@ -5415,6 +5441,7 @@ skip_type:
 
                cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
                __perf_event_init_context(&cpuctx->ctx);
+               lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
                cpuctx->ctx.type = cpu_context;
                cpuctx->ctx.pmu = pmu;
                cpuctx->jiffies_interval = 1;
@@ -5531,6 +5558,11 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
        struct hw_perf_event *hwc;
        long err;
 
+       if ((unsigned)cpu >= nr_cpu_ids) {
+               if (!task || cpu != -1)
+                       return ERR_PTR(-EINVAL);
+       }
+
        event = kzalloc(sizeof(*event), GFP_KERNEL);
        if (!event)
                return ERR_PTR(-ENOMEM);
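
With the hunk above, the cpu argument is validated in perf_event_alloc() itself: anything outside 0..nr_cpu_ids-1 is rejected with -EINVAL unless it is a per-task event opened with cpu == -1 (the bounds check this replaces in find_get_context() is removed further up). A hedged user-space sketch of the resulting contract; whether the first call succeeds also depends on perf_event_paranoid and related settings:

    #define _GNU_SOURCE
    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdio.h>
    #include <errno.h>

    int main(void)
    {
            struct perf_event_attr attr;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size   = sizeof(attr);
            attr.type   = PERF_TYPE_SOFTWARE;
            attr.config = PERF_COUNT_SW_CPU_CLOCK;

            /* Per-task event, any CPU: cpu == -1 is the one value allowed
             * to fall outside the 0..nr_cpu_ids-1 range. */
            fd = syscall(__NR_perf_event_open, &attr, 0 /* pid: self */,
                         -1 /* cpu */, -1 /* group_fd */, 0UL /* flags */);
            if (fd < 0)
                    perror("perf_event_open(cpu = -1)");
            else
                    close(fd);

            /* An out-of-range cpu is expected to fail with EINVAL
             * (assumption based on the check added above). */
            fd = syscall(__NR_perf_event_open, &attr, 0, 123456, -1, 0UL);
            if (fd < 0)
                    printf("cpu = 123456: %s\n", strerror(errno));
            else
                    close(fd);

            return 0;
    }
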
@@ -5579,7 +5611,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
        if (!overflow_handler && parent_event)
                overflow_handler = parent_event->overflow_handler;
-       
+
        event->overflow_handler = overflow_handler;
 
        if (attr->disabled)
@@ -5882,6 +5914,11 @@ SYSCALL_DEFINE5(perf_event_open,
                goto err_alloc;
        }
 
+       if (task) {
+               put_task_struct(task);
+               task = NULL;
+       }
+
        /*
         * Look up the group leader (we will attach this event to it):
         */
@@ -6081,17 +6118,20 @@ __perf_event_exit_task(struct perf_event *child_event,
                         struct perf_event_context *child_ctx,
                         struct task_struct *child)
 {
-       struct perf_event *parent_event;
+       if (child_event->parent) {
+               raw_spin_lock_irq(&child_ctx->lock);
+               perf_group_detach(child_event);
+               raw_spin_unlock_irq(&child_ctx->lock);
+       }
 
        perf_event_remove_from_context(child_event);
 
-       parent_event = child_event->parent;
        /*
-        * It can happen that parent exits first, and has events
+        * It can happen that the parent exits first, and has events
         * that are still around due to the child reference. These
-        * events need to be zapped - but otherwise linger.
+        * events need to be zapped.
         */
-       if (parent_event) {
+       if (child_event->parent) {
                sync_child_event(child_event, child);
                free_event(child_event);
        }
@@ -6115,7 +6155,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
         * scheduled, so we are now safe from rescheduling changing
         * our context.
         */
-       child_ctx = child->perf_event_ctxp[ctxn];
+       child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
        task_ctx_sched_out(child_ctx, EVENT_ALL);
 
        /*
@@ -6428,11 +6468,6 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
        unsigned long flags;
        int ret = 0;
 
-       child->perf_event_ctxp[ctxn] = NULL;
-
-       mutex_init(&child->perf_event_mutex);
-       INIT_LIST_HEAD(&child->perf_event_list);
-
        if (likely(!parent->perf_event_ctxp[ctxn]))
                return 0;
 
@@ -6484,7 +6519,6 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 
        raw_spin_lock_irqsave(&parent_ctx->lock, flags);
        parent_ctx->rotate_disable = 0;
-       raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
 
        child_ctx = child->perf_event_ctxp[ctxn];
 
@@ -6492,12 +6526,11 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
                /*
                 * Mark the child context as a clone of the parent
                 * context, or of whatever the parent is a clone of.
-                * Note that if the parent is a clone, it could get
-                * uncloned at any point, but that doesn't matter
-                * because the list of events and the generation
-                * count can't have changed since we took the mutex.
+                *
+                * Note that if the parent is a clone, the holding of
+                * parent_ctx->lock avoids it from being uncloned.
                 */
-               cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
+               cloned_ctx = parent_ctx->parent_ctx;
                if (cloned_ctx) {
                        child_ctx->parent_ctx = cloned_ctx;
                        child_ctx->parent_gen = parent_ctx->parent_gen;
@@ -6508,6 +6541,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
                get_ctx(child_ctx->parent_ctx);
        }
 
+       raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
        mutex_unlock(&parent_ctx->mutex);
 
        perf_unpin_context(parent_ctx);
@@ -6522,6 +6556,10 @@ int perf_event_init_task(struct task_struct *child)
 {
        int ctxn, ret;
 
+       memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
+       mutex_init(&child->perf_event_mutex);
+       INIT_LIST_HEAD(&child->perf_event_list);
+
        for_each_task_context_nr(ctxn) {
                ret = perf_event_init_context(child, ctxn);
                if (ret)
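
Throughout the diff the open-coded cpu test, event->cpu != -1 && event->cpu != smp_processor_id(), is replaced by event_filter_match(). The helper itself is not part of this diff; in this tree it presumably reduces to the same check, roughly:

    static inline int event_filter_match(struct perf_event *event)
    {
            return event->cpu == -1 || event->cpu == smp_processor_id();
    }

Funnelling every caller through one helper means any additional match criteria added later only need to be implemented in one place.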