perf_events: Generalize use of event_filter_match()
author	Stephane Eranian <eranian@google.com>
	Mon, 3 Jan 2011 16:20:01 +0000 (18:20 +0200)
committer	Ingo Molnar <mingo@elte.hu>
	Fri, 7 Jan 2011 14:08:51 +0000 (15:08 +0100)
Replace all occurrences of:

	event->cpu != -1 && event->cpu != smp_processor_id()

by a call to:

	!event_filter_match(event)

This makes the code more consistent and will make the cgroup
patch smaller.
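
For reference, event_filter_match() is just the open-coded cpu test
centralized into one inline helper; a sketch of its definition in
kernel/perf_event.c at this point in the tree:

	static inline int
	event_filter_match(struct perf_event *event)
	{
		return event->cpu == -1 || event->cpu == smp_processor_id();
	}

With the test behind a single helper, the cgroup patch only needs to
extend this one function (e.g. with an additional cgroup-match check)
instead of touching every call site again.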

Signed-off-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <4d220593.2308e30a.48c5.ffff8ae9@mx.google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 2c14e3a..dcdb19e 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -949,7 +949,7 @@ static void __perf_install_in_context(void *info)
 
        add_event_to_ctx(event, ctx);
 
-       if (event->cpu != -1 && event->cpu != smp_processor_id())
+       if (!event_filter_match(event))
                goto unlock;
 
        /*
@@ -1094,7 +1094,7 @@ static void __perf_event_enable(void *info)
                goto unlock;
        __perf_event_mark_enabled(event, ctx);
 
-       if (event->cpu != -1 && event->cpu != smp_processor_id())
+       if (!event_filter_match(event))
                goto unlock;
 
        /*
@@ -1441,7 +1441,7 @@ ctx_pinned_sched_in(struct perf_event_context *ctx,
        list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
                if (event->state <= PERF_EVENT_STATE_OFF)
                        continue;
-               if (event->cpu != -1 && event->cpu != smp_processor_id())
+               if (!event_filter_match(event))
                        continue;
 
                if (group_can_go_on(event, cpuctx, 1))
@@ -1473,7 +1473,7 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
                 * Listen to the 'cpu' scheduling filter constraint
                 * of events:
                 */
-               if (event->cpu != -1 && event->cpu != smp_processor_id())
+               if (!event_filter_match(event))
                        continue;
 
                if (group_can_go_on(event, cpuctx, can_add_hw)) {
@@ -1700,7 +1700,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
                if (event->state != PERF_EVENT_STATE_ACTIVE)
                        continue;
 
-               if (event->cpu != -1 && event->cpu != smp_processor_id())
+               if (!event_filter_match(event))
                        continue;
 
                hwc = &event->hw;
@@ -3899,7 +3899,7 @@ static int perf_event_task_match(struct perf_event *event)
        if (event->state < PERF_EVENT_STATE_INACTIVE)
                return 0;
 
-       if (event->cpu != -1 && event->cpu != smp_processor_id())
+       if (!event_filter_match(event))
                return 0;
 
        if (event->attr.comm || event->attr.mmap ||
@@ -4036,7 +4036,7 @@ static int perf_event_comm_match(struct perf_event *event)
        if (event->state < PERF_EVENT_STATE_INACTIVE)
                return 0;
 
-       if (event->cpu != -1 && event->cpu != smp_processor_id())
+       if (!event_filter_match(event))
                return 0;
 
        if (event->attr.comm)
@@ -4184,7 +4184,7 @@ static int perf_event_mmap_match(struct perf_event *event,
        if (event->state < PERF_EVENT_STATE_INACTIVE)
                return 0;
 
-       if (event->cpu != -1 && event->cpu != smp_processor_id())
+       if (!event_filter_match(event))
                return 0;
 
        if ((!executable && event->attr.mmap_data) ||