taskstats: don't allow duplicate entries in listener mode, CVE-2011-2484
authorVasiliy Kulikov <segoon@openwall.com>
Thu, 7 Jul 2011 11:17:08 +0000 (12:17 +0100)
committerSteve Conklin <sconklin@canonical.com>
Fri, 15 Jul 2011 17:21:14 +0000 (12:21 -0500)
Currently a single process may register exit handlers an unlimited number of
times.  This can lead to a bloated listeners chain and very slow process
terminations.

E.g. after 10 million (10KK) TASKSTATS_CMD_ATTR_REGISTER_CPUMASK requests,
~300 MB of kernel memory is consumed by the handlers chain and "time id"
takes 2-7 seconds instead of the normal 0.003.  This makes it possible to
exhaust all kernel memory and to eat a lot of CPU time by triggering
numerous exits on a single CPU.
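
For context, the bookkeeping that grows without bound is the per-CPU
listener list in kernel/taskstats.c; paraphrased from that file (a sketch,
not the full source), each registration is a small heap object chained
into a per-CPU list:

        /* One entry per registered listener; nothing before this patch
         * prevented the same pid from appearing in a CPU's list many times. */
        struct listener {
                struct list_head list;
                pid_t pid;
                char valid;
        };

        struct listener_list {
                struct rw_semaphore sem;
                struct list_head list;
        };
        static DEFINE_PER_CPU(struct listener_list, listener_array);

Every exit then walks this list in send_cpu_listeners(), which is why
duplicate entries both pin kernel memory and slow down process termination.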

The patch limits the number of times a single process may register
itself on a single CPU to one.
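
Reassembled from the diff below into plain C for readability (a sketch:
allocation-failure cleanup, the DEREGISTER branch, and the context elided
between the two hunks are omitted), the patched REGISTER path reuses a
single spare allocation and skips any CPU whose list already contains the
caller's pid:

        s = NULL;
        if (isadd == REGISTER) {
                for_each_cpu(cpu, mask) {
                        /* Reuse the spare entry left over from a skipped CPU. */
                        if (!s)
                                s = kmalloc_node(sizeof(struct listener),
                                                 GFP_KERNEL, cpu_to_node(cpu));
                        if (!s)
                                goto cleanup;
                        s->pid = pid;
                        /* ... remaining listener fields initialized here ... */

                        listeners = &per_cpu(listener_array, cpu);
                        down_write(&listeners->sem);
                        /* New in this patch: refuse a second entry for the same pid. */
                        list_for_each_entry_safe(s2, tmp, &listeners->list, list) {
                                if (s2->pid == pid)
                                        goto next_cpu;
                        }
                        list_add(&s->list, &listeners->list);
                        s = NULL;       /* entry consumed; allocate a fresh one next time */
next_cpu:
                        up_write(&listeners->sem);
                }
                kfree(s);               /* free the unused spare, if any (kfree(NULL) is a no-op) */
                return 0;
        }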

One small issue is left unfixed: as taskstats_exit() is called before
exit_files() in do_exit(), the orphaned listener entry (if it was not
explicitly deregistered) is kept until some other process's next exit()
triggers the implicit deregistration in send_cpu_listeners().  So, if a
process that registered itself as a listener exits and the next spawned
process gets the same pid, it would inherit the taskstats attributes.

Signed-off-by: Vasiliy Kulikov <segooon@gmail.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

(cherry picked from commit 26c4caea9d697043cc5a458b96411b86d7f6babd)
CVE-2011-2484
BugLink: http://bugs.launchpad.net/bugs/806390
Signed-off-by: Andy Whitcroft <apw@canonical.com>

kernel/taskstats.c

index 3971c6b..30158f8 100644
@@ -285,16 +285,18 @@ ret:
 static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
 {
        struct listener_list *listeners;
-       struct listener *s, *tmp;
+       struct listener *s, *tmp, *s2;
        unsigned int cpu;
 
        if (!cpumask_subset(mask, cpu_possible_mask))
                return -EINVAL;
 
+       s = NULL;
        if (isadd == REGISTER) {
                for_each_cpu(cpu, mask) {
-                       s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
-                                        cpu_to_node(cpu));
+                       if (!s)
+                               s = kmalloc_node(sizeof(struct listener),
+                                                GFP_KERNEL, cpu_to_node(cpu));
                        if (!s)
                                goto cleanup;
                        s->pid = pid;
@@ -303,9 +305,16 @@ static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
 
                        listeners = &per_cpu(listener_array, cpu);
                        down_write(&listeners->sem);
+                       list_for_each_entry_safe(s2, tmp, &listeners->list, list) {
+                               if (s2->pid == pid)
+                                       goto next_cpu;
+                       }
                        list_add(&s->list, &listeners->list);
+                       s = NULL;
+next_cpu:
                        up_write(&listeners->sem);
                }
+               kfree(s);
                return 0;
        }
 