Update to 3.4-final.
[linux-flexiantxendom0-3.2.10.git] kernel/sched/core.c
index 50045bd..9a7fe31 100644
@@ -6430,6 +6430,8 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
                        if (!sg)
                                return -ENOMEM;
 
+                       sg->next = sg;
+
                        *per_cpu_ptr(sdd->sg, j) = sg;
 
                        sgp = kzalloc_node(sizeof(struct sched_group_power),
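The first hunk concerns iteration over a freshly allocated group: sched_groups hang off their domain as a circular singly-linked list threaded through sg->next, and a group that has just come out of kzalloc_node() carries a NULL next pointer. If an error path (or free_sched_groups()) walks the list before the ring has been linked up, it chases that NULL pointer. Making each new group a one-element ring that points at itself keeps every walker well defined from the moment the group exists. Below is a minimal userspace sketch of the pattern, not kernel code; struct group, group_alloc() and group_walk() are hypothetical names standing in for sched_group and its helpers.

/*
 * Sketch only: a freshly allocated node is made a valid one-element
 * circular list immediately, so ring walkers never see a NULL ->next.
 */
#include <stdio.h>
#include <stdlib.h>

struct group {
	int id;
	struct group *next;	/* circular list, like sched_group::next */
};

static struct group *group_alloc(int id)
{
	struct group *g = calloc(1, sizeof(*g));

	if (!g)
		return NULL;
	g->id = id;
	g->next = g;		/* one-element ring: safe to walk or free */
	return g;
}

/* Visits the ring exactly once, the way group lists are iterated. */
static void group_walk(struct group *first)
{
	struct group *g = first;

	do {
		printf("group %d\n", g->id);
		g = g->next;
	} while (g != first);
}

int main(void)
{
	struct group *g = group_alloc(0);

	if (g)
		group_walk(g);	/* terminates even for a lone group */
	free(g);
	return 0;
}

The do/while shape mirrors how a group ring is visited; with next pre-pointed at the group itself, the loop terminates correctly even for a ring that was never extended past one element.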
@@ -6453,16 +6455,26 @@ static void __sdt_free(const struct cpumask *cpu_map)
                struct sd_data *sdd = &tl->data;
 
                for_each_cpu(j, cpu_map) {
-                       struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
-                       if (sd && (sd->flags & SD_OVERLAP))
-                               free_sched_groups(sd->groups, 0);
-                       kfree(*per_cpu_ptr(sdd->sd, j));
-                       kfree(*per_cpu_ptr(sdd->sg, j));
-                       kfree(*per_cpu_ptr(sdd->sgp, j));
+                       struct sched_domain *sd;
+
+                       if (sdd->sd) {
+                               sd = *per_cpu_ptr(sdd->sd, j);
+                               if (sd && (sd->flags & SD_OVERLAP))
+                                       free_sched_groups(sd->groups, 0);
+                               kfree(*per_cpu_ptr(sdd->sd, j));
+                       }
+
+                       if (sdd->sg)
+                               kfree(*per_cpu_ptr(sdd->sg, j));
+                       if (sdd->sgp)
+                               kfree(*per_cpu_ptr(sdd->sgp, j));
                }
                free_percpu(sdd->sd);
+               sdd->sd = NULL;
                free_percpu(sdd->sg);
+               sdd->sg = NULL;
                free_percpu(sdd->sgp);
+               sdd->sgp = NULL;
        }
 }
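The second hunk makes __sdt_free() safe to run against a partially built sd_data. __sdt_alloc() can fail midway (any of its kzalloc_node()/alloc_percpu() calls may return NULL), and the cleanup path then calls __sdt_free(); before this change that meant dereferencing per-cpu pointers that were never allocated. The hunk adds NULL guards before each per_cpu_ptr() access and clears sdd->sd, sdd->sg and sdd->sgp after free_percpu(), so a repeated teardown is a no-op rather than a double free. Below is a minimal userspace sketch of the same pattern, assuming nothing kernel-specific; struct data, data_alloc() and data_free() are hypothetical names.

/*
 * Sketch only: a teardown helper that tolerates a constructor which
 * bailed out halfway, and that can be called more than once.
 */
#include <stdlib.h>

struct data {
	int *a;
	int *b;
};

static void data_free(struct data *d)
{
	/* Guard each pointer before use, free it, then clear it,
	 * echoing the sdd->sd/sg/sgp handling in the patch. */
	if (d->a) {
		free(d->a);
		d->a = NULL;	/* second data_free() becomes a no-op */
	}
	if (d->b) {
		free(d->b);
		d->b = NULL;
	}
}

static int data_alloc(struct data *d)
{
	d->a = calloc(4, sizeof(*d->a));
	if (!d->a)
		goto fail;
	d->b = calloc(4, sizeof(*d->b));
	if (!d->b)
		goto fail;
	return 0;
fail:
	data_free(d);		/* safe: frees only what was allocated */
	return -1;
}

int main(void)
{
	struct data d = { 0 };

	if (data_alloc(&d) == 0)
		data_free(&d);
	data_free(&d);		/* idempotent: the repeat call is harmless */
	return 0;
}

Clearing each pointer right after freeing it is what makes the teardown idempotent; the NULL guards are what make it tolerant of an allocator that failed partway through.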