virtio_net: invoke softirqs after __napi_schedule  [NOTE(review): this scraped header title does not match the diff below, which modifies kernel/cgroup_freezer.c — it replaces cgroup_freezing_or_frozen() with an RCU-based cgroup_freezing(), drops the struct cgroup_subsys * argument from the subsystem callbacks (create/destroy/can_attach/fork), switches to the one-argument freeze_task() and to __thaw_task(), adds system_freezing_cnt accounting on state transitions, and removes the now-unused can_attach_task/pre_attach/attach_task/attach/exit callback initializers]
[linux-flexiantxendom0-3.2.10.git] / kernel / cgroup_freezer.c
index 5e828a2..f86e939 100644 (file)
@@ -48,19 +48,17 @@ static inline struct freezer *task_freezer(struct task_struct *task)
                            struct freezer, css);
 }
 
-static inline int __cgroup_freezing_or_frozen(struct task_struct *task)
+bool cgroup_freezing(struct task_struct *task)
 {
-       enum freezer_state state = task_freezer(task)->state;
-       return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
-}
+       enum freezer_state state;
+       bool ret;
 
-int cgroup_freezing_or_frozen(struct task_struct *task)
-{
-       int result;
-       task_lock(task);
-       result = __cgroup_freezing_or_frozen(task);
-       task_unlock(task);
-       return result;
+       rcu_read_lock();
+       state = task_freezer(task)->state;
+       ret = state == CGROUP_FREEZING || state == CGROUP_FROZEN;
+       rcu_read_unlock();
+
+       return ret;
 }
 
 /*
@@ -102,9 +100,6 @@ struct cgroup_subsys freezer_subsys;
  * freezer_can_attach():
  * cgroup_mutex (held by caller of can_attach)
  *
- * cgroup_freezing_or_frozen():
- * task->alloc_lock (to get task's cgroup)
- *
  * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
  * freezer->lock
  *  sighand->siglock (if the cgroup is freezing)
@@ -130,11 +125,10 @@ struct cgroup_subsys freezer_subsys;
  *   write_lock css_set_lock (cgroup iterator start)
  *    task->alloc_lock
  *   read_lock css_set_lock (cgroup iterator start)
- *    task->alloc_lock (inside thaw_process(), prevents race with refrigerator())
+ *    task->alloc_lock (inside __thaw_task(), prevents race with refrigerator())
  *     sighand->siglock
  */
-static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
-                                                 struct cgroup *cgroup)
+static struct cgroup_subsys_state *freezer_create(struct cgroup *cgroup)
 {
        struct freezer *freezer;
 
@@ -147,10 +141,20 @@ static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
        return &freezer->css;
 }
 
-static void freezer_destroy(struct cgroup_subsys *ss,
-                           struct cgroup *cgroup)
+static void freezer_destroy(struct cgroup *cgroup)
 {
-       kfree(cgroup_freezer(cgroup));
+       struct freezer *freezer = cgroup_freezer(cgroup);
+
+       if (freezer->state != CGROUP_THAWED)
+               atomic_dec(&system_freezing_cnt);
+       kfree(freezer);
+}
+
+/* task is frozen or will freeze immediately when next it gets woken */
+static bool is_task_frozen_enough(struct task_struct *task)
+{
+       return frozen(task) ||
+               (task_is_stopped_or_traced(task) && freezing(task));
 }
 
 /*
@@ -158,15 +162,18 @@ static void freezer_destroy(struct cgroup_subsys *ss,
  * a write to that file racing against an attach, and hence the
  * can_attach() result will remain valid until the attach completes.
  */
-static int freezer_can_attach(struct cgroup_subsys *ss,
-                             struct cgroup *new_cgroup,
-                             struct task_struct *task)
+static int freezer_can_attach(struct cgroup *new_cgroup,
+                             struct cgroup_taskset *tset)
 {
        struct freezer *freezer;
+       struct task_struct *task;
 
        /*
         * Anything frozen can't move or be moved to/from.
         */
+       cgroup_taskset_for_each(task, new_cgroup, tset)
+               if (cgroup_freezing(task))
+                       return -EBUSY;
 
        freezer = cgroup_freezer(new_cgroup);
        if (freezer->state != CGROUP_THAWED)
@@ -175,18 +182,7 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
        return 0;
 }
 
-static int freezer_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
-{
-       rcu_read_lock();
-       if (__cgroup_freezing_or_frozen(tsk)) {
-               rcu_read_unlock();
-               return -EBUSY;
-       }
-       rcu_read_unlock();
-       return 0;
-}
-
-static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
+static void freezer_fork(struct task_struct *task)
 {
        struct freezer *freezer;
 
@@ -213,7 +209,7 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
 
        /* Locking avoids race with FREEZING -> THAWED transitions. */
        if (freezer->state == CGROUP_FREEZING)
-               freeze_task(task, true);
+               freeze_task(task);
        spin_unlock_irq(&freezer->lock);
 }
 
@@ -231,7 +227,7 @@ static void update_if_frozen(struct cgroup *cgroup,
        cgroup_iter_start(cgroup, &it);
        while ((task = cgroup_iter_next(cgroup, &it))) {
                ntotal++;
-               if (frozen(task))
+               if (freezing(task) && is_task_frozen_enough(task))
                        nfrozen++;
        }
 
@@ -279,12 +275,11 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
        struct task_struct *task;
        unsigned int num_cant_freeze_now = 0;
 
-       freezer->state = CGROUP_FREEZING;
        cgroup_iter_start(cgroup, &it);
        while ((task = cgroup_iter_next(cgroup, &it))) {
-               if (!freeze_task(task, true))
+               if (!freeze_task(task))
                        continue;
-               if (frozen(task))
+               if (is_task_frozen_enough(task))
                        continue;
                if (!freezing(task) && !freezer_should_skip(task))
                        num_cant_freeze_now++;
@@ -300,12 +295,9 @@ static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
        struct task_struct *task;
 
        cgroup_iter_start(cgroup, &it);
-       while ((task = cgroup_iter_next(cgroup, &it))) {
-               thaw_process(task);
-       }
+       while ((task = cgroup_iter_next(cgroup, &it)))
+               __thaw_task(task);
        cgroup_iter_end(cgroup, &it);
-
-       freezer->state = CGROUP_THAWED;
 }
 
 static int freezer_change_state(struct cgroup *cgroup,
@@ -319,20 +311,24 @@ static int freezer_change_state(struct cgroup *cgroup,
        spin_lock_irq(&freezer->lock);
 
        update_if_frozen(cgroup, freezer);
-       if (goal_state == freezer->state)
-               goto out;
 
        switch (goal_state) {
        case CGROUP_THAWED:
+               if (freezer->state != CGROUP_THAWED)
+                       atomic_dec(&system_freezing_cnt);
+               freezer->state = CGROUP_THAWED;
                unfreeze_cgroup(cgroup, freezer);
                break;
        case CGROUP_FROZEN:
+               if (freezer->state == CGROUP_THAWED)
+                       atomic_inc(&system_freezing_cnt);
+               freezer->state = CGROUP_FREEZING;
                retval = try_to_freeze_cgroup(cgroup, freezer);
                break;
        default:
                BUG();
        }
-out:
+
        spin_unlock_irq(&freezer->lock);
 
        return retval;
@@ -381,10 +377,5 @@ struct cgroup_subsys freezer_subsys = {
        .populate       = freezer_populate,
        .subsys_id      = freezer_subsys_id,
        .can_attach     = freezer_can_attach,
-       .can_attach_task = freezer_can_attach_task,
-       .pre_attach     = NULL,
-       .attach_task    = NULL,
-       .attach         = NULL,
        .fork           = freezer_fork,
-       .exit           = NULL,
 };