diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index a85a700..f4ea4b6 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -777,7 +777,7 @@ EXPORT_SYMBOL_GPL(cgroup_unlock);
  * -> cgroup_mkdir.
  */
 
-static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode);
+static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
 static struct dentry *cgroup_lookup(struct inode *, struct dentry *, struct nameidata *);
 static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
 static int cgroup_populate_dir(struct cgroup *cgrp);
@@ -792,7 +792,7 @@ static struct backing_dev_info cgroup_backing_dev_info = {
 static int alloc_css_id(struct cgroup_subsys *ss,
                        struct cgroup *parent, struct cgroup *child);
 
-static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
+static struct inode *cgroup_new_inode(umode_t mode, struct super_block *sb)
 {
        struct inode *inode = new_inode(sb);
 
@@ -818,7 +818,7 @@ static int cgroup_call_pre_destroy(struct cgroup *cgrp)
 
        for_each_subsys(cgrp->root, ss)
                if (ss->pre_destroy) {
-                       ret = ss->pre_destroy(ss, cgrp);
+                       ret = ss->pre_destroy(cgrp);
                        if (ret)
                                break;
                }
@@ -846,7 +846,7 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
                 * Release the subsystem state objects.
                 */
                for_each_subsys(cgrp->root, ss)
-                       ss->destroy(ss, cgrp);
+                       ss->destroy(cgrp);
 
                cgrp->root->number_of_cgroups--;
                mutex_unlock(&cgroup_mutex);
@@ -938,7 +938,7 @@ static void cgroup_d_remove_dir(struct dentry *dentry)
  *
  * CGRP_WAIT_ON_RMDIR flag is set under cgroup's inode->i_mutex;
  */
-DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq);
+static DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq);
 
 static void cgroup_wakeup_rmdir_waiter(struct cgroup *cgrp)
 {
@@ -1015,7 +1015,7 @@ static int rebind_subsystems(struct cgroupfs_root *root,
                        list_move(&ss->sibling, &root->subsys_list);
                        ss->root = root;
                        if (ss->bind)
-                               ss->bind(ss, cgrp);
+                               ss->bind(cgrp);
                        mutex_unlock(&ss->hierarchy_mutex);
                        /* refcount was already taken, and we're keeping it */
                } else if (bit & removed_bits) {
@@ -1025,7 +1025,7 @@ static int rebind_subsystems(struct cgroupfs_root *root,
                        BUG_ON(cgrp->subsys[i]->cgroup != cgrp);
                        mutex_lock(&ss->hierarchy_mutex);
                        if (ss->bind)
-                               ss->bind(ss, dummytop);
+                               ss->bind(dummytop);
                        dummytop->subsys[i]->cgroup = dummytop;
                        cgrp->subsys[i] = NULL;
                        subsys[i]->root = &rootnode;
@@ -1056,9 +1056,9 @@ static int rebind_subsystems(struct cgroupfs_root *root,
        return 0;
 }
 
-static int cgroup_show_options(struct seq_file *seq, struct vfsmount *vfs)
+static int cgroup_show_options(struct seq_file *seq, struct dentry *dentry)
 {
-       struct cgroupfs_root *root = vfs->mnt_sb->s_fs_info;
+       struct cgroupfs_root *root = dentry->d_sb->s_fs_info;
        struct cgroup_subsys *ss;
 
        mutex_lock(&cgroup_root_mutex);
@@ -1193,10 +1193,10 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
 
        /*
         * If the 'all' option was specified select all the subsystems,
-        * otherwise 'all, 'none' and a subsystem name options were not
-        * specified, let's default to 'all'
+        * otherwise if 'none', 'name=' and a subsystem name options
+        * were not specified, let's default to 'all'
         */
-       if (all_ss || (!all_ss && !one_ss && !opts->none)) {
+       if (all_ss || (!one_ss && !opts->none && !opts->name)) {
                for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                        struct cgroup_subsys *ss = subsys[i];
                        if (ss == NULL)
@@ -1472,7 +1472,6 @@ static int cgroup_get_rootdir(struct super_block *sb)
 
        struct inode *inode =
                cgroup_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR, sb);
-       struct dentry *dentry;
 
        if (!inode)
                return -ENOMEM;
@@ -1481,12 +1480,9 @@ static int cgroup_get_rootdir(struct super_block *sb)
        inode->i_op = &cgroup_dir_inode_operations;
        /* directories start off with i_nlink == 2 (for "." entry) */
        inc_nlink(inode);
-       dentry = d_alloc_root(inode);
-       if (!dentry) {
-               iput(inode);
+       sb->s_root = d_make_root(inode);
+       if (!sb->s_root)
                return -ENOMEM;
-       }
-       sb->s_root = dentry;
        /* for everything else we want ->d_op set */
        sb->s_d_op = &cgroup_dops;
        return 0;
@@ -1763,6 +1759,7 @@ EXPORT_SYMBOL_GPL(cgroup_path);
 struct task_and_cgroup {
        struct task_struct      *task;
        struct cgroup           *cgrp;
+       struct css_set          *cg;
 };
 
 struct cgroup_taskset {
@@ -1843,11 +1840,10 @@ EXPORT_SYMBOL_GPL(cgroup_taskset_size);
  * will already exist. If not set, this function might sleep, and can fail with
  * -ENOMEM. Must be called with cgroup_mutex and threadgroup locked.
  */
-static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
-                              struct task_struct *tsk, bool guarantee)
+static void cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
+                               struct task_struct *tsk, struct css_set *newcg)
 {
        struct css_set *oldcg;
-       struct css_set *newcg;
 
        /*
         * We are synchronized through threadgroup_lock() against PF_EXITING
@@ -1857,23 +1853,6 @@ static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
        WARN_ON_ONCE(tsk->flags & PF_EXITING);
        oldcg = tsk->cgroups;
 
-       /* locate or allocate a new css_set for this task. */
-       if (guarantee) {
-               /* we know the css_set we want already exists. */
-               struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];
-               read_lock(&css_set_lock);
-               newcg = find_existing_css_set(oldcg, cgrp, template);
-               BUG_ON(!newcg);
-               get_css_set(newcg);
-               read_unlock(&css_set_lock);
-       } else {
-               might_sleep();
-               /* find_css_set will give us newcg already referenced. */
-               newcg = find_css_set(oldcg, cgrp);
-               if (!newcg)
-                       return -ENOMEM;
-       }
-
        task_lock(tsk);
        rcu_assign_pointer(tsk->cgroups, newcg);
        task_unlock(tsk);
@@ -1892,7 +1871,6 @@ static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
        put_css_set(oldcg);
 
        set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
-       return 0;
 }
 
 /**
@@ -1910,6 +1888,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
        struct cgroup *oldcgrp;
        struct cgroupfs_root *root = cgrp->root;
        struct cgroup_taskset tset = { };
+       struct css_set *newcg;
 
        /* @tsk either already exited or can't exit until the end */
        if (tsk->flags & PF_EXITING)
@@ -1925,7 +1904,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 
        for_each_subsys(root, ss) {
                if (ss->can_attach) {
-                       retval = ss->can_attach(ss, cgrp, &tset);
+                       retval = ss->can_attach(cgrp, &tset);
                        if (retval) {
                                /*
                                 * Remember on which subsystem the can_attach()
@@ -1939,13 +1918,17 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
                }
        }
 
-       retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, false);
-       if (retval)
+       newcg = find_css_set(tsk->cgroups, cgrp);
+       if (!newcg) {
+               retval = -ENOMEM;
                goto out;
+       }
+
+       cgroup_task_migrate(cgrp, oldcgrp, tsk, newcg);
 
        for_each_subsys(root, ss) {
                if (ss->attach)
-                       ss->attach(ss, cgrp, &tset);
+                       ss->attach(cgrp, &tset);
        }
 
        synchronize_rcu();
@@ -1967,7 +1950,7 @@ out:
                                 */
                                break;
                        if (ss->cancel_attach)
-                               ss->cancel_attach(ss, cgrp, &tset);
+                               ss->cancel_attach(cgrp, &tset);
                }
        }
        return retval;
@@ -1997,66 +1980,6 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 }
 EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
 
-/*
- * cgroup_attach_proc works in two stages, the first of which prefetches all
- * new css_sets needed (to make sure we have enough memory before committing
- * to the move) and stores them in a list of entries of the following type.
- * TODO: possible optimization: use css_set->rcu_head for chaining instead
- */
-struct cg_list_entry {
-       struct css_set *cg;
-       struct list_head links;
-};
-
-static bool css_set_check_fetched(struct cgroup *cgrp,
-                                 struct task_struct *tsk, struct css_set *cg,
-                                 struct list_head *newcg_list)
-{
-       struct css_set *newcg;
-       struct cg_list_entry *cg_entry;
-       struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];
-
-       read_lock(&css_set_lock);
-       newcg = find_existing_css_set(cg, cgrp, template);
-       read_unlock(&css_set_lock);
-
-       /* doesn't exist at all? */
-       if (!newcg)
-               return false;
-       /* see if it's already in the list */
-       list_for_each_entry(cg_entry, newcg_list, links)
-               if (cg_entry->cg == newcg)
-                       return true;
-
-       /* not found */
-       return false;
-}
-
-/*
- * Find the new css_set and store it in the list in preparation for moving the
- * given task to the given cgroup. Returns 0 or -ENOMEM.
- */
-static int css_set_prefetch(struct cgroup *cgrp, struct css_set *cg,
-                           struct list_head *newcg_list)
-{
-       struct css_set *newcg;
-       struct cg_list_entry *cg_entry;
-
-       /* ensure a new css_set will exist for this thread */
-       newcg = find_css_set(cg, cgrp);
-       if (!newcg)
-               return -ENOMEM;
-       /* add it to the list */
-       cg_entry = kmalloc(sizeof(struct cg_list_entry), GFP_KERNEL);
-       if (!cg_entry) {
-               put_css_set(newcg);
-               return -ENOMEM;
-       }
-       cg_entry->cg = newcg;
-       list_add(&cg_entry->links, newcg_list);
-       return 0;
-}
-
 /**
  * cgroup_attach_proc - attach all threads in a threadgroup to a cgroup
  * @cgrp: the cgroup to attach to
@@ -2065,25 +1988,17 @@ static int css_set_prefetch(struct cgroup *cgrp, struct css_set *cg,
  * Call holding cgroup_mutex and the group_rwsem of the leader. Will take
  * task_lock of each thread in leader's threadgroup individually in turn.
  */
-int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
+static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 {
-       int retval, i, group_size, nr_migrating_tasks;
+       int retval, i, group_size;
        struct cgroup_subsys *ss, *failed_ss = NULL;
        /* guaranteed to be initialized later, but the compiler needs this */
-       struct css_set *oldcg;
        struct cgroupfs_root *root = cgrp->root;
        /* threadgroup list cursor and array */
        struct task_struct *tsk;
        struct task_and_cgroup *tc;
        struct flex_array *group;
        struct cgroup_taskset tset = { };
-       /*
-        * we need to make sure we have css_sets for all the tasks we're
-        * going to move -before- we actually start moving them, so that in
-        * case we get an ENOMEM we can bail out before making any changes.
-        */
-       struct list_head newcg_list;
-       struct cg_list_entry *cg_entry, *temp_nobe;
 
        /*
         * step 0: in order to do expensive, possibly blocking operations for
@@ -2102,23 +2017,14 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
        if (retval)
                goto out_free_group_list;
 
-       /* prevent changes to the threadgroup list while we take a snapshot. */
-       read_lock(&tasklist_lock);
-       if (!thread_group_leader(leader)) {
-               /*
-                * a race with de_thread from another thread's exec() may strip
-                * us of our leadership, making while_each_thread unsafe to use
-                * on this task. if this happens, there is no choice but to
-                * throw this task away and try again (from cgroup_procs_write);
-                * this is "double-double-toil-and-trouble-check locking".
-                */
-               read_unlock(&tasklist_lock);
-               retval = -EAGAIN;
-               goto out_free_group_list;
-       }
-
        tsk = leader;
-       i = nr_migrating_tasks = 0;
+       i = 0;
+       /*
+        * Prevent freeing of tasks while we take a snapshot. Tasks that are
+        * already PF_EXITING could be freed from underneath us unless we
+        * take an rcu_read_lock.
+        */
+       rcu_read_lock();
        do {
                struct task_and_cgroup ent;
 
@@ -2128,27 +2034,28 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 
                /* as per above, nr_threads may decrease, but not increase. */
                BUG_ON(i >= group_size);
+               ent.task = tsk;
+               ent.cgrp = task_cgroup_from_root(tsk, root);
+               /* nothing to do if this task is already in the cgroup */
+               if (ent.cgrp == cgrp)
+                       continue;
                /*
                 * saying GFP_ATOMIC has no effect here because we did prealloc
                 * earlier, but it's good form to communicate our expectations.
                 */
-               ent.task = tsk;
-               ent.cgrp = task_cgroup_from_root(tsk, root);
                retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
                BUG_ON(retval != 0);
                i++;
-               if (ent.cgrp != cgrp)
-                       nr_migrating_tasks++;
        } while_each_thread(leader, tsk);
+       rcu_read_unlock();
        /* remember the number of threads in the array for later. */
        group_size = i;
        tset.tc_array = group;
        tset.tc_array_len = group_size;
-       read_unlock(&tasklist_lock);
 
        /* methods shouldn't be called if no task is actually migrating */
        retval = 0;
-       if (!nr_migrating_tasks)
+       if (!group_size)
                goto out_free_group_list;
 
        /*
@@ -2156,7 +2063,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
         */
        for_each_subsys(root, ss) {
                if (ss->can_attach) {
-                       retval = ss->can_attach(ss, cgrp, &tset);
+                       retval = ss->can_attach(cgrp, &tset);
                        if (retval) {
                                failed_ss = ss;
                                goto out_cancel_attach;
@@ -2168,23 +2075,13 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
         * step 2: make sure css_sets exist for all threads to be migrated.
         * we use find_css_set, which allocates a new one if necessary.
         */
-       INIT_LIST_HEAD(&newcg_list);
        for (i = 0; i < group_size; i++) {
                tc = flex_array_get(group, i);
-               /* nothing to do if this task is already in the cgroup */
-               if (tc->cgrp == cgrp)
-                       continue;
-               /*
-                * get old css_set pointer. threadgroup is locked so this is
-                * safe against concurrent cgroup_exit() changing this to
-                * init_css_set.
-                */
-               oldcg = tc->task->cgroups;
-
-               /* if we don't already have it in the list get a new one */
-               if (!css_set_check_fetched(cgrp, tc->task, oldcg, &newcg_list))
-                       if ((retval = css_set_prefetch(cgrp, oldcg, &newcg_list)))
-                               goto out_list_teardown;
+               tc->cg = find_css_set(tc->task->cgroups, cgrp);
+               if (!tc->cg) {
+                       retval = -ENOMEM;
+                       goto out_put_css_set_refs;
+               }
        }
 
        /*
@@ -2194,11 +2091,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
         */
        for (i = 0; i < group_size; i++) {
                tc = flex_array_get(group, i);
-               /* leave current thread as it is if it's already there */
-               if (tc->cgrp == cgrp)
-                       continue;
-               retval = cgroup_task_migrate(cgrp, tc->cgrp, tc->task, true);
-               BUG_ON(retval);
+               cgroup_task_migrate(cgrp, tc->cgrp, tc->task, tc->cg);
        }
        /* nothing is sensitive to fork() after this point. */
 
@@ -2207,7 +2100,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
         */
        for_each_subsys(root, ss) {
                if (ss->attach)
-                       ss->attach(ss, cgrp, &tset);
+                       ss->attach(cgrp, &tset);
        }
 
        /*
@@ -2216,21 +2109,22 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
        synchronize_rcu();
        cgroup_wakeup_rmdir_waiter(cgrp);
        retval = 0;
-out_list_teardown:
-       /* clean up the list of prefetched css_sets. */
-       list_for_each_entry_safe(cg_entry, temp_nobe, &newcg_list, links) {
-               list_del(&cg_entry->links);
-               put_css_set(cg_entry->cg);
-               kfree(cg_entry);
+out_put_css_set_refs:
+       if (retval) {
+               for (i = 0; i < group_size; i++) {
+                       tc = flex_array_get(group, i);
+                       if (!tc->cg)
+                               break;
+                       put_css_set(tc->cg);
+               }
        }
 out_cancel_attach:
-       /* same deal as in cgroup_attach_task */
        if (retval) {
                for_each_subsys(root, ss) {
                        if (ss == failed_ss)
                                break;
                        if (ss->cancel_attach)
-                               ss->cancel_attach(ss, cgrp, &tset);
+                               ss->cancel_attach(cgrp, &tset);
                }
        }
 out_free_group_list:
@@ -2252,22 +2146,14 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
        if (!cgroup_lock_live_group(cgrp))
                return -ENODEV;
 
+retry_find_task:
+       rcu_read_lock();
        if (pid) {
-               rcu_read_lock();
                tsk = find_task_by_vpid(pid);
                if (!tsk) {
                        rcu_read_unlock();
-                       cgroup_unlock();
-                       return -ESRCH;
-               }
-               if (threadgroup) {
-                       /*
-                        * RCU protects this access, since tsk was found in the
-                        * tid map. a race with de_thread may cause group_leader
-                        * to stop being the leader, but cgroup_attach_proc will
-                        * detect it later.
-                        */
-                       tsk = tsk->group_leader;
+                       ret = -ESRCH;
+                       goto out_unlock_cgroup;
                }
                /*
                 * even if we're attaching all tasks in the thread group, we
@@ -2278,29 +2164,38 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
                    cred->euid != tcred->uid &&
                    cred->euid != tcred->suid) {
                        rcu_read_unlock();
-                       cgroup_unlock();
-                       return -EACCES;
+                       ret = -EACCES;
+                       goto out_unlock_cgroup;
                }
-               get_task_struct(tsk);
-               rcu_read_unlock();
-       } else {
-               if (threadgroup)
-                       tsk = current->group_leader;
-               else
-                       tsk = current;
-               get_task_struct(tsk);
-       }
-
-       threadgroup_lock(tsk);
+       } else
+               tsk = current;
 
        if (threadgroup)
+               tsk = tsk->group_leader;
+       get_task_struct(tsk);
+       rcu_read_unlock();
+
+       threadgroup_lock(tsk);
+       if (threadgroup) {
+               if (!thread_group_leader(tsk)) {
+                       /*
+                        * a race with de_thread from another thread's exec()
+                        * may strip us of our leadership, if this happens,
+                        * there is no choice but to throw this task away and
+                        * try again; this is
+                        * "double-double-toil-and-trouble-check locking".
+                        */
+                       threadgroup_unlock(tsk);
+                       put_task_struct(tsk);
+                       goto retry_find_task;
+               }
                ret = cgroup_attach_proc(cgrp, tsk);
-       else
+       } else
                ret = cgroup_attach_task(cgrp, tsk);
-
        threadgroup_unlock(tsk);
 
        put_task_struct(tsk);
+out_unlock_cgroup:
        cgroup_unlock();
        return ret;
 }
@@ -2312,16 +2207,7 @@ static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid)
 
 static int cgroup_procs_write(struct cgroup *cgrp, struct cftype *cft, u64 tgid)
 {
-       int ret;
-       do {
-               /*
-                * attach_proc fails with -EAGAIN if threadgroup leadership
-                * changes in the middle of the operation, in which case we need
-                * to find the task_struct for the new leader and start over.
-                */
-               ret = attach_task_by_pid(cgrp, tgid, true);
-       } while (ret == -EAGAIN);
-       return ret;
+       return attach_task_by_pid(cgrp, tgid, true);
 }
 
 /**
@@ -2631,7 +2517,7 @@ static inline struct cftype *__file_cft(struct file *file)
        return __d_cft(file->f_dentry);
 }
 
-static int cgroup_create_file(struct dentry *dentry, mode_t mode,
+static int cgroup_create_file(struct dentry *dentry, umode_t mode,
                                struct super_block *sb)
 {
        struct inode *inode;
@@ -2672,7 +2558,7 @@ static int cgroup_create_file(struct dentry *dentry, mode_t mode,
  * @mode: mode to set on new directory.
  */
 static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry,
-                               mode_t mode)
+                               umode_t mode)
 {
        struct dentry *parent;
        int error = 0;
@@ -2699,9 +2585,9 @@ static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry,
  * returns S_IRUGO if it has only a read handler
 * returns S_IWUSR if it has only a write handler
  */
-static mode_t cgroup_file_mode(const struct cftype *cft)
+static umode_t cgroup_file_mode(const struct cftype *cft)
 {
-       mode_t mode = 0;
+       umode_t mode = 0;
 
        if (cft->mode)
                return cft->mode;
@@ -2724,7 +2610,7 @@ int cgroup_add_file(struct cgroup *cgrp,
        struct dentry *dir = cgrp->dentry;
        struct dentry *dentry;
        int error;
-       mode_t mode;
+       umode_t mode;
 
        char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 };
        if (subsys && !test_bit(ROOT_NOPREFIX, &cgrp->root->flags)) {
@@ -2811,15 +2697,20 @@ static void cgroup_advance_iter(struct cgroup *cgrp,
  * using their cgroups capability, we don't maintain the lists running
  * through each css_set to its tasks until we see the list actually
  * used - in other words after the first call to cgroup_iter_start().
- *
- * The tasklist_lock is not held here, as do_each_thread() and
- * while_each_thread() are protected by RCU.
  */
 static void cgroup_enable_task_cg_lists(void)
 {
        struct task_struct *p, *g;
        write_lock(&css_set_lock);
        use_task_css_set_links = 1;
+       /*
+        * We need tasklist_lock because RCU is not safe against
+        * while_each_thread(). Besides, a forking task that has passed
+        * cgroup_post_fork() without seeing use_task_css_set_links = 1
+        * is not guaranteed to have its child immediately visible in the
+        * tasklist if we walk through it with RCU.
+        */
+       read_lock(&tasklist_lock);
        do_each_thread(g, p) {
                task_lock(p);
                /*
@@ -2831,10 +2722,12 @@ static void cgroup_enable_task_cg_lists(void)
                        list_add(&p->cg_list, &p->cgroups->tasks);
                task_unlock(p);
        } while_each_thread(g, p);
+       read_unlock(&tasklist_lock);
        write_unlock(&css_set_lock);
 }
 
 void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it)
+       __acquires(css_set_lock)
 {
        /*
         * The first time anyone tries to iterate across a cgroup,
@@ -2874,6 +2767,7 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
 }
 
 void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it)
+       __releases(css_set_lock)
 {
        read_unlock(&css_set_lock);
 }
@@ -3048,6 +2942,38 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan)
  *
  */
 
+/* which pidlist file are we talking about? */
+enum cgroup_filetype {
+       CGROUP_FILE_PROCS,
+       CGROUP_FILE_TASKS,
+};
+
+/*
+ * A pidlist is a list of pids that virtually represents the contents of one
+ * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
+ * a pair (one each for procs, tasks) for each pid namespace that's relevant
+ * to the cgroup.
+ */
+struct cgroup_pidlist {
+       /*
+        * used to find which pidlist is wanted. doesn't change as long as
+        * this particular list stays in the list.
+       */
+       struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
+       /* array of xids */
+       pid_t *list;
+       /* how many elements the above list has */
+       int length;
+       /* how many files are using the current array */
+       int use_count;
+       /* each of these stored in a list by its cgroup */
+       struct list_head links;
+       /* pointer to the cgroup we belong to, for list removal purposes */
+       struct cgroup *owner;
+       /* protects the other fields */
+       struct rw_semaphore mutex;
+};
+
 /*
  * The following two functions "fix" the issue where there are more pids
  * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
@@ -3798,7 +3724,7 @@ static void cgroup_unlock_hierarchy(struct cgroupfs_root *root)
  * Must be called with the mutex on the parent inode held
  */
 static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
-                            mode_t mode)
+                            umode_t mode)
 {
        struct cgroup *cgrp;
        struct cgroupfs_root *root = parent->root;
@@ -3832,7 +3758,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
                set_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
 
        for_each_subsys(root, ss) {
-               struct cgroup_subsys_state *css = ss->create(ss, cgrp);
+               struct cgroup_subsys_state *css = ss->create(cgrp);
 
                if (IS_ERR(css)) {
                        err = PTR_ERR(css);
@@ -3846,7 +3772,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
                }
                /* At error, ->destroy() callback has to free assigned ID. */
                if (clone_children(parent) && ss->post_clone)
-                       ss->post_clone(ss, cgrp);
+                       ss->post_clone(cgrp);
        }
 
        cgroup_lock_hierarchy(root);
@@ -3880,7 +3806,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
 
        for_each_subsys(root, ss) {
                if (cgrp->subsys[ss->subsys_id])
-                       ss->destroy(ss, cgrp);
+                       ss->destroy(cgrp);
        }
 
        mutex_unlock(&cgroup_mutex);
@@ -3892,7 +3818,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
        return err;
 }
 
-static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
        struct cgroup *c_parent = dentry->d_parent->d_fsdata;
 
@@ -4104,7 +4030,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
        /* Create the top cgroup state for this subsystem */
        list_add(&ss->sibling, &rootnode.subsys_list);
        ss->root = &rootnode;
-       css = ss->create(ss, dummytop);
+       css = ss->create(dummytop);
        /* We don't handle early failures gracefully */
        BUG_ON(IS_ERR(css));
        init_cgroup_css(css, ss, dummytop);
@@ -4193,7 +4119,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
         * no ss->create seems to need anything important in the ss struct, so
         * this can happen first (i.e. before the rootnode attachment).
         */
-       css = ss->create(ss, dummytop);
+       css = ss->create(dummytop);
        if (IS_ERR(css)) {
                /* failure case - need to deassign the subsys[] slot. */
                subsys[i] = NULL;
@@ -4211,7 +4137,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
                int ret = cgroup_init_idr(ss, css);
                if (ret) {
                        dummytop->subsys[ss->subsys_id] = NULL;
-                       ss->destroy(ss, dummytop);
+                       ss->destroy(dummytop);
                        subsys[i] = NULL;
                        mutex_unlock(&cgroup_mutex);
                        return ret;
@@ -4309,7 +4235,7 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss)
         * pointer to find their state. note that this also takes care of
         * freeing the css_id.
         */
-       ss->destroy(ss, dummytop);
+       ss->destroy(dummytop);
        dummytop->subsys[ss->subsys_id] = NULL;
 
        mutex_unlock(&cgroup_mutex);
@@ -4585,7 +4511,7 @@ void cgroup_fork_callbacks(struct task_struct *child)
                for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
                        struct cgroup_subsys *ss = subsys[i];
                        if (ss->fork)
-                               ss->fork(ss, child);
+                               ss->fork(child);
                }
        }
 }
@@ -4601,12 +4527,32 @@ void cgroup_fork_callbacks(struct task_struct *child)
  */
 void cgroup_post_fork(struct task_struct *child)
 {
+       /*
+        * use_task_css_set_links is set to 1 before we walk the tasklist
+        * under the tasklist_lock and we read it here after we added the child
+        * to the tasklist under the tasklist_lock as well. If the child wasn't
+        * yet in the tasklist when we walked through it from
+        * cgroup_enable_task_cg_lists(), then use_task_css_set_links value
+        * should be visible now due to the paired locking and barriers implied
+        * by LOCK/UNLOCK: it is written before the tasklist_lock unlock
+        * in cgroup_enable_task_cg_lists() and read here after the tasklist_lock
+        * lock on fork.
+        */
        if (use_task_css_set_links) {
                write_lock(&css_set_lock);
-               task_lock(child);
-               if (list_empty(&child->cg_list))
+               if (list_empty(&child->cg_list)) {
+                       /*
+                        * It's safe to use child->cgroups without task_lock()
+                        * here because we are protected through
+                        * threadgroup_change_begin() against concurrent
+                        * css_set change in cgroup_task_migrate(). Also
+                        * the task can't exit at that point until
+                        * wake_up_new_task() is called, so we are protected
+                        * against cgroup_exit() setting child->cgroup to
+                        * init_css_set.
+                        */
                        list_add(&child->cg_list, &child->cgroups->tasks);
-               task_unlock(child);
+               }
                write_unlock(&css_set_lock);
        }
 }
@@ -4678,7 +4624,7 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
                                struct cgroup *old_cgrp =
                                        rcu_dereference_raw(cg->subsys[i])->cgroup;
                                struct cgroup *cgrp = task_cgroup(tsk, i);
-                               ss->exit(ss, cgrp, old_cgrp, tsk);
+                               ss->exit(cgrp, old_cgrp, tsk);
                        }
                }
        }
@@ -4935,9 +4881,9 @@ void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css)
 
        rcu_assign_pointer(id->css, NULL);
        rcu_assign_pointer(css->id, NULL);
-       write_lock(&ss->id_lock);
+       spin_lock(&ss->id_lock);
        idr_remove(&ss->idr, id->id);
-       write_unlock(&ss->id_lock);
+       spin_unlock(&ss->id_lock);
        kfree_rcu(id, rcu_head);
 }
 EXPORT_SYMBOL_GPL(free_css_id);
@@ -4963,10 +4909,10 @@ static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
                error = -ENOMEM;
                goto err_out;
        }
-       write_lock(&ss->id_lock);
+       spin_lock(&ss->id_lock);
        /* Don't use 0. allocates an ID of 1-65535 */
        error = idr_get_new_above(&ss->idr, newid, 1, &myid);
-       write_unlock(&ss->id_lock);
+       spin_unlock(&ss->id_lock);
 
        /* Returns error when there are no free spaces for new ID.*/
        if (error) {
@@ -4981,9 +4927,9 @@ static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
        return newid;
 remove_idr:
        error = -ENOSPC;
-       write_lock(&ss->id_lock);
+       spin_lock(&ss->id_lock);
        idr_remove(&ss->idr, myid);
-       write_unlock(&ss->id_lock);
+       spin_unlock(&ss->id_lock);
 err_out:
        kfree(newid);
        return ERR_PTR(error);
@@ -4995,7 +4941,7 @@ static int __init_or_module cgroup_init_idr(struct cgroup_subsys *ss,
 {
        struct css_id *newid;
 
-       rwlock_init(&ss->id_lock);
+       spin_lock_init(&ss->id_lock);
        idr_init(&ss->idr);
 
        newid = get_new_cssid(ss, 0);
@@ -5083,6 +5029,8 @@ css_get_next(struct cgroup_subsys *ss, int id,
                return NULL;
 
        BUG_ON(!ss->use_id);
+       WARN_ON_ONCE(!rcu_read_lock_held());
+
        /* fill start point for scan */
        tmpid = id;
        while (1) {
@@ -5090,10 +5038,7 @@ css_get_next(struct cgroup_subsys *ss, int id,
                 * scan next entry from bitmap(tree), tmpid is updated after
                 * idr_get_next().
                 */
-               read_lock(&ss->id_lock);
                tmp = idr_get_next(&ss->idr, &tmpid);
-               read_unlock(&ss->id_lock);
-
                if (!tmp)
                        break;
                if (tmp->depth >= depth && tmp->stack[depth] == rootid) {
@@ -5133,8 +5078,7 @@ struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id)
 }
 
 #ifdef CONFIG_CGROUP_DEBUG
-static struct cgroup_subsys_state *debug_create(struct cgroup_subsys *ss,
-                                                  struct cgroup *cont)
+static struct cgroup_subsys_state *debug_create(struct cgroup *cont)
 {
        struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);
 
@@ -5144,7 +5088,7 @@ static struct cgroup_subsys_state *debug_create(struct cgroup_subsys *ss,
        return css;
 }
 
-static void debug_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
+static void debug_destroy(struct cgroup *cont)
 {
        kfree(cont->subsys[debug_subsys_id]);
 }