diff --git a/kernel/fork.c b/kernel/fork.c
index 7701470..b9372a0 100644
@@ -37,9 +37,9 @@
 #include <linux/swap.h>
 #include <linux/syscalls.h>
 #include <linux/jiffies.h>
-#include <linux/tracehook.h>
 #include <linux/futex.h>
 #include <linux/compat.h>
+#include <linux/kthread.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/rcupdate.h>
 #include <linux/ptrace.h>
 #include <linux/taskstats_kern.h>
 #include <linux/random.h>
 #include <linux/tty.h>
-#include <linux/proc_fs.h>
 #include <linux/blkdev.h>
 #include <linux/fs_struct.h>
 #include <linux/magic.h>
 #include <linux/perf_event.h>
 #include <linux/posix-timers.h>
 #include <linux/user-return-notifier.h>
+#include <linux/oom.h>
+#include <linux/khugepaged.h>
+#include <linux/signalfd.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 
 #include <trace/events/sched.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/task.h>
+
 /*
  * Protected counters by write_lock_irq(&tasklist_lock)
  */
 unsigned long total_forks;     /* Handle normal Linux uptimes. */
-int nr_threads;                /* The idle threads do not count.. */
+int nr_threads;                        /* The idle threads do not count.. */
 
 int max_threads;               /* tunable limit on nr_threads */
 
@@ -107,20 +112,25 @@ int nr_processes(void)
 }
 
 #ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
-# define alloc_task_struct()   kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
-# define free_task_struct(tsk) kmem_cache_free(task_struct_cachep, (tsk))
+# define alloc_task_struct_node(node)          \
+               kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node)
+# define free_task_struct(tsk)                 \
+               kmem_cache_free(task_struct_cachep, (tsk))
 static struct kmem_cache *task_struct_cachep;
 #endif
 
 #ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR
-static inline struct thread_info *alloc_thread_info(struct task_struct *tsk)
+static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
+                                                 int node)
 {
 #ifdef CONFIG_DEBUG_STACK_USAGE
        gfp_t mask = GFP_KERNEL | __GFP_ZERO;
 #else
        gfp_t mask = GFP_KERNEL;
 #endif
-       return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER);
+       struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER);
+
+       return page ? page_address(page) : NULL;
 }
 
 static inline void free_thread_info(struct thread_info *ti)
@@ -156,7 +166,6 @@ static void account_kernel_stack(struct thread_info *ti, int account)
 
 void free_task(struct task_struct *tsk)
 {
-       prop_local_destroy_single(&tsk->dirties);
        account_kernel_stack(tsk->stack, -1);
        free_thread_info(tsk->stack);
        rt_mutex_debug_task_free(tsk);
@@ -168,6 +177,7 @@ EXPORT_SYMBOL(free_task);
 static inline void free_signal_struct(struct signal_struct *sig)
 {
        taskstats_tgid_free(sig);
+       sched_autogroup_exit(sig);
        kmem_cache_free(signal_cachep, sig);
 }
 
@@ -183,6 +193,7 @@ void __put_task_struct(struct task_struct *tsk)
        WARN_ON(atomic_read(&tsk->usage));
        WARN_ON(tsk == current);
 
+       security_task_free(tsk);
        exit_creds(tsk);
        delayacct_tsk_free(tsk);
        put_signal_struct(tsk->signal);
@@ -190,6 +201,7 @@ void __put_task_struct(struct task_struct *tsk)
        if (!profile_handoff_task(tsk))
                free_task(tsk);
 }
+EXPORT_SYMBOL_GPL(__put_task_struct);
 
 /*
  * macro override instead of weak attribute alias, to workaround
@@ -224,7 +236,7 @@ void __init fork_init(unsigned long mempages)
        /*
         * we need to allow at least 20 threads to boot a system
         */
-       if(max_threads < 20)
+       if (max_threads < 20)
                max_threads = 20;
 
        init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
@@ -245,33 +257,30 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
        struct task_struct *tsk;
        struct thread_info *ti;
        unsigned long *stackend;
-
+       int node = tsk_fork_get_node(orig);
        int err;
 
        prepare_to_copy(orig);
 
-       tsk = alloc_task_struct();
+       tsk = alloc_task_struct_node(node);
        if (!tsk)
                return NULL;
 
-       ti = alloc_thread_info(tsk);
+       ti = alloc_thread_info_node(tsk, node);
        if (!ti) {
                free_task_struct(tsk);
                return NULL;
        }
 
-       err = arch_dup_task_struct(tsk, orig);
+       err = arch_dup_task_struct(tsk, orig);
        if (err)
                goto out;
 
        tsk->stack = ti;
 
-       err = prop_local_init_single(&tsk->dirties);
-       if (err)
-               goto out;
-
        setup_thread_stack(tsk, orig);
        clear_user_return_notifier(tsk);
+       clear_tsk_need_resched(tsk);
        stackend = end_of_stack(tsk);
        *stackend = STACK_END_MAGIC;    /* for overflow detection */
 
@@ -279,9 +288,11 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
        tsk->stack_canary = get_random_int();
 #endif
 
-       /* One for us, one for whoever does the "release_task()" (usually parent) */
-       atomic_set(&tsk->usage,2);
-       atomic_set(&tsk->fs_excl, 0);
+       /*
+        * One for us, one for whoever does the "release_task()" (usually
+        * parent)
+        */
+       atomic_set(&tsk->usage, 2);
 #ifdef CONFIG_BLK_DEV_IO_TRACE
        tsk->btrace_seq = 0;
 #endif
@@ -300,7 +311,7 @@ out:
 #ifdef CONFIG_MMU
 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 {
-       struct vm_area_struct *mpnt, *tmp, **pprev;
+       struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
        struct rb_node **rb_link, *rb_parent;
        int retval;
        unsigned long charge;
@@ -327,7 +338,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
        retval = ksm_fork(mm, oldmm);
        if (retval)
                goto out;
+       retval = khugepaged_fork(mm, oldmm);
+       if (retval)
+               goto out;
 
+       prev = NULL;
        for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
                struct file *file;
 
@@ -341,7 +356,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                charge = 0;
                if (mpnt->vm_flags & VM_ACCOUNT) {
                        unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
-                       if (security_vm_enough_memory(len))
+                       if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
                                goto fail_nomem;
                        charge = len;
                }
@@ -355,11 +370,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                if (IS_ERR(pol))
                        goto fail_nomem_policy;
                vma_set_policy(tmp, pol);
+               tmp->vm_mm = mm;
                if (anon_vma_fork(tmp, mpnt))
                        goto fail_nomem_anon_vma_fork;
                tmp->vm_flags &= ~VM_LOCKED;
-               tmp->vm_mm = mm;
-               tmp->vm_next = NULL;
+               tmp->vm_next = tmp->vm_prev = NULL;
                file = tmp->vm_file;
                if (file) {
                        struct inode *inode = file->f_path.dentry->d_inode;
@@ -368,15 +383,14 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                        get_file(file);
                        if (tmp->vm_flags & VM_DENYWRITE)
                                atomic_dec(&inode->i_writecount);
-                       spin_lock(&mapping->i_mmap_lock);
+                       mutex_lock(&mapping->i_mmap_mutex);
                        if (tmp->vm_flags & VM_SHARED)
                                mapping->i_mmap_writable++;
-                       tmp->vm_truncate_count = mpnt->vm_truncate_count;
                        flush_dcache_mmap_lock(mapping);
                        /* insert tmp into the share list, just after mpnt */
                        vma_prio_tree_add(tmp, mpnt);
                        flush_dcache_mmap_unlock(mapping);
-                       spin_unlock(&mapping->i_mmap_lock);
+                       mutex_unlock(&mapping->i_mmap_mutex);
                }
 
                /*
@@ -392,6 +406,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                 */
                *pprev = tmp;
                pprev = &tmp->vm_next;
+               tmp->vm_prev = prev;
+               prev = tmp;
 
                __vma_link_rb(mm, tmp, rb_link, rb_parent);
                rb_link = &tmp->vm_rb.rb_right;
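
With tmp->vm_prev now maintained alongside vm_next, the per-mm VMA list built here is doubly linked. A hypothetical debug walker (not part of this patch; assumes the caller may sleep and takes mmap_sem) could traverse the copy front to back:

/* Hypothetical debug walker: dump the duplicated mm's VMA list. */
static void example_dump_vmas(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		printk(KERN_DEBUG "vma %08lx-%08lx prev=%p\n",
		       vma->vm_start, vma->vm_end, vma->vm_prev);
	up_read(&mm->mmap_sem);
}
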
@@ -424,7 +440,7 @@ fail_nomem:
        goto out;
 }
 
-static inline int mm_alloc_pgd(struct mm_struct * mm)
+static inline int mm_alloc_pgd(struct mm_struct *mm)
 {
        mm->pgd = pgd_alloc(mm);
        if (unlikely(!mm->pgd))
@@ -432,7 +448,7 @@ static inline int mm_alloc_pgd(struct mm_struct * mm)
        return 0;
 }
 
-static inline void mm_free_pgd(struct mm_struct * mm)
+static inline void mm_free_pgd(struct mm_struct *mm)
 {
        pgd_free(mm, mm->pgd);
 }
@@ -469,7 +485,7 @@ static void mm_init_aio(struct mm_struct *mm)
 #endif
 }
 
-static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
+static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
 {
        atomic_set(&mm->mm_users, 1);
        atomic_set(&mm->mm_count, 1);
@@ -496,19 +512,37 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
        return NULL;
 }
 
+static void check_mm(struct mm_struct *mm)
+{
+       int i;
+
+       for (i = 0; i < NR_MM_COUNTERS; i++) {
+               long x = atomic_long_read(&mm->rss_stat.count[i]);
+
+               if (unlikely(x))
+                       printk(KERN_ALERT "BUG: Bad rss-counter state "
+                                         "mm:%p idx:%d val:%ld\n", mm, i, x);
+       }
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       VM_BUG_ON(mm->pmd_huge_pte);
+#endif
+}
+
 /*
  * Allocate and initialize an mm_struct.
  */
-struct mm_struct * mm_alloc(void)
+struct mm_struct *mm_alloc(void)
 {
-       struct mm_struct * mm;
+       struct mm_struct *mm;
 
        mm = allocate_mm();
-       if (mm) {
-               memset(mm, 0, sizeof(*mm));
-               mm = mm_init(mm, current);
-       }
-       return mm;
+       if (!mm)
+               return NULL;
+
+       memset(mm, 0, sizeof(*mm));
+       mm_init_cpumask(mm);
+       return mm_init(mm, current);
 }
 
 /*
@@ -522,6 +556,7 @@ void __mmdrop(struct mm_struct *mm)
        mm_free_pgd(mm);
        destroy_context(mm);
        mmu_notifier_mm_destroy(mm);
+       check_mm(mm);
        free_mm(mm);
 }
 EXPORT_SYMBOL_GPL(__mmdrop);
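
As a usage sketch (hypothetical, not from this patch): code that needs to keep the mm_struct itself alive, but not its address space, pins it via mm_count and releases it with mmdrop(), which is what eventually reaches __mmdrop() and the new check_mm() above:

/* Hypothetical example: take a "struct only" reference on an mm. */
static void example_inspect_mm(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);	/* pin the mm_struct */

	/* ... look at mm->pgd, counters, etc. ... */

	mmdrop(mm);			/* last drop ends up in __mmdrop() */
}
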
@@ -536,6 +571,7 @@ void mmput(struct mm_struct *mm)
        if (atomic_dec_and_test(&mm->mm_users)) {
                exit_aio(mm);
                ksm_exit(mm);
+               khugepaged_exit(mm); /* must run before exit_mmap */
                exit_mmap(mm);
                set_mm_exe_file(mm, NULL);
                if (!list_empty(&mm->mmlist)) {
@@ -551,6 +587,57 @@ void mmput(struct mm_struct *mm)
 }
 EXPORT_SYMBOL_GPL(mmput);
 
+/*
+ * We added or removed a vma mapping the executable. The vmas are only mapped
+ * during exec and are not mapped with the mmap system call.
+ * Callers must hold down_write() on the mm's mmap_sem for these
+ */
+void added_exe_file_vma(struct mm_struct *mm)
+{
+       mm->num_exe_file_vmas++;
+}
+
+void removed_exe_file_vma(struct mm_struct *mm)
+{
+       mm->num_exe_file_vmas--;
+       if ((mm->num_exe_file_vmas == 0) && mm->exe_file) {
+               fput(mm->exe_file);
+               mm->exe_file = NULL;
+       }
+
+}
+
+void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
+{
+       if (new_exe_file)
+               get_file(new_exe_file);
+       if (mm->exe_file)
+               fput(mm->exe_file);
+       mm->exe_file = new_exe_file;
+       mm->num_exe_file_vmas = 0;
+}
+
+struct file *get_mm_exe_file(struct mm_struct *mm)
+{
+       struct file *exe_file;
+
+       /* We need mmap_sem to protect against races with removal of
+        * VM_EXECUTABLE vmas */
+       down_read(&mm->mmap_sem);
+       exe_file = mm->exe_file;
+       if (exe_file)
+               get_file(exe_file);
+       up_read(&mm->mmap_sem);
+       return exe_file;
+}
+
+static void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm)
+{
+       /* It's safe to write the exe_file pointer without exe_file_lock because
+        * this is called during fork when the task is not yet in /proc */
+       newmm->exe_file = get_mm_exe_file(oldmm);
+}
+
 /**
  * get_task_mm - acquire a reference to the task's mm
  *
@@ -577,6 +664,58 @@ struct mm_struct *get_task_mm(struct task_struct *task)
 }
 EXPORT_SYMBOL_GPL(get_task_mm);
 
+struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
+{
+       struct mm_struct *mm;
+       int err;
+
+       err =  mutex_lock_killable(&task->signal->cred_guard_mutex);
+       if (err)
+               return ERR_PTR(err);
+
+       mm = get_task_mm(task);
+       if (mm && mm != current->mm &&
+                       !ptrace_may_access(task, mode)) {
+               mmput(mm);
+               mm = ERR_PTR(-EACCES);
+       }
+       mutex_unlock(&task->signal->cred_guard_mutex);
+
+       return mm;
+}
+
+static void complete_vfork_done(struct task_struct *tsk)
+{
+       struct completion *vfork;
+
+       task_lock(tsk);
+       vfork = tsk->vfork_done;
+       if (likely(vfork)) {
+               tsk->vfork_done = NULL;
+               complete(vfork);
+       }
+       task_unlock(tsk);
+}
+
+static int wait_for_vfork_done(struct task_struct *child,
+                               struct completion *vfork)
+{
+       int killed;
+
+       freezer_do_not_count();
+       killed = wait_for_completion_killable(vfork);
+       freezer_count();
+
+       if (killed) {
+               task_lock(child);
+               child->vfork_done = NULL;
+               task_unlock(child);
+       }
+
+       put_task_struct(child);
+       return killed;
+}
+
 /* Please note the differences between mmput and mm_release.
  * mmput is called whenever we stop holding onto a mm_struct,
  * error success whatever.
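
The new mm_access() helper is meant to be paired with mmput() by /proc-style readers. A hypothetical caller (not part of this patch) handles its three possible outcomes: an mm with a reference held, NULL for a task without an mm, or an ERR_PTR:

/* Hypothetical caller: ptrace-checked access to another task's mm. */
static int example_peek_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	mm = mm_access(task, PTRACE_MODE_ATTACH);
	if (IS_ERR(mm))
		return PTR_ERR(mm);	/* -EINTR or -EACCES */
	if (!mm)
		return -ESRCH;		/* kernel thread, no mm */

	/* ... e.g. access_remote_vm(mm, addr, buf, len, 0) ... */

	mmput(mm);
	return 0;
}
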
@@ -592,8 +731,6 @@ EXPORT_SYMBOL_GPL(get_task_mm);
  */
 void mm_release(struct task_struct *tsk, struct mm_struct *mm)
 {
-       struct completion *vfork_done = tsk->vfork_done;
-
        /* Get rid of any futexes when releasing the mm */
 #ifdef CONFIG_FUTEX
        if (unlikely(tsk->robust_list)) {
@@ -613,17 +750,15 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
        /* Get rid of any cached register state */
        deactivate_mm(tsk, mm);
 
-       /* notify parent sleeping on vfork() */
-       if (vfork_done) {
-               tsk->vfork_done = NULL;
-               complete(vfork_done);
-       }
+       if (tsk->vfork_done)
+               complete_vfork_done(tsk);
 
        /*
         * If we're exiting normally, clear a user-space tid field if
         * requested.  We leave this alone when dying by signal, to leave
         * the value intact in a core dump, and to save the unnecessary
-        * trouble otherwise.  Userland only wants this done for a sys_exit.
+        * trouble, say, a killed vfork parent shouldn't touch this mm.
+        * Userland only wants this done for a sys_exit.
         */
        if (tsk->clear_child_tid) {
                if (!(tsk->flags & PF_SIGNALED) &&
@@ -657,11 +792,16 @@ struct mm_struct *dup_mm(struct task_struct *tsk)
                goto fail_nomem;
 
        memcpy(mm, oldmm, sizeof(*mm));
+       mm_init_cpumask(mm);
 
        /* Initializing for Swap token stuff */
        mm->token_priority = 0;
        mm->last_interval = 0;
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       mm->pmd_huge_pte = NULL;
+#endif
+
        if (!mm_init(mm, tsk))
                goto fail_nomem;
 
@@ -700,9 +840,9 @@ fail_nocontext:
        return NULL;
 }
 
-static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
+static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
 {
-       struct mm_struct * mm, *oldmm;
+       struct mm_struct *mm, *oldmm;
        int retval;
 
        tsk->min_flt = tsk->maj_flt = 0;
@@ -752,13 +892,13 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
        struct fs_struct *fs = current->fs;
        if (clone_flags & CLONE_FS) {
                /* tsk->fs is already what we want */
-               write_lock(&fs->lock);
+               spin_lock(&fs->lock);
                if (fs->in_exec) {
-                       write_unlock(&fs->lock);
+                       spin_unlock(&fs->lock);
                        return -EAGAIN;
                }
                fs->users++;
-               write_unlock(&fs->lock);
+               spin_unlock(&fs->lock);
                return 0;
        }
        tsk->fs = copy_fs_struct(fs);
@@ -767,7 +907,7 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
        return 0;
 }
 
-static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
+static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
 {
        struct files_struct *oldf, *newf;
        int error = 0;
@@ -798,6 +938,7 @@ static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
 {
 #ifdef CONFIG_BLOCK
        struct io_context *ioc = current->io_context;
+       struct io_context *new_ioc;
 
        if (!ioc)
                return 0;
@@ -809,11 +950,12 @@ static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
                if (unlikely(!tsk->io_context))
                        return -ENOMEM;
        } else if (ioprio_valid(ioc->ioprio)) {
-               tsk->io_context = alloc_io_context(GFP_KERNEL, -1);
-               if (unlikely(!tsk->io_context))
+               new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
+               if (unlikely(!new_ioc))
                        return -ENOMEM;
 
-               tsk->io_context->ioprio = ioc->ioprio;
+               new_ioc->ioprio = ioc->ioprio;
+               put_io_context(new_ioc);
        }
 #endif
        return 0;
@@ -838,8 +980,10 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
 
 void __cleanup_sighand(struct sighand_struct *sighand)
 {
-       if (atomic_dec_and_test(&sighand->count))
+       if (atomic_dec_and_test(&sighand->count)) {
+               signalfd_cleanup(sighand);
                kmem_cache_free(sighand_cachep, sighand);
+       }
 }
 
 
@@ -877,9 +1021,9 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
        if (!sig)
                return -ENOMEM;
 
-       atomic_set(&sig->sigcnt, 1);
-       atomic_set(&sig->count, 1);
+       sig->nr_threads = 1;
        atomic_set(&sig->live, 1);
+       atomic_set(&sig->sigcnt, 1);
        init_waitqueue_head(&sig->wait_chldexit);
        if (clone_flags & CLONE_NEWPID)
                sig->flags |= SIGNAL_UNKILLABLE;
@@ -897,8 +1041,20 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
        posix_cpu_timers_init_group(sig);
 
        tty_audit_fork(sig);
+       sched_autogroup_fork(sig);
+
+#ifdef CONFIG_CGROUPS
+       init_rwsem(&sig->group_rwsem);
+#endif
 
        sig->oom_adj = current->signal->oom_adj;
+       sig->oom_score_adj = current->signal->oom_score_adj;
+       sig->oom_score_adj_min = current->signal->oom_score_adj_min;
+
+       sig->has_child_subreaper = current->signal->has_child_subreaper ||
+                                  current->signal->is_child_subreaper;
+
+       mutex_init(&sig->cred_guard_mutex);
 
        return 0;
 }
@@ -907,11 +1063,9 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 {
        unsigned long new_flags = p->flags;
 
-       new_flags &= ~PF_SUPERPRIV;
+       new_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
        new_flags |= PF_FORKNOEXEC;
-       new_flags |= PF_STARTING;
        p->flags = new_flags;
-       clear_freeze_flag(p);
 }
 
 SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
@@ -925,7 +1079,7 @@ static void rt_mutex_init_task(struct task_struct *p)
 {
        raw_spin_lock_init(&p->pi_lock);
 #ifdef CONFIG_RT_MUTEXES
-       plist_head_init_raw(&p->pi_waiters, &p->pi_lock);
+       plist_head_init(&p->pi_waiters);
        p->pi_blocked_on = NULL;
 #endif
 }
@@ -942,8 +1096,8 @@ void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
  */
 static void posix_cpu_timers_init(struct task_struct *tsk)
 {
-       tsk->cputime_expires.prof_exp = cputime_zero;
-       tsk->cputime_expires.virt_exp = cputime_zero;
+       tsk->cputime_expires.prof_exp = 0;
+       tsk->cputime_expires.virt_exp = 0;
        tsk->cputime_expires.sched_exp = 0;
        INIT_LIST_HEAD(&tsk->cpu_timers[0]);
        INIT_LIST_HEAD(&tsk->cpu_timers[1]);
@@ -1022,6 +1176,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                    p->real_cred->user != INIT_USER)
                        goto bad_fork_free;
        }
+       current->flags &= ~PF_NPROC_EXCEEDED;
 
        retval = copy_creds(p, clone_flags);
        if (retval < 0)
@@ -1050,14 +1205,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
        init_sigpending(&p->pending);
 
-       p->utime = cputime_zero;
-       p->stime = cputime_zero;
-       p->gtime = cputime_zero;
-       p->utimescaled = cputime_zero;
-       p->stimescaled = cputime_zero;
+       p->utime = p->stime = p->gtime = 0;
+       p->utimescaled = p->stimescaled = 0;
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
-       p->prev_utime = cputime_zero;
-       p->prev_stime = cputime_zero;
+       p->prev_utime = p->prev_stime = 0;
 #endif
 #if defined(SPLIT_RSS_COUNTING)
        memset(&p->rss_stat, 0, sizeof(p->rss_stat));
@@ -1070,25 +1221,27 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
        posix_cpu_timers_init(p);
 
-       p->lock_depth = -1;             /* -1 = no lock */
        do_posix_clock_monotonic_gettime(&p->start_time);
        p->real_start_time = p->start_time;
        monotonic_to_bootbased(&p->real_start_time);
        p->io_context = NULL;
        p->audit_context = NULL;
+       if (clone_flags & CLONE_THREAD)
+               threadgroup_change_begin(current);
        cgroup_fork(p);
 #ifdef CONFIG_NUMA
        p->mempolicy = mpol_dup(p->mempolicy);
-       if (IS_ERR(p->mempolicy)) {
-               retval = PTR_ERR(p->mempolicy);
-               p->mempolicy = NULL;
-               goto bad_fork_cleanup_cgroup;
-       }
+       if (IS_ERR(p->mempolicy)) {
+               retval = PTR_ERR(p->mempolicy);
+               p->mempolicy = NULL;
+               goto bad_fork_cleanup_cgroup;
+       }
        mpol_fix_fork_child_flag(p);
 #endif
 #ifdef CONFIG_CPUSETS
-       p->cpuset_mem_spread_rotor = node_random(p->mems_allowed);
-       p->cpuset_slab_spread_rotor = node_random(p->mems_allowed);
+       p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
+       p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
+       seqcount_init(&p->mems_allowed_seq);
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
        p->irq_events = 0;
@@ -1124,30 +1277,38 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #endif
 
        /* Perform scheduler related setup. Assign this task to a CPU. */
-       sched_fork(p, clone_flags);
+       sched_fork(p);
 
        retval = perf_event_init_task(p);
        if (retval)
                goto bad_fork_cleanup_policy;
-
-       if ((retval = audit_alloc(p)))
+       retval = audit_alloc(p);
+       if (retval)
                goto bad_fork_cleanup_policy;
        /* copy all the process information */
-       if ((retval = copy_semundo(clone_flags, p)))
+       retval = copy_semundo(clone_flags, p);
+       if (retval)
                goto bad_fork_cleanup_audit;
-       if ((retval = copy_files(clone_flags, p)))
+       retval = copy_files(clone_flags, p);
+       if (retval)
                goto bad_fork_cleanup_semundo;
-       if ((retval = copy_fs(clone_flags, p)))
+       retval = copy_fs(clone_flags, p);
+       if (retval)
                goto bad_fork_cleanup_files;
-       if ((retval = copy_sighand(clone_flags, p)))
+       retval = copy_sighand(clone_flags, p);
+       if (retval)
                goto bad_fork_cleanup_fs;
-       if ((retval = copy_signal(clone_flags, p)))
+       retval = copy_signal(clone_flags, p);
+       if (retval)
                goto bad_fork_cleanup_sighand;
-       if ((retval = copy_mm(clone_flags, p)))
+       retval = copy_mm(clone_flags, p);
+       if (retval)
                goto bad_fork_cleanup_signal;
-       if ((retval = copy_namespaces(clone_flags, p)))
+       retval = copy_namespaces(clone_flags, p);
+       if (retval)
                goto bad_fork_cleanup_mm;
-       if ((retval = copy_io(clone_flags, p)))
+       retval = copy_io(clone_flags, p);
+       if (retval)
                goto bad_fork_cleanup_namespaces;
        retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
        if (retval)
@@ -1158,12 +1319,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                pid = alloc_pid(p->nsproxy->pid_ns);
                if (!pid)
                        goto bad_fork_cleanup_io;
-
-               if (clone_flags & CLONE_NEWPID) {
-                       retval = pid_ns_prepare_proc(p->nsproxy->pid_ns);
-                       if (retval < 0)
-                               goto bad_fork_free_pid;
-               }
        }
 
        p->pid = pid_nr(pid);
@@ -1171,17 +1326,14 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        if (clone_flags & CLONE_THREAD)
                p->tgid = current->tgid;
 
-       if (current->nsproxy != p->nsproxy) {
-               retval = ns_cgroup_clone(p, pid);
-               if (retval)
-                       goto bad_fork_free_pid;
-       }
-
        p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
        /*
         * Clear TID on mm_release()?
         */
-       p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
+       p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
+#ifdef CONFIG_BLOCK
+       p->plug = NULL;
+#endif
 #ifdef CONFIG_FUTEX
        p->robust_list = NULL;
 #ifdef CONFIG_COMPAT
@@ -1208,10 +1360,20 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        clear_all_latency_tracing(p);
 
        /* ok, now we should be set up.. */
-       p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
+       if (clone_flags & CLONE_THREAD)
+               p->exit_signal = -1;
+       else if (clone_flags & CLONE_PARENT)
+               p->exit_signal = current->group_leader->exit_signal;
+       else
+               p->exit_signal = (clone_flags & CSIGNAL);
+
        p->pdeath_signal = 0;
        p->exit_state = 0;
 
+       p->nr_dirtied = 0;
+       p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
+       p->dirty_paused_when = 0;
+
        /*
         * Ok, make it visible to the rest of the system.
         * We dont wake it up yet.
@@ -1246,7 +1408,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
         * it's process group.
         * A fatal signal pending means that current will exit, so the new
         * thread can't slip out of an OOM kill (or normal SIGKILL).
-        */
+       */
        recalc_sigpending();
        if (signal_pending(current)) {
                spin_unlock(&current->sighand->siglock);
@@ -1256,18 +1418,18 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        }
 
        if (clone_flags & CLONE_THREAD) {
-               atomic_inc(&current->signal->sigcnt);
-               atomic_inc(&current->signal->count);
+               current->signal->nr_threads++;
                atomic_inc(&current->signal->live);
+               atomic_inc(&current->signal->sigcnt);
                p->group_leader = current->group_leader;
                list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
        }
 
        if (likely(p->pid)) {
-               tracehook_finish_clone(p, clone_flags, trace);
+               ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
 
                if (thread_group_leader(p)) {
-                       if (clone_flags & CLONE_NEWPID)
+                       if (is_child_reaper(pid))
                                p->nsproxy->pid_ns->child_reaper = p;
 
                        p->signal->leader_pid = pid;
@@ -1276,7 +1438,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                        attach_pid(p, PIDTYPE_SID, task_session(current));
                        list_add_tail(&p->sibling, &p->real_parent->children);
                        list_add_tail_rcu(&p->tasks, &init_task.tasks);
-                       __get_cpu_var(process_counts)++;
+                       __this_cpu_inc(process_counts);
                }
                attach_pid(p, PIDTYPE_PID, pid);
                nr_threads++;
@@ -1287,7 +1449,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        write_unlock_irq(&tasklist_lock);
        proc_fork_connector(p);
        cgroup_post_fork(p);
+       if (clone_flags & CLONE_THREAD)
+               threadgroup_change_end(current);
        perf_event_fork(p);
+
+       trace_task_newtask(p, clone_flags);
+
        return p;
 
 bad_fork_free_pid:
@@ -1320,6 +1487,8 @@ bad_fork_cleanup_policy:
        mpol_put(p->mempolicy);
 bad_fork_cleanup_cgroup:
 #endif
+       if (clone_flags & CLONE_THREAD)
+               threadgroup_change_end(current);
        cgroup_exit(p, cgroup_callbacks_done);
        delayacct_tsk_free(p);
        module_put(task_thread_info(p)->exec_domain->module);
@@ -1338,6 +1507,16 @@ noinline struct pt_regs * __cpuinit __attribute__((weak)) idle_regs(struct pt_re
        return regs;
 }
 
+static inline void init_idle_pids(struct pid_link *links)
+{
+       enum pid_type type;
+
+       for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
+               INIT_HLIST_NODE(&links[type].node); /* not really needed */
+               links[type].pid = &init_struct_pid;
+       }
+}
+
 struct task_struct * __cpuinit fork_idle(int cpu)
 {
        struct task_struct *task;
@@ -1345,8 +1524,10 @@ struct task_struct * __cpuinit fork_idle(int cpu)
 
        task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
                            &init_struct_pid, 0);
-       if (!IS_ERR(task))
+       if (!IS_ERR(task)) {
+               init_idle_pids(task->pids);
                init_idle(task, cpu);
+       }
 
        return task;
 }
@@ -1384,28 +1565,23 @@ long do_fork(unsigned long clone_flags,
        }
 
        /*
-        * We hope to recycle these flags after 2.6.26
+        * Determine whether and which event to report to ptracer.  When
+        * called from kernel_thread or CLONE_UNTRACED is explicitly
+        * requested, no event is reported; otherwise, report if the event
+        * for the type of forking is enabled.
         */
-       if (unlikely(clone_flags & CLONE_STOPPED)) {
-               static int __read_mostly count = 100;
-
-               if (count > 0 && printk_ratelimit()) {
-                       char comm[TASK_COMM_LEN];
+       if (likely(user_mode(regs)) && !(clone_flags & CLONE_UNTRACED)) {
+               if (clone_flags & CLONE_VFORK)
+                       trace = PTRACE_EVENT_VFORK;
+               else if ((clone_flags & CSIGNAL) != SIGCHLD)
+                       trace = PTRACE_EVENT_CLONE;
+               else
+                       trace = PTRACE_EVENT_FORK;
 
-                       count--;
-                       printk(KERN_INFO "fork(): process `%s' used deprecated "
-                                       "clone flags 0x%lx\n",
-                               get_task_comm(comm, current),
-                               clone_flags & CLONE_STOPPED);
-               }
+               if (likely(!ptrace_event_enabled(current, trace)))
+                       trace = 0;
        }
 
-       /*
-        * When called from kernel_thread, don't do user tracing stuff.
-        */
-       if (likely(user_mode(regs)))
-               trace = tracehook_prepare_clone(clone_flags);
-
        p = copy_process(clone_flags, stack_start, regs, stack_size,
                         child_tidptr, NULL, trace);
        /*
@@ -1425,38 +1601,18 @@ long do_fork(unsigned long clone_flags,
                if (clone_flags & CLONE_VFORK) {
                        p->vfork_done = &vfork;
                        init_completion(&vfork);
+                       get_task_struct(p);
                }
 
-               audit_finish_fork(p);
-               tracehook_report_clone(regs, clone_flags, nr, p);
-
-               /*
-                * We set PF_STARTING at creation in case tracing wants to
-                * use this to distinguish a fully live task from one that
-                * hasn't gotten to tracehook_report_clone() yet.  Now we
-                * clear it and set the child going.
-                */
-               p->flags &= ~PF_STARTING;
-
-               if (unlikely(clone_flags & CLONE_STOPPED)) {
-                       /*
-                        * We'll start up with an immediate SIGSTOP.
-                        */
-                       sigaddset(&p->pending.signal, SIGSTOP);
-                       set_tsk_thread_flag(p, TIF_SIGPENDING);
-                       __set_task_state(p, TASK_STOPPED);
-               } else {
-                       wake_up_new_task(p, clone_flags);
-               }
+               wake_up_new_task(p);
 
-               tracehook_report_clone_complete(trace, regs,
-                                               clone_flags, nr, p);
+               /* forking complete and child started to run, tell ptracer */
+               if (unlikely(trace))
+                       ptrace_event(trace, nr);
 
                if (clone_flags & CLONE_VFORK) {
-                       freezer_do_not_count();
-                       wait_for_completion(&vfork);
-                       freezer_count();
-                       tracehook_report_vfork_done(p, nr);
+                       if (!wait_for_vfork_done(p, &vfork))
+                               ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
                }
        } else {
                nr = PTR_ERR(p);
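
The ptrace_event() calls above only report anything if the tracer opted in via PTRACE_SETOPTIONS. A hypothetical user-space tracer snippet (not part of this patch) enabling the matching events:

#include <sys/ptrace.h>
#include <sys/types.h>

/* Hypothetical tracer helper: request the stop events that do_fork()
 * reports through ptrace_event(). */
static long trace_fork_events(pid_t child)
{
	return ptrace(PTRACE_SETOPTIONS, child, 0,
		      PTRACE_O_TRACEFORK |      /* PTRACE_EVENT_FORK       */
		      PTRACE_O_TRACEVFORK |     /* PTRACE_EVENT_VFORK      */
		      PTRACE_O_TRACEVFORKDONE | /* PTRACE_EVENT_VFORK_DONE */
		      PTRACE_O_TRACECLONE);     /* PTRACE_EVENT_CLONE      */
}
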
@@ -1491,54 +1647,40 @@ void __init proc_caches_init(void)
        fs_cachep = kmem_cache_create("fs_cache",
                        sizeof(struct fs_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
+       /*
+        * FIXME! The "sizeof(struct mm_struct)" currently includes the
+        * whole struct cpumask for the OFFSTACK case. We could change
+        * this to *only* allocate as much of it as required by the
+        * maximum number of CPU's we can ever have.  The cpumask_allocation
+        * is at the end of the structure, exactly for that reason.
+        */
        mm_cachep = kmem_cache_create("mm_struct",
                        sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
        vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
        mmap_init();
+       nsproxy_cache_init();
 }
 
 /*
- * Check constraints on flags passed to the unshare system call and
- * force unsharing of additional process context as appropriate.
+ * Check constraints on flags passed to the unshare system call.
  */
-static void check_unshare_flags(unsigned long *flags_ptr)
+static int check_unshare_flags(unsigned long unshare_flags)
 {
+       if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
+                               CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
+                               CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET))
+               return -EINVAL;
        /*
-        * If unsharing a thread from a thread group, must also
-        * unshare vm.
-        */
-       if (*flags_ptr & CLONE_THREAD)
-               *flags_ptr |= CLONE_VM;
-
-       /*
-        * If unsharing vm, must also unshare signal handlers.
-        */
-       if (*flags_ptr & CLONE_VM)
-               *flags_ptr |= CLONE_SIGHAND;
-
-       /*
-        * If unsharing signal handlers and the task was created
-        * using CLONE_THREAD, then must unshare the thread
-        */
-       if ((*flags_ptr & CLONE_SIGHAND) &&
-           (atomic_read(&current->signal->count) > 1))
-               *flags_ptr |= CLONE_THREAD;
-
-       /*
-        * If unsharing namespace, must also unshare filesystem information.
+        * Not implemented, but pretend it works if there is nothing to
+        * unshare. Note that unsharing CLONE_THREAD or CLONE_SIGHAND
+        * needs to unshare vm.
         */
-       if (*flags_ptr & CLONE_NEWNS)
-               *flags_ptr |= CLONE_FS;
-}
-
-/*
- * Unsharing of tasks created with CLONE_THREAD is not supported yet
- */
-static int unshare_thread(unsigned long unshare_flags)
-{
-       if (unshare_flags & CLONE_THREAD)
-               return -EINVAL;
+       if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
+               /* FIXME: get_task_mm() increments ->mm_users */
+               if (atomic_read(&current->mm->mm_users) > 1)
+                       return -EINVAL;
+       }
 
        return 0;
 }
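
From user space, any bit outside the mask checked above makes unshare(2) fail with EINVAL. A hypothetical minimal caller (not part of this patch; the namespace flags need CAP_SYS_ADMIN):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	/* Give this process private mount and UTS namespaces. */
	if (unshare(CLONE_NEWNS | CLONE_NEWUTS) == -1) {
		perror("unshare");
		return 1;
	}
	return 0;
}
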
@@ -1565,34 +1707,6 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
 }
 
 /*
- * Unsharing of sighand is not supported yet
- */
-static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
-{
-       struct sighand_struct *sigh = current->sighand;
-
-       if ((unshare_flags & CLONE_SIGHAND) && atomic_read(&sigh->count) > 1)
-               return -EINVAL;
-       else
-               return 0;
-}
-
-/*
- * Unshare vm if it is being shared
- */
-static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
-{
-       struct mm_struct *mm = current->mm;
-
-       if ((unshare_flags & CLONE_VM) &&
-           (mm && atomic_read(&mm->mm_users) > 1)) {
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-/*
  * Unshare file descriptor table if it is being shared
  */
 static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
@@ -1620,45 +1734,39 @@ static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp
  */
 SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
 {
-       int err = 0;
        struct fs_struct *fs, *new_fs = NULL;
-       struct sighand_struct *new_sigh = NULL;
-       struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
        struct files_struct *fd, *new_fd = NULL;
        struct nsproxy *new_nsproxy = NULL;
        int do_sysvsem = 0;
+       int err;
 
-       check_unshare_flags(&unshare_flags);
-
-       /* Return -EINVAL for all unsupported flags */
-       err = -EINVAL;
-       if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
-                               CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
-                               CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET))
+       err = check_unshare_flags(unshare_flags);
+       if (err)
                goto bad_unshare_out;
 
        /*
+        * If unsharing namespace, must also unshare filesystem information.
+        */
+       if (unshare_flags & CLONE_NEWNS)
+               unshare_flags |= CLONE_FS;
+       /*
         * CLONE_NEWIPC must also detach from the undolist: after switching
         * to a new ipc namespace, the semaphore arrays from the old
         * namespace are unreachable.
         */
        if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
                do_sysvsem = 1;
-       if ((err = unshare_thread(unshare_flags)))
+       err = unshare_fs(unshare_flags, &new_fs);
+       if (err)
                goto bad_unshare_out;
-       if ((err = unshare_fs(unshare_flags, &new_fs)))
-               goto bad_unshare_cleanup_thread;
-       if ((err = unshare_sighand(unshare_flags, &new_sigh)))
+       err = unshare_fd(unshare_flags, &new_fd);
+       if (err)
                goto bad_unshare_cleanup_fs;
-       if ((err = unshare_vm(unshare_flags, &new_mm)))
-               goto bad_unshare_cleanup_sigh;
-       if ((err = unshare_fd(unshare_flags, &new_fd)))
-               goto bad_unshare_cleanup_vm;
-       if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
-                       new_fs)))
+       err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy, new_fs);
+       if (err)
                goto bad_unshare_cleanup_fd;
 
-       if (new_fs ||  new_mm || new_fd || do_sysvsem || new_nsproxy) {
+       if (new_fs || new_fd || do_sysvsem || new_nsproxy) {
                if (do_sysvsem) {
                        /*
                         * CLONE_SYSVSEM is equivalent to sys_exit().
@@ -1675,22 +1783,13 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
 
                if (new_fs) {
                        fs = current->fs;
-                       write_lock(&fs->lock);
+                       spin_lock(&fs->lock);
                        current->fs = new_fs;
                        if (--fs->users)
                                new_fs = NULL;
                        else
                                new_fs = fs;
-                       write_unlock(&fs->lock);
-               }
-
-               if (new_mm) {
-                       mm = current->mm;
-                       active_mm = current->active_mm;
-                       current->mm = new_mm;
-                       current->active_mm = new_mm;
-                       activate_mm(active_mm, new_mm);
-                       new_mm = mm;
+                       spin_unlock(&fs->lock);
                }
 
                if (new_fd) {
@@ -1709,20 +1808,10 @@ bad_unshare_cleanup_fd:
        if (new_fd)
                put_files_struct(new_fd);
 
-bad_unshare_cleanup_vm:
-       if (new_mm)
-               mmput(new_mm);
-
-bad_unshare_cleanup_sigh:
-       if (new_sigh)
-               if (atomic_dec_and_test(&new_sigh->count))
-                       kmem_cache_free(sighand_cachep, new_sigh);
-
 bad_unshare_cleanup_fs:
        if (new_fs)
                free_fs_struct(new_fs);
 
-bad_unshare_cleanup_thread:
 bad_unshare_out:
        return err;
 }