Merge branch 'fixes' of git://git.linaro.org/people/rmk/linux-arm
[linux-flexiantxendom0-3.2.10.git] / kernel / signal.c
index 4a9d763..17afcaf 100644
@@ -11,7 +11,7 @@
  */
 
 #include <linux/slab.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/fs.h>
@@ -28,6 +28,7 @@
 #include <linux/freezer.h>
 #include <linux/pid_namespace.h>
 #include <linux/nsproxy.h>
+#include <linux/user_namespace.h>
 #define CREATE_TRACE_POINTS
 #include <trace/events/signal.h>
 
@@ -35,6 +36,7 @@
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 #include <asm/siginfo.h>
+#include <asm/cacheflush.h>
 #include "audit.h"     /* audit_signal_info() */
 
 /*
@@ -57,21 +59,20 @@ static int sig_handler_ignored(void __user *handler, int sig)
                (handler == SIG_DFL && sig_kernel_ignore(sig));
 }
 
-static int sig_task_ignored(struct task_struct *t, int sig,
-               int from_ancestor_ns)
+static int sig_task_ignored(struct task_struct *t, int sig, bool force)
 {
        void __user *handler;
 
        handler = sig_handler(t, sig);
 
        if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
-                       handler == SIG_DFL && !from_ancestor_ns)
+                       handler == SIG_DFL && !force)
                return 1;
 
        return sig_handler_ignored(handler, sig);
 }
 
-static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
+static int sig_ignored(struct task_struct *t, int sig, bool force)
 {
        /*
         * Blocked signals are never ignored, since the
@@ -81,13 +82,13 @@ static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
        if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
                return 0;
 
-       if (!sig_task_ignored(t, sig, from_ancestor_ns))
+       if (!sig_task_ignored(t, sig, force))
                return 0;
 
        /*
         * Tracers may want to know about even ignored signals.
         */
-       return !tracehook_consider_ignored_signal(t, sig);
+       return !t->ptrace;
 }
 
 /*
@@ -124,7 +125,7 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
 
 static int recalc_sigpending_tsk(struct task_struct *t)
 {
-       if (t->signal->group_stop_count > 0 ||
+       if ((t->jobctl & JOBCTL_PENDING_MASK) ||
            PENDING(&t->pending, &t->blocked) ||
            PENDING(&t->signal->shared_pending, &t->blocked)) {
                set_tsk_thread_flag(t, TIF_SIGPENDING);
@@ -150,15 +151,17 @@ void recalc_sigpending_and_wake(struct task_struct *t)
 
 void recalc_sigpending(void)
 {
-       if (unlikely(tracehook_force_sigpending()))
-               set_thread_flag(TIF_SIGPENDING);
-       else if (!recalc_sigpending_tsk(current) && !freezing(current))
+       if (!recalc_sigpending_tsk(current) && !freezing(current))
                clear_thread_flag(TIF_SIGPENDING);
 
 }
 
 /* Given the mask, find the first available signal that should be serviced. */
 
+#define SYNCHRONOUS_MASK \
+       (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
+        sigmask(SIGTRAP) | sigmask(SIGFPE))
+
 int next_signal(struct sigpending *pending, sigset_t *mask)
 {
        unsigned long i, *s, *m, x;
@@ -166,26 +169,39 @@ int next_signal(struct sigpending *pending, sigset_t *mask)
 
        s = pending->signal.sig;
        m = mask->sig;
+
+       /*
+        * Handle the first word specially: it contains the
+        * synchronous signals that need to be dequeued first.
+        */
+       x = *s &~ *m;
+       if (x) {
+               if (x & SYNCHRONOUS_MASK)
+                       x &= SYNCHRONOUS_MASK;
+               sig = ffz(~x) + 1;
+               return sig;
+       }
+
        switch (_NSIG_WORDS) {
        default:
-               for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
-                       if ((x = *s &~ *m) != 0) {
-                               sig = ffz(~x) + i*_NSIG_BPW + 1;
-                               break;
-                       }
+               for (i = 1; i < _NSIG_WORDS; ++i) {
+                       x = *++s &~ *++m;
+                       if (!x)
+                               continue;
+                       sig = ffz(~x) + i*_NSIG_BPW + 1;
+                       break;
+               }
                break;
 
-       case 2: if ((x = s[0] &~ m[0]) != 0)
-                       sig = 1;
-               else if ((x = s[1] &~ m[1]) != 0)
-                       sig = _NSIG_BPW + 1;
-               else
+       case 2:
+               x = s[1] &~ m[1];
+               if (!x)
                        break;
-               sig += ffz(~x);
+               sig = ffz(~x) + _NSIG_BPW + 1;
                break;
 
-       case 1: if ((x = *s &~ *m) != 0)
-                       sig = ffz(~x) + 1;
+       case 1:
+               /* Nothing to do */
                break;
        }
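
The first-word fast path above gives the synchronous signals (SEGV, BUS, ILL, TRAP, FPE) priority over everything else in word 0: if any of them is pending and unblocked, the candidate set is narrowed to SYNCHRONOUS_MASK before the lowest bit is picked, so a fault-generated signal is dequeued before, say, a pending SIGHUP. A minimal userspace sketch of the same selection logic (pick_next and SYNC_MASK are illustrative names; __builtin_ctzl stands in for the kernel's ffz(~x)):

#include <signal.h>
#include <stdio.h>

#define BIT(sig) (1UL << ((sig) - 1))
/* mirrors SYNCHRONOUS_MASK above */
#define SYNC_MASK (BIT(SIGSEGV) | BIT(SIGBUS) | BIT(SIGILL) | \
		   BIT(SIGTRAP) | BIT(SIGFPE))

static int pick_next(unsigned long pending, unsigned long blocked)
{
	unsigned long x = pending & ~blocked;

	if (!x)
		return 0;
	if (x & SYNC_MASK)		/* synchronous signals win */
		x &= SYNC_MASK;
	return __builtin_ctzl(x) + 1;	/* lowest set bit, 1-based */
}

int main(void)
{
	/* SIGHUP (1) and SIGSEGV (11) both pending: SIGSEGV comes out first */
	printf("%d\n", pick_next(BIT(SIGHUP) | BIT(SIGSEGV), 0));
	/* no synchronous signal pending: lowest-numbered signal wins */
	printf("%d\n", pick_next(BIT(SIGHUP) | BIT(SIGUSR1), 0));
	return 0;
}
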
 
@@ -206,10 +222,133 @@ static inline void print_dropped_signal(int sig)
                                current->comm, current->pid, sig);
 }
 
+/**
+ * task_set_jobctl_pending - set jobctl pending bits
+ * @task: target task
+ * @mask: pending bits to set
+ *
+ * Set @mask in @task->jobctl.  @mask must be a subset of
+ * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
+ * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
+ * cleared.  If @task is already being killed or exiting, this function
+ * becomes a no-op.
+ *
+ * CONTEXT:
+ * Must be called with @task->sighand->siglock held.
+ *
+ * RETURNS:
+ * %true if @mask is set, %false if it became a no-op because @task was dying.
+ */
+bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
+{
+       BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
+                       JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
+       BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
+
+       if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
+               return false;
+
+       if (mask & JOBCTL_STOP_SIGMASK)
+               task->jobctl &= ~JOBCTL_STOP_SIGMASK;
+
+       task->jobctl |= mask;
+       return true;
+}
+
+/**
+ * task_clear_jobctl_trapping - clear jobctl trapping bit
+ * @task: target task
+ *
+ * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
+ * Clear it and wake up the ptracer.  Note that we don't need any further
+ * locking.  @task->sighand->siglock guarantees that @task->parent points
+ * to the ptracer.
+ *
+ * CONTEXT:
+ * Must be called with @task->sighand->siglock held.
+ */
+void task_clear_jobctl_trapping(struct task_struct *task)
+{
+       if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
+               task->jobctl &= ~JOBCTL_TRAPPING;
+               wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
+       }
+}
+
+/**
+ * task_clear_jobctl_pending - clear jobctl pending bits
+ * @task: target task
+ * @mask: pending bits to clear
+ *
+ * Clear @mask from @task->jobctl.  @mask must be subset of
+ * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
+ * STOP bits are cleared together.
+ *
+ * If clearing of @mask leaves no stop or trap pending, this function calls
+ * task_clear_jobctl_trapping().
+ *
+ * CONTEXT:
+ * Must be called with @task->sighand->siglock held.
+ */
+void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
+{
+       BUG_ON(mask & ~JOBCTL_PENDING_MASK);
+
+       if (mask & JOBCTL_STOP_PENDING)
+               mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
+
+       task->jobctl &= ~mask;
+
+       if (!(task->jobctl & JOBCTL_PENDING_MASK))
+               task_clear_jobctl_trapping(task);
+}
+
+/**
+ * task_participate_group_stop - participate in a group stop
+ * @task: task participating in a group stop
+ *
+ * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
+ * Group stop states are cleared and the group stop count is consumed if
+ * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
+ * stop, the appropriate %SIGNAL_* flags are set.
+ *
+ * CONTEXT:
+ * Must be called with @task->sighand->siglock held.
+ *
+ * RETURNS:
+ * %true if group stop completion should be notified to the parent, %false
+ * otherwise.
+ */
+static bool task_participate_group_stop(struct task_struct *task)
+{
+       struct signal_struct *sig = task->signal;
+       bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
+
+       WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
+
+       task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
+
+       if (!consume)
+               return false;
+
+       if (!WARN_ON_ONCE(sig->group_stop_count == 0))
+               sig->group_stop_count--;
+
+       /*
+        * Tell the caller to notify completion iff we are entering a
+        * fresh group stop.  Read the comment in do_signal_stop() for details.
+        */
+       if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
+               sig->flags = SIGNAL_STOP_STOPPED;
+               return true;
+       }
+       return false;
+}
+
 /*
  * allocate a new signal queue record
  * - this may be called without locks if and only if t == current, otherwise an
- *   appopriate lock must be held to stop the target task from exiting
+ *   appropriate lock must be held to stop the target task from exiting
  */
 static struct sigqueue *
 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
@@ -218,17 +357,17 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
        struct user_struct *user;
 
        /*
-        * We won't get problems with the target's UID changing under us
-        * because changing it requires RCU be used, and if t != current, the
-        * caller must be holding the RCU readlock (by way of a spinlock) and
-        * we use RCU protection here
+        * Protect access to @t's credentials.  This can go away when all
+        * callers hold the RCU read lock.
         */
+       rcu_read_lock();
        user = get_uid(__task_cred(t)->user);
        atomic_inc(&user->sigpending);
+       rcu_read_unlock();
 
        if (override_rlimit ||
            atomic_read(&user->sigpending) <=
-                       t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) {
+                       task_rlimit(t, RLIMIT_SIGPENDING)) {
                q = kmem_cache_alloc(sigqueue_cachep, flags);
        } else {
                print_dropped_signal(sig);
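
The task_rlimit() comparison above is what makes sigqueue(2) fail with EAGAIN once a user has too many signals queued. A small sketch that provokes the limit from an unprivileged process by queueing blocked real-time signals at itself until allocation is refused:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	union sigval val = { .sival_int = 0 };
	sigset_t set;
	long n = 0;

	/* keep SIGRTMIN blocked so every queued instance stays pending */
	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);

	/* queue until the per-user RLIMIT_SIGPENDING budget runs out */
	while (sigqueue(getpid(), SIGRTMIN, val) == 0)
		n++;
	printf("queued %ld signals, then: %s\n", n, strerror(errno));
	return 0;
}
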
@@ -355,18 +494,19 @@ int unhandled_signal(struct task_struct *tsk, int sig)
                return 1;
        if (handler != SIG_IGN && handler != SIG_DFL)
                return 0;
-       return !tracehook_consider_fatal_signal(tsk, sig);
+       /* if ptraced, let the tracer determine */
+       return !tsk->ptrace;
 }
 
-
-/* Notify the system that a driver wants to block all signals for this
+/*
+ * Notify the system that a driver wants to block all signals for this
  * process, and wants to be notified if any signals at all were to be
  * sent/acted upon.  If the notifier routine returns non-zero, then the
  * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
  * allowed.  priv is a pointer to private data that the notifier routine
- * can use to determine if the signal should be blocked or not.  */
-
+ * can use to determine if the signal should be blocked or not.
+ */
 void
 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
 {
@@ -417,9 +557,10 @@ still_pending:
                copy_siginfo(info, &first->info);
                __sigqueue_free(first);
        } else {
-               /* Ok, it wasn't in the queue.  This must be
-                  a fast-pathed signal or we must have been
-                  out of queue space.  So zero out the info.
+               /*
+                * Ok, it wasn't in the queue.  This must be
+                * a fast-pathed signal or we must have been
+                * out of queue space.  So zero out the info.
                 */
                info->si_signo = sig;
                info->si_errno = 0;
@@ -451,7 +592,7 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
 }
 
 /*
- * Dequeue a signal and return the element to the caller, which is 
+ * Dequeue a signal and return the element to the caller, which is
  * expected to free it.
  *
  * All callers have to hold the siglock.
@@ -473,7 +614,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
                 * itimers are process shared and we restart periodic
                 * itimers in the signal delivery path to prevent DoS
                 * attacks in the high resolution timer case. This is
-                * compliant with the old way of self restarting
+                * compliant with the old way of self-restarting
                 * itimers, as the SIGALRM is a legacy signal and only
                 * queued once. Changing the restart behaviour to
                 * restart the timer in the signal dequeue path is
@@ -509,7 +650,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
                 * is to alert stop-signal processing code when another
                 * processor has come along and cleared the flag.
                 */
-               tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
+               current->jobctl |= JOBCTL_STOP_DEQUEUED;
        }
        if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
                /*
@@ -574,7 +715,7 @@ static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
        if (sigisemptyset(&m))
                return 0;
 
-       signandsets(&s->signal, &s->signal, mask);
+       sigandnsets(&s->signal, &s->signal, mask);
        list_for_each_entry_safe(q, n, &s->list, list) {
                if (sigismember(mask, q->info.si_signo)) {
                        list_del_init(&q->list);
@@ -619,13 +760,33 @@ static inline bool si_fromuser(const struct siginfo *info)
 }
 
 /*
+ * called with RCU read lock from check_kill_permission()
+ */
+static int kill_ok_by_cred(struct task_struct *t)
+{
+       const struct cred *cred = current_cred();
+       const struct cred *tcred = __task_cred(t);
+
+       if (cred->user->user_ns == tcred->user->user_ns &&
+           (cred->euid == tcred->suid ||
+            cred->euid == tcred->uid ||
+            cred->uid  == tcred->suid ||
+            cred->uid  == tcred->uid))
+               return 1;
+
+       if (ns_capable(tcred->user->user_ns, CAP_KILL))
+               return 1;
+
+       return 0;
+}
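
kill_ok_by_cred() encodes the classic permission rule: the sender's uid or euid must match the target's uid or saved uid, or the sender needs CAP_KILL in the target's user namespace. From userspace the check can be probed with the null signal, which performs the permission test without delivering anything (pid 1 is merely a convenient foreign-uid target):

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* signal 0: existence and permission check only */
	if (kill(1, 0) < 0)
		printf("kill(1, 0): %s\n", strerror(errno)); /* EPERM unless privileged */
	if (kill(getpid(), 0) == 0)
		printf("kill(self, 0): permitted\n");
	return 0;
}
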
+
+/*
  * Bad permissions for sending the signal
- * - the caller must hold at least the RCU read lock
+ * - the caller must hold the RCU read lock
  */
 static int check_kill_permission(int sig, struct siginfo *info,
                                 struct task_struct *t)
 {
-       const struct cred *cred = current_cred(), *tcred;
        struct pid *sid;
        int error;
 
@@ -639,12 +800,8 @@ static int check_kill_permission(int sig, struct siginfo *info,
        if (error)
                return error;
 
-       tcred = __task_cred(t);
-       if ((cred->euid ^ tcred->suid) &&
-           (cred->euid ^ tcred->uid) &&
-           (cred->uid  ^ tcred->suid) &&
-           (cred->uid  ^ tcred->uid) &&
-           !capable(CAP_KILL)) {
+       if (!same_thread_group(current, t) &&
+           !kill_ok_by_cred(t)) {
                switch (sig) {
                case SIGCONT:
                        sid = task_session(t);
@@ -662,6 +819,32 @@ static int check_kill_permission(int sig, struct siginfo *info,
        return security_task_kill(t, info, sig, 0);
 }
 
+/**
+ * ptrace_trap_notify - schedule trap to notify ptracer
+ * @t: tracee wanting to notify tracer
+ *
+ * This function schedules a sticky ptrace trap, which is cleared on the
+ * next TRAP_STOP, to notify the ptracer of an event.  @t must have been
+ * seized by the ptracer.
+ *
+ * If @t is running, STOP trap will be taken.  If trapped for STOP and
+ * ptracer is listening for events, tracee is woken up so that it can
+ * re-trap for the new event.  If trapped otherwise, STOP trap will be
+ * eventually taken without returning to userland after the existing traps
+ * are finished by PTRACE_CONT.
+ *
+ * CONTEXT:
+ * Must be called with @t->sighand->siglock held.
+ */
+static void ptrace_trap_notify(struct task_struct *t)
+{
+       WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
+       assert_spin_locked(&t->sighand->siglock);
+
+       task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
+       signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
+}
+
 /*
  * Handle magic process-wide effects of stop/continue signals. Unlike
  * the signal actions, these happen immediately at signal-generation
@@ -672,7 +855,7 @@ static int check_kill_permission(int sig, struct siginfo *info,
  * Returns true if the signal should be actually delivered, otherwise
  * it should be dropped.
  */
-static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
+static int prepare_signal(int sig, struct task_struct *p, bool force)
 {
        struct signal_struct *signal = p->signal;
        struct task_struct *t;
@@ -693,34 +876,17 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
        } else if (sig == SIGCONT) {
                unsigned int why;
                /*
-                * Remove all stop signals from all queues,
-                * and wake all threads.
+                * Remove all stop signals from all queues, wake all threads.
                 */
                rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
                t = p;
                do {
-                       unsigned int state;
+                       task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
                        rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
-                       /*
-                        * If there is a handler for SIGCONT, we must make
-                        * sure that no thread returns to user mode before
-                        * we post the signal, in case it was the only
-                        * thread eligible to run the signal handler--then
-                        * it must not do anything between resuming and
-                        * running the handler.  With the TIF_SIGPENDING
-                        * flag set, the thread will pause and acquire the
-                        * siglock that we hold now and until we've queued
-                        * the pending signal.
-                        *
-                        * Wake up the stopped thread _after_ setting
-                        * TIF_SIGPENDING
-                        */
-                       state = __TASK_STOPPED;
-                       if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
-                               set_tsk_thread_flag(t, TIF_SIGPENDING);
-                               state |= TASK_INTERRUPTIBLE;
-                       }
-                       wake_up_state(t, state);
+                       if (likely(!(t->ptrace & PT_SEIZED)))
+                               wake_up_state(t, __TASK_STOPPED);
+                       else
+                               ptrace_trap_notify(t);
                } while_each_thread(p, t);
 
                /*
@@ -746,17 +912,10 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
                        signal->flags = why | SIGNAL_STOP_CONTINUED;
                        signal->group_stop_count = 0;
                        signal->group_exit_code = 0;
-               } else {
-                       /*
-                        * We are not stopped, but there could be a stop
-                        * signal in the middle of being processed after
-                        * being removed from the queue.  Clear that too.
-                        */
-                       signal->flags &= ~SIGNAL_STOP_DEQUEUED;
                }
        }
 
-       return !sig_ignored(p, sig, from_ancestor_ns);
+       return !sig_ignored(p, sig, force);
 }
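
prepare_signal() is where SIGCONT acts at generation time: every queued stop signal is flushed and each stopped thread is woken (or, if PT_SEIZED, re-trapped) before SIGCONT itself is queued. The effect is visible from userspace through waitpid() with WUNTRACED and WCONTINUED; a minimal sketch:

#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0) {			/* child: wait to be signalled */
		pause();
		_exit(0);
	}

	kill(pid, SIGSTOP);
	waitpid(pid, &status, WUNTRACED);
	if (WIFSTOPPED(status))
		printf("child stopped by signal %d\n", WSTOPSIG(status));

	kill(pid, SIGCONT);		/* flushes pending stops, wakes the child */
	waitpid(pid, &status, WCONTINUED);
	if (WIFCONTINUED(status))
		printf("child continued\n");

	kill(pid, SIGKILL);
	waitpid(pid, &status, 0);
	return 0;
}
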
 
 /*
@@ -824,8 +983,7 @@ static void complete_signal(int sig, struct task_struct *p, int group)
        if (sig_fatal(p, sig) &&
            !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
            !sigismember(&t->real_blocked, sig) &&
-           (sig == SIGKILL ||
-            !tracehook_consider_fatal_signal(t, sig))) {
+           (sig == SIGKILL || !t->ptrace)) {
                /*
                 * This signal will be fatal to the whole group.
                 */
@@ -841,6 +999,7 @@ static void complete_signal(int sig, struct task_struct *p, int group)
                        signal->group_stop_count = 0;
                        t = p;
                        do {
+                               task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
                                sigaddset(&t->pending.signal, SIGKILL);
                                signal_wake_up(t, 1);
                        } while_each_thread(p, t);
@@ -861,19 +1020,48 @@ static inline int legacy_queue(struct sigpending *signals, int sig)
        return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
 }
 
+/*
+ * map the uid in struct cred into user namespace *ns
+ */
+static inline uid_t map_cred_ns(const struct cred *cred,
+                               struct user_namespace *ns)
+{
+       return user_ns_map_uid(ns, cred, cred->uid);
+}
+
+#ifdef CONFIG_USER_NS
+static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
+{
+       if (current_user_ns() == task_cred_xxx(t, user_ns))
+               return;
+
+       if (SI_FROMKERNEL(info))
+               return;
+
+       info->si_uid = user_ns_map_uid(task_cred_xxx(t, user_ns),
+                                       current_cred(), info->si_uid);
+}
+#else
+static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
+{
+       return;
+}
+#endif
+
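
The si_uid that userns_fixup_signal_uid() rewrites is the value a SA_SIGINFO handler eventually reads, translated into the receiver's user namespace. A single-process sketch of where that field surfaces (printf in a signal handler is not async-signal-safe, but tolerable for a demo):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *info, void *ctx)
{
	/* si_pid/si_uid were filled in on the sender side */
	printf("sig %d from pid %d uid %d\n",
	       sig, (int)info->si_pid, (int)info->si_uid);
}

int main(void)
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGUSR1, &sa, NULL);

	kill(getpid(), SIGUSR1);	/* same namespace: si_uid is our own uid */
	return 0;
}
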
 static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
                        int group, int from_ancestor_ns)
 {
        struct sigpending *pending;
        struct sigqueue *q;
        int override_rlimit;
-
-       trace_signal_generate(sig, info, t);
+       int ret = 0, result;
 
        assert_spin_locked(&t->sighand->siglock);
 
-       if (!prepare_signal(sig, t, from_ancestor_ns))
-               return 0;
+       result = TRACE_SIGNAL_IGNORED;
+       if (!prepare_signal(sig, t,
+                       from_ancestor_ns || (info == SEND_SIG_FORCED)))
+               goto ret;
 
        pending = group ? &t->signal->shared_pending : &t->pending;
        /*
@@ -881,8 +1069,11 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
         * exactly one non-rt signal, so that we can get more
         * detailed information about the cause of the signal.
         */
+       result = TRACE_SIGNAL_ALREADY_PENDING;
        if (legacy_queue(pending, sig))
-               return 0;
+               goto ret;
+
+       result = TRACE_SIGNAL_DELIVERED;
        /*
         * fast-pathed signals for kernel-internal things like SIGSTOP
         * or SIGKILL.
@@ -890,14 +1081,15 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
        if (info == SEND_SIG_FORCED)
                goto out_set;
 
-       /* Real-time signals must be queued if sent by sigqueue, or
-          some other real-time mechanism.  It is implementation
-          defined whether kill() does so.  We attempt to do so, on
-          the principle of least surprise, but since kill is not
-          allowed to fail with EAGAIN when low on memory we just
-          make sure at least one signal gets delivered and don't
-          pass on the info struct.  */
-
+       /*
+        * Real-time signals must be queued if sent by sigqueue, or
+        * some other real-time mechanism.  It is implementation
+        * defined whether kill() does so.  We attempt to do so, on
+        * the principle of least surprise, but since kill is not
+        * allowed to fail with EAGAIN when low on memory we just
+        * make sure at least one signal gets delivered and don't
+        * pass on the info struct.
+        */
        if (sig < SIGRTMIN)
                override_rlimit = (is_si_special(info) || info->si_code >= 0);
        else
@@ -929,6 +1121,9 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
                                q->info.si_pid = 0;
                        break;
                }
+
+               userns_fixup_signal_uid(&q->info, t);
+
        } else if (!is_si_special(info)) {
                if (sig >= SIGRTMIN && info->si_code != SI_USER) {
                        /*
@@ -936,14 +1131,15 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
                         * signal was rt and sent by user using something
                         * other than kill().
                         */
-                       trace_signal_overflow_fail(sig, group, info);
-                       return -EAGAIN;
+                       result = TRACE_SIGNAL_OVERFLOW_FAIL;
+                       ret = -EAGAIN;
+                       goto ret;
                } else {
                        /*
                         * This is a silent loss of information.  We still
                         * send the signal, but the *info bits are lost.
                         */
-                       trace_signal_lose_info(sig, group, info);
+                       result = TRACE_SIGNAL_LOSE_INFO;
                }
        }
 
@@ -951,7 +1147,9 @@ out_set:
        signalfd_notify(t, sig);
        sigaddset(&pending->signal, sig);
        complete_signal(sig, t, group);
-       return 0;
+ret:
+       trace_signal_generate(sig, info, t, group, result);
+       return ret;
 }
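
The legacy_queue() test above is why classic (< SIGRTMIN) signals coalesce: a second SIGUSR1 sent while one is already pending is dropped, while real-time signals get a queue entry each. A userspace sketch of the difference (handler names are illustrative; counting deliveries is just one way to observe it):

#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t classic, rt;

static void on_classic(int sig) { classic++; }
static void on_rt(int sig)      { rt++; }

int main(void)
{
	sigset_t set;
	int i;

	signal(SIGUSR1, on_classic);
	signal(SIGRTMIN, on_rt);

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);

	for (i = 0; i < 3; i++) {
		raise(SIGUSR1);		/* coalesces: legacy_queue() hits */
		raise(SIGRTMIN);	/* queues a separate entry each time */
	}
	sigprocmask(SIG_UNBLOCK, &set, NULL);

	/* typically prints 1 and 3 on Linux */
	printf("SIGUSR1 delivered %d time(s), SIGRTMIN %d time(s)\n",
	       (int)classic, (int)rt);
	return 0;
}
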
 
 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
@@ -979,7 +1177,8 @@ static void print_fatal_signal(struct pt_regs *regs, int signr)
                for (i = 0; i < 16; i++) {
                        unsigned char insn;
 
-                       __get_user(insn, (unsigned char *)(regs->ip + i));
+                       if (get_user(insn, (unsigned char *)(regs->ip + i)))
+                               break;
                        printk("%02x ", insn);
                }
        }
@@ -1065,52 +1264,65 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 /*
  * Nuke all other threads in the group.
  */
-void zap_other_threads(struct task_struct *p)
+int zap_other_threads(struct task_struct *p)
 {
-       struct task_struct *t;
+       struct task_struct *t = p;
+       int count = 0;
 
        p->signal->group_stop_count = 0;
 
-       for (t = next_thread(p); t != p; t = next_thread(t)) {
-               /*
-                * Don't bother with already dead threads
-                */
+       while_each_thread(p, t) {
+               task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
+               count++;
+
+               /* Don't bother with already dead threads */
                if (t->exit_state)
                        continue;
-
-               /* SIGKILL will be handled before any pending SIGSTOP */
                sigaddset(&t->pending.signal, SIGKILL);
                signal_wake_up(t, 1);
        }
+
+       return count;
 }
 
-struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
+struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
+                                          unsigned long *flags)
 {
        struct sighand_struct *sighand;
 
-       rcu_read_lock();
        for (;;) {
+               local_irq_save(*flags);
+               rcu_read_lock();
                sighand = rcu_dereference(tsk->sighand);
-               if (unlikely(sighand == NULL))
+               if (unlikely(sighand == NULL)) {
+                       rcu_read_unlock();
+                       local_irq_restore(*flags);
                        break;
+               }
 
-               spin_lock_irqsave(&sighand->siglock, *flags);
-               if (likely(sighand == tsk->sighand))
+               spin_lock(&sighand->siglock);
+               if (likely(sighand == tsk->sighand)) {
+                       rcu_read_unlock();
                        break;
-               spin_unlock_irqrestore(&sighand->siglock, *flags);
+               }
+               spin_unlock(&sighand->siglock);
+               rcu_read_unlock();
+               local_irq_restore(*flags);
        }
-       rcu_read_unlock();
 
        return sighand;
 }
 
 /*
  * send signal info to all the members of a group
- * - the caller must hold the RCU read lock at least
  */
 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 {
-       int ret = check_kill_permission(sig, info, p);
+       int ret;
+
+       rcu_read_lock();
+       ret = check_kill_permission(sig, info, p);
+       rcu_read_unlock();
 
        if (!ret && sig)
                ret = do_send_sig_info(sig, info, p, true);
@@ -1162,8 +1374,7 @@ retry:
        return error;
 }
 
-int
-kill_proc_info(int sig, struct siginfo *info, pid_t pid)
+int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
 {
        int error;
        rcu_read_lock();
@@ -1172,44 +1383,55 @@ kill_proc_info(int sig, struct siginfo *info, pid_t pid)
        return error;
 }
 
+static int kill_as_cred_perm(const struct cred *cred,
+                            struct task_struct *target)
+{
+       const struct cred *pcred = __task_cred(target);
+       if (cred->user_ns != pcred->user_ns)
+               return 0;
+       if (cred->euid != pcred->suid && cred->euid != pcred->uid &&
+           cred->uid  != pcred->suid && cred->uid  != pcred->uid)
+               return 0;
+       return 1;
+}
+
 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
-int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
-                     uid_t uid, uid_t euid, u32 secid)
+int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
+                        const struct cred *cred, u32 secid)
 {
        int ret = -EINVAL;
        struct task_struct *p;
-       const struct cred *pcred;
+       unsigned long flags;
 
        if (!valid_signal(sig))
                return ret;
 
-       read_lock(&tasklist_lock);
+       rcu_read_lock();
        p = pid_task(pid, PIDTYPE_PID);
        if (!p) {
                ret = -ESRCH;
                goto out_unlock;
        }
-       pcred = __task_cred(p);
-       if (si_fromuser(info) &&
-           euid != pcred->suid && euid != pcred->uid &&
-           uid  != pcred->suid && uid  != pcred->uid) {
+       if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
                ret = -EPERM;
                goto out_unlock;
        }
        ret = security_task_kill(p, info, sig, secid);
        if (ret)
                goto out_unlock;
-       if (sig && p->sighand) {
-               unsigned long flags;
-               spin_lock_irqsave(&p->sighand->siglock, flags);
-               ret = __send_signal(sig, info, p, 1, 0);
-               spin_unlock_irqrestore(&p->sighand->siglock, flags);
+
+       if (sig) {
+               if (lock_task_sighand(p, &flags)) {
+                       ret = __send_signal(sig, info, p, 1, 0);
+                       unlock_task_sighand(p, &flags);
+               } else
+                       ret = -ESRCH;
        }
 out_unlock:
-       read_unlock(&tasklist_lock);
+       rcu_read_unlock();
        return ret;
 }
-EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
+EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
 
 /*
  * kill_something_info() interprets pid in interesting ways just like kill(2).
@@ -1257,8 +1479,7 @@ static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
  * These are for backward compatibility with the rest of the kernel source.
  */
 
-int
-send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
+int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 {
        /*
         * Make sure legacy kernel users don't send in bad values
@@ -1326,7 +1547,7 @@ EXPORT_SYMBOL(kill_pid);
  * These functions support sending signals using preallocated sigqueue
  * structures.  This is needed "because realtime applications cannot
  * afford to lose notifications of asynchronous events, like timer
- * expirations or I/O completions".  In the case of Posix Timers
+ * expirations or I/O completions".  In the case of POSIX Timers
  * we allocate the sigqueue structure from the timer_create.  If this
  * allocation fails we are able to report the failure to the application
  * with an EAGAIN error.
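
Because the sigqueue is preallocated when the timer is created, an out-of-memory or over-rlimit condition is reported synchronously by timer_create(2) as EAGAIN instead of a signal being dropped at expiry. A sketch of the usual setup (may need -lrt on older glibc):

#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct sigevent sev = { 0 };
	struct itimerspec its = { 0 };
	timer_t timerid;
	sigset_t set;
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);

	sev.sigev_notify = SIGEV_SIGNAL;
	sev.sigev_signo = SIGRTMIN;
	/* the kernel allocates the sigqueue entry here, or fails with EAGAIN */
	if (timer_create(CLOCK_MONOTONIC, &sev, &timerid) < 0) {
		perror("timer_create");
		return 1;
	}

	its.it_value.tv_nsec = 100 * 1000 * 1000;	/* 100 ms, one shot */
	timer_settime(timerid, 0, &its, NULL);

	sigwait(&set, &sig);	/* blocks until the timer signal arrives */
	printf("got signal %d from timer\n", sig);
	timer_delete(timerid);
	return 0;
}
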
@@ -1371,7 +1592,7 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
        int sig = q->info.si_signo;
        struct sigpending *pending;
        unsigned long flags;
-       int ret;
+       int ret, result;
 
        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
 
@@ -1380,7 +1601,8 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
                goto ret;
 
        ret = 1; /* the signal is ignored */
-       if (!prepare_signal(sig, t, 0))
+       result = TRACE_SIGNAL_IGNORED;
+       if (!prepare_signal(sig, t, false))
                goto out;
 
        ret = 0;
@@ -1391,6 +1613,7 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
                 */
                BUG_ON(q->info.si_code != SI_TIMER);
                q->info.si_overrun++;
+               result = TRACE_SIGNAL_ALREADY_PENDING;
                goto out;
        }
        q->info.si_overrun = 0;
@@ -1400,7 +1623,9 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
        list_add_tail(&q->list, &pending->list);
        sigaddset(&pending->signal, sig);
        complete_signal(sig, t, group);
+       result = TRACE_SIGNAL_DELIVERED;
 out:
+       trace_signal_generate(sig, &q->info, t, group, result);
        unlock_task_sighand(t, &flags);
 ret:
        return ret;
@@ -1410,24 +1635,33 @@ ret:
  * Let a parent know about the death of a child.
  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
  *
- * Returns -1 if our parent ignored us and so we've switched to
- * self-reaping, or else @sig.
+ * Returns true if our parent ignored us and so we've switched to
+ * self-reaping.
  */
-int do_notify_parent(struct task_struct *tsk, int sig)
+bool do_notify_parent(struct task_struct *tsk, int sig)
 {
        struct siginfo info;
        unsigned long flags;
        struct sighand_struct *psig;
-       int ret = sig;
+       bool autoreap = false;
 
        BUG_ON(sig == -1);
 
        /* do_notify_parent_cldstop should have been called instead.  */
        BUG_ON(task_is_stopped_or_traced(tsk));
 
-       BUG_ON(!task_ptrace(tsk) &&
+       BUG_ON(!tsk->ptrace &&
               (tsk->group_leader != tsk || !thread_group_empty(tsk)));
 
+       if (sig != SIGCHLD) {
+               /*
+                * This is only possible if parent == real_parent.
+                * Check if it has changed security domain.
+                */
+               if (tsk->parent_exec_id != tsk->parent->self_exec_id)
+                       sig = SIGCHLD;
+       }
+
        info.si_signo = sig;
        info.si_errno = 0;
        /*
@@ -1444,13 +1678,12 @@ int do_notify_parent(struct task_struct *tsk, int sig)
         */
        rcu_read_lock();
        info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
-       info.si_uid = __task_cred(tsk)->uid;
+       info.si_uid = map_cred_ns(__task_cred(tsk),
+                       task_cred_xxx(tsk->parent, user_ns));
        rcu_read_unlock();
 
-       info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
-                               tsk->signal->utime));
-       info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
-                               tsk->signal->stime));
+       info.si_utime = cputime_to_clock_t(tsk->utime + tsk->signal->utime);
+       info.si_stime = cputime_to_clock_t(tsk->stime + tsk->signal->stime);
 
        info.si_status = tsk->exit_code & 0x7f;
        if (tsk->exit_code & 0x80)
@@ -1464,7 +1697,7 @@ int do_notify_parent(struct task_struct *tsk, int sig)
 
        psig = tsk->parent->sighand;
        spin_lock_irqsave(&psig->siglock, flags);
-       if (!task_ptrace(tsk) && sig == SIGCHLD &&
+       if (!tsk->ptrace && sig == SIGCHLD &&
            (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
             (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
                /*
@@ -1482,28 +1715,42 @@ int do_notify_parent(struct task_struct *tsk, int sig)
                 * is implementation-defined: we do (if you don't want
                 * it, just use SIG_IGN instead).
                 */
-               ret = tsk->exit_signal = -1;
+               autoreap = true;
                if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
-                       sig = -1;
+                       sig = 0;
        }
-       if (valid_signal(sig) && sig > 0)
+       if (valid_signal(sig) && sig)
                __group_send_sig_info(sig, &info, tsk->parent);
        __wake_up_parent(tsk, tsk->parent);
        spin_unlock_irqrestore(&psig->siglock, flags);
 
-       return ret;
+       return autoreap;
 }
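
The SIG_IGN/SA_NOCLDWAIT branch above is what makes children of a SIGCHLD-ignoring parent self-reap: do_notify_parent() reports autoreap and no zombie is left behind for wait(2). Observable from userspace (the sleep is a crude way to order the child's exit before the wait):

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	signal(SIGCHLD, SIG_IGN);	/* triggers the autoreap path above */

	if (fork() == 0)
		_exit(0);		/* child: reaped by the kernel, no zombie */

	sleep(1);
	if (wait(NULL) < 0)		/* nothing left to reap */
		printf("wait: %s\n", strerror(errno));	/* ECHILD */
	return 0;
}
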
 
-static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
+/**
+ * do_notify_parent_cldstop - notify parent of stopped/continued state change
+ * @tsk: task reporting the state change
+ * @for_ptracer: the notification is for ptracer
+ * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
+ *
+ * Notify @tsk's parent that the stopped/continued state has changed.  If
+ * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
+ * If %true, @tsk reports to @tsk->parent which should be the ptracer.
+ *
+ * CONTEXT:
+ * Must be called with tasklist_lock at least read locked.
+ */
+static void do_notify_parent_cldstop(struct task_struct *tsk,
+                                    bool for_ptracer, int why)
 {
        struct siginfo info;
        unsigned long flags;
        struct task_struct *parent;
        struct sighand_struct *sighand;
 
-       if (task_ptrace(tsk))
+       if (for_ptracer) {
                parent = tsk->parent;
-       else {
+       } else {
                tsk = tsk->group_leader;
                parent = tsk->real_parent;
        }
@@ -1511,11 +1758,12 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
        info.si_signo = SIGCHLD;
        info.si_errno = 0;
        /*
-        * see comment in do_notify_parent() abot the following 3 lines
+        * see comment in do_notify_parent() about the following 4 lines
         */
        rcu_read_lock();
        info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
-       info.si_uid = __task_cred(tsk)->uid;
+       info.si_uid = map_cred_ns(__task_cred(tsk),
+                       task_cred_xxx(parent, user_ns));
        rcu_read_unlock();
 
        info.si_utime = cputime_to_clock_t(tsk->utime);
@@ -1550,7 +1798,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
 
 static inline int may_ptrace_stop(void)
 {
-       if (!likely(task_ptrace(current)))
+       if (!likely(current->ptrace))
                return 0;
        /*
         * Are we in the middle of do_coredump?
@@ -1569,7 +1817,7 @@ static inline int may_ptrace_stop(void)
 }
 
 /*
- * Return nonzero if there is a SIGKILL that should be waking us up.
+ * Return non-zero if there is a SIGKILL that should be waking us up.
  * Called with the siglock held.
  */
 static int sigkill_pending(struct task_struct *tsk)
@@ -1589,8 +1837,12 @@ static int sigkill_pending(struct task_struct *tsk)
  * If we actually decide not to stop at all because the tracer
  * is gone, we keep current->exit_code unless clear_code.
  */
-static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
+static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
+       __releases(&current->sighand->siglock)
+       __acquires(&current->sighand->siglock)
 {
+       bool gstop_done = false;
+
        if (arch_ptrace_stop_needed(exit_code, info)) {
                /*
                 * The arch code has something special to do before a
@@ -1611,21 +1863,52 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
        }
 
        /*
-        * If there is a group stop in progress,
-        * we must participate in the bookkeeping.
+        * We're committing to trapping.  TRACED should be visible before
+        * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
+        * Also, transition to TRACED and updates to ->jobctl should be
+        * atomic with respect to siglock and should be done after the arch
+        * hook as siglock is released and regrabbed across it.
         */
-       if (current->signal->group_stop_count > 0)
-               --current->signal->group_stop_count;
+       set_current_state(TASK_TRACED);
 
        current->last_siginfo = info;
        current->exit_code = exit_code;
 
-       /* Let the debugger run.  */
-       __set_current_state(TASK_TRACED);
+       /*
+        * If @why is CLD_STOPPED, we're trapping to participate in a group
+        * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
+        * across siglock relocks since INTERRUPT was scheduled, PENDING
+        * could be clear now.  We act as if SIGCONT is received after
+        * TASK_TRACED is entered - ignore it.
+        */
+       if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
+               gstop_done = task_participate_group_stop(current);
+
+       /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
+       task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
+       if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
+               task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
+
+       /* entering a trap, clear TRAPPING */
+       task_clear_jobctl_trapping(current);
+
        spin_unlock_irq(&current->sighand->siglock);
        read_lock(&tasklist_lock);
        if (may_ptrace_stop()) {
-               do_notify_parent_cldstop(current, CLD_TRAPPED);
+               /*
+                * Notify parents of the stop.
+                *
+                * While ptraced, there are two parents - the ptracer and
+                * the real_parent of the group_leader.  The ptracer should
+                * know about every stop while the real parent is only
+                * interested in the completion of group stop.  The states
+                * for the two don't interact with each other.  Notify
+                * separately unless they're gonna be duplicates.
+                */
+               do_notify_parent_cldstop(current, true, why);
+               if (gstop_done && ptrace_reparented(current))
+                       do_notify_parent_cldstop(current, false, why);
+
                /*
                 * Don't want to allow preemption here, because
                 * sys_ptrace() needs this task to be inactive.
@@ -1640,7 +1923,16 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
                /*
                 * By the time we got the lock, our tracer went away.
                 * Don't drop the lock yet, another tracer may come.
+                *
+                * If @gstop_done, the ptracer went away between group stop
+                * completion and here.  During detach, it would have set
+                * JOBCTL_STOP_PENDING on us and we'll re-enter
+                * TASK_STOPPED in do_signal_stop() on return, so notifying
+                * the real parent of the group stop completion is enough.
                 */
+               if (gstop_done)
+                       do_notify_parent_cldstop(current, false, why);
+
                __set_current_state(TASK_RUNNING);
                if (clear_code)
                        current->exit_code = 0;
@@ -1662,6 +1954,9 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
        spin_lock_irq(&current->sighand->siglock);
        current->last_siginfo = NULL;
 
+       /* LISTENING can be set only during STOP traps, clear it */
+       current->jobctl &= ~JOBCTL_LISTENING;
+
        /*
         * Queued signals ignored us while we were stopped for tracing.
         * So check for any that we should take before resuming user mode.
@@ -1670,107 +1965,202 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
        recalc_sigpending_tsk(current);
 }
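
ptrace_stop() is the single choke point where a tracee parks itself in TASK_TRACED and the tracer learns about it through do_wait(). The classic userspace counterpart, sketched minimally:

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGUSR1);		/* tracee traps in ptrace_stop() */
		_exit(0);
	}

	waitpid(pid, &status, 0);	/* tracer sees the trap via do_wait() */
	if (WIFSTOPPED(status))
		printf("tracee stopped, sig %d\n", WSTOPSIG(status));

	ptrace(PTRACE_CONT, pid, NULL, 0);	/* resume, suppress the signal */
	waitpid(pid, &status, 0);
	if (WIFEXITED(status))
		printf("tracee exited %d\n", WEXITSTATUS(status));
	return 0;
}
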
 
-void ptrace_notify(int exit_code)
+static void ptrace_do_notify(int signr, int exit_code, int why)
 {
        siginfo_t info;
 
-       BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
-
        memset(&info, 0, sizeof info);
-       info.si_signo = SIGTRAP;
+       info.si_signo = signr;
        info.si_code = exit_code;
        info.si_pid = task_pid_vnr(current);
        info.si_uid = current_uid();
 
        /* Let the debugger run.  */
+       ptrace_stop(exit_code, why, 1, &info);
+}
+
+void ptrace_notify(int exit_code)
+{
+       BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
+
        spin_lock_irq(&current->sighand->siglock);
-       ptrace_stop(exit_code, 1, &info);
+       ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
        spin_unlock_irq(&current->sighand->siglock);
 }
 
-/*
- * This performs the stopping for SIGSTOP and other stop signals.
- * We have to stop all threads in the thread group.
- * Returns nonzero if we've actually stopped and released the siglock.
- * Returns zero if we didn't stop and still hold the siglock.
+/**
+ * do_signal_stop - handle group stop for SIGSTOP and other stop signals
+ * @signr: signr causing group stop if initiating
+ *
+ * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
+ * and participate in it.  If already set, participate in the existing
+ * group stop.  If participated in a group stop (and thus slept), %true is
+ * returned with siglock released.
+ *
+ * If ptraced, this function doesn't handle stop itself.  Instead,
+ * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
+ * untouched.  The caller must ensure that INTERRUPT trap handling takes
+ * place afterwards.
+ *
+ * CONTEXT:
+ * Must be called with @current->sighand->siglock held, which is released
+ * on %true return.
+ *
+ * RETURNS:
+ * %false if group stop is already cancelled or ptrace trap is scheduled.
+ * %true if participated in group stop.
  */
-static int do_signal_stop(int signr)
+static bool do_signal_stop(int signr)
+       __releases(&current->sighand->siglock)
 {
        struct signal_struct *sig = current->signal;
-       int notify;
 
-       if (!sig->group_stop_count) {
+       if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
+               unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
                struct task_struct *t;
 
-               if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
+               /* signr will be recorded in task->jobctl for retries */
+               WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
+
+               if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
                    unlikely(signal_group_exit(sig)))
-                       return 0;
+                       return false;
                /*
-                * There is no group stop already in progress.
-                * We must initiate one now.
+                * There is no group stop already in progress.  We must
+                * initiate one now.
+                *
+                * While ptraced, a task may be resumed while group stop is
+                * still in effect and then receive a stop signal and
+                * initiate another group stop.  This deviates from the
+                * usual behavior as two consecutive stop signals can't
+                * cause two group stops when !ptraced.  That is why we
+                * also check !task_is_stopped(t) below.
+                *
+                * The condition can be distinguished by testing whether
+                * SIGNAL_STOP_STOPPED is already set.  Don't generate
+                * group_exit_code in such case.
+                *
+                * This is not necessary for SIGNAL_STOP_CONTINUED because
+                * an intervening stop signal is required to cause two
+                * continued events regardless of ptrace.
                 */
-               sig->group_exit_code = signr;
+               if (!(sig->flags & SIGNAL_STOP_STOPPED))
+                       sig->group_exit_code = signr;
+
+               sig->group_stop_count = 0;
+
+               if (task_set_jobctl_pending(current, signr | gstop))
+                       sig->group_stop_count++;
 
-               sig->group_stop_count = 1;
-               for (t = next_thread(current); t != current; t = next_thread(t))
+               for (t = next_thread(current); t != current;
+                    t = next_thread(t)) {
                        /*
                         * Setting state to TASK_STOPPED for a group
                         * stop is always done with the siglock held,
                         * so this check has no races.
                         */
-                       if (!(t->flags & PF_EXITING) &&
-                           !task_is_stopped_or_traced(t)) {
+                       if (!task_is_stopped(t) &&
+                           task_set_jobctl_pending(t, signr | gstop)) {
                                sig->group_stop_count++;
-                               signal_wake_up(t, 0);
+                               if (likely(!(t->ptrace & PT_SEIZED)))
+                                       signal_wake_up(t, 0);
+                               else
+                                       ptrace_trap_notify(t);
                        }
+               }
        }
-       /*
-        * If there are no other threads in the group, or if there is
-        * a group stop in progress and we are the last to stop, report
-        * to the parent.  When ptraced, every thread reports itself.
-        */
-       notify = sig->group_stop_count == 1 ? CLD_STOPPED : 0;
-       notify = tracehook_notify_jctl(notify, CLD_STOPPED);
-       /*
-        * tracehook_notify_jctl() can drop and reacquire siglock, so
-        * we keep ->group_stop_count != 0 before the call. If SIGCONT
-        * or SIGKILL comes in between ->group_stop_count == 0.
-        */
-       if (sig->group_stop_count) {
-               if (!--sig->group_stop_count)
-                       sig->flags = SIGNAL_STOP_STOPPED;
-               current->exit_code = sig->group_exit_code;
+
+       if (likely(!current->ptrace)) {
+               int notify = 0;
+
+               /*
+                * If there are no other threads in the group, or if there
+                * is a group stop in progress and we are the last to stop,
+                * report to the parent.
+                */
+               if (task_participate_group_stop(current))
+                       notify = CLD_STOPPED;
+
                __set_current_state(TASK_STOPPED);
-       }
-       spin_unlock_irq(&current->sighand->siglock);
+               spin_unlock_irq(&current->sighand->siglock);
 
-       if (notify) {
-               read_lock(&tasklist_lock);
-               do_notify_parent_cldstop(current, notify);
-               read_unlock(&tasklist_lock);
-       }
+               /*
+                * Notify the parent of the group stop completion.  Because
+                * we're not holding either the siglock or tasklist_lock
+                * here, a ptracer may attach in between; however, this is for
+                * group stop and should always be delivered to the real
+                * parent of the group leader.  The new ptracer will get
+                * its notification when this task transitions into
+                * TASK_TRACED.
+                */
+               if (notify) {
+                       read_lock(&tasklist_lock);
+                       do_notify_parent_cldstop(current, false, notify);
+                       read_unlock(&tasklist_lock);
+               }
 
-       /* Now we don't run again until woken by SIGCONT or SIGKILL */
-       do {
+               /* Now we don't run again until woken by SIGCONT or SIGKILL */
                schedule();
-       } while (try_to_freeze());
-
-       tracehook_finish_jctl();
-       current->exit_code = 0;
+               return true;
+       } else {
+               /*
+                * While ptraced, group stop is handled by STOP trap.
+                * Schedule it and let the caller deal with it.
+                */
+               task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
+               return false;
+       }
+}
 
-       return 1;
+/**
+ * do_jobctl_trap - take care of ptrace jobctl traps
+ *
+ * When PT_SEIZED, it's used for both group stop and explicit
+ * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
+ * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
+ * the stop signal; otherwise, %SIGTRAP.
+ *
+ * When !PT_SEIZED, it's used only for group stop trap with stop signal
+ * number as exit_code and no siginfo.
+ *
+ * CONTEXT:
+ * Must be called with @current->sighand->siglock held, which may be
+ * released and re-acquired before returning with intervening sleep.
+ */
+static void do_jobctl_trap(void)
+{
+       struct signal_struct *signal = current->signal;
+       int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
+
+       if (current->ptrace & PT_SEIZED) {
+               if (!signal->group_stop_count &&
+                   !(signal->flags & SIGNAL_STOP_STOPPED))
+                       signr = SIGTRAP;
+               WARN_ON_ONCE(!signr);
+               ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
+                                CLD_STOPPED);
+       } else {
+               WARN_ON_ONCE(!signr);
+               ptrace_stop(signr, CLD_STOPPED, 0, NULL);
+               current->exit_code = 0;
+       }
 }
 
 static int ptrace_signal(int signr, siginfo_t *info,
                         struct pt_regs *regs, void *cookie)
 {
-       if (!task_ptrace(current))
-               return signr;
-
        ptrace_signal_deliver(regs, cookie);
-
-       /* Let the debugger run.  */
-       ptrace_stop(signr, 0, info);
+       /*
+        * We do not check sig_kernel_stop(signr) but set this marker
+        * unconditionally because we do not know whether the debugger will
+        * change signr. This flag has no meaning unless we are going
+        * to stop after return from ptrace_stop(). In this case it will
+        * be checked in do_signal_stop(), we should only stop if it was
+        * be checked in do_signal_stop(); we should only stop if it was
+        * comment in dequeue_signal().
+        */
+       current->jobctl |= JOBCTL_STOP_DEQUEUED;
+       ptrace_stop(signr, CLD_TRAPPED, 0, info);
 
        /* We're back.  Did the debugger cancel the sig?  */
        signr = current->exit_code;
@@ -1779,16 +2169,21 @@ static int ptrace_signal(int signr, siginfo_t *info,
 
        current->exit_code = 0;
 
-       /* Update the siginfo structure if the signal has
-          changed.  If the debugger wanted something
-          specific in the siginfo structure then it should
-          have updated *info via PTRACE_SETSIGINFO.  */
+       /*
+        * Update the siginfo structure if the signal has
+        * changed.  If the debugger wanted something
+        * specific in the siginfo structure then it should
+        * have updated *info via PTRACE_SETSIGINFO.
+        */
        if (signr != info->si_signo) {
                info->si_signo = signr;
                info->si_errno = 0;
                info->si_code = SI_USER;
+               rcu_read_lock();
                info->si_pid = task_pid_vnr(current->parent);
-               info->si_uid = task_uid(current->parent);
+               info->si_uid = map_cred_ns(__task_cred(current->parent),
+                               current_user_ns());
+               rcu_read_unlock();
        }
 
        /* If the (new) signal is now blocked, requeue it.  */
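
The exit_code read-back above is how the debugger's verdict returns: whatever the tracer passes as the data argument of PTRACE_CONT replaces the original signal, and the siginfo is resynthesized when it differs. A sketch of a tracer swapping one signal for another:

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGUSR1);		/* stop for signal delivery */
		_exit(0);		/* never reached: SIGTERM kills us */
	}

	waitpid(pid, &status, 0);	/* stopped with SIGUSR1 pending */

	/* resume, but deliver SIGTERM in place of the original SIGUSR1 */
	ptrace(PTRACE_CONT, pid, NULL, SIGTERM);
	waitpid(pid, &status, 0);
	if (WIFSIGNALED(status))
		printf("tracee killed by signal %d\n", WTERMSIG(status));
	return 0;
}
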
@@ -1823,55 +2218,63 @@ relock:
         * the CLD_ si_code into SIGNAL_CLD_MASK bits.
         */
        if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
-               int why = (signal->flags & SIGNAL_STOP_CONTINUED)
-                               ? CLD_CONTINUED : CLD_STOPPED;
+               int why;
+
+               if (signal->flags & SIGNAL_CLD_CONTINUED)
+                       why = CLD_CONTINUED;
+               else
+                       why = CLD_STOPPED;
+
                signal->flags &= ~SIGNAL_CLD_MASK;
 
-               why = tracehook_notify_jctl(why, CLD_CONTINUED);
                spin_unlock_irq(&sighand->siglock);
 
-               if (why) {
-                       read_lock(&tasklist_lock);
-                       do_notify_parent_cldstop(current->group_leader, why);
-                       read_unlock(&tasklist_lock);
-               }
+               /*
+                * Notify the parent that we're continuing.  This event is
+                * always per-process and doesn't make a whole lot of sense
+                * for ptracers, who shouldn't consume the state via
+                * wait(2) either, but, for backward compatibility, notify
+                * the ptracer of the group leader too unless it would be
+                * a duplicate.
+                */
+               read_lock(&tasklist_lock);
+               do_notify_parent_cldstop(current, false, why);
+
+               if (ptrace_reparented(current->group_leader))
+                       do_notify_parent_cldstop(current->group_leader,
+                                               true, why);
+               read_unlock(&tasklist_lock);
+
                goto relock;
        }
 
        for (;;) {
                struct k_sigaction *ka;
 
-               if (unlikely(signal->group_stop_count > 0) &&
+               if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
                    do_signal_stop(0))
                        goto relock;
 
-               /*
-                * Tracing can induce an artifical signal and choose sigaction.
-                * The return value in @signr determines the default action,
-                * but @info->si_signo is the signal number we will report.
-                */
-               signr = tracehook_get_signal(current, regs, info, return_ka);
-               if (unlikely(signr < 0))
+               if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
+                       do_jobctl_trap();
+                       spin_unlock_irq(&sighand->siglock);
                        goto relock;
-               if (unlikely(signr != 0))
-                       ka = return_ka;
-               else {
-                       signr = dequeue_signal(current, &current->blocked,
-                                              info);
+               }
 
-                       if (!signr)
-                               break; /* will return 0 */
+               signr = dequeue_signal(current, &current->blocked, info);
 
-                       if (signr != SIGKILL) {
-                               signr = ptrace_signal(signr, info,
-                                                     regs, cookie);
-                               if (!signr)
-                                       continue;
-                       }
+               if (!signr)
+                       break; /* will return 0 */
 
-                       ka = &sighand->action[signr-1];
+               if (unlikely(current->ptrace) && signr != SIGKILL) {
+                       signr = ptrace_signal(signr, info,
+                                             regs, cookie);
+                       if (!signr)
+                               continue;
                }
 
+               ka = &sighand->action[signr-1];
+
                /* Trace actually delivered signals. */
                trace_signal_deliver(signr, info, ka);
 
@@ -1972,13 +2375,73 @@ relock:
        return signr;
 }
 
+/**
+ * block_sigmask - add @ka's signal mask to current->blocked
+ * @ka: action for @signr
+ * @signr: signal that has been successfully delivered
+ *
+ * This function should be called when a signal has successfully been
+ * delivered. It adds the mask of signals for @ka to current->blocked
+ * so that they are blocked during the execution of the signal
+ * handler. In addition, @signr will be blocked unless %SA_NODEFER is
+ * set in @ka->sa.sa_flags.
+ */
+void block_sigmask(struct k_sigaction *ka, int signr)
+{
+       sigset_t blocked;
+
+       sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
+       if (!(ka->sa.sa_flags & SA_NODEFER))
+               sigaddset(&blocked, signr);
+       set_current_blocked(&blocked);
+}
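+
+/*
+ * Illustrative sketch (not part of this change): an architecture's
+ * handle_signal() would typically call block_sigmask() once the signal
+ * frame has been set up; setup_rt_frame() below stands in for the
+ * arch-specific frame setup:
+ *
+ *	if (setup_rt_frame(sig, ka, info, regs) == 0)
+ *		block_sigmask(ka, sig);
+ */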
+
+/*
+ * It could be that complete_signal() picked us to notify about the
+ * group-wide signal. Other threads should be notified now to take
+ * the shared signals in @which since we will not.
+ */
+static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
+{
+       sigset_t retarget;
+       struct task_struct *t;
+
+       sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
+       if (sigisemptyset(&retarget))
+               return;
+
+       t = tsk;
+       while_each_thread(tsk, t) {
+               if (t->flags & PF_EXITING)
+                       continue;
+
+               if (!has_pending_signals(&retarget, &t->blocked))
+                       continue;
+               /* Remove the signals this thread can handle. */
+               sigandsets(&retarget, &retarget, &t->blocked);
+
+               if (!signal_pending(t))
+                       signal_wake_up(t, 0);
+
+               if (sigisemptyset(&retarget))
+                       break;
+       }
+}
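+
+/*
+ * Example (illustrative): if a group-wide SIGTERM sits in
+ * shared_pending and the caller was the thread chosen to take it, the
+ * loop above wakes the first live thread that does not have SIGTERM
+ * blocked, so the signal is not lost when the caller bows out.
+ */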
+
 void exit_signals(struct task_struct *tsk)
 {
        int group_stop = 0;
-       struct task_struct *t;
+       sigset_t unblocked;
+
+       /*
+        * @tsk is about to have PF_EXITING set - lock out users which
+        * expect a stable threadgroup.
+        */
+       threadgroup_change_begin(tsk);
 
        if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
                tsk->flags |= PF_EXITING;
+               threadgroup_change_end(tsk);
                return;
        }
 
@@ -1988,28 +2451,29 @@ void exit_signals(struct task_struct *tsk)
         * see wants_signal(), do_signal_stop().
         */
        tsk->flags |= PF_EXITING;
+
+       threadgroup_change_end(tsk);
+
        if (!signal_pending(tsk))
                goto out;
 
-       /* It could be that __group_complete_signal() choose us to
-        * notify about group-wide signal. Another thread should be
-        * woken now to take the signal since we will not.
-        */
-       for (t = tsk; (t = next_thread(t)) != tsk; )
-               if (!signal_pending(t) && !(t->flags & PF_EXITING))
-                       recalc_sigpending_and_wake(t);
+       unblocked = tsk->blocked;
+       signotset(&unblocked);
+       retarget_shared_pending(tsk, &unblocked);
 
-       if (unlikely(tsk->signal->group_stop_count) &&
-                       !--tsk->signal->group_stop_count) {
-               tsk->signal->flags = SIGNAL_STOP_STOPPED;
-               group_stop = tracehook_notify_jctl(CLD_STOPPED, CLD_STOPPED);
-       }
+       if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
+           task_participate_group_stop(tsk))
+               group_stop = CLD_STOPPED;
 out:
        spin_unlock_irq(&tsk->sighand->siglock);
 
+       /*
+        * If group stop has completed, deliver the notification.  This
+        * should always go to the real parent of the group leader.
+        */
        if (unlikely(group_stop)) {
                read_lock(&tasklist_lock);
-               do_notify_parent_cldstop(tsk, group_stop);
+               do_notify_parent_cldstop(tsk, false, group_stop);
                read_unlock(&tasklist_lock);
        }
 }
@@ -2029,6 +2493,9 @@ EXPORT_SYMBOL(unblock_all_signals);
  * System call entry points.
  */
 
+/**
+ *  sys_restart_syscall - restart a system call
+ */
 SYSCALL_DEFINE0(restart_syscall)
 {
        struct restart_block *restart = &current_thread_info()->restart_block;
@@ -2040,11 +2507,33 @@ long do_no_restart_syscall(struct restart_block *param)
        return -EINTR;
 }
 
-/*
- * We don't need to get the kernel lock - this is all local to this
- * particular thread.. (and that's good, because this is _heavily_
- * used by various programs)
+static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
+{
+       if (signal_pending(tsk) && !thread_group_empty(tsk)) {
+               sigset_t newblocked;
+               /* A set of now blocked but previously unblocked signals. */
+               sigandnsets(&newblocked, newset, &current->blocked);
+               retarget_shared_pending(tsk, &newblocked);
+       }
+       tsk->blocked = *newset;
+       recalc_sigpending();
+}
+
+/**
+ * set_current_blocked - change current->blocked mask
+ * @newset: new mask
+ *
+ * It is wrong to change ->blocked directly; this helper should be used
+ * to ensure the process can't miss a shared signal we are going to block.
  */
+void set_current_blocked(const sigset_t *newset)
+{
+       struct task_struct *tsk = current;
+
+       spin_lock_irq(&tsk->sighand->siglock);
+       __set_task_blocked(tsk, newset);
+       spin_unlock_irq(&tsk->sighand->siglock);
+}
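+
+/*
+ * Example (illustrative, assumed usage): kernel code that wants to
+ * block an additional signal for the current task goes through the
+ * helper instead of assigning to ->blocked directly:
+ *
+ *	sigset_t newset = current->blocked;
+ *	sigaddset(&newset, SIGINT);
+ *	set_current_blocked(&newset);
+ */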
 
 /*
  * This is also useful for kernel threads that want to temporarily
@@ -2056,66 +2545,66 @@ long do_no_restart_syscall(struct restart_block *param)
  */
 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
 {
-       int error;
+       struct task_struct *tsk = current;
+       sigset_t newset;
 
-       spin_lock_irq(&current->sighand->siglock);
+       /* Lockless: only current can change ->blocked, never from irq */
        if (oldset)
-               *oldset = current->blocked;
+               *oldset = tsk->blocked;
 
-       error = 0;
        switch (how) {
        case SIG_BLOCK:
-               sigorsets(&current->blocked, &current->blocked, set);
+               sigorsets(&newset, &tsk->blocked, set);
                break;
        case SIG_UNBLOCK:
-               signandsets(&current->blocked, &current->blocked, set);
+               sigandnsets(&newset, &tsk->blocked, set);
                break;
        case SIG_SETMASK:
-               current->blocked = *set;
+               newset = *set;
                break;
        default:
-               error = -EINVAL;
+               return -EINVAL;
        }
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
 
-       return error;
+       set_current_blocked(&newset);
+       return 0;
 }
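+
+/*
+ * Example (illustrative): temporarily blocking SIGCHLD via
+ * sigprocmask(), restoring the old mask afterwards:
+ *
+ *	sigset_t set, oldset;
+ *
+ *	siginitset(&set, sigmask(SIGCHLD));
+ *	sigprocmask(SIG_BLOCK, &set, &oldset);
+ *	...
+ *	sigprocmask(SIG_SETMASK, &oldset, NULL);
+ */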
 
-SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
+/**
+ *  sys_rt_sigprocmask - change the list of currently blocked signals
+ *  @how: whether to add, remove, or set signals
+ *  @nset: signals to add, remove, or set (if non-null)
+ *  @oset: previous value of signal mask if non-null
+ *  @sigsetsize: size of sigset_t type
+ */
+SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
                sigset_t __user *, oset, size_t, sigsetsize)
 {
-       int error = -EINVAL;
        sigset_t old_set, new_set;
+       int error;
 
        /* XXX: Don't preclude handling different sized sigset_t's.  */
        if (sigsetsize != sizeof(sigset_t))
-               goto out;
+               return -EINVAL;
 
-       if (set) {
-               error = -EFAULT;
-               if (copy_from_user(&new_set, set, sizeof(*set)))
-                       goto out;
+       old_set = current->blocked;
+
+       if (nset) {
+               if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
+                       return -EFAULT;
                sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
 
-               error = sigprocmask(how, &new_set, &old_set);
+               error = sigprocmask(how, &new_set, NULL);
                if (error)
-                       goto out;
-               if (oset)
-                       goto set_old;
-       } else if (oset) {
-               spin_lock_irq(&current->sighand->siglock);
-               old_set = current->blocked;
-               spin_unlock_irq(&current->sighand->siglock);
+                       return error;
+       }
 
-       set_old:
-               error = -EFAULT;
-               if (copy_to_user(oset, &old_set, sizeof(*oset)))
-                       goto out;
+       if (oset) {
+               if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
+                       return -EFAULT;
        }
-       error = 0;
-out:
-       return error;
+
+       return 0;
 }
 
 long do_sigpending(void __user *set, unsigned long sigsetsize)
@@ -2140,8 +2629,14 @@ long do_sigpending(void __user *set, unsigned long sigsetsize)
 
 out:
        return error;
-}      
+}
 
+/**
+ *  sys_rt_sigpending - examine pending signals that have been raised
+ *                     while blocked
+ *  @set: where the mask of pending signals is returned
+ *  @sigsetsize: size of sigset_t type (or smaller)
+ */
 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
 {
        return do_sigpending(set, sigsetsize);
@@ -2189,6 +2684,14 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
 #ifdef __ARCH_SI_TRAPNO
                err |= __put_user(from->si_trapno, &to->si_trapno);
 #endif
+#ifdef BUS_MCEERR_AO
+               /*
+                * Other callers might not initialize the si_lsb field,
+                * so check explicitly for the right codes here.
+                */
+               if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
+                       err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
+#endif
                break;
        case __SI_CHLD:
                err |= __put_user(from->si_pid, &to->si_pid);
@@ -2213,15 +2716,82 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
 
 #endif
 
+/**
+ *  do_sigtimedwait - wait for queued signals specified in @which
+ *  @which: queued signals to wait for
+ *  @info: if non-null, the signal's siginfo is returned here
+ *  @ts: upper bound on process time suspension
+ */
+int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
+                       const struct timespec *ts)
+{
+       struct task_struct *tsk = current;
+       long timeout = MAX_SCHEDULE_TIMEOUT;
+       sigset_t mask = *which;
+       int sig;
+
+       if (ts) {
+               if (!timespec_valid(ts))
+                       return -EINVAL;
+               timeout = timespec_to_jiffies(ts);
+               /*
+                * We can be close to the next tick; add another one
+                * to ensure we will wait at least the time asked for.
+                */
+               if (ts->tv_sec || ts->tv_nsec)
+                       timeout++;
+       }
+
+       /*
+        * Invert the set of allowed signals to get those we want to block.
+        */
+       sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
+       signotset(&mask);
+
+       spin_lock_irq(&tsk->sighand->siglock);
+       sig = dequeue_signal(tsk, &mask, info);
+       if (!sig && timeout) {
+               /*
+                * None ready, temporarily unblock those we're interested
+                * in while we are sleeping so that we'll be awakened when
+                * they arrive. Unblocking is always fine, so we can avoid
+                * set_current_blocked().
+                */
+               tsk->real_blocked = tsk->blocked;
+               sigandsets(&tsk->blocked, &tsk->blocked, &mask);
+               recalc_sigpending();
+               spin_unlock_irq(&tsk->sighand->siglock);
+
+               timeout = schedule_timeout_interruptible(timeout);
+
+               spin_lock_irq(&tsk->sighand->siglock);
+               __set_task_blocked(tsk, &tsk->real_blocked);
+               siginitset(&tsk->real_blocked, 0);
+               sig = dequeue_signal(tsk, &mask, info);
+       }
+       spin_unlock_irq(&tsk->sighand->siglock);
+
+       if (sig)
+               return sig;
+       return timeout ? -EINTR : -EAGAIN;
+}
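+
+/*
+ * Example (illustrative): waiting up to one second for SIGUSR1 from
+ * kernel context:
+ *
+ *	sigset_t these;
+ *	siginfo_t info;
+ *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
+ *	int sig;
+ *
+ *	siginitset(&these, sigmask(SIGUSR1));
+ *	sig = do_sigtimedwait(&these, &info, &ts);
+ *
+ * A positive return is the dequeued signal number; -EAGAIN means the
+ * timeout expired and -EINTR that we were woken by an unwanted signal.
+ */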
+
+/**
+ *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
+ *                     in @uthese
+ *  @uthese: queued signals to wait for
+ *  @uinfo: if non-null, the signal's siginfo is returned here
+ *  @uts: upper bound on process time suspension
+ *  @sigsetsize: size of sigset_t type
+ */
 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
                siginfo_t __user *, uinfo, const struct timespec __user *, uts,
                size_t, sigsetsize)
 {
-       int ret, sig;
        sigset_t these;
        struct timespec ts;
        siginfo_t info;
-       long timeout = 0;
+       int ret;
 
        /* XXX: Don't preclude handling different sized sigset_t's.  */
        if (sigsetsize != sizeof(sigset_t))
@@ -2229,65 +2799,27 @@ SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
 
        if (copy_from_user(&these, uthese, sizeof(these)))
                return -EFAULT;
-               
-       /*
-        * Invert the set of allowed signals to get those we
-        * want to block.
-        */
-       sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
-       signotset(&these);
 
        if (uts) {
                if (copy_from_user(&ts, uts, sizeof(ts)))
                        return -EFAULT;
-               if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
-                   || ts.tv_sec < 0)
-                       return -EINVAL;
        }
 
-       spin_lock_irq(&current->sighand->siglock);
-       sig = dequeue_signal(current, &these, &info);
-       if (!sig) {
-               timeout = MAX_SCHEDULE_TIMEOUT;
-               if (uts)
-                       timeout = (timespec_to_jiffies(&ts)
-                                  + (ts.tv_sec || ts.tv_nsec));
-
-               if (timeout) {
-                       /* None ready -- temporarily unblock those we're
-                        * interested while we are sleeping in so that we'll
-                        * be awakened when they arrive.  */
-                       current->real_blocked = current->blocked;
-                       sigandsets(&current->blocked, &current->blocked, &these);
-                       recalc_sigpending();
-                       spin_unlock_irq(&current->sighand->siglock);
-
-                       timeout = schedule_timeout_interruptible(timeout);
-
-                       spin_lock_irq(&current->sighand->siglock);
-                       sig = dequeue_signal(current, &these, &info);
-                       current->blocked = current->real_blocked;
-                       siginitset(&current->real_blocked, 0);
-                       recalc_sigpending();
-               }
-       }
-       spin_unlock_irq(&current->sighand->siglock);
+       ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
 
-       if (sig) {
-               ret = sig;
-               if (uinfo) {
-                       if (copy_siginfo_to_user(uinfo, &info))
-                               ret = -EFAULT;
-               }
-       } else {
-               ret = -EAGAIN;
-               if (timeout)
-                       ret = -EINTR;
+       if (ret > 0 && uinfo) {
+               if (copy_siginfo_to_user(uinfo, &info))
+                       ret = -EFAULT;
        }
 
        return ret;
 }
 
+/**
+ *  sys_kill - send a signal to a process
+ *  @pid: the PID of the process
+ *  @sig: signal to be sent
+ */
 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
 {
        struct siginfo info;
@@ -2363,7 +2895,11 @@ SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
        return do_tkill(tgid, pid, sig);
 }
 
-/*
+/**
+ *  sys_tkill - send a signal to one specific task
+ *  @pid: the PID of the task
+ *  @sig: signal to be sent
+ *
  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
  */
 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
@@ -2375,6 +2911,12 @@ SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
        return do_tkill(0, pid, sig);
 }
 
+/**
+ *  sys_rt_sigqueueinfo - send signal information to a process
+ *  @pid: the PID of the process
+ *  @sig: signal to be sent
+ *  @uinfo: signal info to be sent
+ */
 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
                siginfo_t __user *, uinfo)
 {
@@ -2384,9 +2926,13 @@ SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
                return -EFAULT;
 
        /* Not even root can pretend to send signals from the kernel.
-          Nor can they impersonate a kill(), which adds source info.  */
-       if (info.si_code >= 0)
+        * Nor can they impersonate a kill()/tgkill(), which adds source info.
+        */
+       if (info.si_code >= 0 || info.si_code == SI_TKILL) {
+               /* We used to allow any < 0 si_code */
+               WARN_ON_ONCE(info.si_code < 0);
                return -EPERM;
+       }
        info.si_signo = sig;
 
        /* POSIX.1b doesn't mention process groups.  */
@@ -2400,9 +2946,13 @@ long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
                return -EINVAL;
 
        /* Not even root can pretend to send signals from the kernel.
-          Nor can they impersonate a kill(), which adds source info.  */
-       if (info->si_code >= 0)
+        * Nor can they impersonate a kill()/tgkill(), which adds source info.
+        */
+       if (info->si_code >= 0 || info->si_code == SI_TKILL) {
+               /* We used to allow any < 0 si_code */
+               WARN_ON_ONCE(info->si_code < 0);
                return -EPERM;
+       }
        info->si_signo = sig;
 
        return do_send_specific(tgid, pid, sig, info);
@@ -2494,12 +3044,11 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
 
                error = -EINVAL;
                /*
-                *
-                * Note - this code used to test ss_flags incorrectly
+                * Note - this code used to test ss_flags incorrectly:
                 *        old code may have been written using ss_flags==0
                 *        to mean ss_flags==SS_ONSTACK (as this was the only
                 *        way that worked) - this fix preserves that older
-                *        mechanism
+                *        mechanism.
                 */
                if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
                        goto out;
@@ -2533,6 +3082,10 @@ out:
 
 #ifdef __ARCH_WANT_SYS_SIGPENDING
 
+/**
+ *  sys_sigpending - examine pending signals
+ *  @set: where the mask of pending signals is returned
+ */
 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
 {
        return do_sigpending(set, sizeof(*set));
@@ -2541,60 +3094,65 @@ SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
 #endif
 
 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
-/* Some platforms have their own version with special arguments others
-   support only sys_rt_sigprocmask.  */
+/**
+ *  sys_sigprocmask - examine and change blocked signals
+ *  @how: whether to add, remove, or set signals
+ *  @nset: signals to add or remove (if non-null)
+ *  @oset: previous value of signal mask if non-null
+ *
+ * Some platforms have their own version with special arguments;
+ * others support only sys_rt_sigprocmask.
+ */
 
-SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
+SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
                old_sigset_t __user *, oset)
 {
-       int error;
        old_sigset_t old_set, new_set;
+       sigset_t new_blocked;
 
-       if (set) {
-               error = -EFAULT;
-               if (copy_from_user(&new_set, set, sizeof(*set)))
-                       goto out;
+       old_set = current->blocked.sig[0];
+
+       if (nset) {
+               if (copy_from_user(&new_set, nset, sizeof(*nset)))
+                       return -EFAULT;
                new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
 
-               spin_lock_irq(&current->sighand->siglock);
-               old_set = current->blocked.sig[0];
+               new_blocked = current->blocked;
 
-               error = 0;
                switch (how) {
-               default:
-                       error = -EINVAL;
-                       break;
                case SIG_BLOCK:
-                       sigaddsetmask(&current->blocked, new_set);
+                       sigaddsetmask(&new_blocked, new_set);
                        break;
                case SIG_UNBLOCK:
-                       sigdelsetmask(&current->blocked, new_set);
+                       sigdelsetmask(&new_blocked, new_set);
                        break;
                case SIG_SETMASK:
-                       current->blocked.sig[0] = new_set;
+                       new_blocked.sig[0] = new_set;
                        break;
+               default:
+                       return -EINVAL;
                }
 
-               recalc_sigpending();
-               spin_unlock_irq(&current->sighand->siglock);
-               if (error)
-                       goto out;
-               if (oset)
-                       goto set_old;
-       } else if (oset) {
-               old_set = current->blocked.sig[0];
-       set_old:
-               error = -EFAULT;
+               set_current_blocked(&new_blocked);
+       }
+
+       if (oset) {
                if (copy_to_user(oset, &old_set, sizeof(*oset)))
-                       goto out;
+                       return -EFAULT;
        }
-       error = 0;
-out:
-       return error;
+
+       return 0;
 }
 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
 
 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
+/**
+ *  sys_rt_sigaction - alter an action taken by a process
+ *  @sig: signal to be sent
+ *  @act: new sigaction
+ *  @oact: used to save the previous sigaction
+ *  @sigsetsize: size of sigset_t type
+ */
 SYSCALL_DEFINE4(rt_sigaction, int, sig,
                const struct sigaction __user *, act,
                struct sigaction __user *, oact,
@@ -2636,15 +3194,11 @@ SYSCALL_DEFINE0(sgetmask)
 
 SYSCALL_DEFINE1(ssetmask, int, newmask)
 {
-       int old;
+       int old = current->blocked.sig[0];
+       sigset_t newset;
 
-       spin_lock_irq(&current->sighand->siglock);
-       old = current->blocked.sig[0];
-
-       siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
-                                                 sigmask(SIGSTOP)));
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
+       siginitset(&newset, newmask & ~(sigmask(SIGKILL) | sigmask(SIGSTOP)));
+       set_current_blocked(&newset);
 
        return old;
 }
@@ -2673,14 +3227,22 @@ SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
 
 SYSCALL_DEFINE0(pause)
 {
-       current->state = TASK_INTERRUPTIBLE;
-       schedule();
+       while (!signal_pending(current)) {
+               current->state = TASK_INTERRUPTIBLE;
+               schedule();
+       }
        return -ERESTARTNOHAND;
 }
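+
+/*
+ * Note on the loop above: re-checking signal_pending() guarantees that
+ * pause() only returns once a signal is actually pending, instead of
+ * returning early on a spurious wakeup.
+ */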
 
 #endif
 
 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
+/**
+ *  sys_rt_sigsuspend - replace the signal mask with the @unewset
+ *     value until a signal is received
+ *  @unewset: new signal mask value
+ *  @sigsetsize: size of sigset_t type
+ */
 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
 {
        sigset_t newset;
@@ -2693,11 +3255,8 @@ SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
                return -EFAULT;
        sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
 
-       spin_lock_irq(&current->sighand->siglock);
        current->saved_sigmask = current->blocked;
-       current->blocked = newset;
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
+       set_current_blocked(&newset);
 
        current->state = TASK_INTERRUPTIBLE;
        schedule();
@@ -2715,3 +3274,43 @@ void __init signals_init(void)
 {
        sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
 }
+
+#ifdef CONFIG_KGDB_KDB
+#include <linux/kdb.h>
+/*
+ * kdb_send_sig_info - Allows kdb to send signals without exposing
+ * signal internals.  This function checks if the required locks are
+ * available before calling the main signal code, to avoid kdb
+ * deadlocks.
+ */
+void
+kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
+{
+       static struct task_struct *kdb_prev_t;
+       int sig, new_t;
+       if (!spin_trylock(&t->sighand->siglock)) {
+               kdb_printf("Can't do kill command now.\n"
+                          "The sigmask lock is held somewhere else in "
+                          "kernel, try again later\n");
+               return;
+       }
+       spin_unlock(&t->sighand->siglock);
+       new_t = kdb_prev_t != t;
+       kdb_prev_t = t;
+       if (t->state != TASK_RUNNING && new_t) {
+               kdb_printf("Process is not RUNNING, sending a signal from "
+                          "kdb risks deadlock\n"
+                          "on the run queue locks. "
+                          "The signal has _not_ been sent.\n"
+                          "Reissue the kill command if you want to risk "
+                          "the deadlock.\n");
+               return;
+       }
+       sig = info->si_signo;
+       if (send_sig_info(sig, info, t))
+               kdb_printf("Fail to deliver Signal %d to process %d.\n",
+                          sig, t->pid);
+       else
+               kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
+}
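+
+/*
+ * Example (illustrative): the kdb "kill" command is the expected
+ * caller; "kill -9 <pid>" at the kdb prompt ends up here after kdb
+ * has built the siginfo and resolved the task.
+ */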
+#endif /* CONFIG_KGDB_KDB */