/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * 1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>

extern void k_getrusage(struct task_struct *, int, struct rusage *);

/*
 * SLAB caches for signal bits.
 */

static kmem_cache_t *sigqueue_cachep;

/*
 * In POSIX a signal is sent either to a specific thread (Linux task)
 * or to the process as a whole (Linux thread group).  How the signal
 * is sent determines whether it's to one thread or the whole group,
 * which determines which signal mask(s) are involved in blocking it
 * from being delivered until later.  When the signal is delivered,
 * either it's caught or ignored by a user handler or it has a default
 * effect that applies to the whole thread group (POSIX process).
 *
 * The possible effects an unblocked signal set to SIG_DFL can have are:
 *   ignore	- Nothing Happens
 *   terminate	- kill the process, i.e. all threads in the group,
 *		  similar to exit_group.  The group leader (only) reports
 *		  WIFSIGNALED status to its parent.
 *   coredump	- write a core dump file describing all threads using
 *		  the same mm and then kill all those threads
 *   stop	- stop all the threads in the group, i.e. TASK_STOPPED state
 *
 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
 * Other signals when not blocked and set to SIG_DFL behave as follows.
 * The job control signals also have other special effects.
 *
 *	+--------------------+------------------+
 *	|  POSIX signal      |  default action  |
 *	+--------------------+------------------+
 *	|  SIGHUP            |  terminate       |
 *	|  SIGINT            |  terminate       |
 *	|  SIGQUIT           |  coredump        |
 *	|  SIGILL            |  coredump        |
 *	|  SIGTRAP           |  coredump        |
 *	|  SIGABRT/SIGIOT    |  coredump        |
 *	|  SIGBUS            |  coredump        |
 *	|  SIGFPE            |  coredump        |
 *	|  SIGKILL           |  terminate(+)    |
 *	|  SIGUSR1           |  terminate       |
 *	|  SIGSEGV           |  coredump        |
 *	|  SIGUSR2           |  terminate       |
 *	|  SIGPIPE           |  terminate       |
 *	|  SIGALRM           |  terminate       |
 *	|  SIGTERM           |  terminate       |
 *	|  SIGCHLD           |  ignore          |
 *	|  SIGCONT           |  ignore(*)       |
 *	|  SIGSTOP           |  stop(*)(+)      |
 *	|  SIGTSTP           |  stop(*)         |
 *	|  SIGTTIN           |  stop(*)         |
 *	|  SIGTTOU           |  stop(*)         |
 *	|  SIGURG            |  ignore          |
 *	|  SIGXCPU           |  coredump        |
 *	|  SIGXFSZ           |  coredump        |
 *	|  SIGVTALRM         |  terminate       |
 *	|  SIGPROF           |  terminate       |
 *	|  SIGPOLL/SIGIO     |  terminate       |
 *	|  SIGSYS/SIGUNUSED  |  coredump        |
 *	|  SIGSTKFLT         |  terminate       |
 *	|  SIGWINCH          |  ignore          |
 *	|  SIGPWR            |  terminate       |
 *	|  SIGRTMIN-SIGRTMAX |  terminate       |
 *	+--------------------+------------------+
 *	|  non-POSIX signal  |  default action  |
 *	+--------------------+------------------+
 *	|  SIGEMT            |  coredump        |
 *	+--------------------+------------------+
 *
 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
 * (*) Special job control effects:
 * When SIGCONT is sent, it resumes the process (all threads in the group)
 * from TASK_STOPPED state and also clears any pending/queued stop signals
 * (any of those marked with "stop(*)").  This happens regardless of blocking,
 * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
 * any pending/queued SIGCONT signals; this happens regardless of blocking,
 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
 * default action of stopping the process may happen later or never.
 */
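/*
 * Illustrative userspace sketch (an editor's example, not part of the
 * kernel source): the stop/continue semantics described above, observed
 * through waitpid().  Assumes a POSIX system with WUNTRACED/WCONTINUED.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0)			/* child: idle until signalled */
		for (;;)
			pause();

	kill(pid, SIGSTOP);		/* stops the whole thread group */
	waitpid(pid, &status, WUNTRACED);
	if (WIFSTOPPED(status))
		printf("stopped by signal %d\n", WSTOPSIG(status));

	kill(pid, SIGCONT);		/* resumes regardless of blocking/ignoring */
	waitpid(pid, &status, WCONTINUED);
	if (WIFCONTINUED(status))
		printf("continued\n");

	kill(pid, SIGKILL);		/* cannot be caught, blocked or ignored */
	waitpid(pid, &status, 0);
	return 0;
}
#endif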
#ifdef SIGEMT
#define M_SIGEMT	M(SIGEMT)
#else
#define M_SIGEMT	0
#endif

#if SIGRTMIN > BITS_PER_LONG
#define M(sig) (1ULL << ((sig)-1))
#else
#define M(sig) (1UL << ((sig)-1))
#endif
#define T(sig, mask) (M(sig) & (mask))

#define SIG_KERNEL_ONLY_MASK (\
	M(SIGKILL) | M(SIGSTOP) )

#define SIG_KERNEL_STOP_MASK (\
	M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) )

#define SIG_KERNEL_COREDUMP_MASK (\
	M(SIGQUIT) | M(SIGILL)  | M(SIGTRAP) | M(SIGABRT) | \
	M(SIGFPE)  | M(SIGSEGV) | M(SIGBUS)  | M(SIGSYS)  | \
	M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT )

#define SIG_KERNEL_IGNORE_MASK (\
	M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG) )

#define sig_kernel_only(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_kernel_ignore(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK))
#define sig_kernel_stop(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))

#define sig_user_defined(t, signr) \
	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&	\
	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))

#define sig_fatal(t, signr) \
	(!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
	 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)

static int sig_ignored(struct task_struct *t, int sig)
{
	void __user * handler;

	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	return   handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked))
		set_tsk_thread_flag(t, TIF_SIGPENDING);
	else
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
}

void recalc_sigpending(void)
{
	recalc_sigpending_tsk(current);
}

/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}

static struct sigqueue *__sigqueue_alloc(struct task_struct *t, int flags)
{
	struct sigqueue *q = NULL;

	if (atomic_read(&t->user->sigpending) <
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (q) {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->lock = NULL;
		q->user = get_uid(t->user);
		atomic_inc(&q->user->sigpending);
	}
	return q;
}

static inline void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

static void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */

void
flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t,TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_sighand(struct task_struct *tsk)
{
	struct sighand_struct * sighand = tsk->sighand;

	/* Ok, we're done with the signal handlers */
	tsk->sighand = NULL;
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}

void exit_sighand(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_sighand(tsk);
	write_unlock_irq(&tasklist_lock);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct * sig = tsk->signal;
	struct sighand_struct * sighand = tsk->sighand;

	if (!sig)
		BUG();
	if (!atomic_read(&sig->count))
		BUG();
	spin_lock(&sighand->siglock);
	if (atomic_dec_and_test(&sig->count)) {
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		spin_unlock(&sighand->siglock);
		flush_sigqueue(&sig->shared_pending);
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
			wake_up_process(sig->group_exit_task);
			sig->group_exit_task = NULL;
		}
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads.  When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime = cputime_add(sig->utime, tsk->utime);
		sig->stime = cputime_add(sig->stime, tsk->stime);
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		spin_unlock(&sighand->siglock);
		sig = NULL;	/* Marker for below.  */
	}
	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		/*
		 * We are cleaning up the signal_struct here.  We delayed
		 * calling exit_itimers until after flush_sigqueue, just in
		 * case our thread-local pending queue contained a queued
		 * timer signal that would have been cleared in
		 * exit_itimers.  When that called sigqueue_free, it would
		 * attempt to re-take the tasklist_lock and deadlock.  This
		 * can never happen if we ensure that all queues the
		 * timer's signal might be queued on have been flushed
		 * first.  The shared_pending queue, and our own pending
		 * queue are the only queues the timer could be on, since
		 * there are no other threads left in the group and timer
		 * signals are constrained to threads inside the group.
		 */
		exit_itimers(sig);
		kmem_cache_free(signal_cachep, sig);
	}
}

void exit_signal(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_signal(tsk);
	write_unlock_irq(&tasklist_lock);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not. */
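/*
 * Illustrative sketch (an editor's example, not part of the kernel
 * source): how a driver might use the notifier interface above.  The
 * my_notifier/my_lock names are hypothetical.
 */
#if 0
static int my_notifier(void *priv)
{
	struct my_lock *lock = priv;	/* hypothetical private data */

	/* Return 0 to keep blocking the signal, non-zero to let it through. */
	return lock->held ? 0 : 1;
}

static void my_critical_section(struct my_lock *lock, sigset_t *mask)
{
	block_all_signals(my_notifier, lock, mask);
	/* ... work that must not be disturbed by the signals in *mask ... */
	unblock_all_signals();
}
#endif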
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {
		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = 0;

	sig = next_signal(pending, mask);
	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr)
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
	if (signr && unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if (signr &&
	     ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	     info->si_sys_private){
		do_schedule_next_timer(info);
	}
	return signr;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced case.
	 * We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED | TASK_TRACED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error = -EINVAL;
	if (sig < 0 || sig > _NSIG)
		return error;
	error = -EPERM;
	if ((!info || ((unsigned long)info != 1 &&
			(unsigned long)info != 2 && SI_FROMUSER(info)))
	    && ((sig != SIGCONT) ||
		(current->signal->session != t->signal->session))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;
	return security_task_kill(t, info, sig);
}

/* forward decl */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     struct task_struct *parent,
				     int why);

/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (p->flags & SIGNAL_GROUP_EXIT)
		/*
		 * The process is in the middle of dying already.
		 */
		return;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			spin_unlock(&p->sighand->siglock);
			if (p->ptrace & PT_PTRACED)
				do_notify_parent_cldstop(p, p->parent,
							 CLD_STOPPED);
			else
				do_notify_parent_cldstop(
					p->group_leader,
					p->group_leader->real_parent,
					CLD_STOPPED);
			spin_lock(&p->sighand->siglock);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;

			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);

		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
			/*
			 * We were in fact stopped, and are now continued.
			 * Notify the parent with CLD_CONTINUED.
			 */
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			p->signal->group_exit_code = 0;
			spin_unlock(&p->sighand->siglock);
			if (p->ptrace & PT_PTRACED)
				do_notify_parent_cldstop(p, p->parent,
							 CLD_CONTINUED);
			else
				do_notify_parent_cldstop(
					p->group_leader,
					p->group_leader->real_parent,
					CLD_CONTINUED);
			spin_lock(&p->sighand->siglock);
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			p->signal->flags = 0;
		}
	} else if (sig == SIGKILL) {
		/*
		 * Make sure that any pending stop signal already dequeued
		 * is undone by the wakeup for SIGKILL.
		 */
		p->signal->flags = 0;
	}
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if ((unsigned long)info == 2)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC);
	if (q) {
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case 0:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case 1:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (sig >= SIGRTMIN && info && (unsigned long)info != 1
		   && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
		return -EAGAIN;
	} else {
		if (((unsigned long)info > 1) && (info->si_code == SI_TIMER))
			/*
			 * Set up a return to indicate that we dropped
			 * the signal.
			 */
			ret = info->si_sys_private;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}

#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
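/*
 * Illustrative userspace sketch (an editor's example, not part of the
 * kernel source): the LEGACY_QUEUE() rule above means a non-RT signal
 * sent twice while blocked is delivered once, while an RT signal queues
 * each instance.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static int drain(int sig)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { 0, 0 };
	int n = 0;

	sigemptyset(&set);
	sigaddset(&set, sig);
	while (sigtimedwait(&set, &info, &ts) == sig)
		n++;
	return n;
}

int main(void)
{
	sigset_t set;
	union sigval val = { .sival_int = 0 };

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);

	kill(getpid(), SIGUSR1);		/* the second one collapses */
	kill(getpid(), SIGUSR1);
	sigqueue(getpid(), SIGRTMIN, val);	/* these both stay queued */
	sigqueue(getpid(), SIGRTMIN, val);

	printf("SIGUSR1 delivered %d time(s)\n", drain(SIGUSR1));	/* 1 */
	printf("SIGRTMIN delivered %d time(s)\n", drain(SIGRTMIN));	/* 2 */
	return 0;
}
#endif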
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	if (!irqs_disabled())
		BUG();
	if (!spin_is_locked(&t->sighand->siglock))
		BUG();

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, t, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 */

int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
		sigdelset(&t->blocked, sig);
		recalc_sigpending_tsk(t);
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
	unsigned long int flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
	sigdelset(&t->blocked, sig);
	recalc_sigpending_tsk(t);
	specific_send_sig_info(sig, (void *)2, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
#define wants_signal(sig, p, mask)			\
	(!sigismember(&(p)->blocked, sig)		\
	 && !((p)->state & mask)			\
	 && !((p)->flags & PF_EXITING)			\
	 && (task_curr(p) || !signal_pending(p)))

static void
__group_complete_signal(int sig, struct task_struct *p)
{
	unsigned int mask;
	struct task_struct *t;

	/*
	 * Don't bother traced and stopped tasks (but
	 * SIGKILL will punch through that).
	 */
	mask = TASK_STOPPED | TASK_TRACED;
	if (sig == SIGKILL)
		mask = 0;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p, mask))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;
		BUG_ON(t->tgid != p->tgid);

		while (!wants_signal(sig, t, mask)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->flags = SIGNAL_GROUP_EXIT;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret = 0;

	if (!spin_is_locked(&p->sighand->siglock))
		BUG();
	handle_stop_signal(sig, p);

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p);
	return 0;
}

/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->flags = SIGNAL_GROUP_EXIT;
	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/*
		 * We don't want to notify the parent, since we are
		 * killed as part of a thread group due to another
		 * thread doing an execve() or similar. So set the
		 * exit signal to -1 to allow immediate reaping of
		 * the process.  But don't detach the thread group
		 * leader.
		 */
		if (t != p->group_leader)
			t->exit_signal = -1;

		sigaddset(&t->pending.signal, SIGKILL);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		signal_wake_up(t, 1);
	}
}

/*
 * Must be called with the tasklist_lock held for reading!
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);
	if (!ret && sig && p->sighand) {
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}

	return ret;
}

/*
 * kill_pg_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	if (pgrp <= 0)
		return -EINVAL;

	success = 0;
	retval = -ESRCH;
	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pg_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	struct task_struct *p;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p)
		error = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return error;
}

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	if (!pid) {
		return kill_pg_info(sig, info, process_group(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		return count ? retval : -ESRCH;
	} else if (pid < 0) {
		return kill_pg_info(sig, info, -pid);
	} else {
		return kill_proc_info(sig, info, pid);
	}
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (sig < 0 || sig > _NSIG)
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}

/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return ret;
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int
kill_pg(pid_t pgrp, int sig, int priv)
{
	return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
}

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
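/*
 * Illustrative userspace sketch (an editor's example, not part of the
 * kernel source): the POSIX timer path that relies on this
 * preallocation.  timer_create() is where a failed allocation would be
 * reported as EAGAIN.  May need -lrt on older C libraries.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct sigevent sev = { 0 };
	timer_t timerid;

	sev.sigev_notify = SIGEV_SIGNAL;
	sev.sigev_signo = SIGRTMIN;
	sev.sigev_value.sival_int = 42;		/* payload for the handler */

	if (timer_create(CLOCK_REALTIME, &sev, &timerid) < 0) {
		perror("timer_create");		/* EAGAIN if the sigqueue */
		return 1;			/* preallocation failed   */
	}
	timer_delete(timerid);
	return 0;
}
#endif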
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL)))
		q->flags |= SIGQUEUE_PREALLOC;
	return(q);
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		read_lock(&tasklist_lock);
		spin_lock_irqsave(q->lock, flags);
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(q->lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}

int
send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return(ret);
}

int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return(ret);
}

/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
						       tsk->signal->utime));
	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
						       tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (sig > 0 && sig <= _NSIG)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}

static void
do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent,
			 int why)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *sighand;

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer is gone,
 * we leave nostop_code in current->exit_code.
 */
static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
{
	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (likely(current->ptrace & PT_PTRACED) &&
	    likely(current->parent != current->real_parent ||
		   !(current->ptrace & PT_ATTACHED)) &&
	    (likely(current->parent->signal != current->signal) ||
	     !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
		do_notify_parent_cldstop(current, current->parent,
					 CLD_TRAPPED);
		read_unlock(&tasklist_lock);
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't stop here.
		 */
		read_unlock(&tasklist_lock);
		set_current_state(TASK_RUNNING);
		current->exit_code = nostop_code;
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 */
	recalc_sigpending();
}

void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = current->pid;
	info.si_uid = current->uid;

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 0, &info);
	spin_unlock_irq(&current->sighand->siglock);
}

#ifndef HAVE_ARCH_GET_SIGNAL_TO_DELIVER

static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count < 0 || (current->ptrace & PT_PTRACED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, current->parent,
					 CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}
	else if (stop_count == 0) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current->group_leader,
					 current->group_leader->real_parent,
					 CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	schedule();
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}

/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int
do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	struct sighand_struct *sighand = current->sighand;
	int stop_count = -1;

	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
		return 0;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		signr = sig->group_exit_code;
		stop_count = --sig->group_stop_count;
		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		if (stop_count == 0)
			sig->flags = SIGNAL_STOP_STOPPED;
		spin_unlock_irq(&sighand->siglock);
	}
	else if (thread_group_empty(current)) {
		/*
		 * Lock must be held through transition to stopped state.
		 */
		current->exit_code = current->signal->group_exit_code = signr;
		set_current_state(TASK_STOPPED);
		sig->flags = SIGNAL_STOP_STOPPED;
		spin_unlock_irq(&sighand->siglock);
	}
	else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now, but that requires
		 * dropping siglock to get both the tasklist lock
		 * and siglock again in the proper order.  Note that
		 * this allows an intervening SIGCONT to be posted.
		 * We need to check for that and bail out if necessary.
		 */
		struct task_struct *t;

		spin_unlock_irq(&sighand->siglock);

		/* signals can be posted during this window */

		read_lock(&tasklist_lock);
		spin_lock_irq(&sighand->siglock);

		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
			/*
			 * Another stop or continue happened while we
			 * didn't have the lock.  We can just swallow this
			 * signal now.  If we raced with a SIGCONT, that
			 * should have just cleared it now.  If we raced
			 * with another processor delivering a stop signal,
			 * then the SIGCONT that wakes us up should clear it.
			 */
			read_unlock(&tasklist_lock);
			return 0;
		}

		if (sig->group_stop_count == 0) {
			sig->group_exit_code = signr;
			stop_count = 0;
			for (t = next_thread(current); t != current;
			     t = next_thread(t))
				/*
				 * Setting state to TASK_STOPPED for a group
				 * stop is always done with the siglock held,
				 * so this check has no races.
				 */
				if (t->state < TASK_STOPPED) {
					stop_count++;
					signal_wake_up(t, 0);
				}
			sig->group_stop_count = stop_count;
		}
		else {
			/* A race with another thread while unlocked.  */
			signr = sig->group_exit_code;
			stop_count = --sig->group_stop_count;
		}

		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		if (stop_count == 0)
			sig->flags = SIGNAL_STOP_STOPPED;

		spin_unlock_irq(&sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	finish_stop(stop_count);
	return 1;
}

/*
 * Do appropriate magic when group_stop_count > 0.
 * We return nonzero if we stopped, after releasing the siglock.
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 */
static inline int handle_group_stop(void)
{
	int stop_count;

	if (current->signal->group_exit_task == current) {
		/*
		 * Group stop is so we can do a core dump,
		 * We are the initiating thread, so get on with it.
		 */
		current->signal->group_exit_task = NULL;
		return 0;
	}

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * Group stop is so another thread can do a core dump,
		 * or else we are racing against a death signal.
		 * Just punt the stop so we can get the next signal.
		 */
		return 0;

	/*
	 * There is a group stop in progress.  We stop
	 * without any associated signal being in our queue.
	 */
	stop_count = --current->signal->group_stop_count;
	if (stop_count == 0)
		current->signal->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = current->signal->group_exit_code;
	set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}

int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

relock:
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			ptrace_signal_deliver(regs, cookie);

			/* Let the debugger run.  */
			ptrace_stop(signr, signr, info);

			/* We're back.  Did the debugger cancel the sig?  */
			signr = current->exit_code;
			if (signr == 0)
				continue;
			current->exit_code = 0;

			/* Update the siginfo structure if the signal has
			   changed.  If the debugger wanted something
			   specific in the siginfo structure then it should
			   have updated *info via PTRACE_SETSIGINFO.  */
			if (signr != info->si_signo) {
				info->si_signo = signr;
				info->si_errno = 0;
				info->si_code = SI_USER;
				info->si_pid = current->parent->pid;
				info->si_uid = current->parent->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				specific_send_sig_info(signr, info, current);
				continue;
			}
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/* Init gets no signals it doesn't want.  */
		if (current->pid == 1)
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&current->sighand->siglock);

				/* signals can be posted during this window */

				if (is_orphaned_pgrp(process_group(current)))
					goto relock;

				spin_lock_irq(&current->sighand->siglock);
			}

			if (likely(do_signal_stop(signr))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if (sig_kernel_coredump(signr)) {
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump((long)signr, signr, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}

#endif

EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);

/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 *
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;
	sigset_t old_block;

	spin_lock_irq(&current->sighand->siglock);
	old_block = current->blocked;
	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
		break;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = old_block;
	return error;
}

asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}

long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;
out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}

#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif

asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested in while we are sleeping, so that
			 * we'll be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			current->state = TASK_INTERRUPTIBLE;
			timeout = schedule_timeout(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}

asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}

/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process. This
 *  method solves the problem of threads exiting and PIDs getting reused.
 */
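/*
 * Illustrative userspace sketch (an editor's example, not part of the
 * kernel source): tgkill has no C-library wrapper on older systems, so
 * it is reached through syscall(2).  my_tgkill is a hypothetical name.
 */
#if 0
#include <sys/syscall.h>
#include <unistd.h>

static int my_tgkill(pid_t tgid, pid_t tid, int sig)
{
	return syscall(SYS_tgkill, tgid, tid, sig);
}

/* e.g. my_tgkill(getpid(), tid, 0) probes whether the thread still
   belongs to this process; sig == 0 delivers nothing. */
#endif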
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p && (p->tgid == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}

/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}

asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}
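/*
 * Illustrative userspace sketch (an editor's example, not part of the
 * kernel source): sigqueue(3) is the portable route into this syscall;
 * since the kernel rejects si_code >= 0 from userspace, only queued
 * SI_QUEUE siginfo (with a sigval payload) can be sent this way.
 */
#if 0
#include <signal.h>

int notify(pid_t pid, int value)
{
	union sigval sv;

	sv.sival_int = value;		/* arrives as si_value/si_ptr */
	return sigqueue(pid, SIGRTMIN, sv);
}
#endif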
int
do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;

	if (sig < 1 || sig > _NSIG || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &current->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (signal_pending(current)) {
		/*
		 * If there might be a fatal signal pending on multiple
		 * threads, make sure we take it before changing the action.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		return -ERESTARTNOINTR;
	}

	if (oact)
		*oact = *k;

	if (act) {
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (act->sa.sa_handler == SIG_IGN ||
		    (act->sa.sa_handler == SIG_DFL &&
		     sig_kernel_ignore(sig))) {
			/*
			 * This is a fairly rare case, so we only take the
			 * tasklist_lock once we're sure we'll need it.
			 * Now we must do this little unlock and relock
			 * dance to maintain the lock hierarchy.
			 */
			struct task_struct *t = current;
			spin_unlock_irq(&t->sighand->siglock);
			read_lock(&tasklist_lock);
			spin_lock_irq(&t->sighand->siglock);
			*k = *act;
			sigdelsetmask(&k->sa.sa_mask,
				      sigmask(SIGKILL) | sigmask(SIGSTOP));
			rm_from_queue(sigmask(sig), &t->signal->shared_pending);
			do {
				rm_from_queue(sigmask(sig), &t->pending);
				recalc_sigpending_tsk(t);
				t = next_thread(t);
			} while (t != current);
			spin_unlock_irq(&current->sighand->siglock);
			read_unlock(&tasklist_lock);
			return 0;
		}

		*k = *act;
		sigdelsetmask(&k->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
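/*
 * Illustrative userspace sketch (an editor's example, not part of the
 * kernel source): the POSIX rule quoted above -- setting SIG_IGN
 * discards a pending signal even while it is blocked.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	sigset_t set, pending;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);
	kill(getpid(), SIGUSR1);		/* now pending and blocked */

	signal(SIGUSR1, SIG_IGN);		/* discards the pending signal */
	sigpending(&pending);
	printf("still pending: %d\n", sigismember(&pending, SIGUSR1)); /* 0 */
	return 0;
}
#endif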
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (verify_area(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly
		 *	  old code may have been written using ss_flags==0
		 *	  to mean ss_flags==SS_ONSTACK (as this was the only
		 *	  way that worked) - this fix preserves that older
		 *	  mechanism
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
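/*
 * Illustrative userspace sketch (an editor's example, not part of the
 * kernel source): installing an alternate stack and an SA_ONSTACK
 * handler, the usual way to survive a stack-overflow SIGSEGV.
 */
#if 0
#include <signal.h>
#include <stdlib.h>

static void handler(int sig)
{
	/* runs on the alternate stack */
}

int main(void)
{
	stack_t ss = { 0 };
	struct sigaction sa = { 0 };

	ss.ss_sp = malloc(SIGSTKSZ);
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;		/* see the compatibility note above */
	sigaltstack(&ss, NULL);

	sa.sa_handler = handler;
	sa.sa_flags = SA_ONSTACK;
	sigaction(SIGSEGV, &sa, NULL);
	return 0;
}
#endif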
#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments;
   others support only sys_rt_sigprocmask.  */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */

#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */

#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SGETMASK */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
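/*
 * Illustrative userspace sketch (an editor's example, not part of the
 * kernel source): the SA_ONESHOT | SA_NOMASK disposition sys_signal()
 * installs, spelled SA_RESETHAND | SA_NODEFER in POSIX terms.  (Modern
 * C libraries implement signal() on top of sigaction() with different,
 * BSD-style semantics.)
 */
#if 0
#include <signal.h>
#include <stdio.h>

static void handler(int sig)
{
}

int main(void)
{
	struct sigaction sa = { 0 }, cur;

	sa.sa_handler = handler;
	sa.sa_flags = SA_RESETHAND | SA_NODEFER;
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);				/* handler runs once... */
	sigaction(SIGUSR1, NULL, &cur);
	printf("back to SIG_DFL: %d\n", cur.sa_handler == SIG_DFL); /* 1 */
	return 0;
}
#endif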
#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif

void __init signals_init(void)
{
	sigqueue_cachep =
		kmem_cache_create("sigqueue",
				  sizeof(struct sigqueue),
				  __alignof__(struct sigqueue),
				  SLAB_PANIC, NULL, NULL);
}