4 * Copyright (C) 1991, 1992 Linus Torvalds
7 #include <linux/config.h>
8 #include <linux/slab.h>
9 #include <linux/interrupt.h>
10 #include <linux/smp_lock.h>
11 #include <linux/module.h>
12 #include <linux/completion.h>
13 #include <linux/personality.h>
14 #include <linux/tty.h>
15 #ifdef CONFIG_BSD_PROCESS_ACCT
16 #include <linux/acct.h>
19 #include <asm/uaccess.h>
20 #include <asm/pgtable.h>
21 #include <asm/mmu_context.h>
23 extern void sem_exit (void);
24 extern struct task_struct *child_reaper;
26 int getrusage(struct task_struct *, int, struct rusage *);
/*
 * release_task() - final teardown of a dead child, run by the reaper
 * (normally the parent in sys_wait4).
 * NOTE(review): this extract is fragmentary -- intervening lines are
 * missing; comments below describe only what is visible.
 */
28 static void release_task(struct task_struct * p)
33 * Wait to make sure the process isn't on the
34 * runqueue (active on some other CPU still)
/* Spin until no CPU is still executing p; freeing a running task would race. */
44 } while (task_has_cpu(p));
/* Drop p's slot from its owning user's per-user process count. */
48 atomic_dec(&p->user->processes);
/* Fold the dead child's fault/swap statistics into the parent's c* totals. */
53 current->cmin_flt += p->min_flt + p->cmin_flt;
54 current->cmaj_flt += p->maj_flt + p->cmaj_flt;
55 current->cnswap += p->nswap + p->cnswap;
57 * Potentially available timeslices are retrieved
58 * here - this way the parent does not get penalized
59 * for creating too many processes.
61 * (this cannot be used to artificially 'generate'
62 * timeslices, because any timeslice recovered here
63 * was given away by the parent in the first place.)
/* Recover the child's remaining timeslice, clamped to MAX_COUNTER. */
65 current->counter += p->counter;
66 if (current->counter >= MAX_COUNTER)
67 current->counter = MAX_COUNTER;
/* NOTE(review): presumably a sanity check for p == current -- a task
 * must never release itself; confirm against the full source. */
71 printk("task releasing itself\n");
76 * This checks not only the pgrp, but falls back on the pid if no
77 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
/*
 * session_of_pgrp() - return the session id associated with a process
 * group, preferring an exact pgrp match and otherwise recording a
 * fallback session (per the comment above, a pid-based fallback).
 * NOTE(review): fragmentary extract -- the traversal loop and return
 * are missing here.
 */
80 int session_of_pgrp(int pgrp)
82 struct task_struct *p;
/* tasklist_lock guards the task-list walk below. */
86 read_lock(&tasklist_lock);
/* Exact pgrp member found: remember its session. */
90 if (p->pgrp == pgrp) {
91 fallback = p->session;
95 fallback = p->session;
97 read_unlock(&tasklist_lock);
102 * Determine if a process group is "orphaned", according to the POSIX
103 * definition in 2.2.2.52. Orphaned process groups are not to be affected
104 * by terminal-generated stop signals. Newly orphaned process groups are
105 * to receive a SIGHUP and a SIGCONT.
107 * "I ask you, have you ever known what it is to be an orphan?"
/*
 * will_become_orphaned_pgrp() - would pgrp be orphaned (POSIX 2.2.2.52)
 * if ignored_task were discounted?  Returns 0 when some live member
 * still has a parent outside the pgrp but in the same session; 1 otherwise.
 * NOTE(review): fragmentary -- the for_each_task loop header is missing.
 */
109 static int will_become_orphaned_pgrp(int pgrp, struct task_struct * ignored_task)
111 struct task_struct *p;
113 read_lock(&tasklist_lock);
/* Skip the discounted task, non-members, zombies, and children of init. */
115 if ((p == ignored_task) || (p->pgrp != pgrp) ||
116 (p->state == TASK_ZOMBIE) ||
117 (p->p_pptr->pid == 1))
/* A parent outside the pgrp but inside the session keeps the pgrp
 * non-orphaned: report 0 after dropping the lock. */
119 if ((p->p_pptr->pgrp != pgrp) &&
120 (p->p_pptr->session == p->session)) {
121 read_unlock(&tasklist_lock);
125 read_unlock(&tasklist_lock);
126 return 1; /* (sighing) "Often!" */
/*
 * is_orphaned_pgrp() - public wrapper: is pgrp orphaned right now,
 * with no task discounted (ignored_task == NULL)?
 */
129 int is_orphaned_pgrp(int pgrp)
131 return will_become_orphaned_pgrp(pgrp, 0);
/*
 * has_stopped_jobs() - does any member of pgrp sit in TASK_STOPPED?
 * Used to decide whether an orphaned pgrp gets SIGHUP/SIGCONT.
 * NOTE(review): fragmentary -- the enclosing loop and returns are missing.
 */
134 static inline int has_stopped_jobs(int pgrp)
137 struct task_struct * p;
139 read_lock(&tasklist_lock);
/* Only stopped members count; others are skipped. */
143 if (p->state != TASK_STOPPED)
148 read_unlock(&tasklist_lock);
153 * When we die, we re-parent all our children.
154 * Try to give them to another thread in our process
155 * group, and if no such member exists, give it to
156 * the global child reaper process (ie "init")
/*
 * forget_original_parent() - reparent all of father's children, per the
 * comment above: first to another thread in the thread group, else to
 * the global child_reaper (init).
 * NOTE(review): fragmentary -- the child-list loop and the reaper
 * self-check branch are partially missing.
 */
158 static inline void forget_original_parent(struct task_struct * father)
160 struct task_struct * p, *reaper;
162 read_lock(&tasklist_lock);
164 /* Next in our thread group */
165 reaper = next_thread(father);
/* Sole thread in the group: fall back to init as the reaper. */
166 if (reaper == father)
167 reaper = child_reaper;
170 if (p->p_opptr == father) {
171 /* We dont want people slaying init */
/* Reparented children report with plain SIGCHLD from now on. */
172 p->exit_signal = SIGCHLD;
175 /* Make sure we're not reparenting to ourselves */
177 p->p_opptr = child_reaper;
/* Honour a requested parent-death signal, if the child set one. */
181 if (p->pdeath_signal) send_sig(p->pdeath_signal, p, 0);
184 read_unlock(&tasklist_lock);
/*
 * close_files() - close every open file descriptor in a files_struct.
 * NOTE(review): fragmentary -- the i/j/set bit-scanning loop scaffolding
 * is missing from this extract.
 */
187 static inline void close_files(struct files_struct * files)
/* Stop once past either the fdset bound or the fd array bound. */
195 if (i >= files->max_fdset || i >= files->max_fds)
/* Fetch the next word of the open-fd bitmap. */
197 set = files->open_fds->fds_bits[j++];
/* Atomically take ownership of the slot so the fd is closed exactly once. */
200 struct file * file = xchg(&files->fd[i], NULL);
202 filp_close(file, files);
/*
 * put_files_struct() - drop a reference to a files_struct; on the last
 * reference, free any expanded fd / fdset arrays and the struct itself.
 * NOTE(review): fragmentary -- a close_files() call presumably precedes
 * the frees; confirm against the full source.
 */
210 void put_files_struct(struct files_struct *files)
212 if (atomic_dec_and_test(&files->count)) {
215 * Free the fd and fdset arrays if we expanded them.
/* fd array was grown beyond the inline fd_array: free it separately. */
217 if (files->fd != &files->fd_array[0])
218 free_fd_array(files->fd, files->max_fds);
/* fdsets were grown beyond the inline __FD_SETSIZE bitmaps. */
219 if (files->max_fdset > __FD_SETSIZE) {
220 free_fdset(files->open_fds, files->max_fdset);
221 free_fdset(files->close_on_exec, files->max_fdset);
223 kmem_cache_free(files_cachep, files);
/*
 * __exit_files() - detach tsk from its files_struct and drop the
 * reference.  exit_files() below is the public wrapper.
 * NOTE(review): fragmentary -- the tsk->files = NULL handoff between
 * these lines is missing from this extract.
 */
227 static inline void __exit_files(struct task_struct *tsk)
229 struct files_struct * files = tsk->files;
235 put_files_struct(files);
239 void exit_files(struct task_struct *tsk)
/*
 * __put_fs_struct() / put_fs_struct() - drop a reference to an
 * fs_struct; on the last reference release the mount/dentry refs
 * (altrootmnt visible here) and free the slab object.
 * __exit_fs() / exit_fs() detach a task from its fs_struct.
 * NOTE(review): fragmentary -- dput/mntput calls for root/pwd and the
 * tsk->fs = NULL handoff are missing from this extract.
 */
244 static inline void __put_fs_struct(struct fs_struct *fs)
246 /* No need to hold fs->lock if we are killing it */
247 if (atomic_dec_and_test(&fs->count)) {
254 mntput(fs->altrootmnt);
256 kmem_cache_free(fs_cachep, fs);
260 void put_fs_struct(struct fs_struct *fs)
265 static inline void __exit_fs(struct task_struct *tsk)
267 struct fs_struct * fs = tsk->fs;
277 void exit_fs(struct task_struct *tsk)
283 * We can use these to temporarily drop into
284 * "lazy TLB" mode and back.
/*
 * start_lazy_tlb() - pin the current mm (mm_count ref) and enter lazy
 * TLB mode; presumably returns the pinned mm for end_lazy_tlb().
 * NOTE(review): fragmentary -- the current->mm = NULL switch and the
 * return statement are missing from this extract.
 */
286 struct mm_struct * start_lazy_tlb(void)
288 struct mm_struct *mm = current->mm;
290 /* active_mm is still 'mm' */
291 atomic_inc(&mm->mm_count);
292 enter_lazy_tlb(mm, current, smp_processor_id());
/*
 * end_lazy_tlb() - leave lazy TLB mode, restoring mm; if another
 * active_mm was in place, switch back to mm via activate_mm().
 */
296 void end_lazy_tlb(struct mm_struct *mm)
298 struct mm_struct *active_mm = current->active_mm;
301 if (mm != active_mm) {
302 current->active_mm = mm;
303 activate_mm(active_mm, mm);
309 * Turn us into a lazy TLB process if we
/*
 * __exit_mm() - detach the exiting task from its user mm: keep a
 * kernel-side mm_count reference, sanity-check active_mm, and drop
 * into lazy TLB mode.  exit_mm() is the public wrapper.
 * NOTE(review): fragmentary -- the tsk->mm = NULL store and mmput()
 * are missing from this extract.
 */
312 static inline void __exit_mm(struct task_struct * tsk)
314 struct mm_struct * mm = tsk->mm;
318 atomic_inc(&mm->mm_count);
/* At exit the task must be running on its own mm. */
319 if (mm != tsk->active_mm) BUG();
320 /* more a memory barrier than a real lock */
324 enter_lazy_tlb(mm, current, smp_processor_id());
329 void exit_mm(struct task_struct *tsk)
335 * Send signals to all our closest relatives so that they know
336 * to properly mourn us..
/*
 * exit_notify() - reparent our children, notify our parent, mark
 * ourselves TASK_ZOMBIE, and deliver the POSIX orphaned-pgrp signals.
 * NOTE(review): fragmentary extract -- assignments of t, loop headers
 * and several closing braces are missing; comments cover only what is
 * visible.
 */
338 static void exit_notify(void)
340 struct task_struct * p, *t;
342 forget_original_parent(current);
344 * Check to see if any process groups have become orphaned
345 * as a result of our exiting, and if they have any stopped
346 * jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
348 * Case i: Our father is in a different pgrp than we are
349 * and we were the only connection outside, so our pgrp
350 * is about to become orphaned.
/* t is presumably our parent here (assignment not in this extract). */
355 if ((t->pgrp != current->pgrp) &&
356 (t->session == current->session) &&
357 will_become_orphaned_pgrp(current->pgrp, current) &&
358 has_stopped_jobs(current->pgrp)) {
359 kill_pg(current->pgrp,SIGHUP,1);
360 kill_pg(current->pgrp,SIGCONT,1);
363 /* Let father know we died
365 * Thread signals are configurable, but you aren't going to use
366 * that to send signals to arbitary processes.
367 * That stops right now.
369 * If the parent exec id doesn't match the exec id we saved
370 * when we started then we know the parent has changed security
373 * If our self_exec id doesn't match our parent_exec_id then
374 * we have changed execution domain as these two values started
375 * the same after a fork.
/* Exec-id mismatch without CAP_KILL: downgrade to plain SIGCHLD. */
379 if(current->exit_signal != SIGCHLD &&
380 ( current->parent_exec_id != t->self_exec_id ||
381 current->self_exec_id != current->parent_exec_id)
382 && !capable(CAP_KILL))
383 current->exit_signal = SIGCHLD;
387 * This loop does two things:
389 * A. Make init inherit all the child processes
390 * B. Check to see if any process groups have become orphaned
391 * as a result of our exiting, and if they have any stopped
392 * jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
395 write_lock_irq(&tasklist_lock);
/* Become a zombie and notify the parent before handing off children. */
396 current->state = TASK_ZOMBIE;
397 do_notify_parent(current, current->exit_signal);
/* Detach each remaining child and splice it under its new parent. */
398 while (current->p_cptr != NULL) {
400 current->p_cptr = p->p_osptr;
404 p->p_pptr = p->p_opptr;
405 p->p_osptr = p->p_pptr->p_cptr;
407 p->p_osptr->p_ysptr = p;
408 p->p_pptr->p_cptr = p;
/* A child that already died must re-notify its new parent. */
409 if (p->state == TASK_ZOMBIE)
410 do_notify_parent(p, p->exit_signal);
412 * process group orphan check
413 * Case ii: Our child is in a different pgrp
414 * than we are, and it was the only connection
415 * outside, so the child pgrp is now orphaned.
417 if ((p->pgrp != current->pgrp) &&
418 (p->session == current->session)) {
/* Drop the lock around the signalling, then retake it for the loop. */
421 write_unlock_irq(&tasklist_lock);
422 if (is_orphaned_pgrp(pgrp) && has_stopped_jobs(pgrp)) {
423 kill_pg(pgrp,SIGHUP,1);
424 kill_pg(pgrp,SIGCONT,1);
426 write_lock_irq(&tasklist_lock);
429 write_unlock_irq(&tasklist_lock);
/*
 * do_exit() - terminate the current task with the given exit code.
 * Never returns.  Panics on the contexts that must not exit
 * (interrupt handler, idle task, init).
 * NOTE(review): fragmentary extract -- the in_interrupt()/pid checks,
 * the __exit_* teardown calls, exit_notify() and schedule() are
 * missing between the visible lines.
 */
432 NORET_TYPE void do_exit(long code)
434 struct task_struct *tsk = current;
437 panic("Aiee, killing interrupt handler!");
439 panic("Attempted to kill the idle task!");
441 panic("Attempted to kill init!");
/* Mark exit in progress and stop the per-task interval timer. */
442 tsk->flags |= PF_EXITING;
443 del_timer_sync(&tsk->real_timer);
446 #ifdef CONFIG_BSD_PROCESS_ACCT
/* Detach from the controlling terminal. */
459 disassociate_ctty(1);
/* Drop the exec-domain and binfmt module references. */
461 put_exec_domain(tsk->exec_domain);
462 if (tsk->binfmt && tsk->binfmt->module)
463 __MOD_DEC_USE_COUNT(tsk->binfmt->module);
465 tsk->exit_code = code;
470 * In order to get rid of the "volatile function does return" message
471 * I did this little loop that confuses gcc to think do_exit really
472 * is volatile. In fact it's schedule() that is volatile in some
473 * circumstances: when current->state = ZOMBIE, schedule() never
476 * In fact the natural way to do all this is to have the label and the
477 * goto right after each other, but I put the fake_volatile label at
478 * the start of the function just in case something /really/ bad
479 * happens, and the schedule returns. This way we can try again. I'm
480 * not paranoid: it's just that everybody is out to get me.
/*
 * complete_and_exit() - signal a completion then exit (body not in
 * this extract).  sys_exit() is the exit(2) entry point: the low byte
 * of error_code is shifted into wait(2) status format for the parent.
 */
485 NORET_TYPE void complete_and_exit(struct completion *comp, long code)
493 asmlinkage long sys_exit(int error_code)
495 do_exit((error_code&0xff)<<8);
/*
 * sys_wait4() - wait4(2): wait for a child matching pid, optionally
 * fill *stat_addr with its status and *ru with its rusage.
 *
 * Fix (review): two lines were mojibake-corrupted -- "&current" had
 * been mangled into the "&curren;" HTML entity, leaving
 * "add_wait_queue(¤t->..." / "remove_wait_queue(¤t->...".
 * Restored to &current->wait_chldexit on both lines; all other lines
 * are untouched.  NOTE(review): this extract is fragmentary -- loop
 * headers, labels and returns are missing between the visible lines.
 */
498 asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru)
501 DECLARE_WAITQUEUE(wait, current);
502 struct task_struct *tsk;
/* Reject any option bit outside the supported set. */
504 if (options & ~(WNOHANG|WUNTRACED|__WNOTHREAD|__WCLONE|__WALL))
/* Park on our own wait_chldexit queue; children wake us on exit/stop. */
507 add_wait_queue(&current->wait_chldexit,&wait);
510 current->state = TASK_INTERRUPTIBLE;
511 read_lock(&tasklist_lock);
514 struct task_struct *p;
/* Walk this thread's child list. */
515 for (p = tsk->p_cptr ; p ; p = p->p_osptr) {
/* pid == 0 presumably: match only our own process group. */
520 if (p->pgrp != current->pgrp)
522 } else if (pid != -1) {
526 /* Wait for all children (clone and not) if __WALL is set;
527 * otherwise, wait for clone children *only* if __WCLONE is
528 * set; otherwise, wait for non-clone children *only*. (Note:
529 * A "clone" child here is one that reports to its parent
530 * using a signal other than SIGCHLD.) */
531 if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
532 && !(options & __WALL))
/* Stopped child: only reportable with WUNTRACED or under ptrace. */
539 if (!(options & WUNTRACED) && !(p->ptrace & PT_PTRACED))
541 read_unlock(&tasklist_lock);
542 retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
/* Stopped status: (exit_code << 8) | 0x7f, per wait(2) encoding. */
543 if (!retval && stat_addr)
544 retval = put_user((p->exit_code << 8) | 0x7f, stat_addr);
/* Zombie child: fold its cpu times into our c* totals, then reap. */
551 current->times.tms_cutime += p->times.tms_utime + p->times.tms_cutime;
552 current->times.tms_cstime += p->times.tms_stime + p->times.tms_cstime;
553 read_unlock(&tasklist_lock);
554 retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
555 if (!retval && stat_addr)
556 retval = put_user(p->exit_code, stat_addr);
/* Ptraced child with a different original parent: hand it back and
 * let the real parent do the final reap. */
560 if (p->p_opptr != p->p_pptr) {
561 write_lock_irq(&tasklist_lock);
563 p->p_pptr = p->p_opptr;
565 do_notify_parent(p, SIGCHLD);
566 write_unlock_irq(&tasklist_lock);
/* Without __WNOTHREAD, continue with the next thread in our group. */
574 if (options & __WNOTHREAD)
576 tsk = next_thread(tsk);
577 } while (tsk != current);
578 read_unlock(&tasklist_lock);
/* Nothing reapable yet: WNOHANG returns now, a pending signal gives
 * -ERESTARTSYS, otherwise we sleep and rescan. */
581 if (options & WNOHANG)
583 retval = -ERESTARTSYS;
584 if (signal_pending(current))
591 current->state = TASK_RUNNING;
592 remove_wait_queue(&current->wait_chldexit,&wait);
596 #if !defined(__alpha__) && !defined(__ia64__)
599 * sys_waitpid() remains for compatibility. waitpid() should be
600 * implemented by calling sys_wait4() from libc.a.
/* Legacy waitpid(2): wait4 with a NULL rusage pointer. */
602 asmlinkage long sys_waitpid(pid_t pid,unsigned int * stat_addr, int options)
604 return sys_wait4(pid, stat_addr, options, NULL);