/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * #!-checking implemented by tytso.
 *
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
 * current->executable is only used by the procfs. This allows a dispatch
 * table to check for several different types of binary formats. We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/swap.h>
#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/rmap.h>
#include <linux/acct.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>

#include <linux/kmod.h>
int core_uses_pid;
char core_pattern[65] = "core";
/* The maximal length of core_pattern is also specified in sysctl.c */
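/*
 * Illustrative usage (not part of this file): core_pattern is normally
 * set through the kernel.core_pattern sysctl, e.g. from a shell:
 *
 *	echo 'core.%e.%p' > /proc/sys/kernel/core_pattern
 *
 * format_corename() below expands the %-specifiers when a dump is written.
 */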
static struct linux_binfmt *formats;
static rwlock_t binfmt_lock = RW_LOCK_UNLOCKED;
int register_binfmt(struct linux_binfmt * fmt)
{
	struct linux_binfmt ** tmp = &formats;

	write_lock(&binfmt_lock);
	/* Walk the list; if fmt is already registered, drop the lock
	 * and fail with -EBUSY: */
	write_unlock(&binfmt_lock);
	/* Otherwise link fmt into the list, return 0: */
	write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(register_binfmt);
int unregister_binfmt(struct linux_binfmt * fmt)
{
	struct linux_binfmt ** tmp = &formats;

	write_lock(&binfmt_lock);
	/* Walk the list; unlink fmt and return 0 when it is found... */
	write_unlock(&binfmt_lock);
	/* ...or fail with -EINVAL if it was never registered: */
	write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(unregister_binfmt);
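/*
 * Sketch of the intended use (illustrative, not from this file; the
 * names mybin_format and load_mybin_binary are hypothetical): a format
 * module fills in a struct linux_binfmt and registers it at init time:
 *
 *	static struct linux_binfmt mybin_format = {
 *		.module      = THIS_MODULE,
 *		.load_binary = load_mybin_binary,
 *	};
 *
 *	static int __init mybin_init(void)
 *	{
 *		return register_binfmt(&mybin_format);
 *	}
 *
 *	static void __exit mybin_exit(void)
 *	{
 *		unregister_binfmt(&mybin_format);
 *	}
 *
 * search_binary_handler() below then walks the formats list and calls
 * each handler's load_binary() until one accepts the image.
 */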
static inline void put_binfmt(struct linux_binfmt * fmt)
{
	module_put(fmt->module);
}
/*
 * Note that a shared library must be both readable and executable due to
 * security reasons.
 *
 * Also note that we take the address to load from the file itself.
 */
asmlinkage long sys_uselib(const char __user * library)
{
	struct file * file;
	struct nameidata nd;
	int error;

	nd.intent.open.flags = FMODE_READ;
	error = __user_walk(library, LOOKUP_FOLLOW|LOOKUP_OPEN, &nd);
	if (error)
		goto out;

	error = -EINVAL;
	if (!S_ISREG(nd.dentry->d_inode->i_mode))
		goto exit;

	error = permission(nd.dentry->d_inode, MAY_READ | MAY_EXEC, &nd);
	if (error)
		goto exit;

	file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto out;

	error = -ENOEXEC;
	if (file->f_op) {
		struct linux_binfmt * fmt;

		read_lock(&binfmt_lock);
		for (fmt = formats ; fmt ; fmt = fmt->next) {
			if (!fmt->load_shlib)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			error = fmt->load_shlib(file);
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (error != -ENOEXEC)
				break;
		}
		read_unlock(&binfmt_lock);
	}
	fput(file);
out:
	return error;
exit:
	path_release(&nd);
	goto out;
}
/*
 * count() counts the number of strings in array ARGV.
 */
static int count(char __user * __user * argv, int max)
{
	int i = 0;

	if (argv != NULL) {
		for (;;) {
			char __user * p;

			if (get_user(p, argv))
				return -EFAULT;
			if (!p)
				break;
			argv++;
			if (++i > max)
				return -E2BIG;
		}
	}
	return i;
}
/*
 * 'copy_strings()' copies argument/environment strings from user
 * memory to free pages in kernel mem. These are in a format ready
 * to be put directly into the top of new user memory.
 */
int copy_strings(int argc, char __user * __user * argv, struct linux_binprm *bprm)
{
	struct page *kmapped_page = NULL;
	char *kaddr = NULL;
	int ret;

	while (argc-- > 0) {
		char __user *str;
		int len;
		unsigned long pos;

		if (get_user(str, argv+argc) ||
		    !(len = strnlen_user(str, bprm->p))) {
			ret = -EFAULT;
			goto out;
		}

		if (bprm->p < len) {
			ret = -E2BIG;
			goto out;
		}

		bprm->p -= len;
		/* XXX: add architecture specific overflow check here. */
		pos = bprm->p;

		while (len > 0) {
			int i, new, err;
			int offset, bytes_to_copy;
			struct page *page;

			offset = pos % PAGE_SIZE;
			i = pos / PAGE_SIZE;
			page = bprm->page[i];
			new = 0;
			if (!page) {
				page = alloc_page(GFP_HIGHUSER);
				bprm->page[i] = page;
				if (!page) {
					ret = -ENOMEM;
					goto out;
				}
				new = 1;
			}

			if (page != kmapped_page) {
				if (kmapped_page)
					kunmap(kmapped_page);
				kmapped_page = page;
				kaddr = kmap(kmapped_page);
			}
			if (new && offset)
				memset(kaddr, 0, offset);
			bytes_to_copy = PAGE_SIZE - offset;
			if (bytes_to_copy > len) {
				bytes_to_copy = len;
				if (new)
					memset(kaddr+offset+len, 0,
						PAGE_SIZE-offset-len);
			}
			err = copy_from_user(kaddr+offset, str, bytes_to_copy);
			if (err) {
				ret = -EFAULT;
				goto out;
			}

			pos += bytes_to_copy;
			str += bytes_to_copy;
			len -= bytes_to_copy;
		}
	}
	ret = 0;
out:
	if (kmapped_page)
		kunmap(kmapped_page);
	return ret;
}
/*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc, char ** argv, struct linux_binprm *bprm)
{
	int r;
	mm_segment_t oldfs = get_fs();

	set_fs(KERNEL_DS);
	r = copy_strings(argc, (char __user * __user *)argv, bprm);
	set_fs(oldfs);
	return r;
}

EXPORT_SYMBOL(copy_strings_kernel);
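/*
 * A rough sketch of the layout these helpers build (illustrative, not
 * normative): bprm->p starts at the top of the MAX_ARG_PAGES argument
 * pages and moves down as strings are copied in, so after do_execve()
 * has copied everything, the pages hold (ascending addresses):
 *
 *	[ argv strings ][ envp strings ][ filename ]
 *	^ final bprm->p                            ^ PAGE_SIZE*MAX_ARG_PAGES
 *	                                             - sizeof(void *)
 *
 * setup_arg_pages() later maps these pages just below stack_top in the
 * new mm.
 */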
#ifdef CONFIG_MMU
/*
 * This routine is used to map a page into an address space: needed by
 * execve() for the initial stack and environment pages.
 *
 * vma->vm_mm->mmap_sem is held for writing.
 */
void install_arg_page(struct vm_area_struct *vma,
			struct page *page, unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t * pgd;
	pud_t * pud;
	pmd_t * pmd;
	pte_t * pte;

	if (unlikely(anon_vma_prepare(vma)))
		goto out_sig;

	flush_dcache_page(page);
	pgd = pgd_offset(mm, address);

	spin_lock(&mm->page_table_lock);
	pud = pud_alloc(mm, pgd, address);
	if (!pud)
		goto out;
	pmd = pmd_alloc(mm, pud, address);
	if (!pmd)
		goto out;
	pte = pte_alloc_map(mm, pmd, address);
	if (!pte)
		goto out;
	if (!pte_none(*pte)) {
		pte_unmap(pte);
		goto out;
	}
	lru_cache_add_active(page);
	set_pte(pte, pte_mkdirty(pte_mkwrite(mk_pte(
					page, vma->vm_page_prot))));
	page_add_anon_rmap(page, vma, address);
	pte_unmap(pte);
	spin_unlock(&mm->page_table_lock);

	/* no need for flush_tlb */
	return;
out:
	spin_unlock(&mm->page_table_lock);
out_sig:
	__free_page(page);
	force_sig(SIGKILL, current);
}
#define EXTRA_STACK_VM_PAGES	20	/* random */
int setup_arg_pages(struct linux_binprm *bprm,
		    unsigned long stack_top,
		    int executable_stack)
{
	unsigned long stack_base;
	struct vm_area_struct *mpnt;
	struct mm_struct *mm = current->mm;
	int i, ret;
	long arg_size;
#ifdef CONFIG_STACK_GROWSUP
	/* Move the argument and environment strings to the bottom of the
	 * stack space.
	 */
	int offset, j;
	char *to, *from;

	/* Start by shifting all the pages down */
	i = 0;
	for (j = 0; j < MAX_ARG_PAGES; j++) {
		struct page *page = bprm->page[j];
		if (!page)
			continue;
		bprm->page[i++] = page;
	}

	/* Now move them within their pages */
	offset = bprm->p % PAGE_SIZE;
	to = kmap(bprm->page[0]);
	for (j = 1; j < i; j++) {
		memmove(to, to + offset, PAGE_SIZE - offset);
		from = kmap(bprm->page[j]);
		memcpy(to + PAGE_SIZE - offset, from, offset);
		kunmap(bprm->page[j - 1]);
		to = from;
	}
	memmove(to, to + offset, PAGE_SIZE - offset);
	kunmap(bprm->page[j - 1]);

	/* Limit stack size to 1GB */
	stack_base = current->signal->rlim[RLIMIT_STACK].rlim_max;
	if (stack_base > (1 << 30))
		stack_base = 1 << 30;
	stack_base = PAGE_ALIGN(stack_top - stack_base);

	/* Adjust bprm->p to point to the end of the strings. */
	bprm->p = stack_base + PAGE_SIZE * i - offset;

	mm->arg_start = stack_base;
	arg_size = i << PAGE_SHIFT;

	/* zero pages that were copied above */
	while (i < MAX_ARG_PAGES)
		bprm->page[i++] = NULL;
#else
	stack_base = stack_top - MAX_ARG_PAGES * PAGE_SIZE;
	bprm->p += stack_base;
	mm->arg_start = bprm->p;
	arg_size = stack_top - (PAGE_MASK & (unsigned long) mm->arg_start);
#endif

	arg_size += EXTRA_STACK_VM_PAGES * PAGE_SIZE;
	if (bprm->loader)
		bprm->loader += stack_base;
	bprm->exec += stack_base;

	mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!mpnt)
		return -ENOMEM;

	if (security_vm_enough_memory(arg_size >> PAGE_SHIFT)) {
		kmem_cache_free(vm_area_cachep, mpnt);
		return -ENOMEM;
	}

	memset(mpnt, 0, sizeof(*mpnt));
	down_write(&mm->mmap_sem);
	{
		mpnt->vm_mm = mm;
#ifdef CONFIG_STACK_GROWSUP
		mpnt->vm_start = stack_base;
		mpnt->vm_end = stack_base + arg_size;
#else
		mpnt->vm_end = stack_top;
		mpnt->vm_start = mpnt->vm_end - arg_size;
#endif
		/* Adjust stack execute permissions; explicitly enable
		 * for EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X
		 * and leave alone (arch default) otherwise. */
		if (unlikely(executable_stack == EXSTACK_ENABLE_X))
			mpnt->vm_flags = VM_STACK_FLAGS | VM_EXEC;
		else if (executable_stack == EXSTACK_DISABLE_X)
			mpnt->vm_flags = VM_STACK_FLAGS & ~VM_EXEC;
		else
			mpnt->vm_flags = VM_STACK_FLAGS;
		mpnt->vm_flags |= mm->def_flags;
		mpnt->vm_page_prot = protection_map[mpnt->vm_flags & 0x7];
		if ((ret = insert_vm_struct(mm, mpnt))) {
			up_write(&mm->mmap_sem);
			kmem_cache_free(vm_area_cachep, mpnt);
			return ret;
		}
		mm->stack_vm = mm->total_vm = vma_pages(mpnt);
	}
	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
		struct page *page = bprm->page[i];
		if (page) {
			bprm->page[i] = NULL;
			install_arg_page(mpnt, page, stack_base);
		}
		stack_base += PAGE_SIZE;
	}
	up_write(&mm->mmap_sem);

	return 0;
}
EXPORT_SYMBOL(setup_arg_pages);
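/*
 * Sketch of a typical caller (illustrative; this mirrors what a loader
 * such as binfmt_elf does, error handling omitted). The
 * executable_stack argument is one of EXSTACK_DEFAULT,
 * EXSTACK_ENABLE_X or EXSTACK_DISABLE_X, usually derived from the
 * binary's own markings:
 *
 *	retval = setup_arg_pages(bprm, STACK_TOP, executable_stack);
 */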
#define free_arg_pages(bprm) do { } while (0)

#else

static inline void free_arg_pages(struct linux_binprm *bprm)
{
	int i;

	for (i = 0; i < MAX_ARG_PAGES; i++) {
		if (bprm->page[i])
			__free_page(bprm->page[i]);
		bprm->page[i] = NULL;
	}
}

#endif /* CONFIG_MMU */
struct file *open_exec(const char *name)
{
	struct nameidata nd;
	int err;
	struct file *file;

	nd.intent.open.flags = FMODE_READ;
	err = path_lookup(name, LOOKUP_FOLLOW|LOOKUP_OPEN, &nd);
	file = ERR_PTR(err);

	if (!err) {
		struct inode *inode = nd.dentry->d_inode;
		file = ERR_PTR(-EACCES);
		if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
		    S_ISREG(inode->i_mode)) {
			int err = permission(inode, MAY_EXEC, &nd);
			if (!err && !(inode->i_mode & 0111))
				err = -EACCES;
			file = ERR_PTR(err);
			if (!err) {
				file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
				if (!IS_ERR(file)) {
					err = deny_write_access(file);
					if (err) {
						fput(file);
						file = ERR_PTR(err);
					}
				}
out:
				return file;
			}
		}
		path_release(&nd);
	}
	goto out;
}

EXPORT_SYMBOL(open_exec);
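/*
 * Illustrative call (this is how the /sbin/loader fallback below and
 * interpreter loaders like binfmt_script obtain their executables):
 *
 *	struct file *file = open_exec("/sbin/loader");
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *
 * The returned file has write access denied until allow_write_access()
 * and fput() release it again.
 */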
int kernel_read(struct file *file, unsigned long offset,
	char *addr, unsigned long count)
{
	mm_segment_t old_fs;
	loff_t pos = offset;
	int result;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	result = vfs_read(file, (void __user *)addr, count, &pos);
	set_fs(old_fs);
	return result;
}

EXPORT_SYMBOL(kernel_read);
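/*
 * Typical use (illustrative; struct mybin_phdr and phoff are
 * hypothetical): a format handler reads file data beyond the
 * BINPRM_BUF_SIZE header that prepare_binprm() already fetched into
 * bprm->buf:
 *
 *	struct mybin_phdr phdr;
 *	int rc = kernel_read(bprm->file, phoff, (char *)&phdr, sizeof(phdr));
 *	if (rc != sizeof(phdr))
 *		return rc < 0 ? rc : -EIO;
 */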
static int exec_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk;
	struct mm_struct * old_mm, *active_mm;

	/* Notify parent that we're no longer interested in the old VM */
	tsk = current;
	old_mm = current->mm;
	mm_release(tsk, old_mm);

	if (old_mm) {
		/*
		 * Make sure that if there is a core dump in progress
		 * for the old mm, we get out and die instead of going
		 * through with the exec. We must hold mmap_sem around
		 * checking core_waiters and changing tsk->mm. The
		 * core-inducing thread will increment core_waiters for
		 * each thread whose ->mm == old_mm.
		 */
		down_read(&old_mm->mmap_sem);
		if (unlikely(old_mm->core_waiters)) {
			up_read(&old_mm->mmap_sem);
			return -EINTR;
		}
	}

	task_lock(tsk);
	active_mm = tsk->active_mm;
	tsk->mm = mm;
	tsk->active_mm = mm;
	activate_mm(active_mm, mm);
	task_unlock(tsk);
	arch_pick_mmap_layout(mm);
	if (old_mm) {
		up_read(&old_mm->mmap_sem);
		BUG_ON(active_mm != old_mm);
		mmput(old_mm);
		return 0;
	}
	mmdrop(active_mm);
	return 0;
}
/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes. (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static inline int de_thread(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *newsighand, *oldsighand = tsk->sighand;
	spinlock_t *lock = &oldsighand->siglock;
	int count;

	/*
	 * If we don't share sighandlers, then we aren't sharing anything
	 * and we can just re-use it all.
	 */
	if (atomic_read(&oldsighand->count) <= 1) {
		BUG_ON(atomic_read(&sig->count) != 1);
		return 0;
	}

	newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	if (!newsighand)
		return -ENOMEM;

	if (thread_group_empty(current))
		goto no_thread_group;
	/*
	 * Kill all other threads in the thread group.
	 * We must hold tasklist_lock to call zap_other_threads.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irq(lock);
	if (sig->flags & SIGNAL_GROUP_EXIT) {
		/*
		 * Another group action in progress, just
		 * return so that the signal is processed.
		 */
		spin_unlock_irq(lock);
		read_unlock(&tasklist_lock);
		kmem_cache_free(sighand_cachep, newsighand);
		return -EAGAIN;
	}
	zap_other_threads(current);
	read_unlock(&tasklist_lock);

	/*
	 * Account for the thread group leader hanging around:
	 */
	count = 2;
	if (thread_group_leader(current))
		count = 1;
	while (atomic_read(&sig->count) > count) {
		sig->group_exit_task = current;
		sig->notify_count = count;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(lock);
		schedule();
		spin_lock_irq(lock);
	}
	sig->group_exit_task = NULL;
	sig->notify_count = 0;
	spin_unlock_irq(lock);
	/*
	 * At this point all other threads have exited, all we have to
	 * do is to wait for the thread group leader to become inactive,
	 * and to assume its PID:
	 */
	if (!thread_group_leader(current)) {
		struct task_struct *leader = current->group_leader, *parent;
		struct dentry *proc_dentry1, *proc_dentry2;
		unsigned long exit_state, ptrace;

		/*
		 * Wait for the thread group leader to be a zombie.
		 * It should already be zombie at this point, most
		 * of the time.
		 */
		while (leader->exit_state != EXIT_ZOMBIE)
			yield();

		spin_lock(&leader->proc_lock);
		spin_lock(&current->proc_lock);
		proc_dentry1 = proc_pid_unhash(current);
		proc_dentry2 = proc_pid_unhash(leader);
		write_lock_irq(&tasklist_lock);

		if (leader->tgid != current->tgid)
			BUG();
		if (current->pid == current->tgid)
			BUG();
		/*
		 * An exec() starts a new thread group with the
		 * TGID of the previous thread group. Rehash the
		 * two threads with a switched PID, and release
		 * the former thread group leader:
		 */
		ptrace = leader->ptrace;
		parent = leader->parent;
		if (unlikely(ptrace) && unlikely(parent == current)) {
			/*
			 * Joker was ptracing his own group leader,
			 * and now he wants to be his own parent!
			 * We can't have that.
			 */
			ptrace = 0;
		}

		ptrace_unlink(current);
		ptrace_unlink(leader);
		remove_parent(current);
		remove_parent(leader);

		switch_exec_pids(leader, current);

		current->parent = current->real_parent = leader->real_parent;
		leader->parent = leader->real_parent = child_reaper;
		current->group_leader = current;
		leader->group_leader = leader;

		add_parent(current, current->parent);
		add_parent(leader, leader->parent);
		if (ptrace) {
			current->ptrace = ptrace;
			__ptrace_link(current, parent);
		}

		list_del(&current->tasks);
		list_add_tail(&current->tasks, &init_task.tasks);
		current->exit_signal = SIGCHLD;
		exit_state = leader->exit_state;

		write_unlock_irq(&tasklist_lock);
		spin_unlock(&leader->proc_lock);
		spin_unlock(&current->proc_lock);
		proc_pid_flush(proc_dentry1);
		proc_pid_flush(proc_dentry2);

		if (exit_state != EXIT_ZOMBIE)
			BUG();
		release_task(leader);
	}
	/*
	 * Now there are really no other threads at all,
	 * so it's safe to stop telling them to kill themselves.
	 */
	sig->flags = 0;

no_thread_group:
	BUG_ON(atomic_read(&sig->count) != 1);
	exit_itimers(sig);

	if (atomic_read(&oldsighand->count) == 1) {
		/*
		 * Now that we nuked the rest of the thread group,
		 * it turns out we are not sharing sighand any more either.
		 * So we can just keep it.
		 */
		kmem_cache_free(sighand_cachep, newsighand);
	} else {
		/*
		 * Move our state over to newsighand and switch it in.
		 */
		spin_lock_init(&newsighand->siglock);
		atomic_set(&newsighand->count, 1);
		memcpy(newsighand->action, oldsighand->action,
		       sizeof(newsighand->action));

		write_lock_irq(&tasklist_lock);
		spin_lock(&oldsighand->siglock);
		spin_lock(&newsighand->siglock);

		current->sighand = newsighand;
		recalc_sigpending();

		spin_unlock(&newsighand->siglock);
		spin_unlock(&oldsighand->siglock);
		write_unlock_irq(&tasklist_lock);

		if (atomic_dec_and_test(&oldsighand->count))
			kmem_cache_free(sighand_cachep, oldsighand);
	}

	if (!thread_group_empty(current))
		BUG();
	if (!thread_group_leader(current))
		BUG();
	return 0;
}
/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started.
 */
static inline void flush_old_files(struct files_struct * files)
{
	long j = -1;

	spin_lock(&files->file_lock);
	for (;;) {
		unsigned long set, i;

		j++;
		i = j * __NFDBITS;
		if (i >= files->max_fds || i >= files->max_fdset)
			break;
		set = files->close_on_exec->fds_bits[j];
		if (!set)
			continue;
		files->close_on_exec->fds_bits[j] = 0;
		spin_unlock(&files->file_lock);
		for ( ; set ; i++, set >>= 1) {
			if (set & 1)
				sys_close(i);
		}
		spin_lock(&files->file_lock);
	}
	spin_unlock(&files->file_lock);
}
void get_task_comm(char *buf, struct task_struct *tsk)
{
	/* buf must be at least sizeof(tsk->comm) in size */
	task_lock(tsk);
	memcpy(buf, tsk->comm, sizeof(tsk->comm));
	task_unlock(tsk);
}

void set_task_comm(struct task_struct *tsk, char *buf)
{
	task_lock(tsk);
	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
	task_unlock(tsk);
}
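/*
 * Note that tsk->comm is a small fixed-size buffer (16 bytes including
 * the NUL terminator in this era), so strlcpy() silently truncates long
 * names: set_task_comm(current, "a-very-long-name") leaves comm as
 * "a-very-long-nam". flush_old_exec() below relies on this when it
 * derives comm from bprm->filename.
 */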
int flush_old_exec(struct linux_binprm * bprm)
{
	char * name;
	int i, ch, retval;
	struct files_struct *files;
	char tcomm[sizeof(current->comm)];

	/*
	 * Make sure we have a private signal table and that
	 * we are unassociated from the previous thread group.
	 */
	retval = de_thread(current);
	if (retval)
		goto out;

	/*
	 * Make sure we have private file handles. Ask the
	 * fork helper to do the work for us and the exit
	 * helper to do the cleanup of the old one.
	 */
	files = current->files;		/* refcounted so safe to hold */
	retval = unshare_files();
	if (retval)
		goto out;

	/*
	 * Release all of the old mmap stuff
	 */
	retval = exec_mmap(bprm->mm);
	if (retval)
		goto mmap_failed;

	bprm->mm = NULL;		/* We're using it now */

	/* This is the point of no return */
	put_files_struct(files);

	current->sas_ss_sp = current->sas_ss_size = 0;

	if (current->euid == current->uid && current->egid == current->gid)
		current->mm->dumpable = 1;
	name = bprm->filename;
	for (i = 0; (ch = *(name++)) != '\0';) {
		if (ch == '/')
			i = 0;
		else
			if (i < (sizeof(tcomm) - 1))
				tcomm[i++] = ch;
	}
	tcomm[i] = '\0';
	set_task_comm(current, tcomm);

	flush_thread();

	if (bprm->e_uid != current->euid || bprm->e_gid != current->egid ||
	    permission(bprm->file->f_dentry->d_inode, MAY_READ, NULL) ||
	    (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP))
		current->mm->dumpable = 0;

	/* An exec changes our domain. We are no longer part of the thread
	   group */

	current->self_exec_id++;

	flush_signal_handlers(current, 0);
	flush_old_files(current->files);

	return 0;

mmap_failed:
	put_files_struct(current->files);
	current->files = files;
out:
	return retval;
}

EXPORT_SYMBOL(flush_old_exec);
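/*
 * For orientation, a sketch of how a load_binary handler typically
 * drives the helpers in this file (loosely modelled on the ELF loader;
 * error handling and format-specific work omitted):
 *
 *	retval = flush_old_exec(bprm);        point of no return
 *	...set up personality and mm state...
 *	retval = setup_arg_pages(bprm, STACK_TOP, executable_stack);
 *	...mmap the binary's segments...
 *	compute_creds(bprm);
 *	start_thread(regs, entry_point, stack_pointer);
 */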
/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes.
 *
 * This may be called multiple times for binary chains
 * (scripts for example).
 */
int prepare_binprm(struct linux_binprm *bprm)
{
	int mode;
	struct inode * inode = bprm->file->f_dentry->d_inode;
	int retval;

	mode = inode->i_mode;
	/*
	 * Check execute perms again - if the caller has CAP_DAC_OVERRIDE,
	 * generic_permission lets a non-executable through
	 */
	if (!(mode & 0111))	/* with at least _one_ execute bit set */
		return -EACCES;
	if (bprm->file->f_op == NULL)
		return -EACCES;

	bprm->e_uid = current->euid;
	bprm->e_gid = current->egid;

	if (!(bprm->file->f_vfsmnt->mnt_flags & MNT_NOSUID)) {
		/* Set-uid? */
		if (mode & S_ISUID) {
			current->personality &= ~PER_CLEAR_ON_SETID;
			bprm->e_uid = inode->i_uid;
		}

		/* Set-gid? */
		/*
		 * If setgid is set but no group execute bit then this
		 * is a candidate for mandatory locking, not a setgid
		 * executable.
		 */
		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
			current->personality &= ~PER_CLEAR_ON_SETID;
			bprm->e_gid = inode->i_gid;
		}
	}

	/* fill in binprm security blob */
	retval = security_bprm_set(bprm);
	if (retval)
		return retval;

	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
	return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}

EXPORT_SYMBOL(prepare_binprm);
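/*
 * Worked example of the mode handling above (illustrative values): a
 * root-owned file with mode 04755 on a mount without MNT_NOSUID takes
 * the S_ISUID branch, so bprm->e_uid becomes 0 while bprm->e_gid stays
 * current->egid. Mode 02755 (setgid plus group-execute) switches e_gid
 * to inode->i_gid, whereas mode 02644 (setgid without group-execute) is
 * the mandatory-locking candidate and changes nothing.
 */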
static inline int unsafe_exec(struct task_struct *p)
{
	int unsafe = 0;

	if (p->ptrace & PT_PTRACED) {
		if (p->ptrace & PT_PTRACE_CAP)
			unsafe |= LSM_UNSAFE_PTRACE_CAP;
		else
			unsafe |= LSM_UNSAFE_PTRACE;
	}
	if (atomic_read(&p->fs->count) > 1 ||
	    atomic_read(&p->files->count) > 1 ||
	    atomic_read(&p->sighand->count) > 1)
		unsafe |= LSM_UNSAFE_SHARE;

	return unsafe;
}
void compute_creds(struct linux_binprm *bprm)
{
	int unsafe;

	if (bprm->e_uid != current->uid)
		suid_keys(current);
	exec_keys(current);

	task_lock(current);
	unsafe = unsafe_exec(current);
	security_bprm_apply_creds(bprm, unsafe);
	task_unlock(current);
	security_bprm_post_apply_creds(bprm);
}

EXPORT_SYMBOL(compute_creds);
void remove_arg_zero(struct linux_binprm *bprm)
{
	if (bprm->argc) {
		unsigned long offset;
		char * kaddr;
		struct page *page;

		offset = bprm->p % PAGE_SIZE;
		goto inside;

		while (bprm->p++, *(kaddr+offset++)) {
			if (offset != PAGE_SIZE)
				continue;
			offset = 0;
			kunmap_atomic(kaddr, KM_USER0);
inside:
			page = bprm->page[bprm->p/PAGE_SIZE];
			kaddr = kmap_atomic(page, KM_USER0);
		}
		kunmap_atomic(kaddr, KM_USER0);
		bprm->argc--;
	}
}

EXPORT_SYMBOL(remove_arg_zero);
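/*
 * Sketch of the intended use (this mirrors what an interpreter handler
 * such as binfmt_script does when it rewrites the argument list; the
 * exact sequence is illustrative):
 *
 *	remove_arg_zero(bprm);                    drop the old argv[0]
 *	retval = copy_strings_kernel(1, &bprm->interp, bprm);
 *	bprm->argc++;                             interpreter is new argv[0]
 */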
/*
 * cycle through the list of binary format handlers, until one recognizes
 * the image
 */
int search_binary_handler(struct linux_binprm *bprm, struct pt_regs *regs)
{
	int try, retval = 0;
	struct linux_binfmt *fmt;
#ifdef __alpha__
	/* handle /sbin/loader.. */
	{
		struct exec * eh = (struct exec *) bprm->buf;

		if (!bprm->loader && eh->fh.f_magic == 0x183 &&
		    (eh->fh.f_flags & 0x3000) == 0x3000) {
			struct file * file;
			unsigned long loader;

			allow_write_access(bprm->file);
			fput(bprm->file);
			bprm->file = NULL;

			loader = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);

			file = open_exec("/sbin/loader");
			retval = PTR_ERR(file);
			if (IS_ERR(file))
				return retval;

			/* Remember if the application is TASO. */
			bprm->sh_bang = eh->ah.entry < 0x100000000UL;

			bprm->file = file;
			bprm->loader = loader;
			retval = prepare_binprm(bprm);
			if (retval < 0)
				return retval;
			/* should call search_binary_handler recursively here,
			   but it does not matter */
		}
	}
#endif
	retval = security_bprm_check(bprm);
	if (retval)
		return retval;

	/* kernel module loader fixup */
	/* so we don't try to run modprobe in kernel space. */
	set_fs(USER_DS);
	for (try = 0; try < 2; try++) {
		read_lock(&binfmt_lock);
		for (fmt = formats ; fmt ; fmt = fmt->next) {
			int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
			if (!fn)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			retval = fn(bprm, regs);
			if (retval >= 0) {
				put_binfmt(fmt);
				allow_write_access(bprm->file);
				if (bprm->file)
					fput(bprm->file);
				bprm->file = NULL;
				current->did_exec = 1;
				return retval;
			}
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (retval != -ENOEXEC || bprm->mm == NULL)
				break;
			if (!bprm->file) {
				read_unlock(&binfmt_lock);
				return retval;
			}
		}
		read_unlock(&binfmt_lock);
		if (retval != -ENOEXEC || bprm->mm == NULL) {
			break;
#ifdef CONFIG_KMOD
		} else {
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
			if (printable(bprm->buf[0]) &&
			    printable(bprm->buf[1]) &&
			    printable(bprm->buf[2]) &&
			    printable(bprm->buf[3]))
				break; /* -ENOEXEC */
			request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
#endif
		}
	}
	return retval;
}
EXPORT_SYMBOL(search_binary_handler);
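/*
 * Example of the CONFIG_KMOD fallback above (illustrative bytes): if an
 * unrecognized image has buf[2] == 0x01 and buf[3] == 0x02, the
 * little-endian short at &buf[2] is 0x0201 and the kernel requests a
 * module named "binfmt-0201"; a format module can make itself
 * autoloadable by declaring MODULE_ALIAS("binfmt-0201").
 */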
/*
 * sys_execve() executes a new program.
 */
int do_execve(char * filename,
	char __user *__user *argv,
	char __user *__user *envp,
	struct pt_regs * regs)
{
	struct linux_binprm *bprm;
	struct file *file;
	int retval;
	int i;

	retval = -ENOMEM;
	bprm = kmalloc(sizeof(*bprm), GFP_KERNEL);
	if (!bprm)
		goto out_ret;
	memset(bprm, 0, sizeof(*bprm));

	file = open_exec(filename);
	retval = PTR_ERR(file);
	if (IS_ERR(file))
		goto out_kfree;

	sched_exec();

	bprm->p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);

	bprm->file = file;
	bprm->filename = filename;
	bprm->interp = filename;
	bprm->mm = mm_alloc();
	retval = -ENOMEM;
	if (!bprm->mm)
		goto out_file;

	retval = init_new_context(current, bprm->mm);
	if (retval < 0)
		goto out_mm;

	bprm->argc = count(argv, bprm->p / sizeof(void *));
	if ((retval = bprm->argc) < 0)
		goto out_mm;

	bprm->envc = count(envp, bprm->p / sizeof(void *));
	if ((retval = bprm->envc) < 0)
		goto out_mm;

	retval = security_bprm_alloc(bprm);
	if (retval)
		goto out;

	retval = prepare_binprm(bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings_kernel(1, &bprm->filename, bprm);
	if (retval < 0)
		goto out;

	bprm->exec = bprm->p;
	retval = copy_strings(bprm->envc, envp, bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings(bprm->argc, argv, bprm);
	if (retval < 0)
		goto out;

	retval = search_binary_handler(bprm, regs);
	if (retval >= 0) {
		free_arg_pages(bprm);

		/* execve success */
		security_bprm_free(bprm);
		acct_update_integrals();
		update_mem_hiwater();
		kfree(bprm);
		return retval;
	}

out:
	/* Something went wrong, return the inode and free the argument pages */
	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
		struct page * page = bprm->page[i];
		if (page)
			__free_page(page);
	}

	if (bprm->security)
		security_bprm_free(bprm);

out_mm:
	if (bprm->mm)
		mmdrop(bprm->mm);

out_file:
	if (bprm->file) {
		allow_write_access(bprm->file);
		fput(bprm->file);
	}

out_kfree:
	kfree(bprm);

out_ret:
	return retval;
}
int set_binfmt(struct linux_binfmt *new)
{
	struct linux_binfmt *old = current->binfmt;

	if (new) {
		if (!try_module_get(new->module))
			return -1;
	}
	current->binfmt = new;
	if (old)
		module_put(old->module);
	return 0;
}

EXPORT_SYMBOL(set_binfmt);
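/*
 * Typical call site (illustrative, reusing the hypothetical
 * mybin_format from the registration sketch near the top of this file):
 * a load_binary handler commits to its format before tearing down the
 * old image; a nonzero return means the module could not be pinned:
 *
 *	retval = set_binfmt(&mybin_format);
 *	if (retval)
 *		return retval;
 */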
#define CORENAME_MAX_SIZE 64
/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
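/*
 * For example (hypothetical values): with pattern "core.%e.%p.%s", a
 * SIGSEGV dump of a process named "myprog" with tgid 4242 produces
 * "core.myprog.4242.11".
 */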
static void format_corename(char *corename, const char *pattern, long signr)
{
	const char *pat_ptr = pattern;
	char *out_ptr = corename;
	char *const out_end = corename + CORENAME_MAX_SIZE;
	int rc;
	int pid_in_pattern = 0;

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			if (out_ptr == out_end)
				goto out;
			*out_ptr++ = *pat_ptr++;
		} else {
			switch (*++pat_ptr) {
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				if (out_ptr == out_end)
					goto out;
				*out_ptr++ = '%';
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->tgid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* uid */
			case 'u':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->uid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* gid */
			case 'g':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->gid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* signal that caused the coredump */
			case 's':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%ld", signr);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* UNIX time of coredump */
			case 't': {
				struct timeval tv;
				do_gettimeofday(&tv);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%lu", tv.tv_sec);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", system_utsname.nodename);
				up_read(&uts_sem);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* executable */
			case 'e':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", current->comm);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			default:
				break;
			}
			++pat_ptr;
		}
	}
	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename */
	if (!pid_in_pattern
	    && (core_uses_pid || atomic_read(&current->mm->mm_users) != 1)) {
		rc = snprintf(out_ptr, out_end - out_ptr,
			      ".%d", current->tgid);
		if (rc > out_end - out_ptr)
			goto out;
		out_ptr += rc;
	}
out:
	*out_ptr = 0;
}
static void zap_threads (struct mm_struct *mm)
{
	struct task_struct *g, *p;
	struct task_struct *tsk = current;
	struct completion *vfork_done = tsk->vfork_done;
	int traced = 0;

	/*
	 * Make sure nobody is waiting for us to release the VM,
	 * otherwise we can deadlock when we wait on each other
	 */
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}

	read_lock(&tasklist_lock);
	do_each_thread(g,p)
		if (mm == p->mm && p != tsk) {
			force_sig_specific(SIGKILL, p);
			mm->core_waiters++;
			if (unlikely(p->ptrace) &&
			    unlikely(p->parent->mm == mm))
				traced = 1;
		}
	while_each_thread(g,p);
	read_unlock(&tasklist_lock);

	if (unlikely(traced)) {
		/*
		 * We are zapping a thread and the thread it ptraces.
		 * If the tracee went into a ptrace stop for exit tracing,
		 * we could deadlock since the tracer is waiting for this
		 * coredump to finish. Detach them so they can both die.
		 */
		write_lock_irq(&tasklist_lock);
		do_each_thread(g,p) {
			if (mm == p->mm && p != tsk &&
			    p->ptrace && p->parent->mm == mm) {
				__ptrace_unlink(p);
			}
		} while_each_thread(g,p);
		write_unlock_irq(&tasklist_lock);
	}
}
static void coredump_wait(struct mm_struct *mm)
{
	DECLARE_COMPLETION(startup_done);

	mm->core_waiters++; /* let other threads block */
	mm->core_startup_done = &startup_done;

	/* give other threads a chance to run: */
	yield();

	zap_threads(mm);
	if (--mm->core_waiters) {
		up_write(&mm->mmap_sem);
		wait_for_completion(&startup_done);
	} else
		up_write(&mm->mmap_sem);
	BUG_ON(mm->core_waiters);
}
int do_coredump(long signr, int exit_code, struct pt_regs * regs)
{
	char corename[CORENAME_MAX_SIZE + 1];
	struct mm_struct *mm = current->mm;
	struct linux_binfmt * binfmt;
	struct inode * inode;
	struct file * file;
	int retval = 0;

	binfmt = current->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
	down_write(&mm->mmap_sem);
	if (!mm->dumpable) {
		up_write(&mm->mmap_sem);
		goto fail;
	}
	mm->dumpable = 0;
	init_completion(&mm->core_done);
	spin_lock_irq(&current->sighand->siglock);
	current->signal->flags = SIGNAL_GROUP_EXIT;
	current->signal->group_exit_code = exit_code;
	spin_unlock_irq(&current->sighand->siglock);
	coredump_wait(mm);

	/*
	 * Clear any false indication of pending signals that might
	 * be seen by the filesystem code called to write the core file.
	 */
	current->signal->group_stop_count = 0;
	clear_thread_flag(TIF_SIGPENDING);

	if (current->signal->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
		goto fail_unlock;

	/*
	 * lock_kernel() because format_corename() is controlled by sysctl, which
	 * uses lock_kernel()
	 */
	lock_kernel();
	format_corename(corename, core_pattern, signr);
	unlock_kernel();
	file = filp_open(corename, O_CREAT | O_RDWR | O_NOFOLLOW | O_LARGEFILE, 0600);
	if (IS_ERR(file))
		goto fail_unlock;
	inode = file->f_dentry->d_inode;
	if (inode->i_nlink > 1)
		goto close_fail;	/* multiple links - don't dump */
	if (d_unhashed(file->f_dentry))
		goto close_fail;

	if (!S_ISREG(inode->i_mode))
		goto close_fail;
	if (!file->f_op)
		goto close_fail;
	if (!file->f_op->write)
		goto close_fail;
	if (do_truncate(file->f_dentry, 0) != 0)
		goto close_fail;

	retval = binfmt->core_dump(signr, regs, file);

	if (retval)
		current->signal->group_exit_code |= 0x80;
close_fail:
	filp_close(file, NULL);
fail_unlock:
	complete_all(&mm->core_done);
fail:
	return retval;
}