2 * sys_ia32.c: Conversion between 32bit and 64bit native syscalls. Derived from sys_sparc32.c.
4 * Copyright (C) 2000 VA Linux Co
5 * Copyright (C) 2000 Don Dugger <n0ano@valinux.com>
6 * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
7 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
9 * Copyright (C) 2000-2003 Hewlett-Packard Co
10 * David Mosberger-Tang <davidm@hpl.hp.com>
12 * These routines maintain argument size conversion between 32bit and 64bit
16 #include <linux/config.h>
17 #include <linux/kernel.h>
18 #include <linux/sysctl.h>
19 #include <linux/sched.h>
21 #include <linux/file.h>
22 #include <linux/signal.h>
23 #include <linux/resource.h>
24 #include <linux/times.h>
25 #include <linux/utsname.h>
26 #include <linux/timex.h>
27 #include <linux/smp.h>
28 #include <linux/smp_lock.h>
29 #include <linux/sem.h>
30 #include <linux/msg.h>
32 #include <linux/shm.h>
33 #include <linux/slab.h>
34 #include <linux/uio.h>
35 #include <linux/nfs_fs.h>
36 #include <linux/smb_fs.h>
37 #include <linux/smb_mount.h>
38 #include <linux/ncp_fs.h>
39 #include <linux/quota.h>
40 #include <linux/sunrpc/svc.h>
41 #include <linux/nfsd/nfsd.h>
42 #include <linux/nfsd/cache.h>
43 #include <linux/nfsd/xdr.h>
44 #include <linux/nfsd/syscall.h>
45 #include <linux/poll.h>
46 #include <linux/eventpoll.h>
47 #include <linux/personality.h>
48 #include <linux/ptrace.h>
49 #include <linux/stat.h>
50 #include <linux/ipc.h>
51 #include <linux/compat.h>
52 #include <linux/vfs.h>
54 #include <asm/intrinsics.h>
55 #include <asm/semaphore.h>
56 #include <asm/types.h>
57 #include <asm/uaccess.h>
/*
 * NOTE(review): this listing is a fragment — many original source lines are
 * missing and each surviving line carries its old listing number. Code left
 * byte-identical; comments below describe only what the visible text shows.
 */
/* Debug printk wrapper (debug-enabled variant of the DBG macro). */
67 # define DBG(fmt...) printk(KERN_DEBUG fmt)
/* Widen a 32-bit user address/value to an unsigned long. */
72 #define A(__x) ((unsigned long)(__x))
73 #define AA(__x) ((unsigned long)(__x))
/* Round x up to a multiple of a (assumes a is a power of two). */
74 #define ROUND_UP(x,a) ((__typeof__(x))(((unsigned long)(x) + ((a) - 1)) & ~((a) - 1)))
/* Byte offset of the d_name field within a dirent structure. */
75 #define NAME_OFFSET(de) ((int) ((de)->d_name - (char *) (de)))
/* Offset within an IA-32 4KB page; start of the enclosing native page. */
77 #define OFFSET4K(a) ((a) & 0xfff)
78 #define PAGE_START(addr) ((addr) & PAGE_MASK)
79 #define MINSIGSTKSZ_IA32 2048
/* Clamp 32-bit uids/gids into the legacy 16-bit range (65534 = overflow id). */
81 #define high2lowuid(uid) ((uid) > 65535 ? 65534 : (uid))
82 #define high2lowgid(gid) ((gid) > 65535 ? 65534 : (gid))
/* Native 64-bit syscalls and helpers that the compat wrappers delegate to. */
84 extern asmlinkage long sys_execve (char *, char **, char **, struct pt_regs *);
85 extern asmlinkage long sys_mprotect (unsigned long, size_t, unsigned long);
86 extern asmlinkage long sys_munmap (unsigned long, size_t);
87 extern unsigned long arch_get_unmapped_area (struct file *, unsigned long, unsigned long,
88 unsigned long, unsigned long);
90 /* forward declaration: */
91 asmlinkage long sys32_mprotect (unsigned int, unsigned int, int);
92 asmlinkage unsigned long sys_brk(unsigned long);
95 * Anything that modifies or inspects ia32 user virtual memory must hold this semaphore
98 /* XXX make per-mm: */
99 static DECLARE_MUTEX(ia32_mmap_sem);
/*
 * nargs(): walk a 32-bit user-space pointer array (argv/envp), widening each
 * entry and optionally storing it via 'ap'. Fragment — the loop header,
 * termination test and return value are missing from this listing.
 */
102 nargs (unsigned int arg, char **ap)
/* Fetch the next 32-bit user pointer from the array at 'arg'. */
112 err = get_user(addr, (unsigned int *)A(arg));
116 *ap++ = (char *) A(addr);
117 arg += sizeof(unsigned int);
/*
 * sys32_execve(): IA-32 execve wrapper. Counts argv/envp entries with nargs(),
 * builds a widened 64-bit pointer vector 'av', saves the IA-32 per-thread
 * state (map_base, task_size, TSSD kernel register), resets it to the 64-bit
 * defaults for the exec, and restores everything if sys_execve() fails.
 * Fragment — error checks and the epilogue are missing from this listing.
 */
124 sys32_execve (char *filename, unsigned int argv, unsigned int envp,
125 struct pt_regs *regs)
127 unsigned long old_map_base, old_task_size, tssd;
/* Count argument and environment entries (nargs with NULL only counts). */
132 na = nargs(argv, NULL);
135 ne = nargs(envp, NULL);
/* na args + ne env entries + 2 NULL terminators. */
138 len = (na + ne + 2) * sizeof(*av);
139 av = kmalloc(len, GFP_KERNEL);
/* Save state to restore should the exec fail. */
154 old_map_base = current->thread.map_base;
155 old_task_size = current->thread.task_size;
156 tssd = ia64_get_kr(IA64_KR_TSSD);
158 /* we may be exec'ing a 64-bit process: reset map base, task-size, and io-base: */
159 current->thread.map_base = DEFAULT_MAP_BASE;
160 current->thread.task_size = DEFAULT_TASK_SIZE;
161 ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob);
162 ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1);
165 r = sys_execve(filename, av, ae, regs);
167 /* oops, execve failed, switch back to old values... */
168 ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
169 ia64_set_kr(IA64_KR_TSSD, tssd);
170 current->thread.map_base = old_map_base;
171 current->thread.task_size = old_task_size;
172 set_fs(USER_DS); /* establish new task-size as the address-limit */
/*
 * cp_compat_stat(): copy a kernel 'struct kstat' into a user-space 32-bit
 * 'struct compat_stat'. Rejects files too large for non-LFS callers and
 * device numbers that do not fit the old 16-bit encoding (error value on the
 * missing line — presumably -EOVERFLOW, TODO confirm). uid/gid are clamped
 * to 16 bits via high2lowuid/high2lowgid. Fragment — braces, the overflow
 * return and the final 'return err' are missing from this listing.
 */
179 int cp_compat_stat(struct kstat *stat, struct compat_stat *ubuf)
183 if ((u64) stat->size > MAX_NON_LFS ||
184 !old_valid_dev(stat->dev) ||
185 !old_valid_dev(stat->rdev))
/* Zero the whole user struct first so padding/unwritten fields are clean. */
188 if (clear_user(ubuf, sizeof(*ubuf)))
191 err = __put_user(old_encode_dev(stat->dev), &ubuf->st_dev);
192 err |= __put_user(stat->ino, &ubuf->st_ino);
193 err |= __put_user(stat->mode, &ubuf->st_mode);
194 err |= __put_user(stat->nlink, &ubuf->st_nlink);
195 err |= __put_user(high2lowuid(stat->uid), &ubuf->st_uid);
196 err |= __put_user(high2lowgid(stat->gid), &ubuf->st_gid);
197 err |= __put_user(old_encode_dev(stat->rdev), &ubuf->st_rdev);
198 err |= __put_user(stat->size, &ubuf->st_size);
199 err |= __put_user(stat->atime.tv_sec, &ubuf->st_atime);
200 err |= __put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec);
201 err |= __put_user(stat->mtime.tv_sec, &ubuf->st_mtime);
202 err |= __put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec);
203 err |= __put_user(stat->ctime.tv_sec, &ubuf->st_ctime);
204 err |= __put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec);
205 err |= __put_user(stat->blksize, &ubuf->st_blksize);
206 err |= __put_user(stat->blocks, &ubuf->st_blocks);
/* Sub-page emulation helpers — only needed when the native page size exceeds
 * the IA-32 4KB page size. */
210 #if PAGE_SHIFT > IA32_PAGE_SHIFT
/*
 * get_page_prot(): derive PROT_* bits from the vma covering 'addr'; returns a
 * default (missing line) when no vma covers the address. Fragment — the
 * prot accumulation statements between the flag tests are missing.
 */
214 get_page_prot (struct vm_area_struct *vma, unsigned long addr)
218 if (!vma || vma->vm_start > addr)
221 if (vma->vm_flags & VM_READ)
223 if (vma->vm_flags & VM_WRITE)
225 if (vma->vm_flags & VM_EXEC)
231 * Map a subpage by creating an anonymous page that contains the union of the old page and
/*
 * mmap_subpage(): handle an IA-32 mapping that covers only part of a native
 * page: preserve the untouched parts of the old page, remap the whole native
 * page anonymously, then restore head/tail contents and (for file maps) read
 * in the file data. Fragment — error paths and several statements missing.
 */
235 mmap_subpage (struct file *file, unsigned long start, unsigned long end, int prot, int flags,
240 unsigned long ret = 0;
241 struct vm_area_struct *vma = find_vma(current->mm, start);
242 int old_prot = get_page_prot(vma, start);
244 DBG("mmap_subpage(file=%p,start=0x%lx,end=0x%lx,prot=%x,flags=%x,off=0x%llx)\n",
245 file, start, end, prot, flags, off);
248 /* Optimize the case where the old mmap and the new mmap are both anonymous */
249 if ((old_prot & PROT_WRITE) && (flags & MAP_ANONYMOUS) && !vma->vm_file) {
250 if (clear_user((void *) start, end - start)) {
/* Snapshot the old native page so untouched sub-ranges can be restored. */
257 page = (void *) get_zeroed_page(GFP_KERNEL);
262 copy_from_user(page, (void *) PAGE_START(start), PAGE_SIZE);
264 down_write(&current->mm->mmap_sem);
266 ret = do_mmap(0, PAGE_START(start), PAGE_SIZE, prot | PROT_WRITE,
267 flags | MAP_FIXED | MAP_ANONYMOUS, 0);
269 up_write(&current->mm->mmap_sem);
271 if (IS_ERR((void *) ret))
275 /* copy back the old page contents. */
276 if (offset_in_page(start))
277 copy_to_user((void *) PAGE_START(start), page, offset_in_page(start));
278 if (offset_in_page(end))
279 copy_to_user((void *) end, page + offset_in_page(end),
280 PAGE_SIZE - offset_in_page(end));
283 if (!(flags & MAP_ANONYMOUS)) {
284 /* read the file contents */
285 inode = file->f_dentry->d_inode;
286 if (!inode->i_fop || !file->f_op->read
287 || ((*file->f_op->read)(file, (char *) start, end - start, &off) < 0))
/* Drop the temporary PROT_WRITE again if the caller did not ask for it;
 * keep the union with the old protection (sub-page rule). */
295 if (!(prot & PROT_WRITE))
296 ret = sys_mprotect(PAGE_START(start), PAGE_SIZE, prot | old_prot);
299 free_page((unsigned long) page);
/*
 * emulate_mmap(): emulate an IA-32 4KB-granular mmap on a larger native page
 * size. Partial head/tail pages go through mmap_subpage(); the congruent
 * middle is mapped directly, otherwise an anonymous mapping is filled by
 * reading the file. MAP_SHARED cannot be honored for incongruent/partial
 * cases — only warned about. Fragment — several error paths and the final
 * return are missing from this listing.
 */
304 emulate_mmap (struct file *file, unsigned long start, unsigned long len, int prot, int flags,
307 unsigned long tmp, end, pend, pstart, ret, is_congruent, fudge = 0;
312 pstart = PAGE_START(start);
313 pend = PAGE_ALIGN(end);
315 if (flags & MAP_FIXED) {
/* Head: requested start is inside a native page. */
316 if (start > pstart) {
317 if (flags & MAP_SHARED)
319 "%s(%d): emulate_mmap() can't share head (addr=0x%lx)\n",
320 current->comm, current->pid, start);
321 ret = mmap_subpage(file, start, min(PAGE_ALIGN(start), end), prot, flags,
323 if (IS_ERR((void *) ret))
327 return start; /* done */
/* Tail: requested end is inside a native page. */
330 if (flags & MAP_SHARED)
332 "%s(%d): emulate_mmap() can't share tail (end=0x%lx)\n",
333 current->comm, current->pid, end);
334 ret = mmap_subpage(file, max(start, PAGE_START(end)), end, prot, flags,
335 (off + len) - offset_in_page(end));
336 if (IS_ERR((void *) ret))
340 return start; /* done */
344 * If a start address was specified, use it if the entire rounded out area
347 if (start && !pstart)
348 fudge = 1; /* handle case of mapping to range (0,PAGE_SIZE) */
349 tmp = arch_get_unmapped_area(file, pstart - fudge, pend - pstart, 0, flags);
352 start = pstart + offset_in_page(off); /* make start congruent with off */
354 pend = PAGE_ALIGN(end);
358 poff = off + (pstart - start); /* note: (pstart - start) may be negative */
/* Congruent: file offset and address share the same sub-page alignment, so
 * the file can be mapped directly. */
359 is_congruent = (flags & MAP_ANONYMOUS) || (offset_in_page(poff) == 0);
361 if ((flags & MAP_SHARED) && !is_congruent)
362 printk(KERN_INFO "%s(%d): emulate_mmap() can't share contents of incongruent mmap "
363 "(addr=0x%lx,off=0x%llx)\n", current->comm, current->pid, start, off);
365 DBG("mmap_body: mapping [0x%lx-0x%lx) %s with poff 0x%llx\n", pstart, pend,
366 is_congruent ? "congruent" : "not congruent", poff);
368 down_write(&current->mm->mmap_sem);
370 if (!(flags & MAP_ANONYMOUS) && is_congruent)
371 ret = do_mmap(file, pstart, pend - pstart, prot, flags | MAP_FIXED, poff);
373 ret = do_mmap(0, pstart, pend - pstart,
374 prot | ((flags & MAP_ANONYMOUS) ? 0 : PROT_WRITE),
375 flags | MAP_FIXED | MAP_ANONYMOUS, 0);
377 up_write(&current->mm->mmap_sem);
379 if (IS_ERR((void *) ret))
383 /* read the file contents */
384 inode = file->f_dentry->d_inode;
385 if (!inode->i_fop || !file->f_op->read
386 || ((*file->f_op->read)(file, (char *) pstart, pend - pstart, &poff) < 0))
388 sys_munmap(pstart, pend - pstart);
/* Undo the temporary PROT_WRITE used to fill the anonymous mapping. */
391 if (!(prot & PROT_WRITE) && sys_mprotect(pstart, pend - pstart, prot) < 0)
397 #endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */
/*
 * get_prot32(): widen IA-32 protection semantics to their effective x86
 * behavior (x86 page tables cannot express R/W/X independently).
 * Fragment — the 'return prot;' line is missing from this listing.
 */
399 static inline unsigned int
400 get_prot32 (unsigned int prot)
402 if (prot & PROT_WRITE)
403 /* on x86, PROT_WRITE implies PROT_READ which implies PROT_EXEC */
404 prot |= PROT_READ | PROT_WRITE | PROT_EXEC;
405 else if (prot & (PROT_READ | PROT_EXEC))
406 /* on x86, there is no distinction between PROT_READ and PROT_EXEC */
407 prot |= (PROT_READ | PROT_EXEC);
/*
 * ia32_do_mmap(): common mmap path for the IA-32 wrappers — validates
 * arguments against the 32-bit address space, widens the protection bits,
 * then either emulates (native page > 4KB, under ia32_mmap_sem) or calls
 * do_mmap() directly. Fragment — error returns and the final return are
 * missing from this listing.
 */
413 ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot, int flags,
416 DBG("ia32_do_mmap(file=%p,addr=0x%lx,len=0x%lx,prot=%x,flags=%x,offset=0x%llx)\n",
417 file, addr, len, prot, flags, offset);
419 if (file && (!file->f_op || !file->f_op->mmap))
422 len = IA32_PAGE_ALIGN(len);
/* Mapping must fit below the IA-32 user/kernel split. */
426 if (len > IA32_PAGE_OFFSET || addr > IA32_PAGE_OFFSET - len)
428 if (flags & MAP_FIXED)
/* Offset must be 4KB aligned in IA-32 terms. */
434 if (OFFSET4K(offset))
437 prot = get_prot32(prot);
439 #if PAGE_SHIFT > IA32_PAGE_SHIFT
440 down(&ia32_mmap_sem);
442 addr = emulate_mmap(file, addr, len, prot, flags, offset);
446 down_write(&current->mm->mmap_sem);
448 addr = do_mmap(file, addr, len, prot, flags, offset);
450 up_write(&current->mm->mmap_sem);
452 DBG("ia32_do_mmap: returning 0x%lx\n", addr);
457 * Linux/i386 didn't use to be able to handle more than 4 system call parameters, so these
458 * system calls used a memory block for parameter passing..
/* Argument block layout for the old single-pointer mmap() ABI (fields on
 * missing lines: addr, len, prot, flags, fd, offset — TODO confirm). */
461 struct mmap_arg_struct {
/*
 * sys32_mmap(): old-style mmap taking the argument block from user space.
 * Fragment — fget/fput handling and the return are missing from this listing.
 */
471 sys32_mmap (struct mmap_arg_struct *arg)
473 struct mmap_arg_struct a;
474 struct file *file = NULL;
478 if (copy_from_user(&a, arg, sizeof(a)))
481 if (OFFSET4K(a.offset))
/* These flags are not honored by the compat path. */
486 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
487 if (!(flags & MAP_ANONYMOUS)) {
493 addr = ia32_do_mmap(file, a.addr, a.len, a.prot, flags, a.offset);
/*
 * sys32_mmap2(): mmap with a page-unit offset (pgoff is in IA-32 4KB pages).
 * Fragment — fget/fput handling and the return are missing from this listing.
 */
501 sys32_mmap2 (unsigned int addr, unsigned int len, unsigned int prot, unsigned int flags,
502 unsigned int fd, unsigned int pgoff)
504 struct file *file = NULL;
505 unsigned long retval;
507 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
508 if (!(flags & MAP_ANONYMOUS)) {
/* Convert 4KB page offset to a byte offset. */
514 retval = ia32_do_mmap(file, addr, len, prot, flags,
515 (unsigned long) pgoff << IA32_PAGE_SHIFT);
/*
 * sys32_munmap(): IA-32 munmap. With native 4KB pages it passes straight
 * through; otherwise it shrinks the range inward to full native pages (the
 * partial head/tail sub-pages are left mapped) and unmaps under
 * ia32_mmap_sem. Fragment — early-exit checks and 'up()' are missing.
 */
523 sys32_munmap (unsigned int start, unsigned int len)
525 unsigned int end = start + len;
528 #if PAGE_SHIFT <= IA32_PAGE_SHIFT
529 ret = sys_munmap(start, end - start);
/* Round inward: only whole native pages can be unmapped. */
534 start = PAGE_ALIGN(start);
535 end = PAGE_START(end);
540 down(&ia32_mmap_sem);
542 ret = sys_munmap(start, end - start);
549 #if PAGE_SHIFT > IA32_PAGE_SHIFT
552 * When mprotect()ing a partial page, we set the permission to the union of the old
553 * settings and the new settings. In other words, it's only possible to make access to a
554 * partial page less restrictive.
/* mprotect_subpage(): apply 'new_prot' to the native page at 'address',
 * unioned with the page's existing protection (see comment above). */
557 mprotect_subpage (unsigned long address, int new_prot)
560 struct vm_area_struct *vma;
562 if (new_prot == PROT_NONE)
563 return 0; /* optimize case where nothing changes... */
564 vma = find_vma(current->mm, address);
565 old_prot = get_page_prot(vma, address);
566 return sys_mprotect(address, PAGE_SIZE, new_prot | old_prot);
569 #endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */
/*
 * sys32_mprotect(): IA-32 mprotect. Direct pass-through when native pages
 * are 4KB; otherwise partial head/tail native pages go through
 * mprotect_subpage() (union semantics) and the full-page middle through
 * sys_mprotect(). Fragment — alignment checks, 'up()' and the return are
 * missing from this listing.
 */
572 sys32_mprotect (unsigned int start, unsigned int len, int prot)
574 unsigned long end = start + len;
575 #if PAGE_SHIFT > IA32_PAGE_SHIFT
579 prot = get_prot32(prot);
581 #if PAGE_SHIFT <= IA32_PAGE_SHIFT
582 return sys_mprotect(start, end - start, prot);
587 end = IA32_PAGE_ALIGN(end);
591 down(&ia32_mmap_sem);
593 if (offset_in_page(start)) {
594 /* start address is 4KB aligned but not page aligned. */
595 retval = mprotect_subpage(PAGE_START(start), prot);
599 start = PAGE_ALIGN(start);
601 goto out; /* retval is already zero... */
604 if (offset_in_page(end)) {
605 /* end address is 4KB aligned but not page aligned. */
606 retval = mprotect_subpage(PAGE_START(end), prot);
610 end = PAGE_START(end);
612 retval = sys_mprotect(start, end - start, prot);
/* NOTE(review): fragment of a pipe wrapper (signature missing from this
 * listing) — creates the pipe and copies both fds to user space. */
626 retval = do_pipe(fds);
629 if (copy_to_user(fd, fds, sizeof(fds)))
/* get_tv32(): read a 32-bit user timeval into a kernel timeval; returns
 * non-zero on fault. Fragment — braces missing from this listing. */
636 get_tv32 (struct timeval *o, struct compat_timeval *i)
638 return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
639 (__get_user(o->tv_sec, &i->tv_sec) | __get_user(o->tv_usec, &i->tv_usec)));
/* put_tv32(): write a kernel timeval to a 32-bit user timeval; returns
 * non-zero on fault. */
643 put_tv32 (struct compat_timeval *o, struct timeval *i)
645 return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
646 (__put_user(i->tv_sec, &o->tv_sec) | __put_user(i->tv_usec, &o->tv_usec)));
/*
 * sys32_alarm(): implement alarm(2) on top of do_setitimer(ITIMER_REAL),
 * returning the number of seconds remaining on the previous alarm, rounded
 * up. Fragment — the increment and return are missing from this listing.
 */
649 asmlinkage unsigned long
650 sys32_alarm (unsigned int seconds)
652 struct itimerval it_new, it_old;
653 unsigned int oldalarm;
655 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
656 it_new.it_value.tv_sec = seconds;
657 it_new.it_value.tv_usec = 0;
658 do_setitimer(ITIMER_REAL, &it_new, &it_old);
659 oldalarm = it_old.it_value.tv_sec;
660 /* ehhh.. We can't return 0 if we have an alarm pending.. */
661 /* And we'd better return too much than too little anyway */
662 if (it_old.it_value.tv_usec)
667 /* Translations due to time_t size differences. Which affects all
668 sorts of things, like timeval and itimerval. */
670 extern struct timezone sys_tz;
/* sys32_gettimeofday(): return current time/timezone through 32-bit
 * structures; either pointer may be NULL (checks on missing lines). */
673 sys32_gettimeofday (struct compat_timeval *tv, struct timezone *tz)
677 do_gettimeofday(&ktv);
678 if (put_tv32(tv, &ktv))
682 if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
/* sys32_settimeofday(): widen a 32-bit timeval to a timespec and delegate to
 * do_sys_settimeofday(); NULL user pointers pass NULL through. */
689 sys32_settimeofday (struct compat_timeval *tv, struct timezone *tz)
696 if (get_tv32(&ktv, tv))
698 kts.tv_sec = ktv.tv_sec;
699 kts.tv_nsec = ktv.tv_usec * 1000;
702 if (copy_from_user(&ktz, tz, sizeof(ktz)))
706 return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
/* Cursor state for the getdents32 filldir callback (count/error fields on
 * missing lines). */
709 struct getdents32_callback {
710 struct compat_dirent * current_dir;
711 struct compat_dirent * previous;
/* Cursor state for the one-entry readdir32 callback. */
716 struct readdir32_callback {
717 struct old_linux32_dirent * dirent;
/*
 * filldir32(): vfs_readdir callback that packs one entry into the user's
 * compat_dirent buffer; the previous entry's d_off is back-patched once this
 * entry's offset is known. Returns via buf->error on overflow/fault.
 * Fragment — braces and 'return 0' are missing from this listing.
 */
722 filldir32 (void *__buf, const char *name, int namlen, loff_t offset, ino_t ino,
725 struct compat_dirent * dirent;
726 struct getdents32_callback * buf = (struct getdents32_callback *) __buf;
/* Record length: header + name + NUL, rounded to 4 bytes. */
727 int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 1, 4);
729 buf->error = -EINVAL; /* only used if we fail.. */
730 if (reclen > buf->count)
732 buf->error = -EFAULT; /* only used if we fail.. */
/* Back-patch the previous entry's d_off now that we know this offset. */
733 dirent = buf->previous;
735 if (put_user(offset, &dirent->d_off))
737 dirent = buf->current_dir;
738 buf->previous = dirent;
739 if (put_user(ino, &dirent->d_ino)
740 || put_user(reclen, &dirent->d_reclen)
741 || copy_to_user(dirent->d_name, name, namlen)
742 || put_user(0, dirent->d_name + namlen))
744 dirent = (struct compat_dirent *) ((char *) dirent + reclen);
745 buf->current_dir = dirent;
746 buf->count -= reclen;
/*
 * sys32_getdents(): getdents(2) for IA-32 — iterates the directory via
 * vfs_readdir()/filldir32 and returns the number of bytes written (or the
 * callback's error). Fragment — fget/fput and error checks are missing.
 */
751 sys32_getdents (unsigned int fd, struct compat_dirent *dirent, unsigned int count)
754 struct compat_dirent * lastdirent;
755 struct getdents32_callback buf;
763 buf.current_dir = dirent;
768 error = vfs_readdir(file, filldir32, &buf);
/* Finalize the last entry's d_off with the final file position. */
772 lastdirent = buf.previous;
775 if (put_user(file->f_pos, &lastdirent->d_off))
777 error = count - buf.count;
/*
 * fillonedir32(): callback for the legacy one-entry readdir(2) — copies a
 * single old_linux32_dirent to user space. Fragment — braces and returns
 * are missing from this listing.
 */
787 fillonedir32 (void * __buf, const char * name, int namlen, loff_t offset, ino_t ino,
790 struct readdir32_callback * buf = (struct readdir32_callback *) __buf;
791 struct old_linux32_dirent * dirent;
796 dirent = buf->dirent;
797 if (put_user(ino, &dirent->d_ino)
798 || put_user(offset, &dirent->d_offset)
799 || put_user(namlen, &dirent->d_namlen)
800 || copy_to_user(dirent->d_name, name, namlen)
801 || put_user(0, dirent->d_name + namlen))
/* sys32_readdir(): legacy readdir(2) — returns at most one entry. */
807 sys32_readdir (unsigned int fd, void *dirent, unsigned int count)
811 struct readdir32_callback buf;
821 error = vfs_readdir(file, fillonedir32, &buf);
830 * We can actually return ERESTARTSYS instead of EINTR, but I'd
831 * like to be certain this leads to no problems. So I return
832 * EINTR just for safety.
834 * Update: ERESTARTSYS breaks at least the xview clock binary, so
835 * I'm trying ERESTARTNOHAND which restart only when you want to.
/* Largest timeout (in seconds) representable as jiffies without overflow. */
837 #define MAX_SELECT_SECONDS \
838 ((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)
/* Divide, rounding up. */
839 #define ROUND_UP_TIME(x,y) (((x)+(y)-1)/(y))
/*
 * sys32_select(): select(2) with a 32-bit timeval. Converts the timeout to
 * jiffies, marshals the six fd_set bitmaps into one kmalloc'd buffer, runs
 * do_select(), writes back the remaining timeout (unless STICKY_TIMEOUTS)
 * and the result sets. Fragment — several error paths, the kfree and the
 * return are missing from this listing.
 */
842 sys32_select (int n, fd_set *inp, fd_set *outp, fd_set *exp, struct compat_timeval *tvp32)
849 timeout = MAX_SCHEDULE_TIMEOUT;
854 if (get_user(sec, &tvp32->tv_sec) || get_user(usec, &tvp32->tv_usec))
858 if (sec < 0 || usec < 0)
861 if ((unsigned long) sec < MAX_SELECT_SECONDS) {
/* Convert (sec,usec) to jiffies, rounding the usec part up. */
862 timeout = ROUND_UP_TIME(usec, 1000000/HZ);
863 timeout += sec * (unsigned long) HZ;
871 if (n > current->files->max_fdset)
872 n = current->files->max_fdset;
875 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
876 * since we used fdset we need to allocate memory in units of
881 bits = kmalloc(6 * size, GFP_KERNEL);
884 fds.in = (unsigned long *) bits;
885 fds.out = (unsigned long *) (bits + size);
886 fds.ex = (unsigned long *) (bits + 2*size);
887 fds.res_in = (unsigned long *) (bits + 3*size);
888 fds.res_out = (unsigned long *) (bits + 4*size);
889 fds.res_ex = (unsigned long *) (bits + 5*size);
891 if ((ret = get_fd_set(n, inp, fds.in)) ||
892 (ret = get_fd_set(n, outp, fds.out)) ||
893 (ret = get_fd_set(n, exp, fds.ex)))
895 zero_fd_set(n, fds.res_in);
896 zero_fd_set(n, fds.res_out);
897 zero_fd_set(n, fds.res_ex);
899 ret = do_select(n, &fds, &timeout);
/* Report the unslept time back to the caller, BSD-style. */
901 if (tvp32 && !(current->personality & STICKY_TIMEOUTS)) {
902 time_t sec = 0, usec = 0;
906 usec *= (1000000/HZ);
908 if (put_user(sec, &tvp32->tv_sec) || put_user(usec, &tvp32->tv_usec)) {
917 ret = -ERESTARTNOHAND;
918 if (signal_pending(current))
923 set_fd_set(n, inp, fds.res_in);
924 set_fd_set(n, outp, fds.res_out);
925 set_fd_set(n, exp, fds.res_ex);
/* Argument block for the old single-pointer select() ABI (fields on missing
 * lines: n, inp, outp, exp, tvp — TODO confirm). */
933 struct sel_arg_struct {
/* sys32_old_select(): unpack the argument block and forward to
 * sys32_select(). */
942 sys32_old_select (struct sel_arg_struct *arg)
944 struct sel_arg_struct a;
946 if (copy_from_user(&a, arg, sizeof(a)))
948 return sys32_select(a.n, (fd_set *) A(a.inp), (fd_set *) A(a.outp), (fd_set *) A(a.exp),
949 (struct compat_timeval *) A(a.tvp));
952 asmlinkage ssize_t sys_readv (unsigned long,const struct iovec *,unsigned long);
953 asmlinkage ssize_t sys_writev (unsigned long,const struct iovec *,unsigned long);
/*
 * get_compat_iovec(): convert a user array of 32-bit iovecs into kernel
 * 64-bit iovecs; uses the caller's stack buffer for <= UIO_FASTIOV entries,
 * kmalloc otherwise. 'type' is the access direction verified per buffer.
 * Fragment — error returns, loop increments and the final return are
 * missing from this listing.
 */
955 static struct iovec *
956 get_compat_iovec (struct compat_iovec *iov32, struct iovec *iov_buf, u32 count, int type)
959 struct iovec *ivp, *iov;
961 /* Get the "struct iovec" from user memory */
965 if (verify_area(VERIFY_READ, iov32, sizeof(struct compat_iovec)*count))
967 if (count > UIO_MAXIOV)
969 if (count > UIO_FASTIOV) {
970 iov = kmalloc(count*sizeof(struct iovec), GFP_KERNEL);
977 for (i = 0; i < count; i++) {
978 if (__get_user(len, &iov32->iov_len) || __get_user(buf, &iov32->iov_base)) {
/* Each user buffer must be accessible in the requested direction. */
983 if (verify_area(type, (void *)A(buf), len)) {
986 return((struct iovec *)0);
988 ivp->iov_base = (void *)A(buf);
989 ivp->iov_len = (__kernel_size_t) len;
/* sys32_readv(): convert the compat iovec array and delegate to sys_readv();
 * VERIFY_WRITE because read fills the user buffers. Fragment — set_fs
 * bracketing and returns are missing from this listing. */
997 sys32_readv (int fd, struct compat_iovec *vector, u32 count)
999 struct iovec iovstack[UIO_FASTIOV];
1002 mm_segment_t old_fs = get_fs();
1004 iov = get_compat_iovec(vector, iovstack, count, VERIFY_WRITE);
1008 ret = sys_readv(fd, iov, count);
/* Only free if get_compat_iovec() fell back to kmalloc. */
1010 if (iov != iovstack)
/* sys32_writev(): same shape as sys32_readv(), VERIFY_READ direction. */
1016 sys32_writev (int fd, struct compat_iovec *vector, u32 count)
1018 struct iovec iovstack[UIO_FASTIOV];
1021 mm_segment_t old_fs = get_fs();
1023 iov = get_compat_iovec(vector, iovstack, count, VERIFY_READ);
1027 ret = sys_writev(fd, iov, count);
1029 if (iov != iovstack)
1035 * sys32_ipc() is the de-multiplexer for the SysV IPC calls in 32bit emulation..
1037 * This is really horribly ugly.
/* 32-bit message buffer: s32 mtype instead of long. */
1040 struct msgbuf32 { s32 mtype; char mtext[1]; };
/* 32-bit layouts of the SysV IPC permission/descriptor structures. These
 * mirror the IA-32 ABI exactly; many field lines are missing from this
 * listing, so the visible members are only a subset. */
1052 struct ipc64_perm32 {
1056 compat_uid32_t cuid;
1057 compat_gid32_t cgid;
1059 unsigned short __pad1;
1061 unsigned short __pad2;
1062 unsigned int unused1;
1063 unsigned int unused2;
1067 struct ipc_perm32 sem_perm; /* permissions .. see ipc.h */
1068 compat_time_t sem_otime; /* last semop time */
1069 compat_time_t sem_ctime; /* last change time */
1070 u32 sem_base; /* ptr to first semaphore in array */
1071 u32 sem_pending; /* pending operations to be processed */
1072 u32 sem_pending_last; /* last pending operation */
1073 u32 undo; /* undo requests on this array */
1074 unsigned short sem_nsems; /* no. of semaphores in array */
1077 struct semid64_ds32 {
1078 struct ipc64_perm32 sem_perm;
1079 compat_time_t sem_otime;
1080 unsigned int __unused1;
1081 compat_time_t sem_ctime;
1082 unsigned int __unused2;
1083 unsigned int sem_nsems;
1084 unsigned int __unused3;
1085 unsigned int __unused4;
/* Old (IPC_OLD) 32-bit msqid_ds layout. */
1089 struct ipc_perm32 msg_perm;
1092 compat_time_t msg_stime;
1093 compat_time_t msg_rtime;
1094 compat_time_t msg_ctime;
1097 unsigned short msg_cbytes;
1098 unsigned short msg_qnum;
1099 unsigned short msg_qbytes;
1100 compat_ipc_pid_t msg_lspid;
1101 compat_ipc_pid_t msg_lrpid;
1104 struct msqid64_ds32 {
1105 struct ipc64_perm32 msg_perm;
1106 compat_time_t msg_stime;
1107 unsigned int __unused1;
1108 compat_time_t msg_rtime;
1109 unsigned int __unused2;
1110 compat_time_t msg_ctime;
1111 unsigned int __unused3;
1112 unsigned int msg_cbytes;
1113 unsigned int msg_qnum;
1114 unsigned int msg_qbytes;
1115 compat_pid_t msg_lspid;
1116 compat_pid_t msg_lrpid;
1117 unsigned int __unused4;
1118 unsigned int __unused5;
/* Old (IPC_OLD) 32-bit shmid_ds layout. */
1122 struct ipc_perm32 shm_perm;
1124 compat_time_t shm_atime;
1125 compat_time_t shm_dtime;
1126 compat_time_t shm_ctime;
1127 compat_ipc_pid_t shm_cpid;
1128 compat_ipc_pid_t shm_lpid;
1129 unsigned short shm_nattch;
1132 struct shmid64_ds32 {
1133 struct ipc64_perm32 shm_perm;
1134 compat_size_t shm_segsz;
1135 compat_time_t shm_atime;
1136 unsigned int __unused1;
1137 compat_time_t shm_dtime;
1138 unsigned int __unused2;
1139 compat_time_t shm_ctime;
1140 unsigned int __unused3;
1141 compat_pid_t shm_cpid;
1142 compat_pid_t shm_lpid;
1143 unsigned int shm_nattch;
1144 unsigned int __unused4;
1145 unsigned int __unused5;
1148 struct shminfo64_32 {
1149 unsigned int shmmax;
1150 unsigned int shmmin;
1151 unsigned int shmmni;
1152 unsigned int shmseg;
1153 unsigned int shmall;
1154 unsigned int __unused1;
1155 unsigned int __unused2;
1156 unsigned int __unused3;
1157 unsigned int __unused4;
/* 32-bit shm_info fields (struct header on a missing line). */
1162 u32 shm_tot, shm_rss, shm_swp;
1163 u32 swap_attempts, swap_successes;
1174 #define SEMTIMEDOP 4
1184 #define IPCOP_MASK(__x) (1UL << (__x))
/* ipc_parse_version32(): strip the IPC_64 flag out of *cmd and report which
 * ABI version (IPC_64 vs IPC_OLD) the caller requested. Fragment — the
 * flag-stripping/return lines are missing from this listing. */
1187 ipc_parse_version32 (int *cmd)
1189 if (*cmd & IPC_64) {
/*
 * semctl32(): semctl(2) compat layer. Reads the 32-bit semun word, calls
 * sys_semctl() (stat variants under set_fs(KERNEL_DS) into a kernel
 * semid64_ds — the set_fs lines are missing from this listing), then copies
 * the result out in either the IPC_64 or IPC_OLD 32-bit layout.
 */
1198 semctl32 (int first, int second, int third, void *uptr)
1203 struct semid64_ds s;
1204 mm_segment_t old_fs;
1205 int version = ipc_parse_version32(&third);
/* 'pad' is the raw 32-bit semun: a value for SETVAL, a pointer otherwise. */
1209 if (get_user(pad, (u32 *)uptr))
1211 if (third == SETVAL)
1212 fourth.val = (int)pad;
1214 fourth.__pad = (void *)A(pad);
1231 err = sys_semctl(first, second, third, fourth);
1239 err = sys_semctl(first, second, third, fourth);
/* Copy the stat result out in the ABI layout the caller asked for. */
1242 if (version == IPC_64) {
1243 struct semid64_ds32 *usp64 = (struct semid64_ds32 *) A(pad);
1245 if (!access_ok(VERIFY_WRITE, usp64, sizeof(*usp64))) {
1249 err2 = __put_user(s.sem_perm.key, &usp64->sem_perm.key);
1250 err2 |= __put_user(s.sem_perm.uid, &usp64->sem_perm.uid);
1251 err2 |= __put_user(s.sem_perm.gid, &usp64->sem_perm.gid);
1252 err2 |= __put_user(s.sem_perm.cuid, &usp64->sem_perm.cuid);
1253 err2 |= __put_user(s.sem_perm.cgid, &usp64->sem_perm.cgid);
1254 err2 |= __put_user(s.sem_perm.mode, &usp64->sem_perm.mode);
1255 err2 |= __put_user(s.sem_perm.seq, &usp64->sem_perm.seq);
1256 err2 |= __put_user(s.sem_otime, &usp64->sem_otime);
1257 err2 |= __put_user(s.sem_ctime, &usp64->sem_ctime);
1258 err2 |= __put_user(s.sem_nsems, &usp64->sem_nsems);
1260 struct semid_ds32 *usp32 = (struct semid_ds32 *) A(pad);
1262 if (!access_ok(VERIFY_WRITE, usp32, sizeof(*usp32))) {
1266 err2 = __put_user(s.sem_perm.key, &usp32->sem_perm.key);
1267 err2 |= __put_user(s.sem_perm.uid, &usp32->sem_perm.uid);
1268 err2 |= __put_user(s.sem_perm.gid, &usp32->sem_perm.gid);
1269 err2 |= __put_user(s.sem_perm.cuid, &usp32->sem_perm.cuid);
1270 err2 |= __put_user(s.sem_perm.cgid, &usp32->sem_perm.cgid);
1271 err2 |= __put_user(s.sem_perm.mode, &usp32->sem_perm.mode);
1272 err2 |= __put_user(s.sem_perm.seq, &usp32->sem_perm.seq);
1273 err2 |= __put_user(s.sem_otime, &usp32->sem_otime);
1274 err2 |= __put_user(s.sem_ctime, &usp32->sem_ctime);
1275 err2 |= __put_user(s.sem_nsems, &usp32->sem_nsems);
/*
 * do_sys32_msgsnd(): widen the 32-bit msgbuf into a kernel-side copy and
 * call sys_msgsnd() (set_fs bracketing on missing lines). Fragment.
 */
1285 do_sys32_msgsnd (int first, int second, int third, void *uptr)
1287 struct msgbuf *p = kmalloc(second + sizeof(struct msgbuf), GFP_USER);
1288 struct msgbuf32 *up = (struct msgbuf32 *)uptr;
1289 mm_segment_t old_fs;
1294 err = get_user(p->mtype, &up->mtype);
1295 err |= copy_from_user(p->mtext, &up->mtext, second);
1300 err = sys_msgsnd(first, p, second, third);
/*
 * do_sys32_msgrcv(): receive into a kernel buffer, then narrow the result
 * back into the caller's 32-bit msgbuf. The non-IPC_64 ('version == 0')
 * path first unpacks the old ipc_kludge argument block. Fragment.
 */
1308 do_sys32_msgrcv (int first, int second, int msgtyp, int third, int version, void *uptr)
1310 struct msgbuf32 *up;
1312 mm_segment_t old_fs;
1316 struct ipc_kludge *uipck = (struct ipc_kludge *)uptr;
1317 struct ipc_kludge ipck;
1323 if (copy_from_user(&ipck, uipck, sizeof(struct ipc_kludge)))
1325 uptr = (void *)A(ipck.msgp);
1326 msgtyp = ipck.msgtyp;
1329 p = kmalloc(second + sizeof(struct msgbuf), GFP_USER);
1334 err = sys_msgrcv(first, p, second, msgtyp, third);
/* err holds the received byte count on success. */
1338 up = (struct msgbuf32 *)uptr;
1339 if (put_user(p->mtype, &up->mtype) || copy_to_user(&up->mtext, p->mtext, err))
/*
 * msgctl32(): msgctl(2) compat layer — IPC_SET reads settable fields from
 * the user's 32-bit layout into a kernel msqid64_ds; stat variants run
 * sys_msgctl() on the kernel copy (set_fs bracketing on missing lines) and
 * write the result back in the IPC_64 or IPC_OLD layout. Fragment — the
 * switch statement, set_fs calls and returns are missing from this listing.
 */
1348 msgctl32 (int first, int second, void *uptr)
1350 int err = -EINVAL, err2;
1351 struct msqid64_ds m64;
1352 struct msqid_ds32 *up32 = (struct msqid_ds32 *)uptr;
1353 struct msqid64_ds32 *up64 = (struct msqid64_ds32 *)uptr;
1354 mm_segment_t old_fs;
1355 int version = ipc_parse_version32(&second);
1361 err = sys_msgctl(first, second, (struct msqid_ds *)uptr);
/* IPC_SET: pull the four settable fields from whichever layout applies. */
1365 if (version == IPC_64) {
1366 err = get_user(m64.msg_perm.uid, &up64->msg_perm.uid);
1367 err |= get_user(m64.msg_perm.gid, &up64->msg_perm.gid);
1368 err |= get_user(m64.msg_perm.mode, &up64->msg_perm.mode);
1369 err |= get_user(m64.msg_qbytes, &up64->msg_qbytes);
1371 err = get_user(m64.msg_perm.uid, &up32->msg_perm.uid);
1372 err |= get_user(m64.msg_perm.gid, &up32->msg_perm.gid);
1373 err |= get_user(m64.msg_perm.mode, &up32->msg_perm.mode);
1374 err |= get_user(m64.msg_qbytes, &up32->msg_qbytes);
1380 err = sys_msgctl(first, second, (struct msqid_ds *)&m64);
1388 err = sys_msgctl(first, second, (struct msqid_ds *)&m64);
/* Stat: narrow the kernel msqid64_ds into the caller's layout. */
1391 if (version == IPC_64) {
1392 if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64))) {
1396 err2 = __put_user(m64.msg_perm.key, &up64->msg_perm.key);
1397 err2 |= __put_user(m64.msg_perm.uid, &up64->msg_perm.uid);
1398 err2 |= __put_user(m64.msg_perm.gid, &up64->msg_perm.gid);
1399 err2 |= __put_user(m64.msg_perm.cuid, &up64->msg_perm.cuid);
1400 err2 |= __put_user(m64.msg_perm.cgid, &up64->msg_perm.cgid);
1401 err2 |= __put_user(m64.msg_perm.mode, &up64->msg_perm.mode);
1402 err2 |= __put_user(m64.msg_perm.seq, &up64->msg_perm.seq);
1403 err2 |= __put_user(m64.msg_stime, &up64->msg_stime);
1404 err2 |= __put_user(m64.msg_rtime, &up64->msg_rtime);
1405 err2 |= __put_user(m64.msg_ctime, &up64->msg_ctime);
1406 err2 |= __put_user(m64.msg_cbytes, &up64->msg_cbytes);
1407 err2 |= __put_user(m64.msg_qnum, &up64->msg_qnum);
1408 err2 |= __put_user(m64.msg_qbytes, &up64->msg_qbytes);
1409 err2 |= __put_user(m64.msg_lspid, &up64->msg_lspid);
1410 err2 |= __put_user(m64.msg_lrpid, &up64->msg_lrpid);
1414 if (!access_ok(VERIFY_WRITE, up32, sizeof(*up32))) {
1418 err2 = __put_user(m64.msg_perm.key, &up32->msg_perm.key);
1419 err2 |= __put_user(m64.msg_perm.uid, &up32->msg_perm.uid);
1420 err2 |= __put_user(m64.msg_perm.gid, &up32->msg_perm.gid);
1421 err2 |= __put_user(m64.msg_perm.cuid, &up32->msg_perm.cuid);
1422 err2 |= __put_user(m64.msg_perm.cgid, &up32->msg_perm.cgid);
1423 err2 |= __put_user(m64.msg_perm.mode, &up32->msg_perm.mode);
1424 err2 |= __put_user(m64.msg_perm.seq, &up32->msg_perm.seq);
1425 err2 |= __put_user(m64.msg_stime, &up32->msg_stime);
1426 err2 |= __put_user(m64.msg_rtime, &up32->msg_rtime);
1427 err2 |= __put_user(m64.msg_ctime, &up32->msg_ctime);
1428 err2 |= __put_user(m64.msg_cbytes, &up32->msg_cbytes);
1429 err2 |= __put_user(m64.msg_qnum, &up32->msg_qnum);
1430 err2 |= __put_user(m64.msg_qbytes, &up32->msg_qbytes);
1431 err2 |= __put_user(m64.msg_lspid, &up32->msg_lspid);
1432 err2 |= __put_user(m64.msg_lrpid, &up32->msg_lrpid);
/*
 * shmat32(): shmat(2) compat — attach the segment and store the resulting
 * address through the 32-bit pointer passed in 'third'. The iBCS2
 * (version != 0) entry point is rejected.
 */
1442 shmat32 (int first, int second, int third, int version, void *uptr)
1444 unsigned long raddr;
1445 u32 *uaddr = (u32 *)A((u32)third);
1449 return -EINVAL; /* iBCS2 emulator entry point: unsupported */
1450 err = sys_shmat(first, uptr, second, &raddr);
1453 return put_user(raddr, uaddr);
/*
 * shmctl32(): shmctl(2) compat layer. Handles IPC_INFO (shminfo),
 * IPC_SET (read settable perm fields), stat variants (narrow shmid64_ds
 * back to the caller's layout) and SHM_INFO (shm_info). Kernel-side calls
 * are made on kernel copies (set_fs bracketing on lines missing from this
 * listing). Fragment — the switch, set_fs calls and returns are missing.
 */
1457 shmctl32 (int first, int second, void *uptr)
1459 int err = -EFAULT, err2;
1461 struct shmid64_ds s64;
1462 struct shmid_ds32 *up32 = (struct shmid_ds32 *)uptr;
1463 struct shmid64_ds32 *up64 = (struct shmid64_ds32 *)uptr;
1464 mm_segment_t old_fs;
1465 struct shm_info32 *uip = (struct shm_info32 *)uptr;
1467 int version = ipc_parse_version32(&second);
1468 struct shminfo64 smi;
1469 struct shminfo *usi32 = (struct shminfo *) uptr;
1470 struct shminfo64_32 *usi64 = (struct shminfo64_32 *) uptr;
/* IPC_INFO: fetch shminfo into a kernel copy, write out per-ABI. */
1476 err = sys_shmctl(first, second, (struct shmid_ds *)&smi);
1479 if (version == IPC_64) {
1480 if (!access_ok(VERIFY_WRITE, usi64, sizeof(*usi64))) {
1484 err2 = __put_user(smi.shmmax, &usi64->shmmax);
1485 err2 |= __put_user(smi.shmmin, &usi64->shmmin);
1486 err2 |= __put_user(smi.shmmni, &usi64->shmmni);
1487 err2 |= __put_user(smi.shmseg, &usi64->shmseg);
1488 err2 |= __put_user(smi.shmall, &usi64->shmall);
1490 if (!access_ok(VERIFY_WRITE, usi32, sizeof(*usi32))) {
1494 err2 = __put_user(smi.shmmax, &usi32->shmmax);
1495 err2 |= __put_user(smi.shmmin, &usi32->shmmin);
1496 err2 |= __put_user(smi.shmmni, &usi32->shmmni);
1497 err2 |= __put_user(smi.shmseg, &usi32->shmseg);
1498 err2 |= __put_user(smi.shmall, &usi32->shmall);
1507 err = sys_shmctl(first, second, (struct shmid_ds *)uptr);
/* IPC_SET: pull the settable perm fields from whichever layout applies. */
1511 if (version == IPC_64) {
1512 err = get_user(s64.shm_perm.uid, &up64->shm_perm.uid);
1513 err |= get_user(s64.shm_perm.gid, &up64->shm_perm.gid);
1514 err |= get_user(s64.shm_perm.mode, &up64->shm_perm.mode);
1516 err = get_user(s64.shm_perm.uid, &up32->shm_perm.uid);
1517 err |= get_user(s64.shm_perm.gid, &up32->shm_perm.gid);
1518 err |= get_user(s64.shm_perm.mode, &up32->shm_perm.mode);
1524 err = sys_shmctl(first, second, (struct shmid_ds *)&s64);
1532 err = sys_shmctl(first, second, (struct shmid_ds *)&s64);
/* Stat: narrow the kernel shmid64_ds into the caller's layout. */
1536 if (version == IPC_64) {
1537 if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64))) {
1541 err2 = __put_user(s64.shm_perm.key, &up64->shm_perm.key);
1542 err2 |= __put_user(s64.shm_perm.uid, &up64->shm_perm.uid);
1543 err2 |= __put_user(s64.shm_perm.gid, &up64->shm_perm.gid);
1544 err2 |= __put_user(s64.shm_perm.cuid, &up64->shm_perm.cuid);
1545 err2 |= __put_user(s64.shm_perm.cgid, &up64->shm_perm.cgid);
1546 err2 |= __put_user(s64.shm_perm.mode, &up64->shm_perm.mode);
1547 err2 |= __put_user(s64.shm_perm.seq, &up64->shm_perm.seq);
1548 err2 |= __put_user(s64.shm_atime, &up64->shm_atime);
1549 err2 |= __put_user(s64.shm_dtime, &up64->shm_dtime);
1550 err2 |= __put_user(s64.shm_ctime, &up64->shm_ctime);
1551 err2 |= __put_user(s64.shm_segsz, &up64->shm_segsz);
1552 err2 |= __put_user(s64.shm_nattch, &up64->shm_nattch);
1553 err2 |= __put_user(s64.shm_cpid, &up64->shm_cpid);
1554 err2 |= __put_user(s64.shm_lpid, &up64->shm_lpid);
1556 if (!access_ok(VERIFY_WRITE, up32, sizeof(*up32))) {
1560 err2 = __put_user(s64.shm_perm.key, &up32->shm_perm.key);
1561 err2 |= __put_user(s64.shm_perm.uid, &up32->shm_perm.uid);
1562 err2 |= __put_user(s64.shm_perm.gid, &up32->shm_perm.gid);
1563 err2 |= __put_user(s64.shm_perm.cuid, &up32->shm_perm.cuid);
1564 err2 |= __put_user(s64.shm_perm.cgid, &up32->shm_perm.cgid);
1565 err2 |= __put_user(s64.shm_perm.mode, &up32->shm_perm.mode);
1566 err2 |= __put_user(s64.shm_perm.seq, &up32->shm_perm.seq);
1567 err2 |= __put_user(s64.shm_atime, &up32->shm_atime);
1568 err2 |= __put_user(s64.shm_dtime, &up32->shm_dtime);
1569 err2 |= __put_user(s64.shm_ctime, &up32->shm_ctime);
1570 err2 |= __put_user(s64.shm_segsz, &up32->shm_segsz);
1571 err2 |= __put_user(s64.shm_nattch, &up32->shm_nattch);
1572 err2 |= __put_user(s64.shm_cpid, &up32->shm_cpid);
1573 err2 |= __put_user(s64.shm_lpid, &up32->shm_lpid);
/* SHM_INFO: fetch shm_info into a kernel copy, write out the 32-bit form. */
1582 err = sys_shmctl(first, second, (void *)&si);
1587 if (!access_ok(VERIFY_WRITE, uip, sizeof(*uip))) {
1591 err2 = __put_user(si.used_ids, &uip->used_ids);
1592 err2 |= __put_user(si.shm_tot, &uip->shm_tot);
1593 err2 |= __put_user(si.shm_rss, &uip->shm_rss);
1594 err2 |= __put_user(si.shm_swp, &uip->shm_swp);
1595 err2 |= __put_user(si.swap_attempts, &uip->swap_attempts);
1596 err2 |= __put_user(si.swap_successes, &uip->swap_successes);
1605 extern int sem_ctls[];
1606 #define sc_semopm (sem_ctls[2])
/*
 * semtimedop32 — ia32 entry for semtimedop(): widens the 32-bit
 * compat_timespec timeout into a native struct timespec, then delegates
 * to sys_semtimedop().  NOTE(review): this listing elides lines; the
 * error-return bodies of the checks below are not visible here.
 */
1609 semtimedop32(int semid, struct sembuf *tsops, int nsops,
1610 struct compat_timespec *timeout32)
1616 /* parameter checking precedence should mirror sys_semtimedop() */
1617 if (nsops < 1 || semid < 0)
1619 if (nsops > sc_semopm)
/* Validate the user sembuf array and convert the compat timeout in one test. */
1621 if (!access_ok(VERIFY_READ, tsops, nsops * sizeof(struct sembuf)) ||
1622 get_compat_timespec(&t, timeout32))
/* struct sembuf has identical layout on ia32 and ia64, so it passes through. */
1627 ret = sys_semtimedop(semid, tsops, nsops, &t);
/*
 * sys32_ipc — multiplexer for the old ia32 ipc(2) syscall: dispatches on
 * `call` to the sem/msg/shm families.  The case labels (SEMTIMEDOP, SEMOP,
 * SEMGET, ...) are elided from this listing; each `return` below belongs to
 * one case of the dispatch switch.  AA() widens a 32-bit user pointer.
 */
1633 sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth)
1637 version = call >> 16; /* hack for backward compatibility */
/* SEMTIMEDOP with a non-NULL timeout needs compat_timespec conversion. */
1643 return semtimedop32(first, (struct sembuf *)AA(ptr),
1644 second, (struct compat_timespec *)AA(fifth));
1645 /* else fall through for normal semop() */
1647 /* struct sembuf is the same on 32 and 64bit :)) */
1648 return sys_semtimedop(first, (struct sembuf *)AA(ptr), second,
1651 return sys_semget(first, second, third);
1653 return semctl32(first, second, third, (void *)AA(ptr));
1656 return do_sys32_msgsnd(first, second, third, (void *)AA(ptr));
1658 return do_sys32_msgrcv(first, second, fifth, third, version, (void *)AA(ptr));
1660 return sys_msgget((key_t) first, second);
1662 return msgctl32(first, second, (void *)AA(ptr));
1665 return shmat32(first, second, third, version, (void *)AA(ptr));
1668 return sys_shmdt((char *)AA(ptr));
1670 return sys_shmget(first, second, third);
1672 return shmctl32(first, second, (void *)AA(ptr));
1681 * sys_time() can be implemented in user-level using
1682 * sys_gettimeofday(). IA64 did this but i386 Linux did not
1683 * so we have to implement this system call here.
1686 sys32_time (int *tloc)
1690 /* SMP: This is fairly trivial. We grab CURRENT_TIME and
1691 stuff it to user space. No side effects */
1694 if (put_user(i, tloc))
1701 compat_sys_wait4 (compat_pid_t pid, compat_uint_t * stat_addr, int options,
1702 struct compat_rusage *ru);
1705 sys32_waitpid (int pid, unsigned int *stat_addr, int options)
1707 return compat_sys_wait4(pid, stat_addr, options, NULL);
/*
 * ia32_peek — read one 32-bit word from the traced child's address space
 * for PTRACE_PEEKTEXT/PEEKDATA.  Returns 0 on success, -EIO on short read.
 * NOTE(review): the copy uses sizeof(*val) but the success test compares
 * against sizeof(ret) — `ret`'s declaration is elided from this listing;
 * confirm the two types have the same size.
 */
1711 ia32_peek (struct pt_regs *regs, struct task_struct *child, unsigned long addr, unsigned int *val)
1716 copied = access_process_vm(child, addr, val, sizeof(*val), 0);
1717 return (copied != sizeof(ret)) ? -EIO : 0;
/*
 * ia32_poke — write one 32-bit word into the traced child's address space
 * for PTRACE_POKETEXT/POKEDATA.  The error return (-EIO on short write)
 * is on a line elided from this listing.
 */
1721 ia32_poke (struct pt_regs *regs, struct task_struct *child, unsigned long addr, unsigned int val)
1724 if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val))
1730 * The order in which registers are stored in the ptrace regs structure
1743 #define PT_ORIG_EAX 11
/*
 * getreg — fetch the value of a virtual i386 register from a traced ia32
 * task.  `regno` is a byte offset into the i386 user-regs layout, hence
 * the division by sizeof(int).  The i386 registers are materialized from
 * the ia64 registers chosen by the ia32 syscall dispatch convention
 * (e.g. EBX lives in r11, EAX in r8).
 */
1751 getreg (struct task_struct *child, int regno)
1753 struct pt_regs *child_regs;
1755 child_regs = ia64_task_regs(child);
1756 switch (regno / sizeof(int)) {
1757 case PT_EBX: return child_regs->r11;
1758 case PT_ECX: return child_regs->r9;
1759 case PT_EDX: return child_regs->r10;
1760 case PT_ESI: return child_regs->r14;
1761 case PT_EDI: return child_regs->r15;
1762 case PT_EBP: return child_regs->r13;
1763 case PT_EAX: return child_regs->r8;
1764 case PT_ORIG_EAX: return child_regs->r1; /* see dispatch_to_ia32_handler() */
1765 case PT_EIP: return child_regs->cr_iip;
1766 case PT_UESP: return child_regs->r12;
1767 case PT_EFL: return child->thread.eflag;
/* Segment registers are fixed flat selectors; the __USER_DS return for
   the data-segment group is on a line elided from this listing. */
1768 case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS:
1770 case PT_CS: return __USER_CS;
1772 printk(KERN_ERR "ia32.getreg(): unknown register %d\n", regno);
/*
 * putreg — store a value into a virtual i386 register of a traced ia32
 * task; inverse of getreg() with the same register-to-ia64 mapping.
 * Segment registers may only hold the fixed flat selectors
 * (__USER_DS / __USER_CS); anything else is rejected with a printk
 * (the KERN_ERR prefix lines are elided from this listing).
 */
1779 putreg (struct task_struct *child, int regno, unsigned int value)
1781 struct pt_regs *child_regs;
1783 child_regs = ia64_task_regs(child);
1784 switch (regno / sizeof(int)) {
1785 case PT_EBX: child_regs->r11 = value; break;
1786 case PT_ECX: child_regs->r9 = value; break;
1787 case PT_EDX: child_regs->r10 = value; break;
1788 case PT_ESI: child_regs->r14 = value; break;
1789 case PT_EDI: child_regs->r15 = value; break;
1790 case PT_EBP: child_regs->r13 = value; break;
1791 case PT_EAX: child_regs->r8 = value; break;
1792 case PT_ORIG_EAX: child_regs->r1 = value; break;
1793 case PT_EIP: child_regs->cr_iip = value; break;
1794 case PT_UESP: child_regs->r12 = value; break;
1795 case PT_EFL: child->thread.eflag = value; break;
1796 case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS:
1797 if (value != __USER_DS)
1799 "ia32.putreg: attempt to set invalid segment register %d = %x\n",
1803 if (value != __USER_CS)
/* NOTE(review): duplicated "to to" in the message below — fix separately
   (string literals must stay byte-identical in a comment-only pass). */
1805 "ia32.putreg: attempt to to set invalid segment register %d = %x\n",
1809 printk(KERN_ERR "ia32.putreg: unknown register %d\n", regno);
/*
 * put_fpreg — copy one x87 stack register (ST(regno)) of a traced task to
 * user space in the 80-bit ia32 format.  `regno` is rotated by the x87
 * top-of-stack (`tos`) to map the logical stack slot to a physical
 * register: f8-f11 are scratch registers saved in pt_regs, the remainder
 * live in the switch_stack frame.  The conversion goes through a bounce
 * buffer aligned up to 16 bytes (ia64f2ia32f needs aligned storage).
 */
1815 put_fpreg (int regno, struct _fpreg_ia32 *reg, struct pt_regs *ptp, struct switch_stack *swp,
1818 struct _fpreg_ia32 *f;
1821 f = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
/* Rotate by top-of-stack; the modulo wrap (regno -= 8) is elided here. */
1822 if ((regno += tos) >= 8)
1826 ia64f2ia32f(f, &ptp->f8);
1829 ia64f2ia32f(f, &ptp->f9);
1832 ia64f2ia32f(f, &ptp->f10);
1835 ia64f2ia32f(f, &ptp->f11);
1841 ia64f2ia32f(f, &swp->f12 + (regno - 4));
/* NOTE(review): copy_to_user() result is ignored — a fault here is lost. */
1844 copy_to_user(reg, f, sizeof(*reg));
/*
 * get_fpreg — inverse of put_fpreg(): load one x87 stack register of a
 * traced task from a user-supplied 80-bit ia32 value.  Same tos rotation
 * and f8-f11/pt_regs vs switch_stack split as put_fpreg().
 */
1848 get_fpreg (int regno, struct _fpreg_ia32 *reg, struct pt_regs *ptp, struct switch_stack *swp,
1852 if ((regno += tos) >= 8)
/* NOTE(review): copy_from_user() results are ignored throughout — a fault
   leaves the destination register partially updated with no error. */
1856 copy_from_user(&ptp->f8, reg, sizeof(*reg));
1859 copy_from_user(&ptp->f9, reg, sizeof(*reg));
1862 copy_from_user(&ptp->f10, reg, sizeof(*reg));
1865 copy_from_user(&ptp->f11, reg, sizeof(*reg));
1871 copy_from_user(&swp->f12 + (regno - 4), reg, sizeof(*reg));
/*
 * save_ia32_fpstate — export a traced task's FP state to user space in the
 * legacy i387 (fsave) layout, for IA32_PTRACE_GETFPREGS.  The control/
 * status/tag words and instruction/operand pointers are synthesized from
 * the ia64 fcr/fsr/fir/fdr registers; the eight ST slots come from
 * put_fpreg().  NOTE(review): individual __put_user results after the
 * access_ok() gate are discarded.
 */
1878 save_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct *save)
1880 struct switch_stack *swp;
1881 struct pt_regs *ptp;
1884 if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
1887 __put_user(tsk->thread.fcr & 0xffff, &save->cwd);
1888 __put_user(tsk->thread.fsr & 0xffff, &save->swd);
1889 __put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd);
1890 __put_user(tsk->thread.fir, &save->fip);
1891 __put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs);
1892 __put_user(tsk->thread.fdr, &save->foo);
1893 __put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos);
1896 * Stack frames start with 16-bytes of temp space
/* Locate the child's saved switch_stack just above the 16-byte scratch area. */
1898 swp = (struct switch_stack *)(tsk->thread.ksp + 16);
1899 ptp = ia64_task_regs(tsk);
/* x87 top-of-stack lives in fsr bits 11-13. */
1900 tos = (tsk->thread.fsr >> 11) & 7;
1901 for (i = 0; i < 8; i++)
1902 put_fpreg(i, &save->st_space[i], ptp, swp, tos);
/*
 * restore_ia32_fpstate — inverse of save_ia32_fpstate(): load a traced
 * task's FP state from a user i387 (fsave) image, for
 * IA32_PTRACE_SETFPREGS.  Only the ia32-visible bit ranges of fcr/fsr/
 * fir/fdr are replaced; the remaining ia64 bits are preserved by masking.
 */
1907 restore_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct *save)
1909 struct switch_stack *swp;
1910 struct pt_regs *ptp;
1912 unsigned int fsrlo, fsrhi, num32;
1914 if (!access_ok(VERIFY_READ, save, sizeof(*save)))
1917 __get_user(num32, (unsigned int *)&save->cwd);
1918 tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f);
/* Recombine the i387 status word (low 16) and tag word (high 16) into fsr. */
1919 __get_user(fsrlo, (unsigned int *)&save->swd);
1920 __get_user(fsrhi, (unsigned int *)&save->twd);
1921 num32 = (fsrhi << 16) | fsrlo;
1922 tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32;
1923 __get_user(num32, (unsigned int *)&save->fip);
1924 tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32;
1925 __get_user(num32, (unsigned int *)&save->foo);
1926 tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32;
1929 * Stack frames start with 16-bytes of temp space
1931 swp = (struct switch_stack *)(tsk->thread.ksp + 16);
1932 ptp = ia64_task_regs(tsk);
1933 tos = (tsk->thread.fsr >> 11) & 7;
1934 for (i = 0; i < 8; i++)
1935 get_fpreg(i, &save->st_space[i], ptp, swp, tos);
/*
 * save_ia32_fpxstate — export FP state in the extended fxsave layout
 * (IA32_PTRACE_GETFPXREGS): same x87 fields as save_ia32_fpstate() plus
 * MXCSR and the eight XMM registers.  XMM register i is reconstructed
 * from the f16/f17 register pair area of the saved switch_stack.
 */
1940 save_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct *save)
1942 struct switch_stack *swp;
1943 struct pt_regs *ptp;
1945 unsigned long mxcsr=0;
1946 unsigned long num128[2];
1948 if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
1951 __put_user(tsk->thread.fcr & 0xffff, &save->cwd);
1952 __put_user(tsk->thread.fsr & 0xffff, &save->swd);
1953 __put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd);
1954 __put_user(tsk->thread.fir, &save->fip);
1955 __put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs);
1956 __put_user(tsk->thread.fdr, &save->foo);
1957 __put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos);
1960 * Stack frames start with 16-bytes of temp space
1962 swp = (struct switch_stack *)(tsk->thread.ksp + 16);
1963 ptp = ia64_task_regs(tsk);
1964 tos = (tsk->thread.fsr >> 11) & 7;
/* fxsave spaces ST entries 16 bytes apart, hence the 4*i int stride. */
1965 for (i = 0; i < 8; i++)
1966 put_fpreg(i, (struct _fpreg_ia32 *)&save->st_space[4*i], ptp, swp, tos);
/* MXCSR control bits are kept in fcr[47:39], status bits in fsr[37:32]. */
1968 mxcsr = ((tsk->thread.fcr>>32) & 0xff80) | ((tsk->thread.fsr>>32) & 0x3f);
1969 __put_user(mxcsr & 0xffff, &save->mxcsr);
1970 for (i = 0; i < 8; i++) {
1971 memcpy(&(num128[0]), &(swp->f16) + i*2, sizeof(unsigned long));
1972 memcpy(&(num128[1]), &(swp->f17) + i*2, sizeof(unsigned long));
/* NOTE(review): copy_to_user() result ignored here. */
1973 copy_to_user(&save->xmm_space[0] + 4*i, num128, sizeof(struct _xmmreg_ia32));
/*
 * restore_ia32_fpxstate — inverse of save_ia32_fpxstate()
 * (IA32_PTRACE_SETFPXREGS): reload x87 state, MXCSR, and the XMM
 * registers from a user fxsave image, preserving non-ia32 bits of the
 * ia64 control registers via masking.
 */
1979 restore_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct *save)
1981 struct switch_stack *swp;
1982 struct pt_regs *ptp;
1984 unsigned int fsrlo, fsrhi, num32;
1986 unsigned long num64;
1987 unsigned long num128[2];
1989 if (!access_ok(VERIFY_READ, save, sizeof(*save)))
1992 __get_user(num32, (unsigned int *)&save->cwd);
1993 tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f);
1994 __get_user(fsrlo, (unsigned int *)&save->swd);
1995 __get_user(fsrhi, (unsigned int *)&save->twd);
1996 num32 = (fsrhi << 16) | fsrlo;
1997 tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32;
1998 __get_user(num32, (unsigned int *)&save->fip);
1999 tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32;
2000 __get_user(num32, (unsigned int *)&save->foo);
2001 tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32;
2004 * Stack frames start with 16-bytes of temp space
2006 swp = (struct switch_stack *)(tsk->thread.ksp + 16);
2007 ptp = ia64_task_regs(tsk);
2008 tos = (tsk->thread.fsr >> 11) & 7;
2009 for (i = 0; i < 8; i++)
2010 get_fpreg(i, (struct _fpreg_ia32 *)&save->st_space[4*i], ptp, swp, tos);
/* Split user MXCSR back into fcr (control, mask 0xff10 here vs 0xff80 on
   the save side — NOTE(review): verify this asymmetry against upstream)
   and fsr (status, low 6 bits). */
2012 __get_user(mxcsr, (unsigned int *)&save->mxcsr);
2013 num64 = mxcsr & 0xff10;
2014 tsk->thread.fcr = (tsk->thread.fcr & (~0xff1000000000)) | (num64<<32);
2015 num64 = mxcsr & 0x3f;
2016 tsk->thread.fsr = (tsk->thread.fsr & (~0x3f00000000)) | (num64<<32);
2018 for (i = 0; i < 8; i++) {
/* NOTE(review): copy_from_user() result ignored here. */
2019 copy_from_user(num128, &save->xmm_space[0] + 4*i, sizeof(struct _xmmreg_ia32));
2020 memcpy(&(swp->f16) + i*2, &(num128[0]), sizeof(unsigned long));
2021 memcpy(&(swp->f17) + i*2, &(num128[1]), sizeof(unsigned long));
2026 extern asmlinkage long sys_ptrace (long, pid_t, unsigned long, unsigned long, long, long, long,
2030 * Note that the IA32 version of `ptrace' calls the IA64 routine for
2031 * many of the requests. This will only work for requests that do
2032 * not need access to the calling processes `pt_regs' which is located
2033 * at the address of `stack'. Once we call the IA64 `sys_ptrace' then
2034 * the address of `stack' will not be the address of the `pt_regs'.
/*
 * sys32_ptrace — ia32 ptrace entry point.  Requests that need the i386
 * register view (PEEK/POKE USR, GETREGS/SETREGS, FP state) are handled
 * here via getreg()/putreg() and the fpstate helpers; simple control
 * requests are forwarded to the native sys_ptrace() (valid because they
 * don't touch the caller's pt_regs — see the comment above this
 * function).  Several error-path and lock/unlock lines are elided from
 * this listing.
 */
2037 sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data,
2038 long arg4, long arg5, long arg6, long arg7, long stack)
2040 struct pt_regs *regs = (struct pt_regs *) &stack;
2041 struct task_struct *child;
2042 unsigned int value, tmp;
2046 if (request == PTRACE_TRACEME) {
2047 ret = sys_ptrace(request, pid, addr, data, arg4, arg5, arg6, arg7, stack);
/* Look up and pin the target task under the tasklist lock. */
2052 read_lock(&tasklist_lock);
2053 child = find_task_by_pid(pid);
2055 get_task_struct(child);
2056 read_unlock(&tasklist_lock);
2060 if (pid == 1) /* no messing around with init! */
2063 if (request == PTRACE_ATTACH) {
2064 ret = sys_ptrace(request, pid, addr, data, arg4, arg5, arg6, arg7, stack);
2068 ret = ptrace_check_attach(child, request == PTRACE_KILL);
2073 case PTRACE_PEEKTEXT:
2074 case PTRACE_PEEKDATA: /* read word at location addr */
2075 ret = ia32_peek(regs, child, addr, &value);
2077 ret = put_user(value, (unsigned int *) A(data));
2082 case PTRACE_POKETEXT:
2083 case PTRACE_POKEDATA: /* write the word at location addr */
2084 ret = ia32_poke(regs, child, addr, data);
2087 case PTRACE_PEEKUSR: /* read word at addr in USER area */
/* NOTE(review): the bound check admits addr == 17*sizeof(int), one slot
   past the last i386 register — getreg/putreg then hit their "unknown
   register" path.  Consider `>=`. */
2089 if ((addr & 3) || addr > 17*sizeof(int))
2092 tmp = getreg(child, addr);
2093 if (!put_user(tmp, (unsigned int *) A(data)))
2097 case PTRACE_POKEUSR: /* write word at addr in USER area */
2099 if ((addr & 3) || addr > 17*sizeof(int))
2102 putreg(child, addr, data);
2106 case IA32_PTRACE_GETREGS:
2107 if (!access_ok(VERIFY_WRITE, (int *) A(data), 17*sizeof(int))) {
2111 for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int) ) {
2112 put_user(getreg(child, i), (unsigned int *) A(data));
2113 data += sizeof(int);
2118 case IA32_PTRACE_SETREGS:
2119 if (!access_ok(VERIFY_READ, (int *) A(data), 17*sizeof(int))) {
2123 for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int) ) {
2124 get_user(tmp, (unsigned int *) A(data));
2125 putreg(child, i, tmp);
2126 data += sizeof(int);
2131 case IA32_PTRACE_GETFPREGS:
2132 ret = save_ia32_fpstate(child, (struct ia32_user_i387_struct *) A(data));
2135 case IA32_PTRACE_GETFPXREGS:
2136 ret = save_ia32_fpxstate(child, (struct ia32_user_fxsr_struct *) A(data));
2139 case IA32_PTRACE_SETFPREGS:
2140 ret = restore_ia32_fpstate(child, (struct ia32_user_i387_struct *) A(data));
2143 case IA32_PTRACE_SETFPXREGS:
2144 ret = restore_ia32_fpxstate(child, (struct ia32_user_fxsr_struct *) A(data));
/* Control-flow requests need no register translation: forward natively. */
2147 case PTRACE_SYSCALL: /* continue, stop after next syscall */
2148 case PTRACE_CONT: /* restart after signal. */
2150 case PTRACE_SINGLESTEP: /* execute child for one instruction */
2151 case PTRACE_DETACH: /* detach a process */
2152 ret = sys_ptrace(request, pid, addr, data, arg4, arg5, arg6, arg7, stack);
2156 ret = ptrace_request(child, request, addr, data);
2161 put_task_struct(child);
2168 * The IA64 maps 4 I/O ports for each 4K page
2170 #define IOLEN ((65536 / 4) * 4096)
/*
 * sys32_iopl — emulate i386 iopl(): since ia64 has no I/O privilege
 * level, grant port access by mapping the uncached I/O port space
 * (/dev/mem at ia64_iobase) into the process at IA32_IOBASE, then record
 * the requested level in the emulated EFLAGS IOPL field (bits 12-13).
 * NOTE(review): "¤t" below is an HTML-entity-mangled "&current" from
 * the listing's encoding — restore "&current->mm->mmap_sem" from upstream.
 */
2173 sys32_iopl (int level)
2175 extern unsigned long ia64_iobase;
2180 mm_segment_t old_fs = get_fs ();
2184 /* Trying to gain more privileges? */
2185 old = ia64_getreg(_IA64_REG_AR_EFLAG);
2186 if ((unsigned int) level > ((old >> 12) & 3)) {
2187 if (!capable(CAP_SYS_RAWIO))
2191 fd = sys_open("/dev/mem", O_SYNC | O_RDWR, 0);
2201 down_write(¤t->mm->mmap_sem);
2202 addr = do_mmap_pgoff(file, IA32_IOBASE,
2203 IOLEN, PROT_READ|PROT_WRITE, MAP_SHARED,
2204 (ia64_iobase & ~PAGE_OFFSET) >> PAGE_SHIFT);
2205 up_write(¤t->mm->mmap_sem);
/* Stamp the new IOPL into the emulated EFLAGS register. */
2208 old = (old & ~0x3000) | (level << 12);
2209 ia64_setreg(_IA64_REG_AR_EFLAG, old);
2218 sys32_ioperm (unsigned int from, unsigned int num, int on)
2222 * Since IA64 doesn't have permission bits we'd have to go to
2223 * a lot of trouble to simulate them in software. There's
2224 * no point, only trusted programs can make this call so we'll
2225 * just turn it into an iopl call and let the process have
2226 * access to all I/O ports.
2228 * XXX proper ioperm() support should be emulated by
2229 * manipulating the page protections...
2231 return sys32_iopl(3);
2236 unsigned int ss_flags;
2237 unsigned int ss_size;
/*
 * sys32_sigaltstack — ia32 sigaltstack(): convert the 32-bit ia32_stack_t
 * to/from the native stack_t around do_sigaltstack().  The ia32 MINSIGSTKSZ
 * is smaller than the ia64 one, so the size is temporarily inflated to pass
 * the native check and the user's real value is written back afterwards.
 * The NULL-uss32 guard and error-path lines are elided from this listing.
 */
2241 sys32_sigaltstack (ia32_stack_t *uss32, ia32_stack_t *uoss32,
2242 long arg2, long arg3, long arg4, long arg5, long arg6, long arg7, long stack)
/* The caller's pt_regs sit at &stack; r12 is the ia32 user stack pointer. */
2244 struct pt_regs *pt = (struct pt_regs *) &stack;
2248 mm_segment_t old_fs = get_fs();
2251 if (copy_from_user(&buf32, uss32, sizeof(ia32_stack_t)))
2253 uss.ss_sp = (void *) (long) buf32.ss_sp;
2254 uss.ss_flags = buf32.ss_flags;
2255 /* MINSIGSTKSZ is different for ia32 vs ia64. We lie here to pass the
2256 check and set it to the user requested value later */
2257 if ((buf32.ss_flags != SS_DISABLE) && (buf32.ss_size < MINSIGSTKSZ_IA32)) {
2261 uss.ss_size = MINSIGSTKSZ;
2263 ret = do_sigaltstack(uss32 ? &uss : NULL, &uoss, pt->r12);
/* Replace the lied-about size with what the user actually asked for. */
2264 current->sas_ss_size = buf32.ss_size;
/* Narrow the returned old stack back to the 32-bit layout. */
2270 buf32.ss_sp = (long) uoss.ss_sp;
2271 buf32.ss_flags = uoss.ss_flags;
2272 buf32.ss_size = uoss.ss_size;
2273 if (copy_to_user(uoss32, &buf32, sizeof(ia32_stack_t)))
2282 current->state = TASK_INTERRUPTIBLE;
2284 return -ERESTARTNOHAND;
2287 asmlinkage long sys_msync (unsigned long start, size_t len, int flags);
2290 sys32_msync (unsigned int start, unsigned int len, int flags)
2294 if (OFFSET4K(start))
2296 addr = PAGE_START(start);
2297 return sys_msync(addr, len + (start - addr), flags);
2303 unsigned int oldval;
2304 unsigned int oldlenp;
2305 unsigned int newval;
2306 unsigned int newlen;
2307 unsigned int __unused[4];
2310 extern asmlinkage long sys_sysctl(struct __sysctl_args *args);
/*
 * sys32_sysctl — ia32 _sysctl(): widen the 32-bit argument block's user
 * pointers, pre-validate them, then call do_sysctl() (with address-limit
 * checks relaxed, per the in-code comment) and write the result length
 * back.  Returns -ENOSYS when CONFIG_SYSCTL is off (elided #else branch).
 */
2313 sys32_sysctl (struct sysctl32 *args)
2315 #ifdef CONFIG_SYSCTL
2316 struct sysctl32 a32;
2317 mm_segment_t old_fs = get_fs ();
2318 void *oldvalp, *newvalp;
2323 if (copy_from_user(&a32, args, sizeof(a32)))
2327 * We need to pre-validate these because we have to disable address checking
2328 * before calling do_sysctl() because of OLDLEN but we can't run the risk of the
2329 * user specifying bad addresses here. Well, since we're dealing with 32 bit
2330 * addresses, we KNOW that access_ok() will always succeed, so this is an
2331 * expensive NOP, but so what...
2333 namep = (int *) A(a32.name);
2334 oldvalp = (void *) A(a32.oldval);
2335 newvalp = (void *) A(a32.newval);
2337 if ((oldvalp && get_user(oldlen, (int *) A(a32.oldlenp)))
2338 || !access_ok(VERIFY_WRITE, namep, 0)
2339 || !access_ok(VERIFY_WRITE, oldvalp, 0)
2340 || !access_ok(VERIFY_WRITE, newvalp, 0))
2345 ret = do_sysctl(namep, a32.nlen, oldvalp, &oldlen, newvalp, (size_t) a32.newlen);
2349 if (oldvalp && put_user (oldlen, (int *) A(a32.oldlenp)))
2359 sys32_newuname (struct new_utsname *name)
2361 extern asmlinkage long sys_newuname(struct new_utsname * name);
2362 int ret = sys_newuname(name);
2365 if (copy_to_user(name->machine, "i686\0\0\0", 8))
2370 extern asmlinkage long sys_getresuid (uid_t *ruid, uid_t *euid, uid_t *suid);
2373 sys32_getresuid16 (u16 *ruid, u16 *euid, u16 *suid)
2377 mm_segment_t old_fs = get_fs();
2380 ret = sys_getresuid(&a, &b, &c);
2383 if (put_user(a, ruid) || put_user(b, euid) || put_user(c, suid))
2388 extern asmlinkage long sys_getresgid (gid_t *rgid, gid_t *egid, gid_t *sgid);
2391 sys32_getresgid16 (u16 *rgid, u16 *egid, u16 *sgid)
2395 mm_segment_t old_fs = get_fs();
2398 ret = sys_getresgid(&a, &b, &c);
2404 return put_user(a, rgid) | put_user(b, egid) | put_user(c, sgid);
2408 sys32_lseek (unsigned int fd, int offset, unsigned int whence)
2410 extern off_t sys_lseek (unsigned int fd, off_t offset, unsigned int origin);
2412 /* Sign-extension of "offset" is important here... */
2413 return sys_lseek(fd, offset, whence);
2417 groups16_to_user(short *grouplist, struct group_info *group_info)
2422 for (i = 0; i < group_info->ngroups; i++) {
2423 group = (short)GROUP_AT(group_info, i);
2424 if (put_user(group, grouplist+i))
2432 groups16_from_user(struct group_info *group_info, short *grouplist)
2437 for (i = 0; i < group_info->ngroups; i++) {
2438 if (get_user(group, grouplist+i))
2440 GROUP_AT(group_info, i) = (gid_t)group;
2447 sys32_getgroups16 (int gidsetsize, short *grouplist)
2454 get_group_info(current->group_info);
2455 i = current->group_info->ngroups;
2457 if (i > gidsetsize) {
2461 if (groups16_to_user(grouplist, current->group_info)) {
2467 put_group_info(current->group_info);
2472 sys32_setgroups16 (int gidsetsize, short *grouplist)
2474 struct group_info *group_info;
2477 if (!capable(CAP_SETGID))
2479 if ((unsigned)gidsetsize > NGROUPS_MAX)
2482 group_info = groups_alloc(gidsetsize);
2485 retval = groups16_from_user(group_info, grouplist);
2487 put_group_info(group_info);
2491 retval = set_current_groups(group_info);
2492 put_group_info(group_info);
2498 sys32_truncate64 (unsigned int path, unsigned int len_lo, unsigned int len_hi)
2500 extern asmlinkage long sys_truncate (const char *path, unsigned long length);
2502 return sys_truncate((const char *) A(path), ((unsigned long) len_hi << 32) | len_lo);
2506 sys32_ftruncate64 (int fd, unsigned int len_lo, unsigned int len_hi)
2508 extern asmlinkage long sys_ftruncate (int fd, unsigned long length);
2510 return sys_ftruncate(fd, ((unsigned long) len_hi << 32) | len_lo);
2514 putstat64 (struct stat64 *ubuf, struct kstat *kbuf)
2519 if (clear_user(ubuf, sizeof(*ubuf)))
2522 hdev = huge_encode_dev(kbuf->dev);
2523 err = __put_user(hdev, (u32*)&ubuf->st_dev);
2524 err |= __put_user(hdev >> 32, ((u32*)&ubuf->st_dev) + 1);
2525 err |= __put_user(kbuf->ino, &ubuf->__st_ino);
2526 err |= __put_user(kbuf->ino, &ubuf->st_ino_lo);
2527 err |= __put_user(kbuf->ino >> 32, &ubuf->st_ino_hi);
2528 err |= __put_user(kbuf->mode, &ubuf->st_mode);
2529 err |= __put_user(kbuf->nlink, &ubuf->st_nlink);
2530 err |= __put_user(kbuf->uid, &ubuf->st_uid);
2531 err |= __put_user(kbuf->gid, &ubuf->st_gid);
2532 hdev = huge_encode_dev(kbuf->rdev);
2533 err = __put_user(hdev, (u32*)&ubuf->st_rdev);
2534 err |= __put_user(hdev >> 32, ((u32*)&ubuf->st_rdev) + 1);
2535 err |= __put_user(kbuf->size, &ubuf->st_size_lo);
2536 err |= __put_user((kbuf->size >> 32), &ubuf->st_size_hi);
2537 err |= __put_user(kbuf->atime.tv_sec, &ubuf->st_atime);
2538 err |= __put_user(kbuf->atime.tv_nsec, &ubuf->st_atime_nsec);
2539 err |= __put_user(kbuf->mtime.tv_sec, &ubuf->st_mtime);
2540 err |= __put_user(kbuf->mtime.tv_nsec, &ubuf->st_mtime_nsec);
2541 err |= __put_user(kbuf->ctime.tv_sec, &ubuf->st_ctime);
2542 err |= __put_user(kbuf->ctime.tv_nsec, &ubuf->st_ctime_nsec);
2543 err |= __put_user(kbuf->blksize, &ubuf->st_blksize);
2544 err |= __put_user(kbuf->blocks, &ubuf->st_blocks);
2549 sys32_stat64 (char *filename, struct stat64 *statbuf)
2552 long ret = vfs_stat(filename, &s);
2554 ret = putstat64(statbuf, &s);
2559 sys32_lstat64 (char *filename, struct stat64 *statbuf)
2562 long ret = vfs_lstat(filename, &s);
2564 ret = putstat64(statbuf, &s);
2569 sys32_fstat64 (unsigned int fd, struct stat64 *statbuf)
2572 long ret = vfs_fstat(fd, &s);
2574 ret = putstat64(statbuf, &s);
/*
 * sys32_sysinfo — ia32 sysinfo(): call the native syscall, then, if any
 * memory counter exceeds 32 bits, scale all counters down (raising
 * mem_unit correspondingly — the mem_unit <<= 1 / bitcount++ loop body is
 * elided from this listing) before copying field-by-field into the
 * 32-bit sysinfo32 layout.
 */
2596 sys32_sysinfo (struct sysinfo32 *info)
2598 extern asmlinkage long sys_sysinfo (struct sysinfo *);
2602 mm_segment_t old_fs = get_fs();
2605 ret = sys_sysinfo(&s);
2607 /* Check to see if any memory value is too large for 32-bit and
2608 * scale down if needed.
2610 if ((s.totalram >> 32) || (s.totalswap >> 32)) {
2611 while (s.mem_unit < PAGE_SIZE) {
2615 s.totalram >>= bitcount;
2616 s.freeram >>= bitcount;
2617 s.sharedram >>= bitcount;
2618 s.bufferram >>= bitcount;
2619 s.totalswap >>= bitcount;
2620 s.freeswap >>= bitcount;
2621 s.totalhigh >>= bitcount;
2622 s.freehigh >>= bitcount;
2625 if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
2628 err = __put_user(s.uptime, &info->uptime);
2629 err |= __put_user(s.loads[0], &info->loads[0]);
2630 err |= __put_user(s.loads[1], &info->loads[1]);
2631 err |= __put_user(s.loads[2], &info->loads[2]);
2632 err |= __put_user(s.totalram, &info->totalram);
2633 err |= __put_user(s.freeram, &info->freeram);
2634 err |= __put_user(s.sharedram, &info->sharedram);
2635 err |= __put_user(s.bufferram, &info->bufferram);
2636 err |= __put_user(s.totalswap, &info->totalswap);
2637 err |= __put_user(s.freeswap, &info->freeswap);
2638 err |= __put_user(s.procs, &info->procs);
2639 err |= __put_user (s.totalhigh, &info->totalhigh);
2640 err |= __put_user (s.freehigh, &info->freehigh);
2641 err |= __put_user (s.mem_unit, &info->mem_unit);
2648 sys32_sched_rr_get_interval (pid_t pid, struct compat_timespec *interval)
2650 extern asmlinkage long sys_sched_rr_get_interval (pid_t, struct timespec *);
2651 mm_segment_t old_fs = get_fs();
2656 ret = sys_sched_rr_get_interval(pid, &t);
2658 if (put_compat_timespec(&t, interval))
2664 sys32_pread (unsigned int fd, void *buf, unsigned int count, u32 pos_lo, u32 pos_hi)
2666 extern asmlinkage long sys_pread64 (unsigned int, char *, size_t, loff_t);
2667 return sys_pread64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo);
2671 sys32_pwrite (unsigned int fd, void *buf, unsigned int count, u32 pos_lo, u32 pos_hi)
2673 extern asmlinkage long sys_pwrite64 (unsigned int, const char *, size_t, loff_t);
2674 return sys_pwrite64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo);
2678 sys32_sendfile (int out_fd, int in_fd, int *offset, unsigned int count)
2680 extern asmlinkage long sys_sendfile (int, int, off_t *, size_t);
2681 mm_segment_t old_fs = get_fs();
2685 if (offset && get_user(of, offset))
2689 ret = sys_sendfile(out_fd, in_fd, offset ? &of : NULL, count);
2692 if (!ret && offset && put_user(of, offset))
2699 sys32_personality (unsigned int personality)
2701 extern asmlinkage long sys_personality (unsigned long);
2704 if (current->personality == PER_LINUX32 && personality == PER_LINUX)
2705 personality = PER_LINUX32;
2706 ret = sys_personality(personality);
2707 if (ret == PER_LINUX32)
2712 asmlinkage unsigned long
2713 sys32_brk (unsigned int brk)
2715 unsigned long ret, obrk;
2716 struct mm_struct *mm = current->mm;
2721 clear_user((void *) ret, PAGE_ALIGN(ret) - ret);
2726 * Exactly like fs/open.c:sys_open(), except that it doesn't set the O_LARGEFILE flag.
2729 sys32_open (const char * filename, int flags, int mode)
2734 tmp = getname(filename);
2737 fd = get_unused_fd();
2739 struct file *f = filp_open(tmp, flags, mode);
2756 /* Structure for ia32 emulation on ia64 */
2757 struct epoll_event32
/*
 * sys32_epoll_ctl — ia32 epoll_ctl(): the 32-bit epoll_event stores the
 * 64-bit data field as two u32 halves (data[0] = low, data[1] = high);
 * reassemble them into a native struct epoll_event, then call
 * sys_epoll_ctl() with the address limit widened (set_fs lines elided).
 */
2764 sys32_epoll_ctl(int epfd, int op, int fd, struct epoll_event32 *event)
2766 mm_segment_t old_fs = get_fs();
2767 struct epoll_event event64;
2768 int error = -EFAULT;
2771 if ((error = verify_area(VERIFY_READ, event,
2772 sizeof(struct epoll_event32))))
2775 __get_user(event64.events, &event->events);
2776 __get_user(data_halfword, &event->data[0]);
2777 event64.data = data_halfword;
2778 __get_user(data_halfword, &event->data[1]);
2779 event64.data |= (u64)data_halfword << 32;
2782 error = sys_epoll_ctl(epfd, op, fd, &event64);
/*
 * sys32_epoll_wait — ia32 epoll_wait(): run the native syscall against a
 * kernel bounce buffer of 64-bit epoll_events, then narrow each result
 * into the user's 32-bit array (data split into lo/hi u32 halves).
 * NOTE(review): `maxevents * sizeof(struct epoll_event)` is computed in a
 * plain int and is user-controlled — confirm an overflow bound exists in
 * the elided lines.
 */
2789 sys32_epoll_wait(int epfd, struct epoll_event32 *events, int maxevents,
2792 struct epoll_event *events64 = NULL;
2793 mm_segment_t old_fs = get_fs();
2794 int error, numevents, size;
2796 int do_free_pages = 0;
2798 if (maxevents <= 0) {
2802 /* Verify that the area passed by the user is writeable */
2803 if ((error = verify_area(VERIFY_WRITE, events,
2804 maxevents * sizeof(struct epoll_event32))))
2808 * Allocate space for the intermediate copy. If the space needed
2809 * is large enough to cause kmalloc to fail, then try again with
/* Fallback allocator path: __get_free_pages sets do_free_pages (elided). */
2812 size = maxevents * sizeof(struct epoll_event);
2813 events64 = kmalloc(size, GFP_KERNEL);
2814 if (events64 == NULL) {
2815 events64 = (struct epoll_event *)
2816 __get_free_pages(GFP_KERNEL, get_order(size));
2817 if (events64 == NULL)
2822 /* Do the system call */
2823 set_fs(KERNEL_DS); /* copy_to/from_user should work on kernel mem*/
2824 numevents = sys_epoll_wait(epfd, events64, maxevents, timeout);
2827 /* Don't modify userspace memory if we're returning an error */
2828 if (numevents > 0) {
2829 /* Translate the 64-bit structures back into the 32-bit
2831 for (evt_idx = 0; evt_idx < numevents; evt_idx++) {
2832 __put_user(events64[evt_idx].events,
2833 &events[evt_idx].events);
2834 __put_user((u32)events64[evt_idx].data,
2835 &events[evt_idx].data[0]);
2836 __put_user((u32)(events64[evt_idx].data >> 32),
2837 &events[evt_idx].data[1]);
/* Release whichever allocator supplied the bounce buffer. */
2842 free_pages((unsigned long) events64, get_order(size));
2849 * Get a yet unused TLS descriptor index.
2854 struct thread_struct *t = ¤t->thread;
2857 for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
2858 if (desc_empty(t->tls_array + idx))
2859 return idx + GDT_ENTRY_TLS_MIN;
2864 * Set a given TLS descriptor:
2867 sys32_set_thread_area (struct ia32_user_desc *u_info)
2869 struct thread_struct *t = ¤t->thread;
2870 struct ia32_user_desc info;
2871 struct desc_struct *desc;
2874 if (copy_from_user(&info, u_info, sizeof(info)))
2876 idx = info.entry_number;
2879 * index -1 means the kernel should try to find and allocate an empty descriptor:
2882 idx = get_free_idx();
2885 if (put_user(idx, &u_info->entry_number))
2889 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
2892 desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
2894 cpu = smp_processor_id();
2896 if (LDT_empty(&info)) {
2900 desc->a = LDT_entry_a(&info);
2901 desc->b = LDT_entry_b(&info);
2908 * Get the current Thread-Local Storage area:
2911 #define GET_BASE(desc) ( \
2912 (((desc)->a >> 16) & 0x0000ffff) | \
2913 (((desc)->b << 16) & 0x00ff0000) | \
2914 ( (desc)->b & 0xff000000) )
2916 #define GET_LIMIT(desc) ( \
2917 ((desc)->a & 0x0ffff) | \
2918 ((desc)->b & 0xf0000) )
2920 #define GET_32BIT(desc) (((desc)->b >> 23) & 1)
2921 #define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
2922 #define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
2923 #define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
2924 #define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
2925 #define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
2928 sys32_get_thread_area (struct ia32_user_desc *u_info)
2930 struct ia32_user_desc info;
2931 struct desc_struct *desc;
2934 if (get_user(idx, &u_info->entry_number))
2936 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
2939 desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
2941 info.entry_number = idx;
2942 info.base_addr = GET_BASE(desc);
2943 info.limit = GET_LIMIT(desc);
2944 info.seg_32bit = GET_32BIT(desc);
2945 info.contents = GET_CONTENTS(desc);
2946 info.read_exec_only = !GET_WRITABLE(desc);
2947 info.limit_in_pages = GET_LIMIT_PAGES(desc);
2948 info.seg_not_present = !GET_PRESENT(desc);
2949 info.useable = GET_USEABLE(desc);
2951 if (copy_to_user(u_info, &info, sizeof(info)))
2956 extern asmlinkage long
2957 sys_timer_create(clockid_t which_clock, struct sigevent *timer_event_spec,
2958 timer_t * created_timer_id);
2961 sys32_timer_create(u32 clock, struct sigevent32 *se32, timer_t *timer_id)
2969 return sys_timer_create(clock, NULL, timer_id);
2971 memset(&se, 0, sizeof(struct sigevent));
2972 if (get_user(se.sigev_value.sival_int, &se32->sigev_value.sival_int) ||
2973 __get_user(se.sigev_signo, &se32->sigev_signo) ||
2974 __get_user(se.sigev_notify, &se32->sigev_notify) ||
2975 __copy_from_user(&se._sigev_un._pad, &se32->_sigev_un._pad,
2976 sizeof(se._sigev_un._pad)))
2979 if (!access_ok(VERIFY_WRITE,timer_id,sizeof(timer_t)))
2984 err = sys_timer_create(clock, &se, &t);
2988 err = __put_user (t, timer_id);
2993 extern long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice);
2995 long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
2996 __u32 len_low, __u32 len_high, int advice)
2998 return sys_fadvise64_64(fd,
2999 (((u64)offset_high)<<32) | offset_low,
3000 (((u64)len_high)<<32) | len_low,
3004 #ifdef NOTYET /* UNTESTED FOR IA64 FROM HERE DOWN */
/*
 * 32-bit layout of the ncpfs mount data blob, plus in-place conversion
 * to the native struct ncp_mount_data.
 * NOTE(review): this whole region sits under "#ifdef NOTYET" (untested
 * for IA-64) and the listing is gapped — fields such as version/wdog_pid
 * and the uid/gid conversion statements are not visible here; confirm
 * against the full source before relying on this.
 */
3006 struct ncp_mount_data32 {
3008 unsigned int ncp_fd;
3009 compat_uid_t mounted_uid;
3011 unsigned char mounted_vol[NCP_VOLNAME_LEN + 1];
3012 unsigned int time_out;
3013 unsigned int retry_count;
3017 compat_mode_t file_mode;
3018 compat_mode_t dir_mode;
/*
 * Convert the user-supplied 32-bit mount data to the native layout.
 * Both pointers alias the same buffer, so fields are converted from the
 * struct tail back toward the head — each 32-bit field must be read
 * before the (wider) native field that overlays it is written.
 */
3022 do_ncp_super_data_conv(void *raw_data)
3024 struct ncp_mount_data *n = (struct ncp_mount_data *)raw_data;
3025 struct ncp_mount_data32 *n32 = (struct ncp_mount_data32 *)raw_data;
3027 n->dir_mode = n32->dir_mode;
3028 n->file_mode = n32->file_mode;
/* mounted_vol plus the three unsigned ints following it have identical
   layout in both structs, so they can be moved as one run of bytes. */
3031 memmove (n->mounted_vol, n32->mounted_vol,
3032 (sizeof (n32->mounted_vol) + 3 * sizeof (unsigned int)));
3033 n->wdog_pid = n32->wdog_pid;
3034 n->mounted_uid = n32->mounted_uid;
/*
 * 32-bit layout of the smbfs mount data blob, plus in-place conversion
 * to the native struct smb_mount_data.
 * NOTE(review): gapped listing under "#ifdef NOTYET" — the version/uid/gid
 * fields and the failure return for a bad version are not visible here.
 */
3038 struct smb_mount_data32 {
3040 compat_uid_t mounted_uid;
3043 compat_mode_t file_mode;
3044 compat_mode_t dir_mode;
/*
 * Convert in place; src and dst alias the same buffer (see the two casts
 * of raw_data below), so only the old fixed-layout version is accepted.
 */
3048 do_smb_super_data_conv(void *raw_data)
3050 struct smb_mount_data *s = (struct smb_mount_data *)raw_data;
3051 struct smb_mount_data32 *s32 = (struct smb_mount_data32 *)raw_data;
/* Only SMB_MOUNT_OLDVERSION data can be converted blindly like this. */
3053 if (s32->version != SMB_MOUNT_OLDVERSION)
3055 s->version = s32->version;
3056 s->mounted_uid = s32->mounted_uid;
3059 s->file_mode = s32->file_mode;
3060 s->dir_mode = s32->dir_mode;
/*
 * Copy a user-space mount-argument string/blob into a freshly allocated
 * kernel page, storing the page address through *kernel.
 * The amount copied is bounded by the containing VMA: at most the bytes
 * from 'user' to the end of its VMA, and less than PAGE_SIZE.
 * NOTE(review): gapped listing — the mmap_sem locking, the error returns
 * (-EFAULT etc.) and the success path storing into *kernel are not
 * visible here; confirm against the full source.
 */
3066 copy_mount_stuff_to_kernel(const void *user, unsigned long *kernel)
3070 struct vm_area_struct *vma;
/* The user pointer must fall inside a readable mapping. */
3075 vma = find_vma(current->mm, (unsigned long)user);
3076 if(!vma || (unsigned long)user < vma->vm_start)
3078 if(!(vma->vm_flags & VM_READ))
/* Copy at most up to the end of the VMA, and strictly under one page. */
3080 i = vma->vm_end - (unsigned long) user;
3081 if(PAGE_SIZE <= (unsigned long) i)
3083 if(!(page = __get_free_page(GFP_KERNEL)))
3085 if(copy_from_user((void *) page, user, i)) {
3093 extern asmlinkage long sys_mount(char * dev_name, char * dir_name, char * type,
3094 unsigned long new_flags, void *data);
3096 #define SMBFS_NAME "smbfs"
3097 #define NCPFS_NAME "ncpfs"
/*
 * 32-bit mount(2): only smbfs and ncpfs carry binary mount data whose
 * layout differs between 32- and 64-bit callers; for those, copy every
 * string/blob into kernel pages, convert the data in place, and call
 * do_mount().  All other filesystem types are passed straight through
 * to the native sys_mount().
 * NOTE(review): gapped listing under "#ifdef NOTYET" — error unwinding
 * between the copy steps and the final returns are not fully visible;
 * the panic() below looks like leftover debug instrumentation and should
 * be removed before this path is ever enabled — confirm upstream.
 */
3100 sys32_mount(char *dev_name, char *dir_name, char *type,
3101 unsigned long new_flags, u32 data)
3103 unsigned long type_page;
3104 int err, is_smb, is_ncp;
3106 if(!capable(CAP_SYS_ADMIN))
3108 is_smb = is_ncp = 0;
/* The fs type string decides whether data conversion is needed. */
3109 err = copy_mount_stuff_to_kernel((const void *)type, &type_page);
3113 is_smb = !strcmp((char *)type_page, SMBFS_NAME);
3114 is_ncp = !strcmp((char *)type_page, NCPFS_NAME);
/* Neither smbfs nor ncpfs: no layout conversion, defer to sys_mount. */
3116 if(!is_smb && !is_ncp) {
3118 free_page(type_page);
3119 return sys_mount(dev_name, dir_name, type, new_flags,
3122 unsigned long dev_page, dir_page, data_page;
3124 err = copy_mount_stuff_to_kernel((const void *)dev_name,
3128 err = copy_mount_stuff_to_kernel((const void *)dir_name,
3132 err = copy_mount_stuff_to_kernel((const void *)AA(data),
/* Convert the binary mount data in place for the matching fs type. */
3137 do_ncp_super_data_conv((void *)data_page);
3139 do_smb_super_data_conv((void *)data_page);
3141 panic("The problem is here...");
3142 err = do_mount((char *)dev_page, (char *)dir_page,
3143 (char *)type_page, new_flags,
/* Unwind the kernel pages allocated above, in reverse order. */
3146 free_page(data_page);
3149 free_page(dir_page);
3152 free_page(dev_page);
3155 free_page(type_page);
3160 extern asmlinkage long sys_setreuid(uid_t ruid, uid_t euid);
3162 asmlinkage long sys32_setreuid(compat_uid_t ruid, compat_uid_t euid)
3166 sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid);
3167 seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid);
3168 return sys_setreuid(sruid, seuid);
3171 extern asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid);
3174 sys32_setresuid(compat_uid_t ruid, compat_uid_t euid,
3177 uid_t sruid, seuid, ssuid;
3179 sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid);
3180 seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid);
3181 ssuid = (suid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)suid);
3182 return sys_setresuid(sruid, seuid, ssuid);
3185 extern asmlinkage long sys_setregid(gid_t rgid, gid_t egid);
3188 sys32_setregid(compat_gid_t rgid, compat_gid_t egid)
3192 srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid);
3193 segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid);
3194 return sys_setregid(srgid, segid);
3197 extern asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid);
3200 sys32_setresgid(compat_gid_t rgid, compat_gid_t egid,
3203 gid_t srgid, segid, ssgid;
3205 srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid);
3206 segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid);
3207 ssgid = (sgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)sgid);
3208 return sys_setresgid(srgid, segid, ssgid);
3211 /* Stuff for NFS server syscalls... */
/*
 * 32-bit mirror images of the nfsctl argument/result structures, with
 * narrow compat_* types where the native structs use wider ones.
 * NOTE(review): gapped listing — several fields (and the closing braces)
 * of each struct are not visible here; confirm against the full source.
 */
3212 struct nfsctl_svc32 {
3217 struct nfsctl_client32 {
3218 s8 cl32_ident[NFSCLNT_IDMAX+1];
3220 struct in_addr cl32_addrlist[NFSCLNT_ADDRMAX];
3223 u8 cl32_fhkey[NFSCLNT_KEYMAX];
3226 struct nfsctl_export32 {
3227 s8 ex32_client[NFSCLNT_IDMAX+1];
3228 s8 ex32_path[NFS_MAXPATHLEN+1];
3229 compat_dev_t ex32_dev;
3230 compat_ino_t ex32_ino;
3232 compat_uid_t ex32_anon_uid;
3233 compat_gid_t ex32_anon_gid;
/* Top-level argument: version guard plus a union of the per-command
   payloads; the ca32_* macros below give direct access to the union
   members, mirroring the native struct's ca_* accessors. */
3236 struct nfsctl_arg32 {
3237 s32 ca32_version; /* safeguard */
3239 struct nfsctl_svc32 u32_svc;
3240 struct nfsctl_client32 u32_client;
3241 struct nfsctl_export32 u32_export;
3244 #define ca32_svc u.u32_svc
3245 #define ca32_client u.u32_client
3246 #define ca32_export u.u32_export
3247 #define ca32_debug u.u32_debug
3250 union nfsctl_res32 {
3251 struct knfs_fh cr32_getfh;
/*
 * Copy the NFSCTL_SVC payload from the 32-bit user struct into the
 * native kernel struct.  Errors from the individual __get_user calls
 * are OR-ed together and (presumably) returned — the declaration of
 * 'err' and the return are outside this gapped view.
 */
3256 nfs_svc32_trans(struct nfsctl_arg *karg, struct nfsctl_arg32 *arg32)
3260 err = __get_user(karg->ca_version, &arg32->ca32_version);
3261 err |= __get_user(karg->ca_svc.svc_port, &arg32->ca32_svc.svc32_port);
3262 err |= __get_user(karg->ca_svc.svc_nthreads,
3263 &arg32->ca32_svc.svc32_nthreads);
/*
 * Copy the NFSCTL_ADDCLIENT/DELCLIENT payload from the 32-bit user
 * struct into the native kernel struct: client ident string, address
 * list, and filehandle key material.  Errors are accumulated in 'err'
 * (declared outside this gapped view).
 */
3268 nfs_clnt32_trans(struct nfsctl_arg *karg, struct nfsctl_arg32 *arg32)
3272 err = __get_user(karg->ca_version, &arg32->ca32_version);
3273 err |= copy_from_user(&karg->ca_client.cl_ident[0],
3274 &arg32->ca32_client.cl32_ident[0],
3276 err |= __get_user(karg->ca_client.cl_naddr,
3277 &arg32->ca32_client.cl32_naddr);
/* Fixed-size address array: same element type on both sides, so a bulk
   copy of NFSCLNT_ADDRMAX entries suffices. */
3278 err |= copy_from_user(&karg->ca_client.cl_addrlist[0],
3279 &arg32->ca32_client.cl32_addrlist[0],
3280 (sizeof(struct in_addr) * NFSCLNT_ADDRMAX));
3281 err |= __get_user(karg->ca_client.cl_fhkeytype,
3282 &arg32->ca32_client.cl32_fhkeytype);
3283 err |= __get_user(karg->ca_client.cl_fhkeylen,
3284 &arg32->ca32_client.cl32_fhkeylen);
3285 err |= copy_from_user(&karg->ca_client.cl_fhkey[0],
3286 &arg32->ca32_client.cl32_fhkey[0],
/*
 * Copy the NFSCTL_EXPORT/UNEXPORT payload from the 32-bit user struct
 * into the native kernel struct: client name, path, device/inode and
 * anonymous uid/gid (the narrow compat_* fields widen implicitly via
 * __get_user assignment).  Errors are accumulated in 'err' (declared
 * outside this gapped view).
 */
3292 nfs_exp32_trans(struct nfsctl_arg *karg, struct nfsctl_arg32 *arg32)
3296 err = __get_user(karg->ca_version, &arg32->ca32_version);
3297 err |= copy_from_user(&karg->ca_export.ex_client[0],
3298 &arg32->ca32_export.ex32_client[0],
3300 err |= copy_from_user(&karg->ca_export.ex_path[0],
3301 &arg32->ca32_export.ex32_path[0],
3303 err |= __get_user(karg->ca_export.ex_dev,
3304 &arg32->ca32_export.ex32_dev);
3305 err |= __get_user(karg->ca_export.ex_ino,
3306 &arg32->ca32_export.ex32_ino);
3307 err |= __get_user(karg->ca_export.ex_flags,
3308 &arg32->ca32_export.ex32_flags);
3309 err |= __get_user(karg->ca_export.ex_anon_uid,
3310 &arg32->ca32_export.ex32_anon_uid);
3311 err |= __get_user(karg->ca_export.ex_anon_gid,
3312 &arg32->ca32_export.ex32_anon_gid);
/*
 * Copy the NFSCTL_GETFH-style result back out to the 32-bit user union:
 * the filehandle is a bulk copy, the debug word a __put_user.  Errors
 * accumulate in 'err' (declared/returned outside this gapped view).
 */
3317 nfs_getfh32_res_trans(union nfsctl_res *kres, union nfsctl_res32 *res32)
3321 err = copy_to_user(&res32->cr32_getfh,
3323 sizeof(res32->cr32_getfh));
3324 err |= __put_user(kres->cr_debug, &res32->cr32_debug);
3328 extern asmlinkage long sys_nfsservctl(int cmd, void *arg, void *resp);
/*
 * 32-bit nfsservctl: allocate native-layout argument/result buffers,
 * translate the 32-bit user argument per command via the nfs_*32_trans
 * helpers, call the native syscall, and translate the result back for
 * commands that produce one.
 * NOTE(review): gapped listing — allocation-failure handling, remaining
 * switch cases, set_fs bracketing (if any) and the kfree cleanup are
 * not visible here; confirm against the full source.
 */
3331 sys32_nfsservctl(int cmd, struct nfsctl_arg32 *arg32, union nfsctl_res32 *res32)
3333 struct nfsctl_arg *karg = NULL;
3334 union nfsctl_res *kres = NULL;
/* Native structs are too large for the stack; heap-allocate both. */
3338 karg = kmalloc(sizeof(*karg), GFP_USER);
3342 kres = kmalloc(sizeof(*kres), GFP_USER);
/* Per-command translation of the 32-bit argument into *karg. */
3350 err = nfs_svc32_trans(karg, arg32);
3352 case NFSCTL_ADDCLIENT:
3353 err = nfs_clnt32_trans(karg, arg32);
3355 case NFSCTL_DELCLIENT:
3356 err = nfs_clnt32_trans(karg, arg32);
3359 err = nfs_exp32_trans(karg, arg32);
3369 err = sys_nfsservctl(cmd, karg, kres);
/* Only NFSCTL_GETFS hands a result back to the 32-bit caller. */
3372 if(!err && cmd == NFSCTL_GETFS)
3373 err = nfs_getfh32_res_trans(kres, res32);
3383 /* Handle adjtimex compatibility. */
/*
 * 32-bit layout of struct timex as seen by IA-32 callers.
 * NOTE(review): the "struct timex32 {" opening line and the leading
 * 'modes'/'tick' fields are missing from this gapped listing; the
 * anonymous ":32" bitfields below appear to be reserved padding to match
 * the i386 struct size — confirm against the full source.
 */
3387 s32 offset, freq, maxerror, esterror;
3388 s32 status, constant, precision, tolerance;
3389 struct compat_timeval time;
3391 s32 ppsfreq, jitter, shift, stabil;
3392 s32 jitcnt, calcnt, errcnt, stbcnt;
3393 s32 :32; s32 :32; s32 :32; s32 :32;
3394 s32 :32; s32 :32; s32 :32; s32 :32;
3395 s32 :32; s32 :32; s32 :32; s32 :32;
3398 extern int do_adjtimex(struct timex *);
3401 sys32_adjtimex(struct timex32 *utp)
3406 memset(&txc, 0, sizeof(struct timex));
3408 if(get_user(txc.modes, &utp->modes) ||
3409 __get_user(txc.offset, &utp->offset) ||
3410 __get_user(txc.freq, &utp->freq) ||
3411 __get_user(txc.maxerror, &utp->maxerror) ||
3412 __get_user(txc.esterror, &utp->esterror) ||
3413 __get_user(txc.status, &utp->status) ||
3414 __get_user(txc.constant, &utp->constant) ||
3415 __get_user(txc.precision, &utp->precision) ||
3416 __get_user(txc.tolerance, &utp->tolerance) ||
3417 __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
3418 __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
3419 __get_user(txc.tick, &utp->tick) ||
3420 __get_user(txc.ppsfreq, &utp->ppsfreq) ||
3421 __get_user(txc.jitter, &utp->jitter) ||
3422 __get_user(txc.shift, &utp->shift) ||
3423 __get_user(txc.stabil, &utp->stabil) ||
3424 __get_user(txc.jitcnt, &utp->jitcnt) ||
3425 __get_user(txc.calcnt, &utp->calcnt) ||
3426 __get_user(txc.errcnt, &utp->errcnt) ||
3427 __get_user(txc.stbcnt, &utp->stbcnt))
3430 ret = do_adjtimex(&txc);
3432 if(put_user(txc.modes, &utp->modes) ||
3433 __put_user(txc.offset, &utp->offset) ||
3434 __put_user(txc.freq, &utp->freq) ||
3435 __put_user(txc.maxerror, &utp->maxerror) ||
3436 __put_user(txc.esterror, &utp->esterror) ||
3437 __put_user(txc.status, &utp->status) ||
3438 __put_user(txc.constant, &utp->constant) ||
3439 __put_user(txc.precision, &utp->precision) ||
3440 __put_user(txc.tolerance, &utp->tolerance) ||
3441 __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
3442 __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
3443 __put_user(txc.tick, &utp->tick) ||
3444 __put_user(txc.ppsfreq, &utp->ppsfreq) ||
3445 __put_user(txc.jitter, &utp->jitter) ||
3446 __put_user(txc.shift, &utp->shift) ||
3447 __put_user(txc.stabil, &utp->stabil) ||
3448 __put_user(txc.jitcnt, &utp->jitcnt) ||
3449 __put_user(txc.calcnt, &utp->calcnt) ||
3450 __put_user(txc.errcnt, &utp->errcnt) ||
3451 __put_user(txc.stbcnt, &utp->stbcnt))