/*
 * User address space access functions.
 * The non-inlined parts of asm-i386/uaccess.h are here.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 */
#include <linux/config.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <asm/uaccess.h>
static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	if (n >= 64 && ((a1 ^ a2) & movsl_mask.mask))
		return 0;
#endif
	return 1;
}
#define movsl_is_ok(a1,a2,n) \
	__movsl_is_ok((unsigned long)(a1),(unsigned long)(a2),(n))
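/*
 * Worked example (illustrative sketch; the mask value and addresses below
 * are hypothetical): what the alignment test above computes.  movsl_mask.mask
 * is set at boot for CPUs on which a mutually misaligned "rep movsl" is slow.
 */
static int example_movsl_choice(void)
{
	unsigned long dst = 0x1000, src = 0x2003, n = 128;
	unsigned long mask = 7;		/* hypothetical movsl_mask.mask value */

	/*
	 * (dst ^ src) & mask == 3, i.e. the two buffers disagree in their low
	 * address bits, and n >= 64, so __movsl_is_ok() would return 0 and the
	 * unrolled __copy_user_intel() path would be used instead of "rep movsl".
	 */
	return !(n >= 64 && ((dst ^ src) & mask));
}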
/*
 * Copy a null terminated string from userspace.
 */
#define __do_strncpy_from_user(dst,src,count,res)			\
	int __d0, __d1, __d2;						\
	__asm__ __volatile__(						\
		"	testb %%al,%%al\n"				\
		".section .fixup,\"ax\"\n"				\
		".section __ex_table,\"a\"\n"				\
		: "=d"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1),	\
		: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
/**
 * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
long
__strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res;
	__do_strncpy_from_user(dst, src, count, res);
	return res;
}
/**
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
long
strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res = -EFAULT;
	if (access_ok(VERIFY_READ, src, 1))
		__do_strncpy_from_user(dst, src, count, res);
	return res;
}
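/*
 * Illustrative usage sketch: a hypothetical helper that pulls a user-supplied
 * name into a fixed-size kernel buffer with strncpy_from_user() as documented
 * above.  The function name and EXAMPLE_NAME_MAX are made up for illustration;
 * kbuf is assumed to be at least EXAMPLE_NAME_MAX bytes long.
 */
#define EXAMPLE_NAME_MAX 32

static long example_get_name(char *kbuf, const char __user *uname)
{
	long len = strncpy_from_user(kbuf, uname, EXAMPLE_NAME_MAX);

	if (len < 0)
		return len;		/* -EFAULT: fault while reading userspace */
	if (len == EXAMPLE_NAME_MAX)
		return -EINVAL;		/* no NUL found within EXAMPLE_NAME_MAX bytes */
	return len;			/* length excluding the trailing NUL */
}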
#define __do_clear_user(addr,size)					\
	__asm__ __volatile__(						\
		".section .fixup,\"ax\"\n"				\
		"3: lea 0(%2,%0,4),%0\n"				\
		".section __ex_table,\"a\"\n"				\
		: "=&c"(size), "=&D" (__d0)				\
		: "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0));	\
/**
 * clear_user: - Zero a block of memory in user space.
 * @to:   Destination address, in user space.
 * @n:    Number of bytes to zero.
 *
 * Zero a block of memory in user space.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long
clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		__do_clear_user(to, n);
	return n;
}
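/*
 * Illustrative usage sketch: zeroing a user-supplied buffer and turning a
 * partial failure into -EFAULT.  The function name is made up for
 * illustration.
 */
static int example_zero_user_buffer(void __user *ubuf, unsigned long len)
{
	/* clear_user() returns the number of bytes that could NOT be cleared */
	if (clear_user(ubuf, len) != 0)
		return -EFAULT;
	return 0;
}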
/**
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to:   Destination address, in user space.
 * @n:    Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long
__clear_user(void __user *to, unsigned long n)
{
	__do_clear_user(to, n);
	return n;
}
/**
 * strnlen_user: - Get the size of a string in user space.
 * @s: The string to measure.
 * @n: The maximum valid length
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 */
long strnlen_user(const char __user *s, long n)
{
	unsigned long mask = -__addr_ok(s);
	unsigned long res, tmp;

	__asm__ __volatile__(
		".section .fixup,\"ax\"\n"
		"2: xorl %%eax,%%eax\n"
		".section __ex_table,\"a\"\n"
		:"=r" (n), "=D" (s), "=a" (res), "=c" (tmp)
		:"0" (n), "1" (s), "2" (0), "3" (mask)
		:"cc");
	return res & mask;
}
#ifdef CONFIG_X86_INTEL_USERCOPY
static unsigned long
__copy_user_intel(void *to, const void *from, unsigned long size)
{
	int d0, d1;

	__asm__ __volatile__(
226 "0: movl 32(%4), %%eax\n"
229 " movl 64(%4), %%eax\n"
231 "1: movl 0(%4), %%eax\n"
232 " movl 4(%4), %%edx\n"
233 "2: movl %%eax, 0(%3)\n"
234 "21: movl %%edx, 4(%3)\n"
235 " movl 8(%4), %%eax\n"
236 " movl 12(%4),%%edx\n"
237 "3: movl %%eax, 8(%3)\n"
238 "31: movl %%edx, 12(%3)\n"
239 " movl 16(%4), %%eax\n"
240 " movl 20(%4), %%edx\n"
241 "4: movl %%eax, 16(%3)\n"
242 "41: movl %%edx, 20(%3)\n"
243 " movl 24(%4), %%eax\n"
244 " movl 28(%4), %%edx\n"
245 "10: movl %%eax, 24(%3)\n"
246 "51: movl %%edx, 28(%3)\n"
247 " movl 32(%4), %%eax\n"
248 " movl 36(%4), %%edx\n"
249 "11: movl %%eax, 32(%3)\n"
250 "61: movl %%edx, 36(%3)\n"
251 " movl 40(%4), %%eax\n"
252 " movl 44(%4), %%edx\n"
253 "12: movl %%eax, 40(%3)\n"
254 "71: movl %%edx, 44(%3)\n"
255 " movl 48(%4), %%eax\n"
256 " movl 52(%4), %%edx\n"
257 "13: movl %%eax, 48(%3)\n"
258 "81: movl %%edx, 52(%3)\n"
259 " movl 56(%4), %%eax\n"
260 " movl 60(%4), %%edx\n"
261 "14: movl %%eax, 56(%3)\n"
262 "91: movl %%edx, 60(%3)\n"
268 "5: movl %0, %%eax\n"
276 ".section .fixup,\"ax\"\n"
277 "9: lea 0(%%eax,%0,4),%0\n"
280 ".section __ex_table,\"a\"\n"
301 : "=&c"(size), "=&D" (d0), "=&S" (d1)
302 : "1"(to), "2"(from), "0"(size)
303 : "eax", "edx", "memory");
static unsigned long
__copy_user_zeroing_intel(void *to, const void *from, unsigned long size)
{
	int d0, d1;

	__asm__ __volatile__(
313 "0: movl 32(%4), %%eax\n"
316 "1: movl 64(%4), %%eax\n"
318 "2: movl 0(%4), %%eax\n"
319 "21: movl 4(%4), %%edx\n"
320 " movl %%eax, 0(%3)\n"
321 " movl %%edx, 4(%3)\n"
322 "3: movl 8(%4), %%eax\n"
323 "31: movl 12(%4),%%edx\n"
324 " movl %%eax, 8(%3)\n"
325 " movl %%edx, 12(%3)\n"
326 "4: movl 16(%4), %%eax\n"
327 "41: movl 20(%4), %%edx\n"
328 " movl %%eax, 16(%3)\n"
329 " movl %%edx, 20(%3)\n"
330 "10: movl 24(%4), %%eax\n"
331 "51: movl 28(%4), %%edx\n"
332 " movl %%eax, 24(%3)\n"
333 " movl %%edx, 28(%3)\n"
334 "11: movl 32(%4), %%eax\n"
335 "61: movl 36(%4), %%edx\n"
336 " movl %%eax, 32(%3)\n"
337 " movl %%edx, 36(%3)\n"
338 "12: movl 40(%4), %%eax\n"
339 "71: movl 44(%4), %%edx\n"
340 " movl %%eax, 40(%3)\n"
341 " movl %%edx, 44(%3)\n"
342 "13: movl 48(%4), %%eax\n"
343 "81: movl 52(%4), %%edx\n"
344 " movl %%eax, 48(%3)\n"
345 " movl %%edx, 52(%3)\n"
346 "14: movl 56(%4), %%eax\n"
347 "91: movl 60(%4), %%edx\n"
348 " movl %%eax, 56(%3)\n"
349 " movl %%edx, 60(%3)\n"
355 "5: movl %0, %%eax\n"
363 ".section .fixup,\"ax\"\n"
364 "9: lea 0(%%eax,%0,4),%0\n"
367 " xorl %%eax,%%eax\n"
373 ".section __ex_table,\"a\"\n"
396 : "=&c"(size), "=&D" (d0), "=&S" (d1)
397 : "1"(to), "2"(from), "0"(size)
398 : "eax", "edx", "memory");
#else
/*
 * Leave these declared but undefined.  There should not be any
 * references to them.
 */
unsigned long
__copy_user_zeroing_intel(void *to, const void *from, unsigned long size);
unsigned long
__copy_user_intel(void *to, const void *from, unsigned long size);
#endif /* CONFIG_X86_INTEL_USERCOPY */
/* Generic arbitrary sized copy. */
#define __copy_user(to,from,size)					\
	int __d0, __d1, __d2;						\
	__asm__ __volatile__(						\
		".section .fixup,\"ax\"\n"				\
		"3: lea 0(%3,%0,4),%0\n"				\
		".section __ex_table,\"a\"\n"				\
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)	\
		: "3"(size), "0"(size), "1"(to), "2"(from)		\
#define __copy_user_zeroing(to,from,size)				\
	int __d0, __d1, __d2;						\
	__asm__ __volatile__(						\
		".section .fixup,\"ax\"\n"				\
		"3: lea 0(%3,%0,4),%0\n"				\
		" xorl %%eax,%%eax\n"					\
		".section __ex_table,\"a\"\n"				\
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)	\
		: "3"(size), "0"(size), "1"(to), "2"(from)		\
unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned long n)
{
#ifndef CONFIG_X86_WP_WORKS_OK
	if (unlikely(boot_cpu_data.wp_works_ok == 0) &&
			((unsigned long )to) < TASK_SIZE) {
		/*
		 * CPU does not honor the WP bit when writing
		 * from supervisory mode, and due to preemption or SMP,
		 * the page tables can change at any time.
		 * Do it manually.  Manfred <manfred@colorfullife.com>
		 */
		unsigned long offset = ((unsigned long)to)%PAGE_SIZE;
		unsigned long len = PAGE_SIZE - offset;
			down_read(&current->mm->mmap_sem);
			retval = get_user_pages(current, current->mm,
					(unsigned long )to, 1, 1, 0, &pg, NULL);
			if (retval == -ENOMEM && current->pid == 1) {
				up_read(&current->mm->mmap_sem);
				blk_congestion_wait(WRITE, HZ/50);
			maddr = kmap_atomic(pg, KM_USER0);
			memcpy(maddr + offset, from, len);
			kunmap_atomic(maddr, KM_USER0);
			set_page_dirty_lock(pg);
			up_read(&current->mm->mmap_sem);
	if (movsl_is_ok(to, from, n))
		__copy_user((void *)to, from, n);
	else
		n = __copy_user_intel((void *)to, from, n);
	return n;
}
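/*
 * Condensed sketch of the manual-write fallback used above when the CPU
 * ignores the WP bit in supervisor mode: pin the destination page, map it
 * through the kernel address space and write via memcpy().  The function
 * name is made up for illustration; the caller is assumed to ensure that
 * offset + len does not cross a page boundary, and <linux/mm.h> is assumed
 * to be included for get_user_pages().
 */
static int example_write_one_user_page(void __user *to, const void *from,
					unsigned long len)
{
	unsigned long offset = ((unsigned long)to) % PAGE_SIZE;
	struct page *pg;
	char *maddr;
	int retval;

	down_read(&current->mm->mmap_sem);
	retval = get_user_pages(current, current->mm, (unsigned long)to,
				1, 1, 0, &pg, NULL);
	if (retval != 1) {
		up_read(&current->mm->mmap_sem);
		return -EFAULT;
	}

	maddr = kmap_atomic(pg, KM_USER0);
	memcpy(maddr + offset, from, len);
	kunmap_atomic(maddr, KM_USER0);
	set_page_dirty_lock(pg);
	put_page(pg);
	up_read(&current->mm->mmap_sem);

	return 0;
}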
unsigned long __copy_from_user_ll(void *to, const void __user *from, unsigned long n)
{
	if (movsl_is_ok(to, from, n))
		__copy_user_zeroing(to, (const void *) from, n);
	else
		n = __copy_user_zeroing_intel(to, (const void *) from, n);
	return n;
}
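/*
 * Illustrative usage sketch: the usual calling pattern for the
 * copy_from_user() wrapper that lands in __copy_from_user_ll().  A non-zero
 * return value is the number of bytes left uncopied (the zeroing variants
 * above clear the corresponding destination bytes), which callers
 * conventionally turn into -EFAULT.  The structure and function names are
 * made up for illustration.
 */
struct example_args {
	int x;
	int y;
};

static int example_fetch_args(struct example_args *kargs,
				const void __user *uargs)
{
	if (copy_from_user(kargs, uargs, sizeof(*kargs)) != 0)
		return -EFAULT;
	return 0;
}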