2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (C) 1996, 1997, 1998, 1999, 2000 by Ralf Baechle
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
10 #define _ASM_UACCESS_H
12 #include <linux/errno.h>
13 #include <linux/thread_info.h>
16 * The fs value determines whether argument validity checking should be
17 * performed or not. If get_fs() == USER_DS, checking is performed, with
18 * get_fs() == KERNEL_DS, checking is bypassed.
20 * For historical reasons, these macros are grossly misnamed.
/*
 * 32-bit (CONFIG_MIPS32) address-space parameters.  KERNEL_DS == 0
 * clears every bit of the check mask so __access_ok() accepts any
 * address; USER_DS == -1 keeps all mask bits set so only addresses
 * with the sign bit clear (user segment) pass.
 * NOTE(review): the embedded original line numbers are non-contiguous;
 * lines (including the #ifdef selecting this section) are missing from
 * this excerpt.
 */
23 #define __UA_ADDR ".word"
25 #define __UA_ADDU "addu"
27 #define KERNEL_DS ((mm_segment_t) { (unsigned long) 0L })
28 #define USER_DS ((mm_segment_t) { (unsigned long) -1L })
31 #define VERIFY_WRITE 1
/*
 * 32-bit range check: the range [addr, addr+size) is OK when, after
 * ANDing with the current segment mask, no sign bit remains set —
 * i.e. start, end and (runtime-checked) size are all non-negative —
 * or when the mask itself is 0 (KERNEL_DS), which accepts everything.
 * Fix: the first use of "addr" was unparenthesized ("(addr | ...)"),
 * so a non-primary argument expression could parse incorrectly; the
 * 64-bit variant of this macro already writes ((addr) | ...).
 */
33 #define __access_ok(addr, size, mask) \
34 (((signed long)((mask)&((addr) | ((addr) + (size)) | __ua_size(size)))) >= 0)
/* Mask comes from the current thread's segment limit (get_fs()). */
36 #define __access_mask ((long)(get_fs().seg))
/*
 * access_ok(): non-zero if the user range may be accessed under the
 * current segment setting; the "type" argument is accepted but unused
 * in the visible expansion.
 */
38 #define access_ok(type, addr, size) \
39 __access_ok(((unsigned long)(addr)),(size),__access_mask)
41 #endif /* CONFIG_MIPS32 */
/*
 * 64-bit (CONFIG_MIPS64) address-space parameters: doubleword address
 * directive and daddu for address arithmetic.  USER_DS == -TASK_SIZE
 * pairs with the "== 0" form of __access_ok() below.
 * NOTE(review): the #ifdef selecting this section is on lines missing
 * from this excerpt.
 */
44 #define __UA_ADDR ".dword"
46 #define __UA_ADDU "daddu"
48 #define KERNEL_DS ((mm_segment_t) { 0UL })
49 #define USER_DS ((mm_segment_t) { -TASK_SIZE })
52 #define VERIFY_WRITE 1
/*
 * 64-bit range check: all bits selected by the segment mask must be
 * clear in the start address, the end address and the (runtime-
 * checked) size for the access to be allowed.
 */
54 #define __access_ok(addr, size, mask) \
55 (((signed long)((mask) & ((addr) | ((addr) + (size)) | __ua_size(size)))) == 0)
57 #define __access_mask get_fs().seg
/* Same contract as the 32-bit access_ok(); "type" is unused here. */
59 #define access_ok(type, addr, size) \
60 __access_ok((unsigned long)(addr), (size), __access_mask)
62 #endif /* CONFIG_MIPS64 */
/*
 * Segment accessors: the "fs" limit lives in the per-thread
 * thread_info, so it is saved/restored naturally across context
 * switches.  get_ds() always yields the kernel segment.
 */
64 #define get_ds() (KERNEL_DS)
65 #define get_fs() (current_thread_info()->addr_limit)
66 #define set_fs(x) (current_thread_info()->addr_limit = (x))
68 #define segment_eq(a,b) ((a).seg == (b).seg)
72 * Is an address valid? This does a straightforward calculation rather
76 * - "addr" doesn't have any high-bits set
77 * - AND "size" doesn't have any high-bits set
78 * - AND "addr+size" doesn't have any high-bits set
79 * - OR we are in kernel mode.
81 * __ua_size() is a trick to avoid runtime checking of positive constant
82 * sizes; for those we already know at compile time that the size is ok.
/*
 * __ua_size(): collapses to 0 for sizes the compiler can prove are a
 * positive constant (nothing to check at run time); otherwise the size
 * itself participates in the mask test above.
 */
84 #define __ua_size(size) \
85 ((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))
/*
 * verify_area(): legacy wrapper around access_ok(); returns 0 when the
 * range is accessible, -EFAULT otherwise.
 * NOTE(review): the function's braces are on lines missing from this
 * excerpt (embedded numbering jumps 87 -> 89).
 */
87 static inline int verify_area(int type, const void * addr, unsigned long size)
89 return access_ok(type, addr, size) ? 0 : -EFAULT;
93 * Uh, these should become the main single-value transfer routines ...
94 * They automatically use the right size if we just have the right
97 * As MIPS uses the same address space for kernel and user data, we
98 * can just do these as direct assignments.
101 * (a) re-use the arguments for side effects (sizeof is ok)
102 * (b) require any knowledge of processes at this stage
/*
 * put_user(): store x through user pointer ptr with access checking;
 * the cast converts the value to the pointee type before the store
 * (an rvalue use, so the cast is safe here).
 */
104 #define put_user(x,ptr) \
105 __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
/*
 * get_user(): fetch a value through user pointer ptr into x, with
 * access checking; evaluates to 0 on success, -EFAULT on fault.
 * Fix: x must be passed as a plain lvalue.  __get_user_check() assigns
 * to its first argument ("x = (__typeof__(*(ptr))) __gu_val;"), and
 * the previous (__typeof__(*(ptr)))(x) wrapper produced a cast
 * expression — not an lvalue — breaking every expansion of get_user().
 */
106 #define get_user(x,ptr) \
107 __get_user_check((x),(ptr),sizeof(*(ptr)))
110 * The "__xxx" versions do not do address space checking, useful when
111 * doing multiple accesses to the same area (the user has to do the
112 * checks by hand with "access_ok()")
/*
 * __put_user(): like put_user() but without the access_ok() check —
 * the caller must have validated the range already.
 */
114 #define __put_user(x,ptr) \
115 __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
/*
 * __get_user(): like get_user() but without the access_ok() check —
 * the caller must have validated the range already.
 * Fix: pass x as a plain lvalue.  __get_user_nocheck() assigns to its
 * first argument ("x = (__typeof__(*(ptr))) __gu_val;"), and the
 * previous (__typeof__(*(ptr)))(x) wrapper made it a cast expression,
 * which is not assignable.
 */
116 #define __get_user(x,ptr) \
117 __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
/*
 * Oversized dummy type used with the "o" (offsettable memory) asm
 * constraints below, so the compiler treats the user pointer as
 * referring to a large memory object rather than a single word.
 */
119 struct __large_struct { unsigned long buf[100]; };
120 #define __m(x) (*(struct __large_struct *)(x))
123 * Yuck. We need two variants, one for 64bit operation and one
124 * for 32 bit mode and old iron.
/*
 * 8-byte user load: native "ld" on 64-bit, a two-lw sequence on 32-bit.
 * NOTE(review): the #ifdef choosing between the two definitions is on
 * lines missing from this excerpt.
 */
127 #define __GET_USER_DW __get_user_asm("ld")
129 #define __GET_USER_DW __get_user_asm_ll32
/*
 * __get_user_nocheck(): unchecked user fetch.  Dispatches on
 * sizeof(*(ptr)) to the right load insn, stores the result into x and
 * yields __gu_err (0 or -EFAULT) as the expression value.  The empty
 * asm statements keep the compiler from warning about / caching
 * __gu_val and __gu_err before the real asm writes them.
 * NOTE(review): several lines of this macro (its statement-expression
 * braces, variable declarations and the "switch (size) {" line) are
 * missing from this excerpt — numbering jumps 132->135, 135->137,
 * 139->141, 146->149.  Also note it spells "__typeof" here where
 * __get_user_check uses "__typeof__".
 */
132 #define __get_user_nocheck(x,ptr,size) \
135 __typeof(*(ptr)) __gu_val; \
137 __asm__("":"=r" (__gu_val)); \
138 __gu_addr = (long) (ptr); \
139 __asm__("":"=r" (__gu_err)); \
141 case 1: __get_user_asm("lb"); break; \
142 case 2: __get_user_asm("lh"); break; \
143 case 4: __get_user_asm("lw"); break; \
144 case 8: __GET_USER_DW; break; \
145 default: __get_user_unknown(); break; \
146 } x = (__typeof__(*(ptr))) __gu_val; __gu_err; \
/*
 * __get_user_check(): like __get_user_nocheck() but the load is only
 * attempted after __access_ok() passes for the address range.
 * Presumably __gu_err is pre-set to -EFAULT on a missing line so a
 * failed check reports a fault — TODO confirm against the full source.
 * NOTE(review): statement-expression braces, declarations and the
 * "switch (size) {" line are missing from this excerpt (numbering
 * gaps 149->152, 152->154, 157->159, 163->165).
 */
149 #define __get_user_check(x,ptr,size) \
152 __typeof__(*(ptr)) __gu_val; \
154 __asm__("":"=r" (__gu_val)); \
155 __gu_addr = (long) (ptr); \
156 __asm__("":"=r" (__gu_err)); \
157 if (__access_ok(__gu_addr,size,__access_mask)) { \
159 case 1: __get_user_asm("lb"); break; \
160 case 2: __get_user_asm("lh"); break; \
161 case 4: __get_user_asm("lw"); break; \
162 case 8: __GET_USER_DW; break; \
163 default: __get_user_unknown(); break; \
165 } x = (__typeof__(*(ptr))) __gu_val; __gu_err; \
/*
 * __get_user_asm(insn): emit a single user load ("lb"/"lh"/"lw"/"ld")
 * with exception-table fixup: label 1 is the faulting instruction, the
 * __ex_table entry redirects a fault there to the .fixup stub at
 * label 3, which loads -EFAULT into __gu_err.
 * NOTE(review): lines of the asm template (zeroing __gu_err on
 * success, .previous directives, closing of the template) are missing
 * from this excerpt.
 */
168 #define __get_user_asm(insn) \
170 __asm__ __volatile__( \
171 "1:\t" insn "\t%1,%2\n\t" \
174 ".section\t.fixup,\"ax\"\n" \
175 "3:\tli\t%0,%3\n\t" \
179 ".section\t__ex_table,\"a\"\n\t" \
180 __UA_ADDR "\t1b,3b\n\t" \
182 :"=r" (__gu_err), "=r" (__gu_val) \
183 :"o" (__m(__gu_addr)), "i" (-EFAULT)); \
187 * Get a long long 64 using 32 bit registers.
/*
 * Two lw's load the 64-bit value into a register pair (%1 and its
 * companion %D1); both loads get __ex_table entries pointing at the
 * shared fixup (label 4) which sets __gu_err = -EFAULT.  "=&r" marks
 * __gu_val early-clobber so it cannot alias the address operands.
 * NOTE(review): the first lw (label 1), success path and closing of
 * the template are on lines missing from this excerpt.
 */
189 #define __get_user_asm_ll32 \
191 __asm__ __volatile__( \
193 "2:\tlw\t%D1,%3\n\t" \
195 "3:\t.section\t.fixup,\"ax\"\n" \
196 "4:\tli\t%0,%4\n\t" \
201 ".section\t__ex_table,\"a\"\n\t" \
202 __UA_ADDR "\t1b,4b\n\t" \
203 __UA_ADDR "\t2b,4b\n\t" \
205 :"=r" (__gu_err), "=&r" (__gu_val) \
206 :"o" (__m(__gu_addr)), "o" (__m(__gu_addr + 4)), \
/* Presumably never defined, so a get_user() on an unsupported size
 * fails at link time rather than silently — TODO confirm. */
210 extern void __get_user_unknown(void);
213 * Yuck. We need two variants, one for 64bit operation and one
214 * for 32 bit mode and old iron.
/*
 * 8-byte user store: native "sd" on 64-bit, a two-sw sequence on
 * 32-bit.  NOTE(review): the selecting #ifdef is on lines missing
 * from this excerpt.
 */
217 #define __PUT_USER_DW __put_user_asm("sd")
219 #define __PUT_USER_DW __put_user_asm_ll32
/*
 * __put_user_nocheck(): unchecked user store; dispatches on
 * sizeof(*(ptr)) to the matching store insn.
 * NOTE(review): statement-expression braces, the assignment of x into
 * __pu_val, the "switch (size) {" line and the closing/result lines
 * are missing from this excerpt (numbering gaps 222->225, 225->228,
 * 229->231, 235->240).
 */
222 #define __put_user_nocheck(x,ptr,size) \
225 __typeof__(*(ptr)) __pu_val; \
228 __pu_addr = (long) (ptr); \
229 __asm__("":"=r" (__pu_err)); \
231 case 1: __put_user_asm("sb"); break; \
232 case 2: __put_user_asm("sh"); break; \
233 case 4: __put_user_asm("sw"); break; \
234 case 8: __PUT_USER_DW; break; \
235 default: __put_user_unknown(); break; \
/*
 * __put_user_check(): like __put_user_nocheck() but the store is only
 * attempted after __access_ok() passes for the address range.
 * NOTE(review): statement-expression braces, declarations, the
 * "switch (size) {" line and the closing/result lines are missing
 * from this excerpt.
 */
240 #define __put_user_check(x,ptr,size) \
243 __typeof__(*(ptr)) __pu_val; \
246 __pu_addr = (long) (ptr); \
247 __asm__("":"=r" (__pu_err)); \
248 if (__access_ok(__pu_addr,size,__access_mask)) { \
250 case 1: __put_user_asm("sb"); break; \
251 case 2: __put_user_asm("sh"); break; \
252 case 4: __put_user_asm("sw"); break; \
253 case 8: __PUT_USER_DW; break; \
254 default: __put_user_unknown(); break; \
/*
 * __put_user_asm(insn): emit one user store with fixup: a fault at
 * label 1 jumps via the __ex_table entry to the .fixup stub at
 * label 3, which loads -EFAULT into __pu_err.  The "Jr" constraint
 * lets the value be a register or immediate zero ($0) via %z1.
 * NOTE(review): the output-operand line and .previous/closing lines
 * are missing from this excerpt.
 */
260 #define __put_user_asm(insn) \
262 __asm__ __volatile__( \
263 "1:\t" insn "\t%z1, %2\t\t\t# __put_user_asm\n\t" \
266 ".section\t.fixup,\"ax\"\n" \
267 "3:\tli\t%0,%3\n\t" \
270 ".section\t__ex_table,\"a\"\n\t" \
271 __UA_ADDR "\t1b,3b\n\t" \
274 :"Jr" (__pu_val), "o" (__m(__pu_addr)), "i" (-EFAULT)); \
/*
 * 64-bit store with 32-bit registers: two sw's write the register
 * pair (%1 and %D1) to consecutive words; both instructions get
 * __ex_table entries pointing at the shared fixup (label 4) that sets
 * __pu_err = -EFAULT.
 * NOTE(review): the output-operand line and .previous/closing lines
 * are missing from this excerpt.
 */
277 #define __put_user_asm_ll32 \
279 __asm__ __volatile__( \
280 "1:\tsw\t%1, %2\t\t\t# __put_user_asm_ll32\n\t" \
281 "2:\tsw\t%D1, %3\n" \
284 ".section\t.fixup,\"ax\"\n" \
285 "4:\tli\t%0,%4\n\t" \
288 ".section\t__ex_table,\"a\"\n\t" \
289 __UA_ADDR "\t1b,4b\n\t" \
290 __UA_ADDR "\t2b,4b\n\t" \
293 :"r" (__pu_val), "o" (__m(__pu_addr)), \
294 "o" (__m(__pu_addr + 4)), "i" (-EFAULT)); \
/* Presumably never defined, so a put_user() on an unsupported size
 * fails at link time rather than silently — TODO confirm. */
297 extern void __put_user_unknown(void);
300 * We're generating jump to subroutines which will be outside the range of
/*
 * Module variant: load the destination address into $1 and jump
 * through it (reaches targets outside jal's direct range); the plain
 * "jal" variant is for in-range built-in code.
 * NOTE(review): the #ifdef/#else selecting between the two and the
 * jalr line are missing from this excerpt.
 */
304 #define __MODULE_JAL(destination) \
306 __UA_LA "\t$1, " #destination "\n\t" \
310 #define __MODULE_JAL(destination) \
311 "jal\t" #destination "\n\t"
/* Assembly copy routine shared by the copy_{to,from}_user family;
 * presumably returns the number of bytes NOT copied — TODO confirm
 * against its asm implementation. */
314 extern size_t __copy_user(void *__to, const void *__from, size_t __n);
/*
 * Marshal arguments into the MIPS argument registers ($4/$5/$6 = a0/
 * a1/a2) and call __copy_user; the "+r" constraints reflect that the
 * callee consumes and updates them, and the clobber list names the
 * caller-saved registers the asm routine may touch (including $31/ra).
 * NOTE(review): the statement-expression braces, the assignments to
 * __cu_to_r/__cu_len_r and the result expression are on lines missing
 * from this excerpt.
 */
316 #define __invoke_copy_to_user(to,from,n) \
318 register void *__cu_to_r __asm__ ("$4"); \
319 register const void *__cu_from_r __asm__ ("$5"); \
320 register long __cu_len_r __asm__ ("$6"); \
323 __cu_from_r = (from); \
325 __asm__ __volatile__( \
326 __MODULE_JAL(__copy_user) \
327 : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
329 : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \
/*
 * __copy_to_user(): unchecked copy of n bytes to user space; evaluates
 * the arguments once into locals and returns __invoke_copy_to_user()'s
 * result (bytes remaining).  Caller must have done access_ok().
 * NOTE(review): braces, the __cu_to/__cu_len declarations and
 * assignments are on lines missing from this excerpt.
 */
334 #define __copy_to_user(to,from,n) \
337 const void *__cu_from; \
341 __cu_from = (from); \
343 __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
/*
 * copy_to_user(): checked copy to user space — the copy is invoked
 * only when access_ok(VERIFY_WRITE, ...) passes; otherwise __cu_len
 * is presumably returned unchanged (all bytes "not copied") — the
 * result lines are missing from this excerpt.
 */
347 #define copy_to_user(to,from,n) \
350 const void *__cu_from; \
354 __cu_from = (from); \
356 if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) \
357 __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
/*
 * Like __invoke_copy_to_user() but for reads from user space.  The
 * extra .set noreorder sequence computes an address with __UA_ADDU
 * (presumably patching the fault-fixup return path — the surrounding
 * template lines are missing, TODO confirm) and the result is moved
 * out of $6.
 * NOTE(review): statement-expression braces, the __cu_to_r/__cu_len_r
 * assignments and intervening template lines are missing from this
 * excerpt.
 */
362 #define __invoke_copy_from_user(to,from,n) \
364 register void *__cu_to_r __asm__ ("$4"); \
365 register const void *__cu_from_r __asm__ ("$5"); \
366 register long __cu_len_r __asm__ ("$6"); \
369 __cu_from_r = (from); \
371 __asm__ __volatile__( \
372 ".set\tnoreorder\n\t" \
373 __MODULE_JAL(__copy_user) \
375 __UA_ADDU "\t$1, %1, %2\n\t" \
377 ".set\treorder\n\t" \
378 "move\t%0, $6" /* XXX */ \
379 : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
381 : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \
/*
 * __copy_from_user(): unchecked copy of n bytes from user space;
 * returns the number of bytes remaining.  Caller must have done
 * access_ok().
 * NOTE(review): braces, declarations and trailing argument lines are
 * missing from this excerpt.
 */
386 #define __copy_from_user(to,from,n) \
389 const void *__cu_from; \
393 __cu_from = (from); \
395 __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
/*
 * copy_from_user(): checked read from user space — invoked only when
 * access_ok(VERIFY_READ, ...) passes on the SOURCE pointer; otherwise
 * __cu_len is presumably returned unchanged — the result lines are
 * missing from this excerpt.
 */
400 #define copy_from_user(to,from,n) \
403 const void *__cu_from; \
407 __cu_from = (from); \
409 if (access_ok(VERIFY_READ, __cu_from, __cu_len)) \
410 __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
/*
 * __clear_user(): zero "size" bytes at user address "addr" by calling
 * the asm routine __bzero; no access_ok() check (see clear_user()
 * below for the checked variant).  Presumably returns the number of
 * bytes not cleared — TODO confirm against __bzero.
 * NOTE(review): braces, the result variable and the asm template /
 * output-operand lines are missing from this excerpt; the visible
 * clobbers cover the argument registers and ra.
 */
415 static inline __kernel_size_t
416 __clear_user(void *addr, __kernel_size_t size)
420 __asm__ __volatile__(
424 __MODULE_JAL(__bzero)
427 : "r" (addr), "r" (size)
428 : "$4", "$5", "$6", "$8", "$9", "$31");
/*
 * clear_user(): checked zeroing of n user bytes; skips the call for a
 * zero size or when access_ok(VERIFY_WRITE, ...) fails, in which case
 * __cl_size (the full count) is presumably the result — the closing
 * lines of the macro are missing from this excerpt.
 */
433 #define clear_user(addr,n) \
435 void * __cl_addr = (addr); \
436 unsigned long __cl_size = (n); \
437 if (__cl_size && access_ok(VERIFY_WRITE, \
438 ((unsigned long)(__cl_addr)), __cl_size)) \
439 __cl_size = __clear_user(__cl_addr, __cl_size); \
444 * Returns: -EFAULT if exception before terminator, N if the entire
445 * buffer filled, else strlen.
/*
 * Unchecked variant: calls the "nocheck" asm helper directly; "memory"
 * clobber because the routine writes through __to.
 * NOTE(review): the return type / opening lines, result variable and
 * asm template lines are missing from this excerpt.
 */
448 __strncpy_from_user(char *__to, const char *__from, long __len)
452 __asm__ __volatile__(
456 __MODULE_JAL(__strncpy_from_user_nocheck_asm)
459 : "r" (__to), "r" (__from), "r" (__len)
460 : "$2", "$3", "$4", "$5", "$6", "$8", "$31", "memory");
/*
 * Checked variant: same contract as __strncpy_from_user() but calls
 * the asm helper that presumably performs its own access checking —
 * TODO confirm.  NOTE(review): opening lines, result variable and asm
 * template lines are missing from this excerpt.
 */
466 strncpy_from_user(char *__to, const char *__from, long __len)
470 __asm__ __volatile__(
474 __MODULE_JAL(__strncpy_from_user_asm)
477 : "r" (__to), "r" (__from), "r" (__len)
478 : "$2", "$3", "$4", "$5", "$6", "$8", "$31", "memory");
483 /* Returns: 0 if bad, string length+1 (memory size) of string if ok */
/*
 * Unchecked user strlen via the "nocheck" asm helper.
 * NOTE(review): braces, the result variable and the asm template /
 * operand lines are missing from this excerpt.
 */
484 static inline long __strlen_user(const char *s)
488 __asm__ __volatile__(
490 __MODULE_JAL(__strlen_user_nocheck_asm)
494 : "$2", "$4", "$8", "$31");
/*
 * Checked user strlen; same contract as __strlen_user() (0 if bad,
 * length+1 if ok) but via the checking asm helper.
 * NOTE(review): braces, result variable and template/operand lines
 * are missing from this excerpt.
 */
499 static inline long strlen_user(const char *s)
503 __asm__ __volatile__(
505 __MODULE_JAL(__strlen_user_asm)
509 : "$2", "$4", "$8", "$31");
514 /* Returns: 0 if bad, string length+1 (memory size) of string if ok */
/*
 * Unchecked bounded user strlen (limit n) via the "nocheck" asm
 * helper.  NOTE(review): braces, result variable and template/operand
 * lines are missing from this excerpt.
 */
515 static inline long __strnlen_user(const char *s, long n)
519 __asm__ __volatile__(
522 __MODULE_JAL(__strnlen_user_nocheck_asm)
526 : "$2", "$4", "$5", "$8", "$31");
/*
 * Checked bounded user strlen; same contract as __strnlen_user() but
 * via the checking asm helper.
 * NOTE(review): braces, result variable and template/operand lines
 * are missing from this excerpt.
 */
531 static inline long strnlen_user(const char *s, long n)
535 __asm__ __volatile__(
538 __MODULE_JAL(__strnlen_user_asm)
542 : "$2", "$4", "$5", "$8", "$31");
/*
 * One entry per potentially-faulting instruction; "nextinsn" is the
 * fixup address the fault handler branches to.  The entry's first
 * member (presumably the faulting instruction's address) is on a line
 * missing from this excerpt — TODO confirm.
 */
547 struct exception_table_entry
550 unsigned long nextinsn;
553 #endif /* _ASM_UACCESS_H */