Note: the early_printk patch was commented out because of patch rejects.
[linux-flexiantxendom0-3.2.10.git] / include / asm-mips / uaccess.h
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 1996, 1997, 1998, 1999, 2000 by Ralf Baechle
7  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8  */
9 #ifndef _ASM_UACCESS_H
10 #define _ASM_UACCESS_H
11
12 #include <linux/errno.h>
13 #include <linux/thread_info.h>
14
/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#ifdef CONFIG_MIPS32
/* Assembler spellings for 32-bit pointers in inline asm fragments. */
#define __UA_ADDR       ".word"
#define __UA_LA         "la"
#define __UA_ADDU       "addu"

/* Segment limits stored in thread_info->addr_limit (see get_fs/set_fs). */
#define KERNEL_DS       ((mm_segment_t) { (unsigned long) 0L })
#define USER_DS         ((mm_segment_t) { (unsigned long) -1L })

#define VERIFY_READ    0
#define VERIFY_WRITE   1

/*
 * A range passes iff the sign bit of (mask & (addr | addr+size | size))
 * is clear: with USER_DS (mask == -1) any address with the high bit set
 * is rejected, with KERNEL_DS (mask == 0) everything is accepted.
 *
 * Fix: the original expansion left the `addr` macro argument bare
 * ("(addr | ...)"), so an expression argument with lower precedence
 * than `|` would expand incorrectly.  All arguments are now fully
 * parenthesized, matching the CONFIG_MIPS64 variant below.
 */
#define __access_ok(addr, size, mask)                                   \
        (((signed long)((mask) & ((addr) | ((addr) + (size)) | __ua_size(size)))) >= 0)

#define __access_mask ((long)(get_fs().seg))

#define access_ok(type, addr, size)                                     \
        __access_ok(((unsigned long)(addr)), (size), __access_mask)

#endif /* CONFIG_MIPS32 */
42
#ifdef CONFIG_MIPS64
/* Assembler spellings for 64-bit pointers in inline asm fragments. */
#define __UA_ADDR       ".dword"
#define __UA_LA         "dla"
#define __UA_ADDU       "daddu"

/* Segment limits stored in thread_info->addr_limit (see get_fs/set_fs). */
#define KERNEL_DS       ((mm_segment_t) { 0UL })
#define USER_DS         ((mm_segment_t) { -TASK_SIZE })

#define VERIFY_READ    0
#define VERIFY_WRITE   1

/*
 * A range passes iff no bit of addr, addr+size or size survives the
 * mask: with KERNEL_DS (mask == 0) everything is accepted, with
 * USER_DS only addresses below TASK_SIZE pass.
 */
#define __access_ok(addr, size, mask)                                   \
        (((signed long)((mask) & ((addr) | ((addr) + (size)) | __ua_size(size)))) == 0)

#define __access_mask get_fs().seg

#define access_ok(type, addr, size)                                     \
        __access_ok((unsigned long)(addr), (size), __access_mask)

#endif /* CONFIG_MIPS64 */
63
/* Current address-limit ("segment") accessors; the limit lives in thread_info. */
#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))

/* True when two mm_segment_t values denote the same limit. */
#define segment_eq(a,b) ((a).seg == (b).seg)


/*
 * Is an address valid? This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 *
 * __ua_size() is a trick to avoid runtime checking of positive constant
 * sizes; for those we already know at compile time that the size is ok.
 */
#define __ua_size(size)                                                 \
        ((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))

87 static inline int verify_area(int type, const void * addr, unsigned long size)
88 {
89         return access_ok(type, addr, size) ? 0 : -EFAULT;
90 }
91
/*
 * Uh, these should become the main single-value transfer routines ...
 * They automatically use the right size if we just have the right
 * pointer type ...
 *
 * As MIPS uses the same address space for kernel and user data, we
 * can just do these as direct assignments.
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr) \
        __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
/*
 * Fix: x is the DESTINATION lvalue here.  Casting it (as the original
 * did) makes the final "x = ... __gu_val" inside the helper a
 * cast-used-as-lvalue, which is not valid ISO C (a removed GCC
 * extension).  The value is converted in the helper anyway, so the
 * cast is both wrong and redundant.
 */
#define get_user(x,ptr) \
        __get_user_check((x),(ptr),sizeof(*(ptr)))

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr) \
        __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) \
        __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
118
/* Dummy aggregate so an "o" asm operand is considered to cover the whole
   accessed region, not just one word at the address. */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))
121
/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 * (64-bit CPUs load a doubleword directly; 32-bit ones use two lw's.)
 */
#ifdef __mips64
#define __GET_USER_DW __get_user_asm("ld")
#else
#define __GET_USER_DW __get_user_asm_ll32
#endif
131
/*
 * Unchecked user fetch: loads a 1/2/4/8-byte datum from ptr into x and
 * yields the error code (0 or -EFAULT) as the value of the statement
 * expression.  The empty asm "outputs" only tell GCC the variables are
 * written -- suppressing uninitialized-use warnings without emitting
 * code; the real values are set by __get_user_asm*.  An unsupported
 * size becomes a link error via __get_user_unknown().
 */
#define __get_user_nocheck(x,ptr,size)                          \
({                                                              \
        long __gu_err;                                          \
        __typeof(*(ptr)) __gu_val;                              \
        long __gu_addr;                                         \
        __asm__("":"=r" (__gu_val));                            \
        __gu_addr = (long) (ptr);                               \
        __asm__("":"=r" (__gu_err));                            \
        switch (size) {                                         \
        case 1: __get_user_asm("lb"); break;                    \
        case 2: __get_user_asm("lh"); break;                    \
        case 4: __get_user_asm("lw"); break;                    \
        case 8: __GET_USER_DW; break;                           \
        default: __get_user_unknown(); break;                   \
        } x = (__typeof__(*(ptr))) __gu_val; __gu_err;          \
})
148
/*
 * Checked user fetch: like __get_user_nocheck but validates the range
 * with __access_ok first.
 *
 * Fix: the original "initialized" __gu_err/__gu_val with empty asm
 * statements, so when the access check FAILED the macro returned an
 * uninitialized error code (possibly 0 == success) and stored garbage
 * into x.  Now the error defaults to -EFAULT and the value to 0, and
 * the asm overwrites both on the success path.
 */
#define __get_user_check(x,ptr,size)                            \
({                                                              \
        long __gu_err = -EFAULT;                                \
        __typeof__(*(ptr)) __gu_val = 0;                        \
        long __gu_addr;                                         \
        __gu_addr = (long) (ptr);                               \
        if (__access_ok(__gu_addr,size,__access_mask)) {        \
                switch (size) {                                 \
                case 1: __get_user_asm("lb"); break;            \
                case 2: __get_user_asm("lh"); break;            \
                case 4: __get_user_asm("lw"); break;            \
                case 8: __GET_USER_DW; break;                   \
                default: __get_user_unknown(); break;           \
                }                                               \
        } x = (__typeof__(*(ptr))) __gu_val; __gu_err;          \
})
167
/*
 * One guarded user load.  Label 1: is the access; the __ex_table entry
 * maps a fault there to the 3: fixup, which sets the error to -EFAULT,
 * zeroes the result and resumes at 2:.  Expects __gu_err, __gu_val and
 * __gu_addr in scope (set up by the caller macros above).
 */
#define __get_user_asm(insn)                                    \
({                                                              \
        __asm__ __volatile__(                                   \
        "1:\t" insn "\t%1,%2\n\t"                               \
        "move\t%0,$0\n"                                         \
        "2:\n\t"                                                \
        ".section\t.fixup,\"ax\"\n"                             \
        "3:\tli\t%0,%3\n\t"                                     \
        "move\t%1,$0\n\t"                                       \
        "j\t2b\n\t"                                             \
        ".previous\n\t"                                         \
        ".section\t__ex_table,\"a\"\n\t"                        \
        __UA_ADDR "\t1b,3b\n\t"                                 \
        ".previous"                                             \
        :"=r" (__gu_err), "=r" (__gu_val)                       \
        :"o" (__m(__gu_addr)), "i" (-EFAULT));                  \
})
185
/*
 * Get a long long 64 using 32 bit registers.
 * Two lw's fill both halves of the value; %D1 names the other register
 * of the 64-bit pair (GCC MIPS operand modifier).  A fault on either
 * load lands in the 4: fixup, which zeroes both halves and reports
 * -EFAULT.  Both load addresses get their own __ex_table entry.
 */
#define __get_user_asm_ll32                                     \
({                                                              \
        __asm__ __volatile__(                                   \
        "1:\tlw\t%1,%2\n"                                       \
        "2:\tlw\t%D1,%3\n\t"                                    \
        "move\t%0,$0\n"                                         \
        "3:\t.section\t.fixup,\"ax\"\n"                         \
        "4:\tli\t%0,%4\n\t"                                     \
        "move\t%1,$0\n\t"                                       \
        "move\t%D1,$0\n\t"                                      \
        "j\t3b\n\t"                                             \
        ".previous\n\t"                                         \
        ".section\t__ex_table,\"a\"\n\t"                        \
        __UA_ADDR "\t1b,4b\n\t"                                 \
        __UA_ADDR "\t2b,4b\n\t"                                 \
        ".previous"                                             \
        :"=r" (__gu_err), "=&r" (__gu_val)                      \
        :"o" (__m(__gu_addr)), "o" (__m(__gu_addr + 4)),        \
         "i" (-EFAULT));                                        \
})

/* Never defined: referencing it makes a bad get_user size a link error. */
extern void __get_user_unknown(void);
211
/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 * (64-bit CPUs store a doubleword directly; 32-bit ones use two sw's.)
 */
#ifdef __mips64
#define __PUT_USER_DW __put_user_asm("sd")
#else
#define __PUT_USER_DW __put_user_asm_ll32
#endif
221
/*
 * Unchecked user store: writes the 1/2/4/8-byte value x to ptr and
 * yields the error code (0 or -EFAULT).  The empty asm "output" only
 * tells GCC that __pu_err is written (silences warnings); the real
 * value comes from __put_user_asm*.  Bad sizes become a link error via
 * __put_user_unknown().
 */
#define __put_user_nocheck(x,ptr,size)                          \
({                                                              \
        long __pu_err;                                          \
        __typeof__(*(ptr)) __pu_val;                            \
        long __pu_addr;                                         \
        __pu_val = (x);                                         \
        __pu_addr = (long) (ptr);                               \
        __asm__("":"=r" (__pu_err));                            \
        switch (size) {                                         \
        case 1: __put_user_asm("sb"); break;                    \
        case 2: __put_user_asm("sh"); break;                    \
        case 4: __put_user_asm("sw"); break;                    \
        case 8: __PUT_USER_DW; break;                           \
        default: __put_user_unknown(); break;                   \
        }                                                       \
        __pu_err;                                               \
})
239
/*
 * Checked user store: like __put_user_nocheck but validates the range
 * with __access_ok first.
 *
 * Fix: the original "initialized" __pu_err with an empty asm, so when
 * the access check FAILED the macro returned an uninitialized error
 * code (possibly 0 == success).  The error now defaults to -EFAULT and
 * is overwritten by the asm on the success path.
 */
#define __put_user_check(x,ptr,size)                            \
({                                                              \
        long __pu_err = -EFAULT;                                \
        __typeof__(*(ptr)) __pu_val;                            \
        long __pu_addr;                                         \
        __pu_val = (x);                                         \
        __pu_addr = (long) (ptr);                               \
        if (__access_ok(__pu_addr,size,__access_mask)) {        \
                switch (size) {                                 \
                case 1: __put_user_asm("sb"); break;            \
                case 2: __put_user_asm("sh"); break;            \
                case 4: __put_user_asm("sw"); break;            \
                case 8: __PUT_USER_DW; break;                   \
                default: __put_user_unknown(); break;           \
                }                                               \
        }                                                       \
        __pu_err;                                               \
})
259
/*
 * One guarded user store.  %z1 emits $0 when the value is the constant
 * zero (the "J" constraint alternative); a fault at 1: is redirected by
 * the __ex_table entry to the 3: fixup, which sets -EFAULT and resumes
 * at 2:.  Expects __pu_err, __pu_val and __pu_addr in scope.
 */
#define __put_user_asm(insn)                                    \
({                                                              \
        __asm__ __volatile__(                                   \
        "1:\t" insn "\t%z1, %2\t\t\t# __put_user_asm\n\t"       \
        "move\t%0, $0\n"                                        \
        "2:\n\t"                                                \
        ".section\t.fixup,\"ax\"\n"                             \
        "3:\tli\t%0,%3\n\t"                                     \
        "j\t2b\n\t"                                             \
        ".previous\n\t"                                         \
        ".section\t__ex_table,\"a\"\n\t"                        \
        __UA_ADDR "\t1b,3b\n\t"                                 \
        ".previous"                                             \
        :"=r" (__pu_err)                                        \
        :"Jr" (__pu_val), "o" (__m(__pu_addr)), "i" (-EFAULT)); \
})
276
/*
 * Store a long long 64 using 32 bit registers: two sw's, %D1 naming the
 * other register of the 64-bit pair.  A fault on either store lands in
 * the 4: fixup (sets -EFAULT); both store addresses have __ex_table
 * entries.
 */
#define __put_user_asm_ll32                                             \
({                                                                      \
        __asm__ __volatile__(                                           \
        "1:\tsw\t%1, %2\t\t\t# __put_user_asm_ll32\n\t"                 \
        "2:\tsw\t%D1, %3\n"                                             \
        "move\t%0, $0\n"                                                \
        "3:\n\t"                                                        \
        ".section\t.fixup,\"ax\"\n"                                     \
        "4:\tli\t%0,%4\n\t"                                             \
        "j\t3b\n\t"                                                     \
        ".previous\n\t"                                                 \
        ".section\t__ex_table,\"a\"\n\t"                                \
        __UA_ADDR "\t1b,4b\n\t"                                         \
        __UA_ADDR "\t2b,4b\n\t"                                         \
        ".previous"                                                     \
        :"=r" (__pu_err)                                                \
        :"r" (__pu_val), "o" (__m(__pu_addr)),                          \
         "o" (__m(__pu_addr + 4)), "i" (-EFAULT));                      \
})

/* Never defined: referencing it makes a bad put_user size a link error. */
extern void __put_user_unknown(void);
298
/*
 * We're generating jump to subroutines which will be outside the range of
 * jump instructions
 */
#ifdef MODULE
/* Modules may be loaded far away: load the target into $1 (at) and jalr. */
#define __MODULE_JAL(destination)                                       \
        ".set\tnoat\n\t"                                                \
        __UA_LA "\t$1, " #destination "\n\t"                            \
        "jalr\t$1\n\t"                                                  \
        ".set\tat\n\t"
#else
#define __MODULE_JAL(destination)                                       \
        "jal\t" #destination "\n\t"
#endif

/* Assembler copy routine with fault handling (arch/mips/lib); callers
   below read the residual byte count back from $6 after the call. */
extern size_t __copy_user(void *__to, const void *__from, size_t __n);
315
/*
 * Call __copy_user with the o32 argument registers loaded explicitly:
 * $4/$5/$6 = to/from/len.  The clobber list covers the temporaries the
 * assembler routine uses plus $31 (ra).  The value left in $6 after the
 * call is yielded as the result -- presumably the number of bytes NOT
 * copied (0 on success); verify against the __copy_user implementation.
 */
#define __invoke_copy_to_user(to,from,n)                                \
({                                                                      \
        register void *__cu_to_r __asm__ ("$4");                        \
        register const void *__cu_from_r __asm__ ("$5");                \
        register long __cu_len_r __asm__ ("$6");                        \
                                                                        \
        __cu_to_r = (to);                                               \
        __cu_from_r = (from);                                           \
        __cu_len_r = (n);                                               \
        __asm__ __volatile__(                                           \
        __MODULE_JAL(__copy_user)                                       \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
        :                                                               \
        : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31",         \
          "memory");                                                    \
        __cu_len_r;                                                     \
})
333
/*
 * Copy n bytes to user space WITHOUT an access_ok() check (caller has
 * already validated the range).  Evaluates to the residual byte count
 * from __invoke_copy_to_user.
 */
#define __copy_to_user(to,from,n)                                       \
({                                                                      \
        void *__cu_to;                                                  \
        const void *__cu_from;                                          \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
        __cu_len;                                                       \
})
346
/*
 * Checked copy to user space.  If the destination range fails
 * access_ok() the copy is skipped and the full length n is returned
 * (i.e. "n bytes not copied"), matching the kernel-wide contract.
 */
#define copy_to_user(to,from,n)                                         \
({                                                                      \
        void *__cu_to;                                                  \
        const void *__cu_from;                                          \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        if (access_ok(VERIFY_WRITE, __cu_to, __cu_len))                 \
                __cu_len = __invoke_copy_to_user(__cu_to, __cu_from,    \
                                                 __cu_len);             \
        __cu_len;                                                       \
})
361
/*
 * Like __invoke_copy_to_user but for the from-user direction.  The
 * extra __UA_ADDU computing $1 = from + len appears to feed the fault
 * fixup in __copy_user (the "XXX" comment is original); the residual
 * count is read back from $6.
 */
#define __invoke_copy_from_user(to,from,n)                              \
({                                                                      \
        register void *__cu_to_r __asm__ ("$4");                        \
        register const void *__cu_from_r __asm__ ("$5");                \
        register long __cu_len_r __asm__ ("$6");                        \
                                                                        \
        __cu_to_r = (to);                                               \
        __cu_from_r = (from);                                           \
        __cu_len_r = (n);                                               \
        __asm__ __volatile__(                                           \
        ".set\tnoreorder\n\t"                                           \
        __MODULE_JAL(__copy_user)                                       \
        ".set\tnoat\n\t"                                                \
        __UA_ADDU "\t$1, %1, %2\n\t"                                    \
        ".set\tat\n\t"                                                  \
        ".set\treorder\n\t"                                             \
        "move\t%0, $6"          /* XXX */                               \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
        :                                                               \
        : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31",         \
          "memory");                                                    \
        __cu_len_r;                                                     \
})
385
/*
 * Copy n bytes from user space WITHOUT an access_ok() check (caller
 * has already validated the range).  Evaluates to the residual byte
 * count from __invoke_copy_from_user.
 */
#define __copy_from_user(to,from,n)                                     \
({                                                                      \
        void *__cu_to;                                                  \
        const void *__cu_from;                                          \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        __cu_len = __invoke_copy_from_user(__cu_to, __cu_from,          \
                                           __cu_len);                   \
        __cu_len;                                                       \
})
399
/*
 * Checked copy from user space.  If the source range fails access_ok()
 * the copy is skipped and the full length n is returned ("n bytes not
 * copied").
 */
#define copy_from_user(to,from,n)                                       \
({                                                                      \
        void *__cu_to;                                                  \
        const void *__cu_from;                                          \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        if (access_ok(VERIFY_READ, __cu_from, __cu_len))                \
                __cu_len = __invoke_copy_from_user(__cu_to, __cu_from,  \
                                                   __cu_len);           \
        __cu_len;                                                       \
})
414
/*
 * Zero `size` bytes at user address `addr` without an access check.
 * Calls the assembler helper __bzero with $4 = addr, $5 = 0 (fill
 * byte), $6 = size, and reads the result back from $6 -- presumably
 * the number of bytes NOT cleared; verify against the __bzero
 * implementation.
 */
static inline __kernel_size_t
__clear_user(void *addr, __kernel_size_t size)
{
        __kernel_size_t res;

        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                "move\t$5, $0\n\t"
                "move\t$6, %2\n\t"
                __MODULE_JAL(__bzero)
                "move\t%0, $6"
                : "=r" (res)
                : "r" (addr), "r" (size)
                : "$4", "$5", "$6", "$8", "$9", "$31");

        return res;
}
432
/*
 * Checked zeroing of user memory.  Skips both the access check and the
 * call entirely for a zero size; when the range fails access_ok() the
 * full size is returned (no bytes cleared).
 */
#define clear_user(addr,n)                                      \
({                                                              \
        void * __cl_addr = (addr);                              \
        unsigned long __cl_size = (n);                          \
        if (__cl_size && access_ok(VERIFY_WRITE,                \
                ((unsigned long)(__cl_addr)), __cl_size))       \
                __cl_size = __clear_user(__cl_addr, __cl_size); \
        __cl_size;                                              \
})
442
/*
 * Returns: -EFAULT if exception before terminator, N if the entire
 * buffer filled, else strlen.
 *
 * Unchecked variant: calls the assembler helper with o32 arguments
 * $4/$5/$6 = to/from/len and takes the result from $2 (the o32 return
 * register).  "memory" is clobbered because the helper writes *__to.
 */
static inline long
__strncpy_from_user(char *__to, const char *__from, long __len)
{
        long res;

        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                "move\t$5, %2\n\t"
                "move\t$6, %3\n\t"
                __MODULE_JAL(__strncpy_from_user_nocheck_asm)
                "move\t%0, $2"
                : "=r" (res)
                : "r" (__to), "r" (__from), "r" (__len)
                : "$2", "$3", "$4", "$5", "$6", "$8", "$31", "memory");

        return res;
}
464
/*
 * Checked counterpart of __strncpy_from_user: the *_asm helper
 * (without "nocheck") presumably performs the user-range validation
 * itself.  Same calling convention: $4/$5/$6 in, result in $2.
 */
static inline long
strncpy_from_user(char *__to, const char *__from, long __len)
{
        long res;

        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                "move\t$5, %2\n\t"
                "move\t$6, %3\n\t"
                __MODULE_JAL(__strncpy_from_user_asm)
                "move\t%0, $2"
                : "=r" (res)
                : "r" (__to), "r" (__from), "r" (__len)
                : "$2", "$3", "$4", "$5", "$6", "$8", "$31", "memory");

        return res;
}
482
/* Returns: 0 if bad, string length+1 (memory size) of string if ok.
   Unchecked variant; argument in $4, result taken from $2. */
static inline long __strlen_user(const char *s)
{
        long res;

        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                __MODULE_JAL(__strlen_user_nocheck_asm)
                "move\t%0, $2"
                : "=r" (res)
                : "r" (s)
                : "$2", "$4", "$8", "$31");

        return res;
}
498
/* Checked counterpart of __strlen_user: 0 if bad, string length+1
   (memory size) if ok.  Argument in $4, result from $2. */
static inline long strlen_user(const char *s)
{
        long res;

        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                __MODULE_JAL(__strlen_user_asm)
                "move\t%0, $2"
                : "=r" (res)
                : "r" (s)
                : "$2", "$4", "$8", "$31");

        return res;
}
513
/* Returns: 0 if bad, string length+1 (memory size) of string if ok.
   Bounded, unchecked variant: scans at most n bytes.  Arguments in
   $4/$5, result taken from $2. */
static inline long __strnlen_user(const char *s, long n)
{
        long res;

        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                "move\t$5, %2\n\t"
                __MODULE_JAL(__strnlen_user_nocheck_asm)
                "move\t%0, $2"
                : "=r" (res)
                : "r" (s), "r" (n)
                : "$2", "$4", "$5", "$8", "$31");

        return res;
}
530
/* Checked counterpart of __strnlen_user: 0 if bad, string length+1
   (memory size) if ok, scanning at most n bytes.  Arguments in $4/$5,
   result from $2. */
static inline long strnlen_user(const char *s, long n)
{
        long res;

        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                "move\t$5, %2\n\t"
                __MODULE_JAL(__strnlen_user_asm)
                "move\t%0, $2"
                : "=r" (res)
                : "r" (s), "r" (n)
                : "$2", "$4", "$5", "$8", "$31");

        return res;
}
546
/*
 * Layout of one __ex_table record (the __UA_ADDR pairs emitted above):
 * insn is the address of the potentially faulting instruction,
 * nextinsn the fixup address the fault handler transfers control to.
 */
struct exception_table_entry
{
        unsigned long insn;
        unsigned long nextinsn;
};

#endif /* _ASM_UACCESS_H */