 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
#include <linux/config.h>
#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/cache.h>
#if defined(CONFIG_4xx) || defined(CONFIG_8xx)
#define CACHE_LINE_SIZE         16
#define LG_CACHE_LINE_SIZE      4
#define MAX_COPY_PREFETCH       1
#elif !defined(CONFIG_PPC64BRIDGE)
#define CACHE_LINE_SIZE         32
#define LG_CACHE_LINE_SIZE      5
#define MAX_COPY_PREFETCH       4
#else /* CONFIG_PPC64BRIDGE */
#define CACHE_LINE_SIZE         128
#define LG_CACHE_LINE_SIZE      7
#define MAX_COPY_PREFETCH       1
#endif /* CONFIG_4xx || CONFIG_8xx */
 * Returns (address we're running at) - (address we were linked at)
 * for use before the text and data are mapped to KERNELBASE.
/* void __no_use_save_flags(unsigned long *flags) */
_GLOBAL(__no_use_save_flags)
/* void __no_use_restore_flags(unsigned long flags) */
_GLOBAL(__no_use_restore_flags)
 * Just set/clear the MSR_EE bit through restore/flags but do not
 * change anything else.  This is needed by the RT system and makes
 * sense anyway.
        /* Copy all except the MSR_EE bit from r4 (current MSR value)
           to r3.  This is the sort of thing the rlwimi instruction is
           designed for.  -- paulus. */
        /* Check if things are set up the way we want _already_. */
        /* are we enabling interrupts? */
        /* if so, check if there are any lost interrupts */
        lis     r7,ppc_n_lost_interrupts@ha
        lwz     r7,ppc_n_lost_interrupts@l(r7)
        cmpi    0,r7,0                  /* lost interrupts to process first? */
        bne-    do_lost_interrupts
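/*
 * A rough C sketch of the sequence above, for illustration only; mfmsr()
 * and mtmsr() stand in for the actual instructions and the real code is
 * the assembly in this file:
 *
 *      void __no_use_restore_flags(unsigned long flags)
 *      {
 *              unsigned long msr = mfmsr();
 *
 *              flags = (flags & MSR_EE) | (msr & ~MSR_EE);
 *              if (flags == msr)
 *                      return;                         -- nothing to change
 *              if ((flags & MSR_EE) && ppc_n_lost_interrupts)
 *                      do_lost_interrupts(flags);      -- replay lost irqs first
 *              else
 *                      mtmsr(flags);
 *      }
 */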
        mfmsr   r0                      /* Get current interrupt state */
        rlwinm  r3,r0,16+1,32-1,31      /* Extract old value of 'EE' */
        rlwinm  r0,r0,0,17,15           /* clear MSR_EE in r0 */
        sync                            /* Some chip revs have problems here... */
        mtmsr   r0                      /* Update machine state */
        lis     r4,ppc_n_lost_interrupts@ha
        lwz     r4,ppc_n_lost_interrupts@l(r4)
        mfmsr   r3                      /* Get current state */
        ori     r3,r3,MSR_EE            /* Turn on 'EE' bit */
        cmpi    0,r4,0                  /* lost interrupts to process first? */
        bne-    do_lost_interrupts
        sync                            /* Some chip revs have problems here... */
        mtmsr   r3                      /* Update machine state */
 * We were about to enable interrupts but we have to simulate
 * some interrupts that were lost by enable_irq first.
_GLOBAL(do_lost_interrupts)
        lis     r4,ppc_n_lost_interrupts@ha
        lwz     r4,ppc_n_lost_interrupts@l(r4)
 * complement mask on the msr then "or" some values on.
 *     _nmask_and_or_msr(nmask, value_to_or)
_GLOBAL(_nmask_and_or_msr)
        mfmsr   r0                      /* Get current msr */
        andc    r0,r0,r3                /* And off the bits set in r3 (first parm) */
        or      r0,r0,r4                /* Or on the bits in r4 (second parm) */
        sync                            /* Some chip revs have problems here... */
        mtmsr   r0                      /* Update machine state */
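/*
 * Hedged C equivalent of _nmask_and_or_msr, purely for illustration
 * (mfmsr/mtmsr written as plain calls rather than inline assembly):
 *
 *      void _nmask_and_or_msr(unsigned long nmask, unsigned long value_to_or)
 *      {
 *              unsigned long msr = mfmsr();
 *
 *              msr &= ~nmask;          -- clear the bits set in nmask
 *              msr |= value_to_or;     -- then set the requested bits
 *              mtmsr(msr);
 *      }
 */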
#if defined(CONFIG_SMP)
        rlwinm  r0,r10,0,17,15          /* clear bit 16 (MSR_EE) */
        lis     r9,hash_table_lock@h
        ori     r9,r9,hash_table_lock@l
#endif /* CONFIG_SMP */
        stw     r0,0(r9)                /* clear hash_table_lock */
 * Flush MMU TLB for a particular address
#if defined(CONFIG_SMP)
        rlwinm  r0,r10,0,17,15          /* clear bit 16 (MSR_EE) */
        lis     r9,hash_table_lock@h
        ori     r9,r9,hash_table_lock@l
#endif /* CONFIG_SMP */
        stw     r0,0(r9)                /* clear hash_table_lock */
 * Flush instruction cache.
 * This is a no-op on the 601.
_GLOBAL(flush_instruction_cache)
        rlwinm  r3,r3,16,16,31
        beqlr                           /* for 601, do nothing */
        /* 603/604 processor - use invalidate-all bit in HID0 */
#endif /* CONFIG_8xx */
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 * This is a no-op on the 601.
 * flush_icache_range(unsigned long start, unsigned long stop)
_GLOBAL(flush_icache_range)
        rlwinm  r5,r5,16,16,31
        beqlr                           /* for 601, do nothing */
        li      r5,CACHE_LINE_SIZE-1
        srwi.   r4,r4,LG_CACHE_LINE_SIZE
        addi    r3,r3,CACHE_LINE_SIZE
        sync                            /* wait for dcbst's to get to ram */
        addi    r6,r6,CACHE_LINE_SIZE
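/*
 * A hedged C sketch of the algorithm above (dcbst/icbi/sync/isync stand
 * in for the actual instructions; this is illustration, not the real
 * implementation):
 *
 *      void flush_icache_range(unsigned long start, unsigned long stop)
 *      {
 *              unsigned long p;
 *
 *              start &= ~(CACHE_LINE_SIZE - 1);        -- round down to a line
 *              for (p = start; p < stop; p += CACHE_LINE_SIZE)
 *                      dcbst(p);       -- push modified data out to memory
 *              sync();                 -- wait for the dcbst's to complete
 *              for (p = start; p < stop; p += CACHE_LINE_SIZE)
 *                      icbi(p);        -- invalidate the stale I-cache lines
 *              sync();
 *              isync();                -- discard any prefetched instructions
 *      }
 */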
 * Like above, but only do the D-cache.
 * flush_dcache_range(unsigned long start, unsigned long stop)
_GLOBAL(flush_dcache_range)
        li      r5,CACHE_LINE_SIZE-1
        srwi.   r4,r4,LG_CACHE_LINE_SIZE
        addi    r3,r3,CACHE_LINE_SIZE
        sync                            /* wait for dcbst's to get to ram */
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 * void __flush_page_to_ram(void *page)
_GLOBAL(__flush_page_to_ram)
        rlwinm  r5,r5,16,16,31
        beqlr                           /* for 601, do nothing */
        rlwinm  r3,r3,0,0,19            /* Get page base address */
        li      r4,4096/CACHE_LINE_SIZE /* Number of lines in a page */
0:      dcbst   0,r3                    /* Write line to ram */
        addi    r3,r3,CACHE_LINE_SIZE
        addi    r6,r6,CACHE_LINE_SIZE
 * Flush a particular page from the instruction cache.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 * void __flush_icache_page(void *page)
_GLOBAL(__flush_icache_page)
        rlwinm  r5,r5,16,16,31
        beqlr                           /* for 601, do nothing */
        li      r4,4096/CACHE_LINE_SIZE /* Number of lines in a page */
        addi    r3,r3,CACHE_LINE_SIZE
 * Clear a page using the dcbz instruction, which doesn't cause any
 * memory traffic (except to write out any cache lines which get
 * displaced).  This only works on cacheable memory.
        li      r0,4096/CACHE_LINE_SIZE
        addi    r3,r3,CACHE_LINE_SIZE
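/*
 * Hedged sketch of the dcbz-based clear: each dcbz establishes a zeroed,
 * modified cache line without reading memory, so clearing a page is one
 * dcbz per line (illustrative C, assuming a 4096-byte page):
 *
 *      void clear_page(void *page)
 *      {
 *              unsigned long p = (unsigned long)page;
 *              int i;
 *
 *              for (i = 0; i < 4096 / CACHE_LINE_SIZE; i++, p += CACHE_LINE_SIZE)
 *                      dcbz(p);        -- zero a full cache line, no memory read
 *      }
 */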
 * Copy a whole page.  We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache).  This requires that the destination
 * is cacheable.
#define COPY_16_BYTES           \
#if MAX_COPY_PREFETCH > 1
        li      r0,MAX_COPY_PREFETCH
        addi    r11,r11,CACHE_LINE_SIZE
#else /* MAX_COPY_PREFETCH == 1 */
        li      r11,CACHE_LINE_SIZE+4
#endif /* MAX_COPY_PREFETCH */
#endif /* CONFIG_8xx */
        li      r0,4096/CACHE_LINE_SIZE
#if CACHE_LINE_SIZE >= 32
#if CACHE_LINE_SIZE >= 64
#if CACHE_LINE_SIZE >= 128
 * Atomic [test & set] exchange
 * unsigned long xchg_u32(void *ptr, unsigned long val)
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
        mr      r5,r3                   /* Save pointer */
10:     lwarx   r3,0,r5                 /* Fetch old value & reserve */
        stwcx.  r4,0,r5                 /* Update with new value */
        bne-    10b                     /* Retry if "reservation" (i.e. lock) lost */
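/*
 * What the lwarx/stwcx. pair above provides, as a hedged C sketch.
 * load_reserved and store_conditional are made-up names standing in for
 * lwarx and stwcx.; if the reservation is lost the store fails and the
 * loop retries:
 *
 *      unsigned long xchg_u32(void *ptr, unsigned long val)
 *      {
 *              unsigned long old;
 *
 *              do {
 *                      old = load_reserved(ptr);               -- lwarx
 *              } while (!store_conditional(ptr, val));         -- stwcx., retry
 *              return old;
 *      }
 */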
 * Try to acquire a spinlock.
 * Only does the stwcx. if the load returned 0 - the Programming
 * Environments Manual suggests not doing unnecessary stwcx.'s
 * since they may inhibit forward progress by other CPUs in getting
 * a lock.
_GLOBAL(__spin_trylock)
        eieio                           /* prevent reordering of stores */
        lwarx   r3,0,r4                 /* fetch old value, establish reservation */
        cmpwi   0,r3,0                  /* is it 0? */
        bnelr-                          /* return failure if not */
        stwcx.  r5,0,r4                 /* try to update with new value */
        bne-    1f                      /* if we failed */
        eieio                           /* prevent reordering of stores */
1:      li      r3,1                    /* return non-zero for failure */
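/*
 * Hedged usage sketch: __spin_trylock returns 0 on success and non-zero
 * if the lock was already held, so a caller can spin in C around it
 * (illustrative only; 'lock' is a hypothetical spinlock word):
 *
 *      while (__spin_trylock(&lock) != 0)
 *              barrier();      -- lock busy, keep trying
 */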
 * Atomic add/sub/inc/dec operations
 *      void atomic_add(int c, int *v)
 *      void atomic_sub(int c, int *v)
 *      void atomic_inc(int *v)
 *      void atomic_dec(int *v)
 *      int atomic_dec_and_test(int *v)
 *      int atomic_inc_return(int *v)
 *      int atomic_dec_return(int *v)
 *      void atomic_clear_mask(atomic_t mask, atomic_t *addr)
 *      void atomic_set_mask(atomic_t mask, atomic_t *addr);
#if 0 /* now inline - paulus */
10:     lwarx   r5,0,r4                 /* Fetch old value & reserve */
        add     r5,r5,r3                /* Perform 'add' operation */
        stwcx.  r5,0,r4                 /* Update with new value */
        bne-    10b                     /* Retry if "reservation" (i.e. lock) lost */
_GLOBAL(atomic_add_return)
10:     lwarx   r5,0,r4                 /* Fetch old value & reserve */
        add     r5,r5,r3                /* Perform 'add' operation */
        stwcx.  r5,0,r4                 /* Update with new value */
        bne-    10b                     /* Retry if "reservation" (i.e. lock) lost */
10:     lwarx   r5,0,r4                 /* Fetch old value & reserve */
        sub     r5,r5,r3                /* Perform 'subtract' operation */
        stwcx.  r5,0,r4                 /* Update with new value */
        bne-    10b                     /* Retry if "reservation" (i.e. lock) lost */
10:     lwarx   r5,0,r3                 /* Fetch old value & reserve */
        addi    r5,r5,1                 /* Perform 'increment' operation */
        stwcx.  r5,0,r3                 /* Update with new value */
        bne-    10b                     /* Retry if "reservation" (i.e. lock) lost */
_GLOBAL(atomic_inc_return)
10:     lwarx   r5,0,r3                 /* Fetch old value & reserve */
        addi    r5,r5,1                 /* Perform 'increment' operation */
        stwcx.  r5,0,r3                 /* Update with new value */
        bne-    10b                     /* Retry if "reservation" (i.e. lock) lost */
        mr      r3,r5                   /* Return new value */
10:     lwarx   r5,0,r3                 /* Fetch old value & reserve */
        subi    r5,r5,1                 /* Perform 'decrement' operation */
        stwcx.  r5,0,r3                 /* Update with new value */
        bne-    10b                     /* Retry if "reservation" (i.e. lock) lost */
_GLOBAL(atomic_dec_return)
10:     lwarx   r5,0,r3                 /* Fetch old value & reserve */
        subi    r5,r5,1                 /* Perform 'decrement' operation */
        stwcx.  r5,0,r3                 /* Update with new value */
        bne-    10b                     /* Retry if "reservation" (i.e. lock) lost */
        mr      r3,r5                   /* Return new value */
_GLOBAL(atomic_dec_and_test)
10:     lwarx   r5,0,r3                 /* Fetch old value & reserve */
        subi    r5,r5,1                 /* Perform 'decrement' operation */
        stwcx.  r5,0,r3                 /* Update with new value */
        bne-    10b                     /* Retry if "reservation" (i.e. lock) lost */
_GLOBAL(atomic_clear_mask)
_GLOBAL(atomic_set_mask)
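/*
 * All of the routines above share the same reservation loop; a hedged C
 * rendering of atomic_add_return, with load_reserved/store_conditional as
 * made-up stand-ins for lwarx and stwcx.:
 *
 *      int atomic_add_return(int c, int *v)
 *      {
 *              int t;
 *
 *              do {
 *                      t = load_reserved(v) + c;       -- lwarx, then add
 *              } while (!store_conditional(v, t));     -- stwcx., retry if lost
 *              return t;                               -- the new value
 *      }
 */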
 * I/O string operations
 * insb(port, buf, len)
 * outsb(port, buf, len)
 * insw(port, buf, len)
 * outsw(port, buf, len)
 * insl(port, buf, len)
 * outsl(port, buf, len)
 * insw_ns(port, buf, len)
 * outsw_ns(port, buf, len)
 * insl_ns(port, buf, len)
 * outsl_ns(port, buf, len)
 * The *_ns versions don't do byte-swapping.
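 *
 * Hedged C sketch of the non-swapping case, for illustration (in16_raw is
 * a made-up helper standing in for the raw 16-bit port read; the real
 * routines are the assembly loops that follow):
 *
 *      void insw_ns(unsigned long port, void *buf, int len)
 *      {
 *              unsigned short *p = buf;
 *
 *              while (len-- > 0)
 *                      *p++ = in16_raw(port);  -- stored exactly as read
 *      }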
 * Extended precision shifts.
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 * R3/R4 has 64 bit value
 * ashrdi3: arithmetic right shift (sign propagation)
 * lshrdi3: logical right shift
 * ashldi3: left shift
        srw     r4,r4,r5        # LSW = count > 31 ? 0 : LSW >> count
        addi    r7,r5,32        # could be xori, or addi with -32
        slw     r6,r3,r6        # t1 = count > 31 ? 0 : MSW << (32-count)
        rlwinm  r8,r7,0,32      # t3 = (count < 32) ? 32 : 0
        sraw    r7,r3,r7        # t2 = MSW >> (count-32)
        or      r4,r4,r6        # LSW |= t1
        slw     r7,r7,r8        # t2 = (count < 32) ? 0 : t2
        sraw    r3,r3,r5        # MSW = MSW >> count
        or      r4,r4,r7        # LSW |= t2
        slw     r3,r3,r5        # MSW = count > 31 ? 0 : MSW << count
        addi    r7,r5,32        # could be xori, or addi with -32
        srw     r6,r4,r6        # t1 = count > 31 ? 0 : LSW >> (32-count)
        slw     r7,r4,r7        # t2 = count < 32 ? 0 : LSW << (count-32)
        or      r3,r3,r6        # MSW |= t1
        slw     r4,r4,r5        # LSW = LSW << count
        or      r3,r3,r7        # MSW |= t2
        srw     r4,r4,r5        # LSW = count > 31 ? 0 : LSW >> count
        addi    r7,r5,32        # could be xori, or addi with -32
        slw     r6,r3,r6        # t1 = count > 31 ? 0 : MSW << (32-count)
        srw     r7,r3,r7        # t2 = count < 32 ? 0 : MSW >> (count-32)
        or      r4,r4,r6        # LSW |= t1
        srw     r3,r3,r5        # MSW = MSW >> count
        or      r4,r4,r7        # LSW |= t2
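/*
 * A hedged C sketch of the decomposition used above, shown for the
 * logical right shift (lshrdi3); msw/lsw are the 64-bit value's high and
 * low halves (R3/R4), count is in R5:
 *
 *      lsw = (count > 31) ? 0 : lsw >> count;
 *      t1  = (count == 0 || count > 31) ? 0 : msw << (32 - count);
 *      t2  = (count < 32) ? 0 : msw >> (count - 32);
 *      lsw |= t1 | t2;                 -- bits arriving from the MSW
 *      msw = (count > 31) ? 0 : msw >> count;
 *
 * Worked example, count = 40: t1 and the lsw term are 0, t2 = msw >> 8,
 * so the result is (0, msw >> 8), i.e. the 64-bit value shifted right
 * by 40.
 */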
        mr      r3,r1                   /* Close enough */
        Copyright © 1997-1998 by PowerLogix R & D, Inc.
        This program is free software; you can redistribute it and/or modify
        it under the terms of the GNU General Public License as published by
        the Free Software Foundation; either version 2 of the License, or
        (at your option) any later version.
        This program is distributed in the hope that it will be useful,
        but WITHOUT ANY WARRANTY; without even the implied warranty of
        MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
        GNU General Public License for more details.
        You should have received a copy of the GNU General Public License
        along with this program; if not, write to the Free Software
        Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
        - First public release, contributed by PowerLogix.
        Author: Terry Greeniaus (tgree@phys.ualberta.ca)
        Please e-mail updates to this file to me, thanks!
        When setting the L2CR register, you must do a few special
        things.  If you are enabling the cache, you must perform a
        global invalidate.  If you are disabling the cache, you must
        flush the cache contents first.  This routine takes care of
        doing these things.  When first enabling the cache, make sure
        you pass in the L2CR you want, as well as passing in the
        global invalidate bit set.  A global invalidate will only be
        performed if the L2I bit is set in applyThis.  When enabling
        the cache, you should also set the L2E bit in applyThis.  If
        you want to modify the L2CR contents after the cache has been
        enabled, the recommended procedure is to first call
        __setL2CR(0) to disable the cache and then call it again with
        the new values for L2CR.  Examples:

        _setL2CR(0)             - disables the cache
        _setL2CR(0xB3A04000)    - enables my G3 upgrade card:
                - L2E set to turn on the cache
                - L2RAM set to pipelined synchronous late-write
                - L2I set to perform a global invalidation
                - L2DF set because this upgrade card requires it

        A similar call should work for your card.  You need to know
        the correct settings for your card and then place them in the
        fields I have outlined above.  Other fields support optional
        features, such as L2DO which caches only data, or L2TS which
        causes cache pushes from the L1 cache to go to the L2 cache
        instead of to main memory.
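
        Hedged sketch of the reprogramming sequence described above (the
        0xB3A04000 value is only the card-specific example quoted here,
        not a recommendation for other hardware):

                _setL2CR(0);            - flush and disable the L2 first
                _setL2CR(0xB3A04000);   - then enable it with the new settings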
        /* Make sure this is a 750 chip */
        rlwinm  r4,r4,16,16,31
        /* Get the current enable bit of the L2CR into r4 */
        /* See if we want to perform a global inval this time. */
        rlwinm  r6,r3,0,10,10           /* r6 contains the new invalidate bit */
        rlwinm. r5,r3,0,0,0             /* r5 contains the new enable bit */
        rlwinm  r3,r3,0,11,9            /* Turn off the invalidate bit */
        rlwimi  r3,r4,0,0,0             /* Keep the enable bit the same as it was. */
        bne     dontDisableCache        /* Only disable the cache if L2CRApply
                                           has the enable bit off */
        /* Disable the cache.  First, we turn off interrupts.
           An interrupt while we are flushing the cache could bring
           in data which may not get properly flushed. */
        rlwinm  r4,r7,0,17,15           /* Turn off EE bit */
        Now, read the first 2MB of memory to put new data in the cache.
        (Actually we only need the size of the L2 cache plus the size
        of the L1 cache, but 2MB will cover everything just to be safe).
        addi    r4,r4,0x0020            /* Go to start of next cache line */
        /* Now, flush the first 2MB of memory */
        addi    r4,r4,0x0020            /* Go to start of next cache line */
        /* Turn off the L2CR enable bit. */
        /* Set up the L2CR configuration bits */
        /* Reenable interrupts if necessary. */
        /* Perform a global invalidation */
        /* Wait for the invalidation to complete */
        rlwinm. r4,r3,0,31,31
        rlwinm  r3,r3,0,11,9            /* Turn off the L2I bit */
        /* See if we need to enable the cache */
        /* Enable the cache */
        /* Make sure this is a 750 chip */
        rlwinm  r3,r3,16,16,31
        /* Return the L2CR contents */
/* --- End of PowerLogix code ---
 * These are used in the alignment trap handler when emulating
 * single-precision loads and stores.
 * We restore and save the fpscr so the task gets the same result
 * and exceptions as if the cpu had performed the load or store.
#if defined(CONFIG_4xx)
        lfd     0,-4(r5)                /* load up fpscr value */
        mffs    0                       /* save new fpscr value */
        lfd     0,-4(r5)                /* load up fpscr value */
        mffs    0                       /* save new fpscr value */
_GLOBAL(__clear_msr_me)
        mfmsr   r0                      /* Get current interrupt state */
        andc    r0,r0,r3                /* Clear the ME bit (mask in r3) */
        sync                            /* Some chip revs have problems here */
        mtmsr   r0                      /* Update machine state */
 * Create a kernel thread
 *   kernel_thread(fn, arg, flags)
_GLOBAL(kernel_thread)
        mr      r6,r3                   /* function */
        ori     r3,r5,CLONE_VM          /* flags */
        cmpi    0,r3,0                  /* parent or child? */
        bnelr                           /* return if parent */
        li      r0,0                    /* clear out p->thread.regs */
        stw     r0,THREAD+PT_REGS(r2)   /* since we don't have user ctx */
        mtlr    r6                      /* fn addr in lr */
        mr      r3,r4                   /* load arg and call fn */
        li      r0,__NR_exit            /* exit after child exits */
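/*
 * Hedged usage sketch: the parent gets the new pid back from the clone,
 * while the child never returns here - it runs fn(arg) and then exits.
 * CLONE_VM is ORed into the flags by the routine itself.  Illustrative
 * only; my_thread_fn is a made-up name:
 *
 *      static int my_thread_fn(void *arg)
 *      {
 *              -- do the work of the kernel thread
 *              return 0;
 *      }
 *
 *      pid = kernel_thread(my_thread_fn, NULL, 0);
 */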
 * This routine is just here to keep GCC happy - sigh...
#define SYSCALL(name) \
        li      r0,__NR_##name; \
        stw     r3,errno@l(r4); \
#define __NR__exit __NR_exit
SYSCALL(delete_module)
/* Why isn't this a) automatic, b) written in 'C'? */
_GLOBAL(sys_call_table)
        .long sys_ni_syscall            /* 0  -  old "setup()" system call */
        .long sys_open                  /* 5 */
        .long sys_unlink                /* 10 */
        .long sys_chmod                 /* 15 */
        .long sys_ni_syscall            /* old break syscall holder */
        .long sys_getpid                /* 20 */
        .long sys_stime                 /* 25 */
        .long sys_utime                 /* 30 */
        .long sys_ni_syscall            /* old stty syscall holder */
        .long sys_ni_syscall            /* old gtty syscall holder */
        .long sys_ni_syscall            /* 35 */        /* old ftime syscall holder */
        .long sys_rmdir                 /* 40 */
        .long sys_ni_syscall            /* old prof syscall holder */
        .long sys_brk                   /* 45 */
        .long sys_getegid               /* 50 */
        .long sys_umount                /* recycled never used phys() */
        .long sys_ni_syscall            /* old lock syscall holder */
        .long sys_fcntl                 /* 55 */
        .long sys_ni_syscall            /* old mpx syscall holder */
        .long sys_ni_syscall            /* old ulimit syscall holder */
        .long sys_umask                 /* 60 */
        .long sys_getpgrp               /* 65 */
        .long sys_setreuid              /* 70 */
        .long sys_sigsuspend
        .long sys_sigpending
        .long sys_sethostname
        .long sys_setrlimit             /* 75 */
        .long sys_old_getrlimit
        .long sys_gettimeofday
        .long sys_settimeofday
        .long sys_getgroups             /* 80 */
        .long sys_readlink              /* 85 */
        .long sys_mmap                  /* 90 */
        .long sys_fchown                /* 95 */
        .long sys_getpriority
        .long sys_setpriority
        .long sys_ni_syscall            /* old profil syscall holder */
        .long sys_fstatfs               /* 100 */
        .long sys_socketcall
        .long sys_getitimer             /* 105 */
        .long sys_iopl                  /* 110 */
        .long sys_ni_syscall            /* old 'idle' syscall */
        .long sys_swapoff               /* 115 */
        .long sys_clone                 /* 120 */
        .long sys_setdomainname
        .long sys_modify_ldt
        .long sys_mprotect              /* 125 */
        .long sys_sigprocmask
        .long sys_create_module
        .long sys_init_module
        .long sys_delete_module
        .long sys_get_kernel_syms       /* 130 */
        .long sys_sysfs                 /* 135 */
        .long sys_personality
        .long sys_ni_syscall            /* for afs_syscall */
        .long sys_llseek                /* 140 */
        .long sys_readv                 /* 145 */
        .long sys_mlock                 /* 150 */
        .long sys_munlockall
        .long sys_sched_setparam
        .long sys_sched_getparam        /* 155 */
        .long sys_sched_setscheduler
        .long sys_sched_getscheduler
        .long sys_sched_yield
        .long sys_sched_get_priority_max
        .long sys_sched_get_priority_min        /* 160 */
        .long sys_sched_rr_get_interval
        .long sys_getresuid             /* 165 */
        .long sys_query_module
        .long sys_nfsservctl
        .long sys_getresgid             /* 170 */
        .long sys_rt_sigreturn
        .long sys_rt_sigaction
        .long sys_rt_sigprocmask
        .long sys_rt_sigpending         /* 175 */
        .long sys_rt_sigtimedwait
        .long sys_rt_sigqueueinfo
        .long sys_rt_sigsuspend
        .long sys_pwrite                /* 180 */
        .long sys_sigaltstack           /* 185 */
        .long sys_ni_syscall            /* streams1 */
        .long sys_ni_syscall            /* streams2 */
        .long sys_getrlimit             /* 190 */
        .long sys_ni_syscall            /* 191 */       /* Unused */
        .long sys_ni_syscall            /* 192 - reserved - mmap2 */
        .long sys_ni_syscall            /* 193 - reserved - truncate64 */
        .long sys_ni_syscall            /* 194 - reserved - ftruncate64 */
        .long sys_ni_syscall            /* 195 - reserved - stat64 */
        .long sys_ni_syscall            /* 196 - reserved - lstat64 */
        .long sys_ni_syscall            /* 197 - reserved - fstat64 */
        .long sys_pciconfig_read        /* 198 */
        .long sys_pciconfig_write       /* 199 */
        .long sys_pciconfig_iobase      /* 200 */
        .long sys_ni_syscall            /* 201 - reserved - MacOnLinux - new */
        .long sys_getdents64            /* 202 */
        .rept NR_syscalls-(.-sys_call_table)/4
                .long sys_ni_syscall