/*
 * linux/arch/arm/boot/compressed/head.S
 *
 * Copyright (C) 1996-2002 Russell King
 * Copyright (C) 2004 Hyok S. Choi (MPU support)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
/*
 * Note that these macros must not contain any code which is not
 * 100% relocatable.  Any attempt to do so will result in a crash.
 * Please select one of the following when turning on debugging.
 */
#if defined(CONFIG_DEBUG_ICEDCC)
#ifdef CONFIG_CPU_V6
        mcr     p14, 0, \ch, c0, c5, 0          @ writeb: \ch -> DCC (ARMv6)
#elif defined(CONFIG_CPU_XSCALE)
        mcr     p14, 0, \ch, c8, c0, 0          @ writeb: \ch -> DCC (XScale)
#else
        mcr     p14, 0, \ch, c1, c0, 0          @ writeb: \ch -> DCC
#endif
#else
#include <mach/debug-macro.S>
#if defined(CONFIG_ARCH_SA1100)
        .macro  loadsp, rb
        mov     \rb, #0x80000000                @ physical base address
#ifdef CONFIG_DEBUG_LL_SER3
        add     \rb, \rb, #0x00050000           @ Ser3
#else
        add     \rb, \rb, #0x00010000           @ Ser1
#endif
        .endm
#elif defined(CONFIG_ARCH_S3C2410)
        .macro  loadsp, rb
        mov     \rb, #0x50000000                @ physical base of the S3C UARTs
        add     \rb, \rb, #0x4000 * CONFIG_S3C_LOWLEVEL_UART_PORT
        .endm
#endif
        .macro  debug_reloc_start
#ifdef DEBUG
        kphex   r6, 8                   /* processor id */
        kphex   r7, 8                   /* architecture id */
#ifdef CONFIG_CPU_CP15
        mrc     p15, 0, r0, c1, c0
        kphex   r0, 8                   /* control reg */
#endif
        kphex   r5, 8                   /* decompressed kernel start */
        kphex   r9, 8                   /* decompressed kernel end */
        kphex   r4, 8                   /* kernel execution address */
#endif
        .endm
        .macro  debug_reloc_end
#ifdef DEBUG
        kphex   r5, 8                   /* end of kernel */
        mov     r0, r4
        bl      memdump                 /* dump 256 bytes at start of kernel */
#endif
        .endm
        .section ".start", #alloc, #execinstr
/*
 * sort out different calling conventions
 */
        .align
start:
        .type   start,#function
        b       1f
        .word   0x016f2818              @ Magic numbers to help the loader
        .word   start                   @ absolute load/run zImage address
        .word   _edata                  @ zImage end address
1:      mov     r7, r1                  @ save architecture ID
        mov     r8, r2                  @ save atags pointer
#ifndef __ARM_ARCH_2__
        /*
         * Booting from Angel - need to enter SVC mode and disable
         * FIQs/IRQs (numeric definitions from angel arm.h source).
         * We only do this if we were in user mode on entry.
         */
        mrs     r2, cpsr                @ get current mode
        tst     r2, #3                  @ not user?
        bne     not_angel
        mov     r0, #0x17               @ angel_SWIreason_EnterSVC
        swi     0x123456                @ angel_SWI_ARM
not_angel:
        mrs     r2, cpsr                @ turn off interrupts to
        orr     r2, r2, #0xc0           @ prevent angel from running
        msr     cpsr_c, r2
#else
        teqp    pc, #0x0c000003         @ turn off interrupts
#endif
/*
 * Note that some cache flushing and other stuff may
 * be needed here - is there an Angel SWI call for this?
 */
/*
 * some architecture specific code can be inserted
 * by the linker here, but it should preserve r7, r8, and r9.
 */
        adr     r0, LC0
        ldmia   r0, {r1, r2, r3, r4, r5, r6, ip, sp}
        subs    r0, r0, r1              @ calculate the delta offset

                                        @ if delta is zero, we are
        beq     not_relocated           @ running at the address we
                                        @ were linked at.
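/*
 * Illustrative C sketch of the check above (names are descriptive
 * only): LC0 begins with its own link-time address, so
 *
 *	delta = runtime_addr_of(LC0) - LC0[0];
 *	if (delta == 0)
 *		goto not_relocated;	// nothing needs fixing up
 */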
        /*
         * We're running at a different address.  We need to fix
         * up various pointers:
         *   r5 - zImage base address
         */
#ifndef CONFIG_ZBOOT_ROM
        /*
         * If we're running fully PIC (CONFIG_ZBOOT_ROM=n),
         * we need to fix up pointers into the BSS region.
         */
        /*
         * Relocate all entries in the GOT table.
         */
1:      ldr     r1, [r6, #0]            @ relocate entries in the GOT
        add     r1, r1, r0              @ table.  This fixes up the
        str     r1, [r6], #4            @ C references.
        cmp     r6, ip
        blo     1b
#else
        /*
         * Relocate entries in the GOT table.  We only relocate
         * the entries that are outside the (relocated) BSS region.
         */
1:      ldr     r1, [r6, #0]            @ relocate entries in the GOT
        cmp     r1, r2                  @ entry < bss_start ||
        cmphs   r3, r1                  @ _end < entry
        addlo   r1, r1, r0              @ table.  This fixes up the
        str     r1, [r6], #4            @ C references.
        cmp     r6, ip
        blo     1b
#endif
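/*
 * Illustrative C sketch of the loop above (names are descriptive
 * only; they stand for r6/ip, r2/r3 and r0):
 *
 *	for (u32 *p = got_start; p < got_end; p++)
 *		if (*p < bss_start || *p > bss_end)
 *			*p += delta;
 */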
not_relocated:  mov     r0, #0
1:      str     r0, [r2], #4            @ clear bss
        cmp     r2, r3
        blo     1b
        /*
         * The C runtime environment should now be set up
         * sufficiently.  Turn the cache on, set up some
         * pointers, and start decompressing.
         */
        bl      cache_on

        mov     r1, sp                  @ malloc space above stack
        add     r2, sp, #0x10000        @ 64k max
/*
 * Check to see if we will overwrite ourselves.
 *   r4 = final kernel address
 *   r5 = start of this image
 *   r2 = end of malloc space (and therefore this image)
 * We basically want:
 *   r4 >= r2 -> OK
 *   r4 + image length <= r5 -> OK
 */
        cmp     r4, r2
        bhs     wont_overwrite
        sub     r3, sp, r5              @ > compressed kernel size
        add     r0, r4, r3, lsl #2      @ allow for 4x expansion
        cmp     r0, r5
        bls     wont_overwrite
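/*
 * Worked example with illustrative numbers: for 2MB of compressed
 * data, sp - r5 = 2MB, so r0 = r4 + 8MB is a pessimistic end of the
 * decompressed image; if even that lies at or below r5, decompression
 * cannot run into the data it is still reading.
 */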
        mov     r5, r2                  @ decompress after malloc space
        mov     r0, r5
        mov     r3, r7
        bl      decompress_kernel
        add     r0, r0, #127 + 128      @ alignment + stack
        bic     r0, r0, #127            @ align the kernel length
/*
 * r0     = decompressed kernel length
 * r4     = kernel execution address
 * r5     = decompressed kernel start
 * r7     = architecture ID
 */
        add     r1, r5, r0              @ end of decompressed kernel
1:      ldmia   r2!, {r9 - r14}         @ copy relocation code
        stmia   r1!, {r9 - r14}
        ldmia   r2!, {r9 - r14}
        stmia   r1!, {r9 - r14}
        cmp     r2, r3
        blo     1b
        add     sp, r1, #128            @ relocate the stack

        bl      cache_clean_flush
        add     pc, r5, r0              @ call relocation code
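/*
 * r5 + r0 is the first byte past the decompressed image, which is
 * exactly where the relocation code was just copied, so this jump
 * lands on the relocated copy of reloc_start below.
 */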
/*
 * We're not in danger of overwriting ourselves.
 * Do this the simple way.  r4 = kernel execution address,
 * r7 = architecture ID.
 */
wont_overwrite: mov     r0, r4
        mov     r3, r7
        bl      decompress_kernel
        b       call_kernel
        .word   __bss_start             @ r2
        .word   _got_start              @ r6
        .word   user_stack+4096         @ sp
LC1:    .word   reloc_end - reloc_start
#ifdef CONFIG_ARCH_RPC
params: ldr     r0, =params_phys
        mov     pc, lr
#endif
/*
 * Turn on the cache.  We need to set up some page tables so that we
 * can have both the I and D caches on.
 *
 * We place the page tables 16k down from the kernel execution address,
 * and we hope that nothing else is using it.  If we're using it, we
 * will go pop!
 *
 * On entry,
 *  r4 = kernel execution address
 *  r7 = architecture number
 *  r9 = run-time address of "start"  (???)
 * On exit,
 *  r1, r2, r3, r9, r10, r12 corrupted
 * This routine must preserve:
 *  r4, r5, r6, r7, r8
 */
cache_on:       mov     r3, #8          @ cache_on function
        b       call_cache_fn
/*
 * Initialize the highest priority protection region, PR7,
 * to cover the whole 32-bit address space, cacheable and bufferable.
 */
__armv4_mpu_cache_on:
        mov     r0, #0x3f               @ 4G, the whole address space
        mcr     p15, 0, r0, c6, c7, 0   @ PR7 Area Setting
        mcr     p15, 0, r0, c6, c7, 1

        mov     r0, #0x80               @ PR7
        mcr     p15, 0, r0, c2, c0, 0   @ D-cache on
        mcr     p15, 0, r0, c2, c0, 1   @ I-cache on
        mcr     p15, 0, r0, c3, c0, 0   @ write-buffer on

        mov     r0, #0xc000
        mcr     p15, 0, r0, c5, c0, 1   @ I-access permission
        mcr     p15, 0, r0, c5, c0, 0   @ D-access permission

        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4  @ drain write buffer
        mcr     p15, 0, r0, c7, c5, 0   @ flush(inval) I-Cache
        mcr     p15, 0, r0, c7, c6, 0   @ flush(inval) D-Cache
        mrc     p15, 0, r0, c1, c0, 0   @ read control reg
                                        @ ...I .... ..D. WC.M
        orr     r0, r0, #0x002d         @ .... .... ..1. 11.1
        orr     r0, r0, #0x1000         @ ...1 .... .... ....

        mcr     p15, 0, r0, c1, c0, 0   @ write control reg

        mov     r0, #0
        mcr     p15, 0, r0, c7, c5, 0   @ flush(inval) I-Cache
        mcr     p15, 0, r0, c7, c6, 0   @ flush(inval) D-Cache
        mov     pc, lr
__armv3_mpu_cache_on:
        mov     r0, #0x3f               @ 4G, the whole address space
        mcr     p15, 0, r0, c6, c7, 0   @ PR7 Area Setting

        mov     r0, #0x80               @ PR7
        mcr     p15, 0, r0, c2, c0, 0   @ cache on
        mcr     p15, 0, r0, c3, c0, 0   @ write-buffer on

        mov     r0, #0xc000
        mcr     p15, 0, r0, c5, c0, 0   @ access permission

        mov     r0, #0
        mcr     p15, 0, r0, c7, c0, 0   @ invalidate whole cache v3
        mrc     p15, 0, r0, c1, c0, 0   @ read control reg
                                        @ .... .... .... WC.M
        orr     r0, r0, #0x000d         @ .... .... .... 11.1
        mcr     p15, 0, r0, c1, c0, 0   @ write control reg

        mcr     p15, 0, r0, c7, c0, 0   @ invalidate whole cache v3
        mov     pc, lr
__setup_mmu:    sub     r3, r4, #16384  @ Page directory size
        bic     r3, r3, #0xff           @ Align the pointer
        bic     r3, r3, #0x3f00
/*
 * Initialise the page tables, turning on the cacheable and bufferable
 * bits for the RAM area only.
 */
        mov     r0, r3
        mov     r9, r0, lsr #18
        mov     r9, r9, lsl #18         @ start of RAM
        add     r10, r9, #0x10000000    @ a reasonable RAM size
        mov     r1, #0x12               @ section mapping, domain 0
        orr     r1, r1, #3 << 10        @ AP = full access
        add     r2, r3, #16384
1:      cmp     r1, r9                  @ if virt >= start of RAM
        orrhs   r1, r1, #0x0c           @ set cacheable, bufferable
        cmp     r1, r10                 @ if virt >= end of RAM
        bichs   r1, r1, #0x0c           @ clear cacheable, bufferable
        str     r1, [r0], #4            @ 1:1 mapping
        add     r1, r1, #1048576
        teq     r0, r2
        bne     1b
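/*
 * Illustrative C sketch of the loop above (names are descriptive
 * only; pgtbl is the 16K table at r3).  Note the code compares the
 * descriptor itself against the RAM bounds, which works because a
 * section descriptor's top bits are the section base address:
 *
 *	for (unsigned int i = 0; i < 4096; i++) {
 *		u32 desc = (i << 20) | (3 << 10) | 0x12;
 *		if (desc >= ram_start && desc < ram_end)
 *			desc |= 0x0c;		// cacheable + bufferable
 *		pgtbl[i] = desc;
 *	}
 */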
/*
 * If ever we are running from Flash, then we surely want the cache
 * to be enabled also for our execution instance...  We map 2MB of it
 * so there is no map overlap problem for up to 1 MB compressed kernel.
 * If the execution is in RAM then we would only be duplicating the above.
 */
        mov     r1, #0x1e               @ section, cacheable, bufferable
        orr     r1, r1, #3 << 10        @ AP = full access
        mov     r2, pc, lsr #20
        orr     r1, r1, r2, lsl #20
        add     r0, r3, r2, lsl #2
        str     r1, [r0], #4
        add     r1, r1, #1048576
        str     r1, [r0]
        mov     pc, lr
__armv4_mmu_cache_on:
        mov     r12, lr
        bl      __setup_mmu
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4  @ drain write buffer
        mcr     p15, 0, r0, c8, c7, 0   @ flush I,D TLBs
        mrc     p15, 0, r0, c1, c0, 0   @ read control reg
        orr     r0, r0, #0x5000         @ I-cache enable, RR cache replacement
        orr     r0, r0, #0x0030
#ifdef CONFIG_CPU_ENDIAN_BE8
        orr     r0, r0, #1 << 25        @ big-endian page tables
#endif
        bl      __common_mmu_cache_on
        mov     r0, #0
        mcr     p15, 0, r0, c8, c7, 0   @ flush I,D TLBs
        mov     pc, r12
__armv7_mmu_cache_on:
        mov     r12, lr
        mrc     p15, 0, r11, c0, c1, 4  @ read ID_MMFR0
        tst     r11, #0xf               @ VMSA
        blne    __setup_mmu
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4  @ drain write buffer
        tst     r11, #0xf               @ VMSA
        mcrne   p15, 0, r0, c8, c7, 0   @ flush I,D TLBs
        mrc     p15, 0, r0, c1, c0, 0   @ read control reg
        orr     r0, r0, #0x5000         @ I-cache enable, RR cache replacement
        orr     r0, r0, #0x003c         @ write buffer
#ifdef CONFIG_CPU_ENDIAN_BE8
        orr     r0, r0, #1 << 25        @ big-endian page tables
#endif
        orrne   r0, r0, #1              @ MMU enabled
        movne   r1, #-1
        mcrne   p15, 0, r3, c2, c0, 0   @ load page table pointer
        mcrne   p15, 0, r1, c3, c0, 0   @ load domain access control
        mcr     p15, 0, r0, c1, c0, 0   @ load control register
        mrc     p15, 0, r0, c1, c0, 0   @ and read it back
        mov     r0, #0
        mcr     p15, 0, r0, c7, c5, 4   @ ISB
        mov     pc, r12
__fa526_cache_on:
        mov     r12, lr
        bl      __setup_mmu
        mov     r0, #0
        mcr     p15, 0, r0, c7, c7, 0   @ Invalidate whole cache
        mcr     p15, 0, r0, c7, c10, 4  @ drain write buffer
        mcr     p15, 0, r0, c8, c7, 0   @ flush UTLB
        mrc     p15, 0, r0, c1, c0, 0   @ read control reg
        orr     r0, r0, #0x1000         @ I-cache enable
        bl      __common_mmu_cache_on
        mov     r0, #0
        mcr     p15, 0, r0, c8, c7, 0   @ flush UTLB
        mov     pc, r12
__arm6_mmu_cache_on:
        mov     r12, lr
        bl      __setup_mmu
        mcr     p15, 0, r0, c7, c0, 0   @ invalidate whole cache v3
        mcr     p15, 0, r0, c5, c0, 0   @ invalidate whole TLB v3
        bl      __common_mmu_cache_on
        mcr     p15, 0, r0, c5, c0, 0   @ invalidate whole TLB v3
        mov     pc, r12
__common_mmu_cache_on:
#ifndef DEBUG
        orr     r0, r0, #0x000d         @ Write buffer, mmu
#endif
        mov     r1, #-1
        mcr     p15, 0, r3, c2, c0, 0   @ load page table pointer
        mcr     p15, 0, r1, c3, c0, 0   @ load domain access control
        b       1f
        .align  5                       @ cache line aligned
1:      mcr     p15, 0, r0, c1, c0, 0   @ load control register
        mrc     p15, 0, r0, c1, c0, 0   @ and read it back to
        sub     pc, lr, r0, lsr #32     @ properly flush pipeline
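/*
 * "r0, lsr #32" evaluates to zero, so the instruction above is a
 * "mov pc, lr" with a data dependency on the control register
 * read-back, forcing the pipeline to refill after the MMU switch.
 */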
/*
 * All code following this line is relocatable.  It is relocated by
 * the above code to the end of the decompressed kernel image and
 * executed there.  During this time, we have no stacks.
 *
 * r0     = decompressed kernel length
 * r4     = kernel execution address
 * r5     = decompressed kernel start
 * r7     = architecture ID
 */
reloc_start:    add     r9, r5, r0
        sub     r9, r9, #128            @ do not copy the stack
        debug_reloc_start
        mov     r1, r4
1:      ldmia   r5!, {r0, r2, r3, r10 - r14}    @ relocate kernel
        stmia   r1!, {r0, r2, r3, r10 - r14}
        cmp     r5, r9
        blo     1b
        add     sp, r1, #128            @ relocate the stack
        debug_reloc_end
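/*
 * Illustrative C sketch of the copy loop above (names are
 * descriptive only; each ldm/stm pair moves 8 words, 32 bytes):
 *
 *	while (src < end_minus_stack) {
 *		memcpy(dst, src, 32);
 *		src += 32; dst += 32;
 *	}
 */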
call_kernel:    bl      cache_clean_flush
        bl      cache_off
        mov     r0, #0                  @ must be zero
        mov     r1, r7                  @ restore architecture number
        mov     r2, r8                  @ restore atags pointer
        mov     pc, r4                  @ call kernel
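/*
 * This enters the decompressed kernel with the standard ARM boot
 * register convention; in illustrative C terms:
 *
 *	void (*kernel)(u32 zero, u32 arch, struct tag *atags) = (void *)r4;
 *	kernel(0, arch_id, atags);	// r0 = 0, r1 = r7, r2 = r8
 */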
/*
 * Here follow the relocatable cache support functions for the
 * various processors.  This is a generic hook for locating an
 * entry and jumping to an instruction at the specified offset
 * from the start of the block.  Please note this is all position
 * independent code.
 */
call_cache_fn:  adr     r12, proc_types
#ifdef CONFIG_CPU_CP15
        mrc     p15, 0, r6, c0, c0      @ get processor ID
#else
        ldr     r6, =CONFIG_PROCESSOR_ID
#endif
1:      ldr     r1, [r12, #0]           @ get value
        ldr     r2, [r12, #4]           @ get mask
        eor     r1, r1, r6              @ (real ^ match)
        tst     r1, r2                  @       & mask
        addeq   pc, r12, r3             @ call cache function
        add     r12, r12, #4*5
        b       1b
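/*
 * Illustrative C sketch of the lookup above (struct layout mirrors
 * the proc_types entries; names are descriptive only):
 *
 *	struct proc_type { u32 match, mask, on, off, flush; };
 *	for (p = proc_types; ((id ^ p->match) & p->mask) != 0; p++)
 *		;
 *	// then branch to the on/off/flush slot selected by r3
 *
 * The final all-zero-mask entry matches everything, so the walk
 * always terminates.
 */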
/*
 * Table for cache operations.  This is basically:
 *  - CPU ID match
 *  - CPU ID mask
 *  - 'cache on' method instruction
 *  - 'cache off' method instruction
 *  - 'cache flush' method instruction
 *
 * We match an entry using: ((real_id ^ match) & mask) == 0
 *
 * Writethrough caches generally only need 'on' and 'off'
 * methods.  Writeback caches _must_ have the flush method
 * defined.
 */
        .type   proc_types,#object
proc_types:
        .word   0x41560600              @ ARM6/610
        b       __arm6_mmu_cache_off    @ works, but slow
        b       __arm6_mmu_cache_off
@       b       __arm6_mmu_cache_on    @ untested
@       b       __arm6_mmu_cache_off
@       b       __armv3_mmu_cache_flush

        .word   0x00000000              @ old ARM ID
        .word   0x41007000              @ ARM7/710
        b       __arm7_mmu_cache_off
        b       __arm7_mmu_cache_off

        .word   0x41807200              @ ARM720T (writethrough)
        b       __armv4_mmu_cache_on
        b       __armv4_mmu_cache_off

        .word   0x41007400              @ ARM74x
        b       __armv3_mpu_cache_on
        b       __armv3_mpu_cache_off
        b       __armv3_mpu_cache_flush

        .word   0x41009400              @ ARM94x
        b       __armv4_mpu_cache_on
        b       __armv4_mpu_cache_off
        b       __armv4_mpu_cache_flush

        .word   0x00007000              @ ARM7 IDs
        @ Everything from here on will be the new ID system.

        .word   0x4401a100              @ sa110 / sa1100
        b       __armv4_mmu_cache_on
        b       __armv4_mmu_cache_off
        b       __armv4_mmu_cache_flush

        .word   0x6901b110              @ sa1110
        b       __armv4_mmu_cache_on
        b       __armv4_mmu_cache_off
        b       __armv4_mmu_cache_flush
        .word   0xff0ffff0              @ PXA935
        b       __armv4_mmu_cache_on
        b       __armv4_mmu_cache_off
        b       __armv4_mmu_cache_flush

        .word   0x56158000              @ PXA168
        b       __armv4_mmu_cache_on
        b       __armv4_mmu_cache_off
        b       __armv5tej_mmu_cache_flush
        .word   0x56050000              @ Feroceon
        b       __armv4_mmu_cache_on
        b       __armv4_mmu_cache_off
        b       __armv5tej_mmu_cache_flush

#ifdef CONFIG_CPU_FEROCEON_OLD_ID
        /* this conflicts with the standard ARMv5TE entry */
        .long   0x41009260              @ Old Feroceon
        b       __armv4_mmu_cache_on
        b       __armv4_mmu_cache_off
        b       __armv5tej_mmu_cache_flush
#endif

        .word   0x66015261              @ FA526
        b       __fa526_cache_on
        b       __armv4_mmu_cache_off
        b       __fa526_cache_flush
        @ These match on the architecture ID

        .word   0x00020000              @ ARMv4T
        b       __armv4_mmu_cache_on
        b       __armv4_mmu_cache_off
        b       __armv4_mmu_cache_flush

        .word   0x00050000              @ ARMv5TE
        b       __armv4_mmu_cache_on
        b       __armv4_mmu_cache_off
        b       __armv4_mmu_cache_flush

        .word   0x00060000              @ ARMv5TEJ
        b       __armv4_mmu_cache_on
        b       __armv4_mmu_cache_off
        b       __armv5tej_mmu_cache_flush

        .word   0x0007b000              @ ARMv6
        b       __armv4_mmu_cache_on
        b       __armv4_mmu_cache_off
        b       __armv6_mmu_cache_flush

        .word   0x000f0000              @ new CPU Id
        b       __armv7_mmu_cache_on
        b       __armv7_mmu_cache_off
        b       __armv7_mmu_cache_flush

        .word   0                       @ unrecognised type
        .size   proc_types, . - proc_types
/*
 * Turn off the Cache and MMU.  ARMv3 does not support
 * reading the control register, but ARMv4 does.
 *
 * On entry, r6 = processor ID
 * On exit,  r0, r1, r2, r3, r12 corrupted
 * This routine must preserve: r4, r6, r7
 */
cache_off:      mov     r3, #12         @ cache_off function
        b       call_cache_fn
__armv4_mpu_cache_off:
        mrc     p15, 0, r0, c1, c0
        bic     r0, r0, #0x000d
        mcr     p15, 0, r0, c1, c0      @ turn MPU and cache off
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4  @ drain write buffer
        mcr     p15, 0, r0, c7, c6, 0   @ flush D-Cache
        mcr     p15, 0, r0, c7, c5, 0   @ flush I-Cache
        mov     pc, lr
__armv3_mpu_cache_off:
        mrc     p15, 0, r0, c1, c0
        bic     r0, r0, #0x000d
        mcr     p15, 0, r0, c1, c0, 0   @ turn MPU and cache off
        mov     r0, #0
        mcr     p15, 0, r0, c7, c0, 0   @ invalidate whole cache v3
        mov     pc, lr
__armv4_mmu_cache_off:
        mrc     p15, 0, r0, c1, c0
        bic     r0, r0, #0x000d
        mcr     p15, 0, r0, c1, c0      @ turn MMU and cache off
        mov     r0, #0
        mcr     p15, 0, r0, c7, c7      @ invalidate whole cache v4
        mcr     p15, 0, r0, c8, c7      @ invalidate whole TLB v4
        mov     pc, lr
__armv7_mmu_cache_off:
        mrc     p15, 0, r0, c1, c0
        bic     r0, r0, #0x000d
        mcr     p15, 0, r0, c1, c0      @ turn MMU and cache off
        mov     r12, lr
        bl      __armv7_mmu_cache_flush
        mov     r0, #0
        mcr     p15, 0, r0, c8, c7, 0   @ invalidate whole TLB
        mcr     p15, 0, r0, c7, c5, 6   @ invalidate BTC
        mcr     p15, 0, r0, c7, c10, 4  @ DSB
        mcr     p15, 0, r0, c7, c5, 4   @ ISB
        mov     pc, r12
__arm6_mmu_cache_off:
        mov     r0, #0x00000030         @ ARM6 control reg.
        b       __armv3_mmu_cache_off

__arm7_mmu_cache_off:
        mov     r0, #0x00000070         @ ARM7 control reg.
        b       __armv3_mmu_cache_off
__armv3_mmu_cache_off:
        mcr     p15, 0, r0, c1, c0, 0   @ turn MMU and cache off
        mov     r0, #0
        mcr     p15, 0, r0, c7, c0, 0   @ invalidate whole cache v3
        mcr     p15, 0, r0, c5, c0, 0   @ invalidate whole TLB v3
        mov     pc, lr
/*
 * Clean and flush the cache to maintain consistency.
 *
 * On exit: r1, r2, r3, r11, r12 corrupted.
 * This routine must preserve: r0, r4, r5, r6, r7.
 */
__armv4_mpu_cache_flush:
        mov     r2, #1
        mov     r3, #0
        mcr     p15, 0, ip, c7, c6, 0   @ invalidate D cache
        mov     r1, #7 << 5             @ 8 segments
1:      orr     r3, r1, #63 << 26       @ 64 entries
2:      mcr     p15, 0, r3, c7, c14, 2  @ clean & invalidate D index
        subs    r3, r3, #1 << 26
        bcs     2b                      @ entries 63 to 0
        subs    r1, r1, #1 << 5
        bcs     1b                      @ segments 7 to 0

        teq     r2, #0
        mcrne   p15, 0, ip, c7, c5, 0   @ invalidate I cache
        mcr     p15, 0, ip, c7, c10, 4  @ drain WB
        mov     pc, lr
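/*
 * The nested loop above cleans the D cache by index: for each of
 * the 8 segments (bits 7..5) it walks entries 63..0 (bits 31..26).
 * Illustrative C:
 *
 *	for (int seg = 7; seg >= 0; seg--)
 *		for (int ent = 63; ent >= 0; ent--)
 *			clean_invalidate_dindex((u32)ent << 26 | seg << 5);
 */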
__fa526_cache_flush:
        mov     r1, #0
        mcr     p15, 0, r1, c7, c14, 0  @ clean and invalidate D cache
        mcr     p15, 0, r1, c7, c5, 0   @ flush I cache
        mcr     p15, 0, r1, c7, c10, 4  @ drain WB
        mov     pc, lr
__armv6_mmu_cache_flush:
        mov     r1, #0
        mcr     p15, 0, r1, c7, c14, 0  @ clean+invalidate D
        mcr     p15, 0, r1, c7, c5, 0   @ invalidate I+BTB
        mcr     p15, 0, r1, c7, c15, 0  @ clean+invalidate unified
        mcr     p15, 0, r1, c7, c10, 4  @ drain WB
        mov     pc, lr
__armv7_mmu_cache_flush:
        mrc     p15, 0, r10, c0, c1, 5  @ read ID_MMFR1
        tst     r10, #0xf << 16         @ hierarchical cache (ARMv7)
        mov     r10, #0
        beq     hierarchical
        mcr     p15, 0, r10, c7, c14, 0 @ clean+invalidate D
        b       iflush
hierarchical:
        mcr     p15, 0, r10, c7, c10, 5 @ DMB
        stmfd   sp!, {r0-r5, r7, r9, r11}
        mrc     p15, 1, r0, c0, c0, 1   @ read clidr
        ands    r3, r0, #0x7000000      @ extract loc from clidr
        mov     r3, r3, lsr #23         @ shift loc field down (loc * 2)
        beq     finished                @ if loc is 0, then no need to clean
        mov     r10, #0                 @ start clean at cache level 0
loop1:
        add     r2, r10, r10, lsr #1    @ work out 3x current cache level
        mov     r1, r0, lsr r2          @ extract cache type bits from clidr
        and     r1, r1, #7              @ mask off the bits for current cache only
        cmp     r1, #2                  @ see what cache we have at this level
        blt     skip                    @ skip if no cache, or just i-cache
        mcr     p15, 2, r10, c0, c0, 0  @ select current cache level in cssr
        mcr     p15, 0, r10, c7, c5, 4  @ isb to sync the new cssr & csidr
        mrc     p15, 1, r1, c0, c0, 0   @ read the new csidr
        and     r2, r1, #7              @ extract the length of the cache lines
        add     r2, r2, #4              @ add 4 (line length offset)
        ldr     r4, =0x3ff
        ands    r4, r4, r1, lsr #3      @ find maximum way number (ways - 1)
        clz     r5, r4                  @ find bit position of way size increment
        ldr     r7, =0x7fff
        ands    r7, r7, r1, lsr #13     @ extract maximum set number (sets - 1)
loop2:
        mov     r9, r4                  @ create working copy of max way size
loop3:
        orr     r11, r10, r9, lsl r5    @ factor way and cache number into r11
        orr     r11, r11, r7, lsl r2    @ factor index number into r11
        mcr     p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
        subs    r9, r9, #1              @ decrement the way
        bge     loop3
        subs    r7, r7, #1              @ decrement the index
        bge     loop2
skip:
        add     r10, r10, #2            @ increment cache number
        cmp     r3, r10
        bgt     loop1
finished:
        ldmfd   sp!, {r0-r5, r7, r9, r11}
        mov     r10, #0                 @ switch back to cache level 0
        mcr     p15, 2, r10, c0, c0, 0  @ select current cache level in cssr
iflush:
        mcr     p15, 0, r10, c7, c10, 4 @ DSB
        mcr     p15, 0, r10, c7, c5, 0  @ invalidate I+BTB
        mcr     p15, 0, r10, c7, c10, 4 @ DSB
        mcr     p15, 0, r10, c7, c5, 4  @ ISB
        mov     pc, lr
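/*
 * The hierarchical path above is the standard ARMv7 set/way walk.
 * Illustrative C (field layout per CLIDR/CCSIDR; clz() counts
 * leading zeros, DCCISW is the c7, c14, 2 operation):
 *
 *	for (level = 0; level < LoC; level++) {
 *		if (clidr_ctype(level) < 2)	// no cache or I-only
 *			continue;
 *		select_cache_level(level << 1);	// CSSELR write + ISB
 *		u32 ccsidr = read_ccsidr();
 *		u32 line = (ccsidr & 7) + 4;		// log2(line bytes)
 *		u32 wmax = (ccsidr >> 3) & 0x3ff;	// ways - 1
 *		u32 smax = (ccsidr >> 13) & 0x7fff;	// sets - 1
 *		for (int s = smax; s >= 0; s--)
 *			for (int w = wmax; w >= 0; w--)
 *				DCCISW(w << clz(wmax) | s << line | level << 1);
 *	}
 */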
__armv5tej_mmu_cache_flush:
1:      mrc     p15, 0, r15, c7, c14, 3 @ test,clean,invalidate D cache
        bne     1b
        mcr     p15, 0, r0, c7, c5, 0   @ flush I cache
        mcr     p15, 0, r0, c7, c10, 4  @ drain WB
        mov     pc, lr
__armv4_mmu_cache_flush:
        mov     r2, #64*1024            @ default: 32K dcache size (*2)
        mov     r11, #32                @ default: 32 byte line size
        mrc     p15, 0, r3, c0, c0, 1   @ read cache type
        teq     r3, r6                  @ cache ID register present?
        beq     no_cache_id
        mov     r1, r3, lsr #18
        and     r1, r1, #7
        mov     r2, #1024
        mov     r2, r2, lsl r1          @ base dcache size *2
        tst     r3, #1 << 14            @ test M bit
        addne   r2, r2, r2, lsr #1      @ +1/2 size if M == 1
        mov     r3, r3, lsr #12
        and     r3, r3, #3
        mov     r11, #8
        mov     r11, r11, lsl r3        @ cache line size in bytes
no_cache_id:
        bic     r1, pc, #63             @ align to longest cache line
        add     r2, r1, r2
1:      ldr     r3, [r1], r11           @ s/w flush D cache
        teq     r1, r2
        bne     1b

        mcr     p15, 0, r1, c7, c5, 0   @ flush I cache
        mcr     p15, 0, r1, c7, c6, 0   @ flush D cache
        mcr     p15, 0, r1, c7, c10, 4  @ drain WB
        mov     pc, lr
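/*
 * Illustrative C sketch of the cache type register decode above
 * (names are descriptive only):
 *
 *	dsize2x = 1024 << ((ctype >> 18) & 7);	// twice the D cache size
 *	if (ctype & (1 << 14))			// M bit
 *		dsize2x += dsize2x / 2;
 *	line = 8 << ((ctype >> 12) & 3);	// line length in bytes
 *
 * Reading twice the cache size of sequential memory then pushes
 * every dirty line out of a read-allocate writeback D cache.
 */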
__armv3_mmu_cache_flush:
__armv3_mpu_cache_flush:
        mcr     p15, 0, r0, c7, c0, 0   @ invalidate whole cache v3
        mov     pc, lr
/*
 * Various debugging routines for printing hex characters and
 * memory, which again must be relocatable.
 */
        .align  2
        .type   phexbuf,#object
phexbuf:        .space  12
        .size   phexbuf, . - phexbuf
phex:   adr     r3, phexbuf

2:      mov     r0, r11, lsl #2

        ldr     r0, [r12, r11, lsl #2]
        .section ".stack", "w"
user_stack:     .space  4096