2 // assembly portion of the IA64 MCA handling
4 // Mods by cfleck to integrate into kernel build
5 // 00/03/15 davidm Added various stop bits to get a clean compile
7 // 00/03/29 cfleck Added code to save INIT handoff state in pt_regs format, switch to temp
8 // kstack, switch modes, jump to C INIT handler
10 // 02/01/04 J.Hall <jenna.s.hall@intel.com>
11 // Before entering virtual mode code:
12 // 1. Check for TLB CPU error
13 // 2. Restore current thread pointer to kr6
14 // 3. Move stack ptr 16 bytes to conform to C calling convention
16 #include <linux/config.h>
18 #include <asm/asmmacro.h>
19 #include <asm/pgtable.h>
20 #include <asm/processor.h>
21 #include <asm/mca_asm.h>
25 * When we get a machine check, the kernel stack pointer is no longer
26 * valid, so we need to set a new stack pointer.
28 #define MINSTATE_PHYS /* Make sure stack access is physical for MINSTATE */
31 * Needed for ia64_sal call
33 #define SAL_GET_STATE_INFO 0x01000001
36 * Needed for return context to SAL
// IA64_MCA_SAME_CONTEXT (0) asks SAL_CHECK to resume the interrupted
// context; IA64_MCA_COLD_BOOT (-2) requests a cold boot when the error is
// unrecoverable.  NOTE(review): values assumed to match the SAL 3.0 spec
// return-status encoding -- confirm against the spec.
38 #define IA64_MCA_SAME_CONTEXT 0x0
39 #define IA64_MCA_COLD_BOOT -2
// Saves the registers SAL hands to the OS at MCA/INIT entry into the
// ia64_sal_to_os_handoff_state structure, using its physical address
// since we may still be running in physical mode.  _tmp is a scratch
// register; the 0x08 post-increments walk the structure field by field,
// so the store order must match ia64_mca_sal_to_os_state_t in asm/mca.h
// (see the NOTE in ia64_os_mca_dispatch).
// No comments inside the macro body: each line must end in '\'.
44 * SAL_TO_OS_MCA_HANDOFF_STATE (SAL 3.0 spec)
46 * 2. GR8 = PAL_PROC physical address
47 * 3. GR9 = SAL_PROC physical address
48 * 4. GR10 = SAL GP (physical)
49 * 5. GR11 = Rendez state
50 * 6. GR12 = Return address to location within SAL_CHECK
52 #define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp) \
53 movl _tmp=ia64_sal_to_os_handoff_state;; \
54 DATA_VA_TO_PA(_tmp);; \
55 st8 [_tmp]=r1,0x08;; \
56 st8 [_tmp]=r8,0x08;; \
57 st8 [_tmp]=r9,0x08;; \
58 st8 [_tmp]=r10,0x08;; \
59 st8 [_tmp]=r11,0x08;; \
// Builds the OS -> SAL return handoff.  Predicates p6/p7 must be set
// before the macro is used (p6 = TLB error path, never entered virtual
// mode; p7 = normal path).  NOTE(review): the compare that sets p6/p7 is
// not visible in this chunk -- it presumably tests ia64_tlb_functional,
// which is loaded just before the macro is invoked; confirm.
// p6: synthesize a cold-boot/same-context reply from the original SAL
//     handoff area; p7: read the reply the C handler stored in
//     ia64_os_to_sal_handoff_state.  On exit _tmp points at the SAL
//     return-address slot.
63 * OS_MCA_TO_SAL_HANDOFF_STATE (SAL 3.0 spec)
64 * (p6) is executed if we never entered virtual mode (TLB error)
65 * (p7) is executed if we entered virtual mode as expected (normal case)
66 * 1. GR8 = OS_MCA return status
67 * 2. GR9 = SAL GP (physical)
68 * 3. GR10 = 0/1 returning same/new context
69 * 4. GR22 = New min state save area pointer
70 * returns ptr to SAL rtn save loc in _tmp
72 #define OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(_tmp) \
73 (p6) movl _tmp=ia64_sal_to_os_handoff_state;; \
74 (p7) movl _tmp=ia64_os_to_sal_handoff_state;; \
75 DATA_VA_TO_PA(_tmp);; \
76 (p6) movl r8=IA64_MCA_COLD_BOOT; \
77 (p6) movl r10=IA64_MCA_SAME_CONTEXT; \
78 (p6) add _tmp=0x18,_tmp;; \
79 (p6) ld8 r9=[_tmp],0x10; \
80 (p6) movl r22=ia64_mca_min_state_save_info;; \
81 (p7) ld8 r8=[_tmp],0x08;; \
82 (p7) ld8 r9=[_tmp],0x08;; \
83 (p7) ld8 r10=[_tmp],0x08;; \
84 (p7) ld8 r22=[_tmp],0x08;; \
86 // now _tmp is pointing to SAL rtn save location
// Symbols exported to the C side of MCA handling (handlers, save areas,
// and the private MCA stack/backing store).  NOTE(review): the data
// symbols themselves are defined outside this chunk -- confirm their
// sizes match the offsets/increments used below (e.g. the 8K stack).
89 .global ia64_os_mca_dispatch
90 .global ia64_os_mca_dispatch_end
91 .global ia64_sal_to_os_handoff_state
92 .global ia64_os_to_sal_handoff_state
93 .global ia64_mca_proc_state_dump
94 .global ia64_mca_stack
95 .global ia64_mca_stackframe
96 .global ia64_mca_bspstore
97 .global ia64_init_stack
98 .global ia64_mca_sal_data_area
99 .global ia64_tlb_functional
100 .global ia64_mca_min_state_save_info
// MAIN: ia64_os_mca_dispatch
// SAL_CHECK branches here (physical mode) on an uncorrected machine
// check.  Flow: save the SAL handoff registers, dump processor state,
// switch to the private MCA stack/backing store, check whether the MCA
// was a TLB error, run the C handler in virtual mode when the TLB is
// still usable, then restore state and hand control back to SAL.
105 ia64_os_mca_dispatch:
107 #if defined(MCA_TEST)
108 // Pretend that we are in interrupt context
110 dep r2=0, r2, PSR_IC, 2;
112 #endif /* #if defined(MCA_TEST) */
114 // Save the SAL to OS MCA handoff state as defined
116 // NOTE : The order in which the state gets saved
117 // is dependent on the way the C-structure
118 // for ia64_mca_sal_to_os_state_t has been
119 // defined in include/asm/mca.h
120 SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
123 // LOG PROCESSOR STATE INFO FROM HERE ON..
125 br ia64_os_mca_proc_state_dump;; // returns to ia64_os_mca_done_dump
127 ia64_os_mca_done_dump:
129 // Setup new stack frame for OS_MCA handling
130 movl r2=ia64_mca_bspstore;; // local bspstore area location in r2
132 movl r3=ia64_mca_stackframe;; // save stack frame to memory in r3
134 rse_switch_context(r6,r3,r2);; // RSC management in this new context
135 movl r12=ia64_mca_stack
136 mov r2=8*1024;; // stack size must be same as C array
137 add r12=r2,r12;; // stack base @ bottom of array (stack grows down)
138 adds r12=-16,r12;; // allow 16 bytes of scratch
139 // (C calling convention)
142 // Check to see if the MCA resulted from a TLB error
143 begin_tlb_error_check:
144 br ia64_os_mca_tlb_error_check;; // returns to done_tlb_error_check
146 done_tlb_error_check:
148 // If TLB is functional, enter virtual mode from physical mode
149 VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4)
150 ia64_os_mca_virtual_begin:
153 movl r2=ia64_mca_ucmc_handler;;
// NOTE(review): b6 is presumably loaded from r2 (ia64_mca_ucmc_handler)
// in instructions not visible in this chunk -- confirm.
155 br.call.sptk.many b0=b6;; // call the C MCA handler
157 // Revert back to physical mode before going back to SAL
158 PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4)
159 ia64_os_mca_virtual_end:
161 #if defined(MCA_TEST)
162 // Pretend that we are in interrupt context
164 dep r2=0, r2, PSR_IC, 2;;
166 #endif /* #if defined(MCA_TEST) */
168 // restore the original stack frame here
169 movl r2=ia64_mca_stackframe // restore stack frame from memory at r2
174 rse_return_context(r4,r3,r2) // switch from interrupt context for RSE
176 // let us restore all the registers from our PSI structure
179 begin_os_mca_restore:
180 br ia64_os_mca_proc_state_restore;; // returns to ia64_os_mca_done_restore
182 ia64_os_mca_done_restore:
// ia64_tlb_functional drives the p6/p7 predicates consumed by the
// handoff macro; the compare that sets them is not visible in this chunk.
183 movl r3=ia64_tlb_functional;;
187 OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(r2);;
188 // branch back to SALE_CHECK
// NOTE(review): r3 appears to be reloaded with the SAL return address in
// lines not shown here (it last held &ia64_tlb_functional) -- confirm.
190 mov b0=r3;; // SAL_CHECK return address
193 ia64_os_mca_dispatch_end:
194 //EndMain//////////////////////////////////////////////////////////////////////
199 // ia64_os_mca_proc_state_dump()
203 // This stub dumps the processor state during MCHK to a data area
// Entered via a plain branch (not br.call) from ia64_os_mca_dispatch and
// exits via a plain branch to ia64_os_mca_done_dump.  Runs in physical
// mode; r2 walks the ia64_mca_proc_state_dump area with post-increment
// stores, so every increment below is part of the area's layout and must
// match ia64_os_mca_proc_state_restore exactly.
207 ia64_os_mca_proc_state_dump:
208 // Save bank 1 GRs 16-31 which will be used by c-language code when we switch
209 // to virtual addressing mode.
210 movl r2=ia64_mca_proc_state_dump;; // Os state dump area
211 DATA_VA_TO_PA(r2) // convert to physical address
214 mov r5=ar.unat // save ar.unat; restored after the spills below
216 // save banked GRs 16-31 along with NaT bits
218 st8.spill [r2]=r16,8;;
219 st8.spill [r2]=r17,8;;
220 st8.spill [r2]=r18,8;;
221 st8.spill [r2]=r19,8;;
222 st8.spill [r2]=r20,8;;
223 st8.spill [r2]=r21,8;;
224 st8.spill [r2]=r22,8;;
225 st8.spill [r2]=r23,8;;
226 st8.spill [r2]=r24,8;;
227 st8.spill [r2]=r25,8;;
228 st8.spill [r2]=r26,8;;
229 st8.spill [r2]=r27,8;;
230 st8.spill [r2]=r28,8;;
231 st8.spill [r2]=r29,8;;
232 st8.spill [r2]=r30,8;;
233 st8.spill [r2]=r31,8;;
// NOTE(review): r4 presumably holds the UNAT bits collected by the
// spills above (moved from ar.unat in lines not shown) -- confirm.
236 st8 [r2]=r4,8 // save User NaT bits for r16-r31
237 mov ar.unat=r5 // restore original unat
241 add r4=8,r2 // duplicate r2 in r4
242 add r6=2*8,r2 // duplicate r2 in r6
265 add r4=8,r2 // duplicate r2 in r4
266 add r6=2*8,r2 // duplicate r2 in r6
270 mov r7=cr2;; // cr.iva
274 st8 [r6]=r7,3*8;; // 48 byte increments
276 mov r3=cr8;; // cr.pta
277 st8 [r2]=r3,8*8;; // 64 byte increments
279 // if PSR.ic=1, reading interruption registers causes an illegal operation fault
281 tbit.nz.unc p6,p0=r3,PSR_IC;; // PSI Valid Log bit pos. test
282 (p6) st8 [r2]=r0,9*8+160 // skip intr regs: bump r2 by 232 bytes (9*8+160)
283 begin_skip_intr_regs:
284 (p6) br SkipIntrRegs;;
286 add r4=8,r2 // duplicate r2 in r4
287 add r6=2*8,r2 // duplicate r2 in r6
289 mov r3=cr16 // cr.ipsr
290 mov r5=cr17 // cr.isr
291 mov r7=r0;; // cr.ida => cr18 (reserved)
296 mov r3=cr19 // cr.iip
297 mov r5=cr20 // cr.idtr
298 mov r7=cr21;; // cr.iitr
303 mov r3=cr22 // cr.iipa
304 mov r5=cr23 // cr.ifs
305 mov r7=cr24;; // cr.iim
310 mov r3=cr25;; // cr.iha
311 st8 [r2]=r3,160;; // 160 byte increment
314 st8 [r2]=r0,168 // another 168 byte increment
316 mov r3=cr66;; // cr.lid
317 st8 [r2]=r3,40 // 40 byte increment
319 mov r3=cr71;; // cr.ivr
322 mov r3=cr72;; // cr.tpr
323 st8 [r2]=r3,24 // 24 byte increment
325 mov r3=r0;; // cr.eoi => cr75
326 st8 [r2]=r3,168 // 168 byte inc.
328 mov r3=r0;; // cr.irr0 => cr96
329 st8 [r2]=r3,16 // 16 byte inc.
331 mov r3=r0;; // cr.irr1 => cr98
332 st8 [r2]=r3,16 // 16 byte inc.
334 mov r3=r0;; // cr.irr2 => cr100
335 st8 [r2]=r3,16 // 16 byte inc
337 mov r3=r0;; // cr.irr3 => cr102
338 st8 [r2]=r3,16 // 16b inc.
340 mov r3=r0;; // cr.itv => cr114
341 st8 [r2]=r3,16 // 16 byte inc.
343 mov r3=r0;; // cr.pmv => cr116
346 mov r3=r0;; // cr.lrr0 => cr117
349 mov r3=r0;; // cr.lrr1 => cr118
352 mov r3=r0;; // cr.cmcv => cr119
357 add r4=8,r2 // duplicate r2 in r4
358 add r6=2*8,r2 // duplicate r2 in r6
362 mov r7=ar2;; // ar.kr2
369 mov r7=ar5;; // ar.kr5
376 mov r7=r0;; // ar.kr8
379 st8 [r6]=r7,10*8;; // increment by 80 bytes (10*8)
381 mov r3=ar16 // ar.rsc
382 mov ar16=r0 // put RSE in enforced lazy mode
383 mov r5=ar17 // ar.bsp
385 mov r7=ar18;; // ar.bspstore
390 mov r3=ar19;; // ar.rnat
391 st8 [r2]=r3,8*13 // increment by 13x8 bytes
393 mov r3=ar32;; // ar.ccv
396 mov r3=ar36;; // ar.unat
399 mov r3=ar40;; // ar.fpsr
402 mov r3=ar44;; // ar.itc
403 st8 [r2]=r3,160 // 160 byte increment
405 mov r3=ar64;; // ar.pfs
408 mov r3=ar65;; // ar.lc
411 mov r3=ar66;; // ar.ec
413 add r2=8*62,r2 // padding
// NOTE(review): cStRR loop label/counter set up in lines not shown --
// this loop dumps the region registers; confirm against the full file.
423 br.cloop.sptk.few cStRR
426 br ia64_os_mca_done_dump;;
428 //EndStub//////////////////////////////////////////////////////////////////////
433 // ia64_os_mca_proc_state_restore()
437 // This is a stub to restore the saved processor state during MCHK
// Mirror image of ia64_os_mca_proc_state_dump: walks the same dump-area
// layout (the increments must match the dump side exactly) and exits via
// a plain branch to ia64_os_mca_done_restore.  Read-only or reserved
// registers are loaded but deliberately not written back (commented-out
// mov lines kept to document the layout).
441 ia64_os_mca_proc_state_restore:
443 // Restore bank1 GR16-31
444 movl r2=ia64_mca_proc_state_dump // Convert virtual address
445 ;; // of OS state dump area
446 DATA_VA_TO_PA(r2) // to physical address
448 restore_GRs: // restore bank-1 GRs 16-31
450 add r3=16*8,r2;; // to get to NaT of GR 16-31
452 mov ar.unat=r3;; // first restore NaT
454 ld8.fill r16=[r2],8;;
455 ld8.fill r17=[r2],8;;
456 ld8.fill r18=[r2],8;;
457 ld8.fill r19=[r2],8;;
458 ld8.fill r20=[r2],8;;
459 ld8.fill r21=[r2],8;;
460 ld8.fill r22=[r2],8;;
461 ld8.fill r23=[r2],8;;
462 ld8.fill r24=[r2],8;;
463 ld8.fill r25=[r2],8;;
464 ld8.fill r26=[r2],8;;
465 ld8.fill r27=[r2],8;;
466 ld8.fill r28=[r2],8;;
467 ld8.fill r29=[r2],8;;
468 ld8.fill r30=[r2],8;;
469 ld8.fill r31=[r2],8;;
471 ld8 r3=[r2],8;; // increment to skip NaT
475 add r4=8,r2 // duplicate r2 in r4
476 add r6=2*8,r2;; // duplicate r2 in r6
498 add r4=8,r2 // duplicate r2 in r4
499 add r6=2*8,r2;; // duplicate r2 in r6
503 ld8 r7=[r6],3*8;; // 48 byte increments
506 mov cr2=r7;; // cr.iva
508 ld8 r3=[r2],8*8;; // 64 byte increments
509 // mov cr8=r3 // cr.pta
512 // if PSR.ic=1, reading interruption registers causes an illegal operation fault
514 tbit.nz.unc p6,p0=r3,PSR_IC;; // PSI Valid Log bit pos. test
515 (p6) st8 [r2]=r0,9*8+160 // skip intr regs: bump r2 by 232 bytes (9*8+160)
517 begin_rskip_intr_regs:
518 (p6) br rSkipIntrRegs;;
520 add r4=8,r2 // duplicate r2 in r4
521 add r6=2*8,r2;; // duplicate r2 in r6
526 mov cr16=r3 // cr.ipsr
527 mov cr17=r5 // cr.isr is read only
528 // mov cr18=r7;; // cr.ida (reserved - don't restore)
533 mov cr19=r3 // cr.iip
534 mov cr20=r5 // cr.idtr
535 mov cr21=r7;; // cr.iitr
540 mov cr22=r3 // cr.iipa
541 mov cr23=r5 // cr.ifs
542 mov cr24=r7 // cr.iim
544 ld8 r3=[r2],160;; // 160 byte increment
545 mov cr25=r3 // cr.iha
548 ld8 r3=[r2],168;; // another 168 byte inc.
550 ld8 r3=[r2],40;; // 40 byte increment
551 mov cr66=r3 // cr.lid
554 // mov cr71=r3 // cr.ivr is read only
555 ld8 r3=[r2],24;; // 24 byte increment
556 mov cr72=r3 // cr.tpr
558 ld8 r3=[r2],168;; // 168 byte inc.
559 // mov cr75=r3 // cr.eoi
561 ld8 r3=[r2],16;; // 16 byte inc.
562 // mov cr96=r3 // cr.irr0 is read only
564 ld8 r3=[r2],16;; // 16 byte inc.
565 // mov cr98=r3 // cr.irr1 is read only
567 ld8 r3=[r2],16;; // 16 byte inc
568 // mov cr100=r3 // cr.irr2 is read only
570 ld8 r3=[r2],16;; // 16b inc.
571 // mov cr102=r3 // cr.irr3 is read only
573 ld8 r3=[r2],16;; // 16 byte inc.
574 // mov cr114=r3 // cr.itv
577 // mov cr116=r3 // cr.pmv
579 // mov cr117=r3 // cr.lrr0
581 // mov cr118=r3 // cr.lrr1
583 // mov cr119=r3 // cr.cmcv
586 add r4=8,r2 // duplicate r2 in r4
587 add r6=2*8,r2;; // duplicate r2 in r6
594 mov ar2=r7;; // ar.kr2
601 mov ar5=r7;; // ar.kr5
608 // mov ar8=r6 // ar.kr8
614 // mov ar16=r3 // ar.rsc
615 // mov ar17=r5 // ar.bsp is read only
616 mov ar16=r0 // make sure that RSE is in enforced lazy mode
618 mov ar18=r7;; // ar.bspstore
621 mov ar19=r9 // ar.rnat
623 mov ar16=r3 // ar.rsc (restore saved mode last, after bspstore/rnat)
625 mov ar32=r3 // ar.ccv
628 mov ar36=r3 // ar.unat
631 mov ar40=r3 // ar.fpsr
633 ld8 r3=[r2],160;; // 160 byte increment
634 // mov ar44=r3 // ar.itc
637 mov ar64=r3 // ar.pfs
644 add r2=8*62,r2;; // padding
652 // mov rr[r4]=r3 // what are its access privileges?
// NOTE(review): cStRRr loop label/counter set up in lines not shown --
// region registers are read back but apparently not written; confirm.
654 br.cloop.sptk.few cStRRr
659 br ia64_os_mca_done_restore;;
661 //EndStub//////////////////////////////////////////////////////////////////////
665 // ia64_os_mca_tlb_error_check()
669 // This stub checks to see if the MCA resulted from a TLB error
// Calls SAL_GET_STATE_INFO through the SAL procedure descriptor while
// still in physical mode, then inspects the processor error record's
// valid field to decide whether the TLB is trustworthy.  On failure it
// clears ia64_tlb_functional and skips virtual-mode logging entirely by
// branching straight to ia64_os_mca_virtual_end.
673 ia64_os_mca_tlb_error_check:
675 // Retrieve sal data structure for uncorrected MCA
677 // Make the ia64_sal_get_state_info() call
678 movl r4=ia64_mca_sal_data_area;;
// NOTE(review): r7 is presumably loaded with &ia64_sal in lines not
// shown between here and the first DATA_VA_TO_PA(r7) -- confirm.
681 DATA_VA_TO_PA(r4) // convert to physical address
682 DATA_VA_TO_PA(r7);; // convert to physical address
683 ld8 r7=[r7] // get addr of pdesc from ia64_sal
684 movl r3=SAL_GET_STATE_INFO;;
685 DATA_VA_TO_PA(r7);; // convert to physical address
686 ld8 r8=[r7],8;; // get pdesc function pointer
687 dep r8=0,r8,61,3;; // convert SAL VA to PA
688 ld8 r1=[r7];; // set new (ia64_sal) gp
689 dep r1=0,r1,61,3;; // convert SAL VA to PA
692 alloc r5=ar.pfs,8,0,8,0;; // allocate stack frame for SAL call
693 mov out0=r3 // which SAL proc to call
694 mov out1=r0 // error type == MCA
695 mov out2=r0 // null arg
696 mov out3=r4 // data copy area
697 mov out4=r0 // null arg
698 mov out5=r0 // null arg
699 mov out6=r0 // null arg
700 mov out7=r0;; // null arg
// NOTE(review): b6 (call target, from r8) and r6 (saved gp) are set up
// in lines not visible in this chunk -- confirm.
702 br.call.sptk.few b0=b6;;
704 mov r1=r6 // restore gp
705 mov ar.pfs=r5;; // restore ar.pfs
707 movl r6=ia64_tlb_functional;;
708 DATA_VA_TO_PA(r6) // needed later
710 cmp.eq p6,p7=r0,r8;; // check SAL call return status (0 = success)
711 (p7) st8 [r6]=r0 // clear tlb_functional flag
712 (p7) br tlb_failure // error; return to SAL
714 // examine processor error log for type of error
715 add r4=40+24,r4;; // parse past record header (length=40)
716 // and section header (length=24)
717 ld4 r4=[r4] // get valid field of processor log
719 and r5=r4,r5;; // read bits 8-11 of valid field
720 // to determine if we have a TLB error
723 // if no TLB failure, set tlb_functional flag
728 // if no TLB failure, continue with normal virtual mode logging
729 (p6) br done_tlb_error_check
730 // else no point in entering virtual mode for logging
732 br ia64_os_mca_virtual_end
734 //EndStub//////////////////////////////////////////////////////////////////////
737 // ok, the issue here is that we need to save state information so
738 // it can be usable by the kernel debugger and show regs routines.
739 // In order to do this, our best bet is to save the current state (plus
740 // the state information obtained from the MIN_STATE_AREA) into a pt_regs
741 // format. This way we can pass it on in a usable format.
745 // SAL to OS entry point for INIT on the monarch processor
746 // This has been defined for registration purposes with SAL
747 // as a part of ia64_mca_init.
749 // When we get here, the following registers have been
750 // set by the SAL for our use
752 // 1. GR1 = OS INIT GP
753 // 2. GR8 = PAL_PROC physical address
754 // 3. GR9 = SAL_PROC physical address
755 // 4. GR10 = SAL GP (physical)
756 // 5. GR11 = Init Reason
757 // 0 = Received INIT for event other than crash dump switch
758 // 1 = Received wakeup at the end of an OS_MCA corrected machine check
759 // 2 = Received INIT due to CrashDump switch assertion
761 // 6. GR12 = Return address to location within SAL_INIT procedure
// INIT entry point for the monarch processor (registered with SAL by
// ia64_mca_init).  Saves the SAL handoff state, captures the current
// state in pt_regs/switch_stack format for the debugger/show_regs, does
// an rfi trampoline into virtual mode, and calls the C ia64_init_handler.
764 GLOBAL_ENTRY(ia64_monarch_init_handler)
766 // stash the information the SAL passed to os
767 SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
773 adds r3=8,r2 // set up second base pointer
777 // ok, enough should be saved at this point to be dangerous, and supply
778 // information for a dump
779 // We need to switch to Virtual mode before hitting the C functions.
781 movl r2=IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN
782 mov r3=psr // get the current psr, minimum enabled at this point
// rfi trampoline: iip = next virtual address, ipsr = psr with the
// translation/interruption bits above OR'd in.  NOTE(review): the
// IVirtual_Switch label and the rfi itself are in lines not shown.
786 movl r3=IVirtual_Switch
788 mov cr.iip=r3 // short return to set the appropriate bits
789 mov cr.ipsr=r2 // need to do an rfi to set appropriate bits
795 // We should now be running virtual
797 // Let's call the C handler to get the rest of the state info
799 alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
801 adds out0=16,sp // out0 = pointer to pt_regs
// NOTE(review): sp is presumably advanced past the pt_regs area in lines
// not shown between the two adds -- confirm out1 differs from out0.
804 adds out1=16,sp // out1 = pointer to switch_stack
806 br.call.sptk.many rp=ia64_init_handler
810 br.sptk return_from_init
811 END(ia64_monarch_init_handler)
814 // SAL to OS entry point for INIT on the slave processor
815 // This has been defined for registration purposes with SAL
816 // as a part of ia64_mca_init.
// NOTE(review): body is empty in this chunk -- slave processors appear
// to get no INIT handling here; confirm against the full file.
819 GLOBAL_ENTRY(ia64_slave_init_handler)
821 END(ia64_slave_init_handler)